summaryrefslogtreecommitdiff
path: root/src/third_party/wiredtiger/test
diff options
context:
space:
mode:
authorLuke Chen <luke.chen@mongodb.com>2021-05-19 15:49:07 +1000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2021-05-19 06:02:30 +0000
commit1d20af63ae5e95e0ed1219809c5af571de8e3ae3 (patch)
tree8b046a8403a959b712715fdfd296a38f0c4b1fb4 /src/third_party/wiredtiger/test
parentcc1640581318df61a5fedc5c7ddd1a91c13e0712 (diff)
downloadmongo-1d20af63ae5e95e0ed1219809c5af571de8e3ae3.tar.gz
Import wiredtiger: bae0c1c914bc0fa92f3775c08650b65663094034 from branch mongodb-4.4
ref: aadac22242..bae0c1c914 for: 4.4.7 WT-6403 Restore format non-timestamp transactional testing WT-6576 Fix the aborted on-disk prepared key WT-7106 Increase how often delta encoding is used for history store records WT-7204 Update cursor-backward walks key instantiation support WT-7234 Prefix-compressed keys and memory amplification WT-7296 Merge default configuration with supplied test configuration in test framework WT-7325 Created a script to generate a new test in the WT test framework WT-7381 Cache btree's ckptlist between checkpoints WT-7382 Refactor of database validation in the test framework WT-7407 test/format failure classifier WT-7411 Stats and counter to track prepared updates WT-7416 Imported table requires full copy between incremental backups WT-7446 Fix incorrect duration_seconds value in test framework WT-7486 Coverity explicit NULL dereferenced WT-7487 Coverity explicit NULL dereferenced WT-7497 Add flush component to object metadata WT-7499 Change WT_STORAGE_SOURCE.flush API and add flush_finish WT-7503 Change default compressor for WT HS to Zstandard WT-7506 Allow single and double quotes inside auto-formatted comments WT-7511 Add assert to ensure the history store page is pinned before search WT-7519 Fix flags field overflow in WT_DATA_HANDLE WT-7525 Add key order check right after history store insert WT-7537 Change local tier object suffix to .wtobj WT-7546 Coverity: Minor issues in CppSuite test harness
Diffstat (limited to 'src/third_party/wiredtiger/test')
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/configs/config_example_test_default.txt52
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_default.txt26
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_stress.txt13
-rwxr-xr-xsrc/third_party/wiredtiger/test/cppsuite/create_test.sh81
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/core/component.h2
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/core/configuration.h114
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/runtime_monitor.h4
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/test.h12
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/util/debug_utils.h1
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_model.h15
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_operation.h111
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/workload/thread_context.h18
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_tracking.h66
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_validation.h397
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/test_harness/workload_generator.h16
-rwxr-xr-xsrc/third_party/wiredtiger/test/cppsuite/tests/run.cxx2
-rw-r--r--src/third_party/wiredtiger/test/csuite/incr_backup/main.c7
-rw-r--r--src/third_party/wiredtiger/test/csuite/wt2719_reconfig/main.c5
-rwxr-xr-xsrc/third_party/wiredtiger/test/evergreen.yml4
-rw-r--r--src/third_party/wiredtiger/test/format/bulk.c17
-rw-r--r--src/third_party/wiredtiger/test/format/config.c171
-rw-r--r--src/third_party/wiredtiger/test/format/config.h34
-rw-r--r--src/third_party/wiredtiger/test/format/config_compat.c4
-rw-r--r--src/third_party/wiredtiger/test/format/config_compat.sed1
-rw-r--r--src/third_party/wiredtiger/test/format/format.h16
-rwxr-xr-xsrc/third_party/wiredtiger/test/format/format.sh33
-rw-r--r--src/third_party/wiredtiger/test/format/kv.c67
-rw-r--r--src/third_party/wiredtiger/test/format/ops.c230
-rw-r--r--src/third_party/wiredtiger/test/format/snap.c78
-rw-r--r--src/third_party/wiredtiger/test/format/util.c2
-rw-r--r--src/third_party/wiredtiger/test/format/wts.c26
-rw-r--r--src/third_party/wiredtiger/test/suite/test_backup22.py93
-rw-r--r--src/third_party/wiredtiger/test/suite/test_prepare15.py204
-rwxr-xr-xsrc/third_party/wiredtiger/test/suite/test_tiered04.py2
-rwxr-xr-xsrc/third_party/wiredtiger/test/suite/test_tiered06.py255
35 files changed, 1177 insertions, 1002 deletions
diff --git a/src/third_party/wiredtiger/test/cppsuite/configs/config_example_test_default.txt b/src/third_party/wiredtiger/test/cppsuite/configs/config_example_test_default.txt
index b46fed225eb..f5d5c916bdc 100644
--- a/src/third_party/wiredtiger/test/cppsuite/configs/config_example_test_default.txt
+++ b/src/third_party/wiredtiger/test/cppsuite/configs/config_example_test_default.txt
@@ -1,48 +1,4 @@
-# Same parameters as config_poc_test_default
-duration_seconds=10,
-cache_size_mb=1000,
-enable_logging=true,
-runtime_monitor=
-(
- op_count=3,
- interval=s,
- stat_cache_size=
- (
- enabled=true,
- limit=100
- )
-),
-timestamp_manager=
-(
- enabled=true,
- oldest_lag=1,
- stable_lag=1
-),
-workload_generator=
-(
- collection_count=2,
- key_count=5,
- key_size=1,
- ops_per_transaction=
- (
- min=5,
- max=50
- ),
- read_threads=1,
- update_threads=1,
- value_size=10,
- update_config=
- (
- op_count=1,
- interval=s
- ),
- insert_config=
- (
- op_count=1,
- interval=s
- )
-),
-workload_tracking=
-(
- enabled=true
-)
+# Example configuration file, as default are added automatically only non default configurations
+# need to be defined.
+duration_seconds=5,
+cache_size_mb=250
diff --git a/src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_default.txt b/src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_default.txt
index c677142234d..6caaa4d4456 100644
--- a/src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_default.txt
+++ b/src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_default.txt
@@ -3,23 +3,14 @@
# Used as a basic test for the framework.
duration_seconds=10,
cache_size_mb=1000,
-enable_logging=true,
runtime_monitor=
(
- op_count=3,
- interval=s,
stat_cache_size=
(
enabled=true,
limit=100
)
),
-timestamp_manager=
-(
- enabled=true,
- oldest_lag=1,
- stable_lag=1
-),
workload_generator=
(
collection_count=2,
@@ -31,20 +22,5 @@ workload_generator=
max=50
),
read_threads=1,
- update_threads=1,
- update_config=
- (
- op_count=1,
- interval=s
- ),
- insert_config=
- (
- op_count=1,
- interval=s
- ),
- value_size=10
+ update_threads=1
),
-workload_tracking=
-(
- enabled=true
-)
diff --git a/src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_stress.txt b/src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_stress.txt
index 6067bea3983..6eeda0ab7c0 100644
--- a/src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_stress.txt
+++ b/src/third_party/wiredtiger/test/cppsuite/configs/config_poc_test_stress.txt
@@ -6,19 +6,12 @@ cache_size_mb=5000,
enable_logging=true,
runtime_monitor=
(
- rate_per_second=3,
stat_cache_size=
(
enabled=true,
limit=100
)
),
-timestamp_manager=
-(
- enabled=true,
- oldest_lag=1,
- stable_lag=1
-),
workload_generator=
(
collection_count=2,
@@ -32,8 +25,4 @@ workload_generator=
read_threads=1,
update_threads=1,
value_size=2000
-),
-workload_tracking=
-(
- enabled=true
-)
+), \ No newline at end of file
diff --git a/src/third_party/wiredtiger/test/cppsuite/create_test.sh b/src/third_party/wiredtiger/test/cppsuite/create_test.sh
new file mode 100755
index 00000000000..91f506e39e9
--- /dev/null
+++ b/src/third_party/wiredtiger/test/cppsuite/create_test.sh
@@ -0,0 +1,81 @@
+#! /bin/bash
+
+# First argument needs to be the name of the script.
+if [ $# -eq 0 ]
+ then
+ echo "Please give a name to your test i.e ./s_new_test my_test"
+ exit 128
+fi
+
+# Check the test name
+if [[ $1 =~ ^[0-9a-zA-Z_-]+$ ]];then
+ echo "Generating test: $1..."
+else
+ echo "Invalid test name. Only alphanumeric characters are allowed. \"_\" and \"-\" can be used too."
+ exit 128
+fi
+
+# Check if the test already exists.
+FILE=tests/$1.cxx
+if test -f "$FILE"; then
+ echo "$FILE cannot be created as it already exists."
+ exit 1
+fi
+
+# Check if default configuration associated to the test already exists.
+CONFIG=configs/config_$1_default.txt
+if test -f "$CONFIG"; then
+ echo "$CONFIG cannot be created as it already exists."
+ exit 1
+fi
+
+# Copy the default template.
+cp tests/example_test.cxx $FILE
+echo "Created $FILE."
+cp configs/config_example_test_default.txt $CONFIG
+echo "Created $CONFIG."
+
+# Replace example_test with the new test name.
+SEARCH="example_test"
+sed -i "s/$SEARCH/$1/" $FILE
+echo "Updated $FILE."
+
+# Replace the first line of the configuration file.
+REPLACE="# Configuration for $1."
+sed -i "1s/.*/$REPLACE/" $CONFIG
+echo "Updated $CONFIG."
+
+# Include the new test in run.cxx
+FILE=tests/run.cxx
+SEARCH="#include \"example_test.cxx\""
+VALUE="#include \"$1.cxx\""
+sed -i "/$SEARCH/a $VALUE" $FILE
+
+# Add the new test to the run_test() method
+SEARCH="example_test(config, test_name).run()"
+LINE_1="\ else if (test_name == \"$1\")\n"
+LINE_2="\ $1(config, test_name).run();"
+sed -i "/$SEARCH/a $LINE_1$LINE_2" $FILE
+
+# Add the new test to all existing tests.
+SEARCH="all_tests = {\"example_test\""
+REPLACE="$SEARCH, \"$1\""
+sed -i "s/$SEARCH/$REPLACE/" $FILE
+echo "Updated $FILE."
+
+# Add the new test to test_data.py
+FILE=../../dist/test_data.py
+SEARCH="example_test"
+LINE_1="\ '$1' : Method(test_config),"
+sed -i "/$SEARCH/a $LINE_1" $FILE
+echo "Updated $FILE."
+
+# Trigger s_all
+echo "Running s_all.."
+cd ../../dist
+./s_all
+
+# Last changes to be done manually
+echo "Follow the next steps to execute your new test:"
+echo "1. Start editing $1.cxx"
+echo "2. Compile your changes, go to build_posix/test/cppsuite and run your test with ./run -t $1"
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/core/component.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/core/component.h
index 91b165d8f29..341932a0236 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/core/component.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/core/component.h
@@ -88,7 +88,7 @@ class component {
}
bool
- is_enabled() const
+ enabled() const
{
return _enabled;
}
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/core/configuration.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/core/configuration.h
index c2b9494487f..7eaa96214cb 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/core/configuration.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/core/configuration.h
@@ -29,7 +29,9 @@
#ifndef CONFIGURATION_H
#define CONFIGURATION_H
+#include <algorithm>
#include <string>
+#include <stack>
extern "C" {
#include "test_util.h"
@@ -40,14 +42,22 @@ enum class types { BOOL, INT, STRING, STRUCT };
namespace test_harness {
class configuration {
public:
- configuration(const std::string &test_config_name, const std::string &config) : _config(config)
+ configuration(const std::string &test_config_name, const std::string &config)
{
+ const auto *config_entry = __wt_test_config_match(test_config_name.c_str());
+ if (config_entry == nullptr)
+ testutil_die(EINVAL, "failed to match test config name");
+ std::string default_config = std::string(config_entry->base);
+ /* Merge in the default configuration. */
+ _config = merge_default_config(default_config, config);
+ debug_print("Running with enriched config: " + _config, DEBUG_INFO);
+
int ret = wiredtiger_test_config_validate(
- nullptr, nullptr, test_config_name.c_str(), config.c_str());
+ nullptr, nullptr, test_config_name.c_str(), _config.c_str());
if (ret != 0)
testutil_die(EINVAL, "failed to validate given config, ensure test config exists");
ret =
- wiredtiger_config_parser_open(nullptr, config.c_str(), config.size(), &_config_parser);
+ wiredtiger_config_parser_open(nullptr, _config.c_str(), _config.size(), &_config_parser);
if (ret != 0)
testutil_die(EINVAL, "failed to create configuration parser for provided config");
}
@@ -173,6 +183,104 @@ class configuration {
return func(value);
}
+ /*
+ * Merge together two configuration strings, the user one and the default one.
+ */
+ static std::string
+ merge_default_config(const std::string &default_config, const std::string &user_config)
+ {
+ std::string merged_config;
+ auto split_default_config = split_config(default_config);
+ auto split_user_config = split_config(user_config);
+ auto user_it = split_user_config.begin();
+ for (auto default_it = split_default_config.begin();
+ default_it != split_default_config.end(); ++default_it) {
+ if (user_it->first != default_it->first)
+ /* The default does not exist in the user configuration, add it. */
+ merged_config += default_it->first + "=" + default_it->second;
+ else {
+ /* If we have a sub config merge it in. */
+ if (user_it->second[0] == '(')
+ merged_config += default_it->first + "=(" +
+ merge_default_config(default_it->second, user_it->second) + ')';
+ else
+ /* Add the user configuration as it exists. */
+ merged_config += user_it->first + "=" + user_it->second;
+ ++user_it;
+ }
+ /* Add a comma after every item we add except the last one. */
+ if (split_default_config.end() - default_it != 1)
+ merged_config += ",";
+ }
+ return (merged_config);
+ }
+
+ /*
+ * Split a config string into keys and values, taking care to not split incorrectly when we have
+ * a sub config.
+ */
+ static std::vector<std::pair<std::string, std::string>>
+ split_config(const std::string &config)
+ {
+ std::string cut_config = config;
+ std::vector<std::pair<std::string, std::string>> split_config;
+ std::string key = "", value = "";
+ bool in_subconfig = false;
+ bool expect_value = false;
+ std::stack<char> subconfig_parens;
+
+ /* All configuration strings must be at least 2 characters. */
+ testutil_assert(config.size() > 1);
+
+ /* Remove prefix and trailing "()". */
+ if (config[0] == '(')
+ cut_config = config.substr(1, config.size() - 2);
+
+ size_t start = 0, len = 0;
+ for (size_t i = 0; i < cut_config.size(); ++i) {
+ if (cut_config[i] == '(') {
+ subconfig_parens.push(cut_config[i]);
+ in_subconfig = true;
+ }
+ if (cut_config[i] == ')') {
+ subconfig_parens.pop();
+ in_subconfig = !subconfig_parens.empty();
+ }
+ if (cut_config[i] == '=' && !in_subconfig) {
+ expect_value = true;
+ key = cut_config.substr(start, len);
+ start += len + 1;
+ len = 0;
+ continue;
+ }
+ if (cut_config[i] == ',' && !in_subconfig) {
+ expect_value = false;
+ if (start + len >= cut_config.size())
+ break;
+ value = cut_config.substr(start, len);
+ start += len + 1;
+ len = 0;
+ split_config.push_back(std::make_pair(key, value));
+ continue;
+ }
+ ++len;
+ }
+ if (expect_value) {
+ value = cut_config.substr(start, len);
+ split_config.push_back(std::make_pair(key, value));
+ }
+
+ /* We have to sort the config here otherwise we will match incorrectly while merging. */
+ std::sort(split_config.begin(), split_config.end(), comparator);
+ return (split_config);
+ }
+
+ static bool
+ comparator(std::pair<std::string, std::string> a, std::pair<std::string, std::string> b)
+ {
+ return (a.first < b.first);
+ }
+
std::string _config;
WT_CONFIG_PARSER *_config_parser = nullptr;
};
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/runtime_monitor.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/runtime_monitor.h
index b7897eb39f1..bc559a03104 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/runtime_monitor.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/runtime_monitor.h
@@ -64,7 +64,7 @@ class statistic {
virtual ~statistic() {}
bool
- is_enabled() const
+ enabled() const
{
return _enabled;
}
@@ -154,7 +154,7 @@ class runtime_monitor : public component {
do_work()
{
for (const auto &it : _stats) {
- if (it->is_enabled())
+ if (it->enabled())
it->check(_cursor);
}
}
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/test.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/test.h
index a753e131f0f..f5049df074d 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/test.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/test.h
@@ -102,7 +102,7 @@ class test : public database_operation {
run()
{
int64_t cache_size_mb, duration_seconds;
- bool enable_logging, is_success = true;
+ bool enable_logging;
/* Build the database creation config string. */
std::string db_create_config = CONNECTION_CREATE;
@@ -124,6 +124,10 @@ class test : public database_operation {
for (const auto &it : _components)
_thread_manager->add_thread(&component::run, it);
+ /* The initial population phase needs to be finished before starting the actual test. */
+ while (_workload_generator->enabled() && !_workload_generator->db_populated())
+ std::this_thread::sleep_for(std::chrono::milliseconds(10));
+
/* The test will run for the duration as defined in the config. */
duration_seconds = _config->get_int(DURATION_SECONDS);
testutil_assert(duration_seconds >= 0);
@@ -135,13 +139,13 @@ class test : public database_operation {
_thread_manager->join();
/* Validation stage. */
- if (_workload_tracking->is_enabled()) {
+ if (_workload_tracking->enabled()) {
workload_validation wv;
- is_success = wv.validate(_workload_tracking->get_operation_table_name(),
+ wv.validate(_workload_tracking->get_operation_table_name(),
_workload_tracking->get_schema_table_name(), _workload_generator->get_database());
}
- debug_print(is_success ? "SUCCESS" : "FAILED", DEBUG_INFO);
+ debug_print("SUCCESS", DEBUG_INFO);
connection_manager::instance().close();
}
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/util/debug_utils.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/util/debug_utils.h
index da09a08c9d8..a2694f6987c 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/util/debug_utils.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/util/debug_utils.h
@@ -34,7 +34,6 @@
/* Define helpful functions related to debugging. */
namespace test_harness {
-#define DEBUG_ABORT -1
#define DEBUG_ERROR 0
#define DEBUG_INFO 1
#define DEBUG_TRACE 2
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_model.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_model.h
index 07e7c007ea7..c2a7ed9f6a6 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_model.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_model.h
@@ -42,9 +42,6 @@ struct key_t {
bool exists;
};
-/* Iterator type used to iterate over keys that are stored in the data model. */
-typedef std::map<test_harness::key_value_t, test_harness::key_t>::const_iterator keys_iterator_t;
-
/* Representation of a value. */
struct value_t {
key_value_t value;
@@ -59,18 +56,6 @@ struct collection_t {
/* Representation of the collections in memory. */
class database {
public:
- const keys_iterator_t
- get_collection_keys_begin(const std::string &collection_name) const
- {
- return (collections.at(collection_name).keys.begin());
- }
-
- const keys_iterator_t
- get_collection_keys_end(const std::string &collection_name) const
- {
- return (collections.at(collection_name).keys.end());
- }
-
const std::vector<std::string>
get_collection_names() const
{
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_operation.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_operation.h
index 7a88ed9b662..fc97c1e381c 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_operation.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/database_operation.h
@@ -46,7 +46,7 @@ class database_operation {
* - Open a cursor on each collection.
* - Insert m key/value pairs in each collection. Values are random strings which size is
* defined by the configuration.
- * - Store in memory the created collections and the generated keys that were inserted.
+ * - Store in memory the created collections.
*/
virtual void
populate(database &database, timestamp_manager *timestamp_manager, configuration *config,
@@ -58,7 +58,7 @@ class database_operation {
int64_t collection_count, key_count, key_cpt, key_size, value_size;
std::string collection_name, cfg, home;
key_value_t generated_key, generated_value;
- bool ts_enabled = timestamp_manager->is_enabled();
+ bool ts_enabled = timestamp_manager->enabled();
cursor = nullptr;
collection_count = key_count = key_size = value_size = 0;
@@ -67,13 +67,14 @@ class database_operation {
session = connection_manager::instance().create_session();
/* Create n collections as per the configuration and store each collection name. */
collection_count = config->get_int(COLLECTION_COUNT);
- for (int i = 0; i < collection_count; ++i) {
+ for (size_t i = 0; i < collection_count; ++i) {
collection_name = "table:collection" + std::to_string(i);
database.collections[collection_name] = {};
testutil_check(
session->create(session, collection_name.c_str(), DEFAULT_FRAMEWORK_SCHEMA));
ts = timestamp_manager->get_next_ts();
- testutil_check(tracking->save(tracking_operation::CREATE, collection_name, 0, "", ts));
+ tracking->save_schema_operation(
+ tracking_operation::CREATE_COLLECTION, collection_name, ts);
}
debug_print(std::to_string(collection_count) + " collections created", DEBUG_TRACE);
@@ -89,11 +90,13 @@ class database_operation {
for (const auto &it_collections : database.collections) {
collection_name = it_collections.first;
key_cpt = 0;
- /* WiredTiger lets you open a cursor on a collection using the same pointer. When a
- * session is closed, WiredTiger APIs close the cursors too. */
+ /*
+ * WiredTiger lets you open a cursor on a collection using the same pointer. When a
+ * session is closed, WiredTiger APIs close the cursors too.
+ */
testutil_check(
session->open_cursor(session, collection_name.c_str(), NULL, NULL, &cursor));
- for (size_t j = 0; j < key_count; ++j) {
+ for (size_t i = 0; i < key_count; ++i) {
/* Generation of a unique key. */
generated_key = number_to_string(key_size, key_cpt);
++key_cpt;
@@ -106,16 +109,12 @@ class database_operation {
ts = timestamp_manager->get_next_ts();
if (ts_enabled)
testutil_check(session->begin_transaction(session, ""));
- testutil_check(insert(cursor, tracking, collection_name, generated_key.c_str(),
- generated_value.c_str(), ts));
+ insert(cursor, tracking, collection_name, generated_key.c_str(),
+ generated_value.c_str(), ts);
if (ts_enabled) {
cfg = std::string(COMMIT_TS) + "=" + timestamp_manager->decimal_to_hex(ts);
testutil_check(session->commit_transaction(session, cfg.c_str()));
}
- /* Update the memory representation of the collections. */
- database.collections[collection_name].keys[generated_key].exists = true;
- /* Values are not stored here. */
- database.collections[collection_name].values = nullptr;
}
}
debug_print("Populate stage done", DEBUG_TRACE);
@@ -150,13 +149,15 @@ class database_operation {
virtual void
update_operation(thread_context &context, WT_SESSION *session)
{
+ WT_DECL_RET;
WT_CURSOR *cursor;
wt_timestamp_t ts;
std::vector<WT_CURSOR *> cursors;
- std::string collection_name;
std::vector<std::string> collection_names = context.get_collection_names();
- key_value_t generated_value, key;
- int64_t cpt, value_size = context.get_value_size();
+ key_value_t key, generated_value;
+ const char *key_tmp;
+ int64_t value_size = context.get_value_size();
+ uint64_t i;
testutil_assert(session != nullptr);
/* Get a cursor for each collection in collection_names. */
@@ -165,17 +166,31 @@ class database_operation {
cursors.push_back(cursor);
}
- cpt = 0;
- /* Walk each cursor. */
- for (const auto &it : cursors) {
- collection_name = collection_names[cpt];
- /* Walk each key. */
- for (keys_iterator_t iter_key = context.get_collection_keys_begin(collection_name);
- iter_key != context.get_collection_keys_end(collection_name); ++iter_key) {
- /* Do not process removed keys. */
- if (!iter_key->second.exists)
- continue;
-
+ /*
+ * Update each collection while the test is running.
+ */
+ i = 0;
+ while (context.is_running() && !collection_names.empty()) {
+ if (i >= collection_names.size())
+ i = 0;
+ ret = cursors[i]->next(cursors[i]);
+ /* If we have reached the end of the collection, reset. */
+ if (ret == WT_NOTFOUND) {
+ testutil_check(cursors[i]->reset(cursors[i]));
+ ++i;
+ } else if (ret != 0)
+ /* Stop updating in case of an error. */
+ testutil_die(DEBUG_ERROR, "update_operation: cursor->next() failed: %d", ret);
+ else {
+ testutil_check(cursors[i]->get_key(cursors[i], &key_tmp));
+ /*
+ * The retrieved key needs to be passed inside the update function. However, the
+ * update API doesn't guarantee our buffer will still be valid once it is called, as
+ * such we copy the buffer and then pass it into the API.
+ */
+ key = key_value_t(key_tmp);
+ generated_value =
+ random_generator::random_generator::instance().generate_string(value_size);
ts = context.get_timestamp_manager()->get_next_ts();
/* Start a transaction if possible. */
@@ -183,17 +198,15 @@ class database_operation {
context.begin_transaction(session, "");
context.set_commit_timestamp(session, ts);
}
- generated_value =
- random_generator::random_generator::instance().generate_string(value_size);
- testutil_check(update(context.get_tracking(), it, collection_name,
- iter_key->first.c_str(), generated_value.c_str(), ts));
+
+ update(context.get_tracking(), cursors[i], collection_names[i], key.c_str(),
+ generated_value.c_str(), ts);
/* Commit the current transaction if possible. */
context.increment_operation_count();
if (context.can_commit_transaction())
context.commit_transaction(session, "");
}
- ++cpt;
}
/*
@@ -211,48 +224,34 @@ class database_operation {
private:
/* WiredTiger APIs wrappers for single operations. */
template <typename K, typename V>
- int
+ void
insert(WT_CURSOR *cursor, workload_tracking *tracking, const std::string &collection_name,
const K &key, const V &value, wt_timestamp_t ts)
{
- int error_code;
-
testutil_assert(cursor != nullptr);
+
cursor->set_key(cursor, key);
cursor->set_value(cursor, value);
- error_code = cursor->insert(cursor);
+ testutil_check(cursor->insert(cursor));
+ debug_print("key/value inserted", DEBUG_TRACE);
- if (error_code == 0) {
- debug_print("key/value inserted", DEBUG_TRACE);
- error_code =
- tracking->save(tracking_operation::INSERT, collection_name, key, value, ts);
- } else
- debug_print("key/value insertion failed", DEBUG_ERROR);
-
- return (error_code);
+ tracking->save_operation(tracking_operation::INSERT, collection_name, key, value, ts);
}
template <typename K, typename V>
- static int
+ static void
update(workload_tracking *tracking, WT_CURSOR *cursor, const std::string &collection_name,
K key, V value, wt_timestamp_t ts)
{
- int error_code;
-
testutil_assert(tracking != nullptr);
testutil_assert(cursor != nullptr);
+
cursor->set_key(cursor, key);
cursor->set_value(cursor, value);
- error_code = cursor->update(cursor);
-
- if (error_code == 0) {
- debug_print("key/value update", DEBUG_TRACE);
- error_code =
- tracking->save(tracking_operation::UPDATE, collection_name, key, value, ts);
- } else
- debug_print("key/value update failed", DEBUG_ERROR);
+ testutil_check(cursor->update(cursor));
+ debug_print("key/value updated", DEBUG_TRACE);
- return (error_code);
+ tracking->save_operation(tracking_operation::UPDATE, collection_name, key, value, ts);
}
/*
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/thread_context.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/thread_context.h
index e5275bc7819..2cf20066504 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/thread_context.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/thread_context.h
@@ -69,18 +69,6 @@ class thread_context {
return (_database.get_collection_names());
}
- const keys_iterator_t
- get_collection_keys_begin(const std::string &collection_name) const
- {
- return (_database.get_collection_keys_begin(collection_name));
- }
-
- const keys_iterator_t
- get_collection_keys_end(const std::string &collection_name) const
- {
- return (_database.get_collection_keys_end(collection_name));
- }
-
thread_operation
get_thread_operation() const
{
@@ -132,7 +120,7 @@ class thread_context {
void
begin_transaction(WT_SESSION *session, const std::string &config)
{
- if (!_in_txn && _timestamp_manager->is_enabled()) {
+ if (!_in_txn && _timestamp_manager->enabled()) {
testutil_check(
session->begin_transaction(session, config.empty() ? nullptr : config.c_str()));
/* This randomizes the number of operations to be executed in one transaction. */
@@ -154,7 +142,7 @@ class thread_context {
bool
can_commit_transaction() const
{
- return (_timestamp_manager->is_enabled() && _in_txn &&
+ return (_timestamp_manager->enabled() && _in_txn &&
(!_running || (_current_op_count > _max_op_count)));
}
@@ -180,7 +168,7 @@ class thread_context {
void
set_commit_timestamp(WT_SESSION *session, wt_timestamp_t ts)
{
- if (!_timestamp_manager->is_enabled())
+ if (!_timestamp_manager->enabled())
return;
std::string config = std::string(COMMIT_TS) + "=" + _timestamp_manager->decimal_to_hex(ts);
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_tracking.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_tracking.h
index 4d1b2d755a8..41efadb440b 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_tracking.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_tracking.h
@@ -49,7 +49,7 @@
namespace test_harness {
/* Tracking operations. */
-enum class tracking_operation { CREATE, DELETE_COLLECTION, DELETE_KEY, INSERT, UPDATE };
+enum class tracking_operation { CREATE_COLLECTION, DELETE_COLLECTION, DELETE_KEY, INSERT, UPDATE };
/* Class used to track operations performed on collections */
class workload_tracking : public component {
@@ -107,41 +107,49 @@ class workload_tracking : public component {
/* Does not do anything. */
}
- template <typename K, typename V>
- int
- save(const tracking_operation &operation, const std::string &collection_name, const K &key,
- const V &value, wt_timestamp_t ts)
+ void
+ save_schema_operation(
+ const tracking_operation &operation, const std::string &collection_name, wt_timestamp_t ts)
{
- WT_CURSOR *cursor;
- int error_code = 0;
+ std::string error_message;
if (!_enabled)
- return (error_code);
-
- /* Select the correct cursor to save in the collection associated to specific operations. */
- switch (operation) {
- case tracking_operation::CREATE:
- case tracking_operation::DELETE_COLLECTION:
- cursor = _cursor_schema;
- cursor->set_key(cursor, collection_name.c_str(), ts);
- cursor->set_value(cursor, static_cast<int>(operation));
- break;
-
- default:
- cursor = _cursor_operations;
- cursor->set_key(cursor, collection_name.c_str(), key, ts);
- cursor->set_value(cursor, static_cast<int>(operation), value);
- break;
+ return;
+
+ if (operation == tracking_operation::CREATE_COLLECTION ||
+ operation == tracking_operation::DELETE_COLLECTION) {
+ _cursor_schema->set_key(_cursor_schema, collection_name.c_str(), ts);
+ _cursor_schema->set_value(_cursor_schema, static_cast<int>(operation));
+ testutil_check(_cursor_schema->insert(_cursor_schema));
+ } else {
+ error_message = "save_schema_operation: invalid operation " +
+ std::to_string(static_cast<int>(operation));
+ testutil_die(EINVAL, error_message.c_str());
}
+ debug_print("save_schema_operation: workload tracking saved operation.", DEBUG_TRACE);
+ }
- error_code = cursor->insert(cursor);
+ template <typename K, typename V>
+ void
+ save_operation(const tracking_operation &operation, const std::string &collection_name,
+ const K &key, const V &value, wt_timestamp_t ts)
+ {
+ std::string error_message;
- if (error_code == 0)
- debug_print("Workload tracking saved operation.", DEBUG_TRACE);
- else
- debug_print("Workload tracking failed to save operation !", DEBUG_ERROR);
+ if (!_enabled)
+ return;
- return error_code;
+ if (operation == tracking_operation::CREATE_COLLECTION ||
+ operation == tracking_operation::DELETE_COLLECTION) {
+ error_message =
+ "save_operation: invalid operation " + std::to_string(static_cast<int>(operation));
+ testutil_die(EINVAL, error_message.c_str());
+ } else {
+ _cursor_operations->set_key(_cursor_operations, collection_name.c_str(), key, ts);
+ _cursor_operations->set_value(_cursor_operations, static_cast<int>(operation), value);
+ testutil_check(_cursor_operations->insert(_cursor_operations));
+ }
+ debug_print("save_operation: workload tracking saved operation.", DEBUG_TRACE);
}
private:
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_validation.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_validation.h
index 5ef7992e773..aaab9ad25a9 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_validation.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload/workload_validation.h
@@ -45,95 +45,113 @@ namespace test_harness {
class workload_validation {
public:
/*
- * Validate the on disk data against what has been tracked during the test.
- * - The first step is to replay the tracked operations so a representation in memory of the
- * collections is created. This representation is then compared to what is on disk.
- * - The second step is to go through what has been saved on disk and make sure the memory
- * representation has the same data.
- * operation_table_name is the collection that contains all the operations about the key/value
- * pairs in the different collections used during the test. schema_table_name is the collection
- * that contains all the operations about the creation or deletion of collections during the
- * test.
+ * Validate the on disk data against what has been tracked during the test. This is done by
+ * replaying the tracked operations so a representation in memory of the collections is created.
+ * This representation is then compared to what is on disk. operation_table_name: collection
+ * that contains all the operations about the key/value pairs in the different collections used
+ * during the test. schema_table_name: collection that contains all the operations about the
+ * creation or deletion of collections during the test.
*/
- bool
+ void
validate(const std::string &operation_table_name, const std::string &schema_table_name,
database &database)
{
+ WT_DECL_RET;
+ WT_CURSOR *cursor;
WT_SESSION *session;
- std::string collection_name;
- /* Existing collections after the test. */
+ wt_timestamp_t key_timestamp;
std::vector<std::string> created_collections, deleted_collections;
- bool is_valid = true;
+ const char *key, *key_collection_name, *value;
+ int value_operation_type;
+ std::string collection_name;
session = connection_manager::instance().create_session();
/* Retrieve the collections that were created and deleted during the test. */
- collection_name = schema_table_name;
parse_schema_tracking_table(
- session, collection_name, created_collections, deleted_collections);
-
- /* Make sure they exist in memory. */
- for (auto const &it : created_collections) {
- if (database.collections.count(it) == 0) {
- debug_print("Collection missing in memory: " + it, DEBUG_ERROR);
- is_valid = false;
- break;
- }
- }
-
- if (!is_valid)
- return (is_valid);
+ session, schema_table_name, created_collections, deleted_collections);
- /* Make sure they don't exist in memory nor on disk. */
+ /*
+ * Make sure the deleted collections do not exist on disk. The created collections are
+ * checked in check_reference.
+ */
for (auto const &it : deleted_collections) {
- if (database.collections.count(it) > 0) {
- debug_print(
- "Collection present in memory while it has been tracked as deleted: " + it,
- DEBUG_ERROR);
- is_valid = false;
- break;
- }
- if (!verify_collection_state(session, it, false)) {
- debug_print(
- "Collection present on disk while it has been tracked as deleted: " + it,
- DEBUG_ERROR);
- is_valid = false;
- break;
- }
+ if (!verify_collection_state(session, it, false))
+ testutil_die(DEBUG_ERROR,
+ "validate: collection %s present on disk while it has been tracked as deleted.",
+ it.c_str());
}
- for (auto const &collection_name : database.get_collection_names()) {
- if (!is_valid)
- break;
-
- /* Get the values associated to the different keys in the current collection. */
- parse_operation_tracking_table(
- session, operation_table_name, collection_name, database);
- /* Check all tracked operations in memory against the database on disk. */
- if (!check_reference(session, collection_name, database)) {
- debug_print(
- "check_reference failed for collection " + collection_name, DEBUG_ERROR);
- is_valid = false;
- }
- /* Check what has been saved on disk against what has been tracked. */
- else if (!check_disk_state(session, collection_name, database)) {
- debug_print(
- "check_disk_state failed for collection " + collection_name, DEBUG_ERROR);
- is_valid = false;
+ /* Parse the tracking table. */
+ testutil_check(
+ session->open_cursor(session, operation_table_name.c_str(), NULL, NULL, &cursor));
+ while ((ret = cursor->next(cursor)) == 0) {
+ testutil_check(cursor->get_key(cursor, &key_collection_name, &key, &key_timestamp));
+ testutil_check(cursor->get_value(cursor, &value_operation_type, &value));
+
+ debug_print("Collection name is " + std::string(key_collection_name), DEBUG_TRACE);
+ debug_print("Key is " + std::string(key), DEBUG_TRACE);
+ debug_print("Timestamp is " + std::to_string(key_timestamp), DEBUG_TRACE);
+ debug_print("Operation type is " + std::to_string(value_operation_type), DEBUG_TRACE);
+ debug_print("Value is " + std::string(value), DEBUG_TRACE);
+
+ /*
+ * If the cursor points to values from a collection that has been created during the
+ * test, update the data model.
+ */
+ if (std::find(created_collections.begin(), created_collections.end(),
+ key_collection_name) != created_collections.end())
+ update_data_model(static_cast<tracking_operation>(value_operation_type),
+ key_collection_name, key, value, database);
+ /*
+ * The collection should be part of the deleted collections if it has not be found in
+ * the created ones.
+ */
+ else if (std::find(deleted_collections.begin(), deleted_collections.end(),
+ key_collection_name) == deleted_collections.end())
+ testutil_die(DEBUG_ERROR,
+ "validate: The collection %s is not part of the created or deleted collections.",
+ key_collection_name);
+
+ if (collection_name.empty())
+ collection_name = key_collection_name;
+ else if (collection_name != key_collection_name) {
+ /*
+ * The data model is now fully updated for the last read collection. It can be
+ * checked.
+ */
+ check_reference(session, collection_name, database.collections.at(collection_name));
+ /* Clear memory. */
+ delete database.collections[collection_name].values;
+ database.collections[collection_name].values = nullptr;
+
+ collection_name = key_collection_name;
}
+ };
+
+ /* The value of ret should be WT_NOTFOUND once the cursor has read all rows. */
+ if (ret != WT_NOTFOUND)
+ testutil_die(DEBUG_ERROR, "validate: cursor->next() %d.", ret);
+
+ /*
+ * Once the cursor has read the entire table, the last parsed collection has not been
+ * checked yet. We still have to make sure collection_name has been updated. It will remain
+ * empty if there is no collections to check after the end of the test (no collections
+ * created or all deleted).
+ */
+ if (!collection_name.empty()) {
+ check_reference(session, collection_name, database.collections.at(collection_name));
/* Clear memory. */
delete database.collections[collection_name].values;
database.collections[collection_name].values = nullptr;
}
-
- return (is_valid);
}
private:
/*
* Read the tracking table to retrieve the created and deleted collections during the test.
- * collection_name is the collection that contains the operations on the different collections
- * during the test.
+ * collection_name: collection that contains the operations on the different collections during
+ * the test.
*/
void
parse_schema_tracking_table(WT_SESSION *session, const std::string &collection_name,
@@ -155,7 +173,7 @@ class workload_validation {
debug_print("Operation type is " + std::to_string(value_operation_type), DEBUG_TRACE);
if (static_cast<tracking_operation>(value_operation_type) ==
- tracking_operation::CREATE) {
+ tracking_operation::CREATE_COLLECTION) {
deleted_collections.erase(std::remove(deleted_collections.begin(),
deleted_collections.end(), key_collection_name),
deleted_collections.end());
@@ -170,211 +188,95 @@ class workload_validation {
}
}
- /*
- * Parse the tracked operations to build a representation in memory of the collections at the
- * end of the test. tracking_collection_name is the tracking collection used to save the
- * operations performed on the collections during the test. collection_name is the collection
- * that needs to be represented in memory.
- */
+ /* Update the data model. */
void
- parse_operation_tracking_table(WT_SESSION *session, const std::string &tracking_collection_name,
- const std::string &collection_name, database &database)
+ update_data_model(const tracking_operation &operation, const std::string &collection_name,
+ const char *key, const char *value, database &database)
{
- WT_CURSOR *cursor;
- wt_timestamp_t key_timestamp;
- int exact, value_operation_type;
- const char *key, *key_collection_name, *value;
- std::vector<key_value_t> collection_keys;
- std::string key_str;
-
- /* Retrieve all keys from the given collection. */
- for (auto const &it : database.collections.at(collection_name).keys)
- collection_keys.push_back(it.first);
- /* There must be at least a key. */
- testutil_assert(!collection_keys.empty());
- /* Sort keys. */
- std::sort(collection_keys.begin(), collection_keys.end());
- /* Use the first key as a parameter for search_near. */
- key_str = collection_keys[0];
-
- testutil_check(
- session->open_cursor(session, tracking_collection_name.c_str(), NULL, NULL, &cursor));
-
- cursor->set_key(cursor, collection_name.c_str(), key_str.c_str());
- testutil_check(cursor->search_near(cursor, &exact));
- /*
- * Since the timestamp which is part of the key is not provided, exact cannot be 0. If it is
- * -1, we need to go to the next key.
- */
- testutil_assert(exact != 0);
- if (exact < 0)
- testutil_check(cursor->next(cursor));
-
- do {
- testutil_check(cursor->get_key(cursor, &key_collection_name, &key, &key_timestamp));
- testutil_check(cursor->get_value(cursor, &value_operation_type, &value));
-
- debug_print("Collection name is " + std::string(key_collection_name), DEBUG_TRACE);
- debug_print("Key is " + std::string(key), DEBUG_TRACE);
- debug_print("Timestamp is " + std::to_string(key_timestamp), DEBUG_TRACE);
- debug_print("Operation type is " + std::to_string(value_operation_type), DEBUG_TRACE);
- debug_print("Value is " + std::string(value), DEBUG_TRACE);
-
+ switch (operation) {
+ case tracking_operation::DELETE_KEY:
/*
- * If the cursor is reading an operation for a different collection, we know all the
- * operations have been parsed for the collection we were interested in.
+ * Operations are parsed from the oldest to the most recent one. It is safe to assume
+ * the key has been inserted previously in an existing collection and can be safely
+ * deleted.
*/
- if (std::string(key_collection_name) != collection_name)
- break;
-
- /* Replay the current operation. */
- switch (static_cast<tracking_operation>(value_operation_type)) {
- case tracking_operation::DELETE_KEY:
- /*
- * Operations are parsed from the oldest to the most recent one. It is safe to
- * assume the key has been inserted previously in an existing collection and can be
- * safely deleted.
- */
- database.collections.at(key_collection_name).keys.at(std::string(key)).exists =
- false;
- delete database.collections.at(key_collection_name).values;
- database.collections.at(key_collection_name).values = nullptr;
- break;
- case tracking_operation::INSERT: {
- /* Keys are unique, it is safe to assume the key has not been encountered before. */
- database.collections[key_collection_name].keys[std::string(key)].exists = true;
- if (database.collections[key_collection_name].values == nullptr) {
- database.collections[key_collection_name].values =
- new std::map<key_value_t, value_t>();
- }
- value_t v;
- v.value = key_value_t(value);
- std::pair<key_value_t, value_t> pair(key_value_t(key), v);
- database.collections[key_collection_name].values->insert(pair);
- break;
- }
- case tracking_operation::UPDATE:
- database.collections[key_collection_name].values->at(key).value =
- key_value_t(value);
- break;
- default:
- testutil_die(DEBUG_ABORT, "Unexpected operation in the tracking table: %d",
- value_operation_type);
- break;
- }
-
- } while (cursor->next(cursor) == 0);
-
- if (cursor->reset(cursor) != 0)
- debug_print("Cursor could not be reset !", DEBUG_ERROR);
+ database.collections.at(collection_name).keys.at(key).exists = false;
+ delete database.collections.at(collection_name).values;
+ database.collections.at(collection_name).values = nullptr;
+ break;
+ case tracking_operation::INSERT: {
+ /*
+ * Keys are unique, it is safe to assume the key has not been encountered before.
+ */
+ database.collections[collection_name].keys[key].exists = true;
+ if (database.collections[collection_name].values == nullptr)
+ database.collections[collection_name].values = new std::map<key_value_t, value_t>();
+ value_t v;
+ v.value = key_value_t(value);
+ std::pair<key_value_t, value_t> pair(key_value_t(key), v);
+ database.collections[collection_name].values->insert(pair);
+ break;
+ }
+ case tracking_operation::UPDATE:
+ database.collections[collection_name].values->at(key).value = key_value_t(value);
+ break;
+ default:
+ testutil_die(DEBUG_ERROR, "Unexpected operation in the tracking table: %d",
+ static_cast<tracking_operation>(operation));
+ break;
+ }
}
/*
- * Compare the tracked operations against what has been saved on disk. database is the
- * representation in memory of the collections after the test according to the tracking table.
+ * Compare the tracked operations against what has been saved on disk. collection:
+ * representation in memory of the collection values and keys according to the tracking table.
*/
- bool
+ void
check_reference(
- WT_SESSION *session, const std::string &collection_name, const database &database)
+ WT_SESSION *session, const std::string &collection_name, const collection_t &collection)
{
bool is_valid;
- collection_t collection;
key_t key;
key_value_t key_str;
/* Check the collection exists on disk. */
- is_valid = verify_collection_state(session, collection_name, true);
-
- if (is_valid) {
- collection = database.collections.at(collection_name);
- /* Walk through each key/value pair of the current collection. */
- for (const auto &keys : collection.keys) {
- key_str = keys.first;
- key = keys.second;
- /* The key/value pair exists. */
- if (key.exists)
- is_valid = (is_key_present(session, collection_name, key_str.c_str()) == true);
- /* The key has been deleted. */
- else
- is_valid = (is_key_present(session, collection_name, key_str.c_str()) == false);
-
- /* Check the associated value is valid. */
- if (is_valid && key.exists) {
- testutil_assert(collection.values != nullptr);
- is_valid = verify_value(session, collection_name, key_str.c_str(),
- collection.values->at(key_str).value);
- }
-
- if (!is_valid) {
- debug_print("check_reference failed for key " + key_str, DEBUG_ERROR);
- break;
- }
- }
- }
-
- if (!is_valid)
- debug_print("check_reference failed for collection " + collection_name, DEBUG_ERROR);
-
- return (is_valid);
- }
-
- /* Check what is present on disk against what has been tracked. */
- bool
- check_disk_state(
- WT_SESSION *session, const std::string &collection_name, const database &database)
- {
- WT_CURSOR *cursor;
- collection_t collection;
- bool is_valid = true;
- /* Key/value pairs on disk. */
- const char *key_on_disk, *value_on_disk;
- key_value_t key_str, value_str;
-
- testutil_check(session->open_cursor(session, collection_name.c_str(), NULL, NULL, &cursor));
-
- collection = database.collections.at(collection_name);
-
- /* Read the collection on disk. */
- while (is_valid && (cursor->next(cursor) == 0)) {
- testutil_check(cursor->get_key(cursor, &key_on_disk));
- testutil_check(cursor->get_value(cursor, &value_on_disk));
+ if (!verify_collection_state(session, collection_name, true))
+ testutil_die(DEBUG_ERROR,
+ "check_reference: collection %s not present on disk while it has been tracked as "
+ "created.",
+ collection_name.c_str());
+
+ /* Walk through each key/value pair of the current collection. */
+ for (const auto &keys : collection.keys) {
+ key_str = keys.first;
+ key = keys.second;
+ /* The key/value pair exists. */
+ if (key.exists)
+ is_valid = (is_key_present(session, collection_name, key_str.c_str()) == true);
+ /* The key has been deleted. */
+ else
+ is_valid = (is_key_present(session, collection_name, key_str.c_str()) == false);
- key_str = std::string(key_on_disk);
-
- debug_print("Key on disk is " + key_str, DEBUG_TRACE);
- debug_print("Value on disk is " + std::string(value_on_disk), DEBUG_TRACE);
+ if (!is_valid)
+ testutil_die(DEBUG_ERROR, "check_reference: failed for key %s in collection %s.",
+ key_str.c_str(), collection_name.c_str());
- /* Check the key on disk has been saved in memory too. */
- if ((collection.keys.count(key_str) > 0) && collection.keys.at(key_str).exists) {
- /* Memory should be allocated for values. */
+ /* Check the associated value is valid. */
+ if (key.exists) {
testutil_assert(collection.values != nullptr);
- value_str = collection.values->at(key_str).value;
- /*
- * Check the key/value pair on disk matches the one in memory from the tracked
- * operations.
- */
- is_valid = (value_str == key_value_t(value_on_disk));
- if (!is_valid)
- debug_print(" Key/Value pair mismatch.\n Disk key: " + key_str +
- "\n Disk value: " + std ::string(value_on_disk) +
- "\n Tracking table key: " + key_str + "\n Tracking table value exists: " +
- std::to_string(collection.keys.at(key_str).exists) +
- "\n Tracking table value: " + value_str,
- DEBUG_ERROR);
- } else {
- is_valid = false;
- debug_print(
- "The key " + std::string(key_on_disk) + " present on disk has not been tracked",
- DEBUG_ERROR);
+ if (!verify_value(session, collection_name, key_str.c_str(),
+ collection.values->at(key_str).value))
+ testutil_die(DEBUG_ERROR,
+ "check_reference: failed for key %s / value %s in collection %s.",
+ key_str.c_str(), collection.values->at(key_str).value.c_str(),
+ collection_name.c_str());
}
}
-
- return (is_valid);
}
/*
- * Check whether a collection exists on disk. collection_name is the collection to check. exists
- * needs to be set to true if the collection is expected to be existing, false otherwise.
+ * Check whether a collection exists on disk. exists: needs to be set to true if the collection
+ * is expected to be existing, false otherwise.
*/
bool
verify_collection_state(
@@ -385,6 +287,7 @@ class workload_validation {
return (exists ? (ret == 0) : (ret != 0));
}
+ /* Check whether a keys exists in a collection on disk. */
template <typename K>
bool
is_key_present(WT_SESSION *session, const std::string &collection_name, const K &key)
diff --git a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload_generator.h b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload_generator.h
index 9413834ba31..5e084229123 100644
--- a/src/third_party/wiredtiger/test/cppsuite/test_harness/workload_generator.h
+++ b/src/third_party/wiredtiger/test/cppsuite/test_harness/workload_generator.h
@@ -72,6 +72,7 @@ class workload_generator : public component {
/* Populate the database. */
_database_operation->populate(_database, _timestamp_manager, _config, _tracking);
+ _db_populated = true;
/* Retrieve useful parameters from the test configuration. */
transaction_config = _config->get_subconfig(OPS_PER_TRANSACTION);
@@ -87,7 +88,7 @@ class workload_generator : public component {
testutil_assert(value_size >= 0);
/* Generate threads to execute read operations on the collections. */
- for (int i = 0; i < read_threads; ++i) {
+ for (size_t i = 0; i < read_threads && _running; ++i) {
thread_context *tc = new thread_context(_timestamp_manager, _tracking, _database,
thread_operation::READ, max_operation_per_transaction, min_operation_per_transaction,
value_size, throttle());
@@ -96,7 +97,7 @@ class workload_generator : public component {
}
/* Generate threads to execute update operations on the collections. */
- for (int i = 0; i < update_threads; ++i) {
+ for (size_t i = 0; i < update_threads && _running; ++i) {
thread_context *tc = new thread_context(_timestamp_manager, _tracking, _database,
thread_operation::UPDATE, max_operation_per_transaction,
min_operation_per_transaction, value_size, throttle(update_config));
@@ -123,7 +124,13 @@ class workload_generator : public component {
database &
get_database()
{
- return _database;
+ return (_database);
+ }
+
+ bool
+ db_populated() const
+ {
+ return (_db_populated);
}
/* Workload threaded operations. */
@@ -148,7 +155,7 @@ class workload_generator : public component {
db_operation.update_operation(context, session);
break;
default:
- testutil_die(DEBUG_ABORT, "system: thread_operation is unknown : %d",
+ testutil_die(DEBUG_ERROR, "system: thread_operation is unknown : %d",
static_cast<int>(context.get_thread_operation()));
break;
}
@@ -161,6 +168,7 @@ class workload_generator : public component {
timestamp_manager *_timestamp_manager;
workload_tracking *_tracking;
std::vector<thread_context *> _workers;
+ bool _db_populated = false;
};
} // namespace test_harness
diff --git a/src/third_party/wiredtiger/test/cppsuite/tests/run.cxx b/src/third_party/wiredtiger/test/cppsuite/tests/run.cxx
index 5fe6641cc3b..67d77116cf1 100755
--- a/src/third_party/wiredtiger/test/cppsuite/tests/run.cxx
+++ b/src/third_party/wiredtiger/test/cppsuite/tests/run.cxx
@@ -140,7 +140,7 @@ main(int argc, char *argv[])
* -l : Trace level.
* -t : Test to run. All tests are run if not specified.
*/
- for (int i = 1; (i < argc) && (error_code == 0); ++i) {
+ for (size_t i = 1; (i < argc) && (error_code == 0); ++i) {
if (std::string(argv[i]) == "-h") {
print_help();
return 0;
diff --git a/src/third_party/wiredtiger/test/csuite/incr_backup/main.c b/src/third_party/wiredtiger/test/csuite/incr_backup/main.c
index 9b535d7bc54..b09e1b44da4 100644
--- a/src/third_party/wiredtiger/test/csuite/incr_backup/main.c
+++ b/src/third_party/wiredtiger/test/csuite/incr_backup/main.c
@@ -74,8 +74,8 @@ static bool do_rename = true;
} while (0)
/*
- * We keep an array of tables, each one may or may not be in use.
- * "In use" means it has been created, and will be updated from time to time.
+ * We keep an array of tables, each one may or may not be in use. "In use" means it has been
+ * created, and will be updated from time to time.
*/
typedef struct {
char *name; /* non-null entries represent tables in use */
@@ -189,8 +189,7 @@ key_value(uint64_t change_count, char *key, size_t key_size, WT_ITEM *item, OPER
* is inserted, it is all the letter 'a'. When the value is updated, is it mostly 'b', with some
* 'c' mixed in. When the value is to modified, we'll end up with a value with mostly 'b' and
* 'M' mixed in, in different spots. Thus the modify operation will have both additions ('M')
- * and
- * subtractions ('c') from the previous version.
+ * and subtractions ('c') from the previous version.
*/
if (op_type == INSERT)
ch = 'a';
diff --git a/src/third_party/wiredtiger/test/csuite/wt2719_reconfig/main.c b/src/third_party/wiredtiger/test/csuite/wt2719_reconfig/main.c
index 5434aa191ef..27d1a58ccbd 100644
--- a/src/third_party/wiredtiger/test/csuite/wt2719_reconfig/main.c
+++ b/src/third_party/wiredtiger/test/csuite/wt2719_reconfig/main.c
@@ -183,9 +183,8 @@ main(int argc, char *argv[])
/*
* A linear pass through the list, adding random elements.
*
- * WiredTiger configurations are usually "the last one set wins", but
- * "shared_cache" and "cache_set" options aren't allowed in the same
- * configuration string.
+ * WiredTiger configurations are usually "the last one set wins", but "shared_cache" and
+ * "cache_set" options aren't allowed in the same configuration string.
*/
for (i = 0; i < WT_ELEMENTS(list); ++i) {
p = list[i];
diff --git a/src/third_party/wiredtiger/test/evergreen.yml b/src/third_party/wiredtiger/test/evergreen.yml
index b4ab0507399..6e8c3f01d11 100755
--- a/src/third_party/wiredtiger/test/evergreen.yml
+++ b/src/third_party/wiredtiger/test/evergreen.yml
@@ -2061,10 +2061,6 @@ tasks:
- func: "format test"
vars:
extra_args: checkpoints=1 leak_memory=0 mmap=1 file_type=row compression=zlib huffman_value=1
- # FIXME-WT-6668: temporarily disable lower isolation level test
- # - func: "format test"
- # vars:
- # extra_args: checkpoints=1 leak_memory=0 mmap=1 file_type=row isolation=random transaction_timestamps=0
- func: "format test"
vars:
extra_args: checkpoints=1 leak_memory=0 mmap=1 file_type=row data_source=lsm bloom=1
diff --git a/src/third_party/wiredtiger/test/format/bulk.c b/src/third_party/wiredtiger/test/format/bulk.c
index 5af036d1495..c1decf24b6f 100644
--- a/src/third_party/wiredtiger/test/format/bulk.c
+++ b/src/third_party/wiredtiger/test/format/bulk.c
@@ -38,7 +38,8 @@ bulk_begin_transaction(WT_SESSION *session)
uint64_t ts;
char buf[64];
- wiredtiger_begin_transaction(session, "isolation=snapshot");
+ /* Writes require snapshot isolation. */
+ wiredtiger_begin_transaction(session, NULL);
ts = __wt_atomic_addv64(&g.timestamp, 1);
testutil_check(__wt_snprintf(buf, sizeof(buf), "read_timestamp=%" PRIx64, ts));
testutil_check(session->timestamp_transaction(session, buf));
@@ -113,7 +114,6 @@ wts_load(void)
bulk_begin_transaction(session);
for (committed_keyno = keyno = 0; ++keyno <= g.c_rows;) {
- key_gen(&key, keyno);
val_gen(NULL, &value, keyno);
switch (g.type) {
@@ -132,6 +132,7 @@ wts_load(void)
trace_msg("bulk %" PRIu32 " {%.*s}", keyno, (int)value.size, (char *)value.data);
break;
case ROW:
+ key_gen(&key, keyno);
cursor->set_key(cursor, &key);
cursor->set_value(cursor, &value);
if (g.trace_all)
@@ -188,22 +189,22 @@ wts_load(void)
}
}
+ if (g.c_txn_timestamps)
+ bulk_commit_transaction(session);
+
/*
* Ideally, the insert loop runs until the number of rows plus one, in which case row counts are
* correct. If the loop exited early, reset the counters and rewrite the CONFIG file (so reopens
* aren't surprised).
*/
if (keyno != g.c_rows + 1) {
- testutil_assert(committed_keyno > 0);
+ g.c_rows = g.c_txn_timestamps ? committed_keyno : (keyno - 1);
+ testutil_assert(g.c_rows > 0);
+ g.rows = g.c_rows;
- g.rows = committed_keyno;
- g.c_rows = (uint32_t)committed_keyno;
config_print(false);
}
- if (g.c_txn_timestamps)
- bulk_commit_transaction(session);
-
testutil_check(cursor->close(cursor));
trace_msg("%s", "=============== bulk load stop");
diff --git a/src/third_party/wiredtiger/test/format/config.c b/src/third_party/wiredtiger/test/format/config.c
index 20431b3f1ab..f7321f77c99 100644
--- a/src/third_party/wiredtiger/test/format/config.c
+++ b/src/third_party/wiredtiger/test/format/config.c
@@ -51,8 +51,8 @@ static void config_map_checksum(const char *, u_int *);
static void config_map_compression(const char *, u_int *);
static void config_map_encryption(const char *, u_int *);
static void config_map_file_type(const char *, u_int *);
-static void config_map_isolation(const char *, u_int *);
static void config_pct(void);
+static void config_prefix(void);
static void config_reset(void);
static void config_transaction(void);
@@ -202,6 +202,7 @@ config_run(void)
config_compression("btree.compression");
config_compression("logging.compression");
config_encryption();
+ config_prefix();
/* Configuration based on the configuration already chosen. */
config_directio();
@@ -876,23 +877,6 @@ config_pct(void)
}
/*
- * Cursor modify isn't possible for anything besides snapshot isolation transactions. If both
- * forced, it's an error. The run-time operations code converts modify operations into updates
- * if we're in some other transaction type, but if we're never going to be able to do a modify,
- * turn it off in the CONFIG output to avoid misleading debuggers.
- */
- if (g.c_isolation_flag == ISOLATION_READ_COMMITTED ||
- g.c_isolation_flag == ISOLATION_READ_UNCOMMITTED) {
- if (config_is_perm("transaction.isolation") && config_is_perm("ops.pct.modify") &&
- g.c_modify_pct != 0)
- testutil_die(
- EINVAL, "WT_CURSOR.modify only supported with snapshot isolation transactions");
-
- list[CONFIG_MODIFY_ENTRY].order = 0;
- *list[CONFIG_MODIFY_ENTRY].vp = 0;
- }
-
- /*
* Walk the list, allocating random numbers of operations in a random order.
*
* If the "order" field is non-zero, we need to create a value for this operation. Find the
@@ -924,116 +908,70 @@ config_pct(void)
}
/*
+ * config_prefix --
+ * Prefix configuration.
+ */
+static void
+config_prefix(void)
+{
+ /* Add prefix compression if prefixes are configured and no explicit choice was made. */
+ if (g.c_prefix != 0 && g.c_prefix_compression == 0 &&
+ !config_is_perm("btree.prefix_compression"))
+ config_single("btree.prefix_compression=on", false);
+}
+
+/*
* config_transaction --
* Transaction configuration.
*/
static void
config_transaction(void)
{
- /*
- * WiredTiger cannot support relaxed isolation levels. Turn off everything but timestamps with
- * snapshot isolation.
- */
- if ((!g.c_txn_timestamps && config_is_perm("transaction.timestamps")) ||
- (g.c_isolation_flag != ISOLATION_SNAPSHOT && config_is_perm("transaction.isolation")))
- testutil_die(EINVAL, "format limited to timestamp and snapshot-isolation testing");
- if (!g.c_txn_timestamps)
- config_single("transaction.timestamps=on", false);
- if (g.c_isolation_flag != ISOLATION_SNAPSHOT)
- config_single("transaction.isolation=snapshot", false);
-
- /*
- * Check the permanent configuration. We can't prepare a transaction if logging is configured or
- * timestamps aren't configured. For repeatable reads to work in timestamp testing, all updates
- * must be done in a snapshot isolation transaction.
- */
+ /* Transaction prepare requires timestamps and is incompatible with logging. */
if (g.c_prepare && config_is_perm("ops.prepare")) {
if (g.c_logging && config_is_perm("logging"))
testutil_die(EINVAL, "prepare is incompatible with logging");
if (!g.c_txn_timestamps && config_is_perm("transaction.timestamps"))
testutil_die(EINVAL, "prepare requires transaction timestamps");
- if (g.c_isolation_flag != ISOLATION_SNAPSHOT && config_is_perm("transaction.isolation"))
- testutil_die(EINVAL, "prepare requires snapshot isolation");
- if (g.c_txn_freq != 100 && config_is_perm("transaction.frequency"))
- testutil_die(EINVAL, "prepare requires transaction frequency set to 100");
}
+
+ /* Transaction timestamps are incompatible with implicit transactions. */
if (g.c_txn_timestamps && config_is_perm("transaction.timestamps")) {
- if (g.c_isolation_flag != ISOLATION_SNAPSHOT && config_is_perm("transaction.isolation"))
- testutil_die(EINVAL, "timestamps require snapshot isolation");
- if (g.c_txn_freq != 100 && config_is_perm("transaction.frequency"))
- testutil_die(EINVAL, "timestamps require transaction frequency set to 100");
- }
- if (g.c_logging && config_is_perm("logging") && g.c_prepare)
- config_single("ops.prepare=off", false);
+ if (g.c_txn_implicit && config_is_perm("transaction.implicit"))
+ testutil_die(
+ EINVAL, "transaction.timestamps is incompatible with implicit transactions");
- /* FIXME-WT-6431: temporarily disable salvage with timestamps. */
- if (g.c_txn_timestamps && g.c_salvage) {
- if (config_is_perm("ops.salvage"))
- testutil_die(EINVAL, "salvage cannot run with timestamps");
- config_single("ops.salvage=off", false);
+ /* FIXME-WT-6431: temporarily disable salvage with timestamps. */
+ if (g.c_salvage && config_is_perm("ops.salvage"))
+ testutil_die(EINVAL, "transaction.timestamps is incompatible with salvage");
}
- if (g.c_isolation_flag == ISOLATION_SNAPSHOT && config_is_perm("transaction.isolation")) {
- if (!g.c_txn_timestamps && config_is_perm("transaction.timestamps"))
- testutil_die(EINVAL, "snapshot isolation requires timestamps");
- if (g.c_txn_freq != 100 && config_is_perm("transaction.frequency"))
- testutil_die(EINVAL, "snapshot isolation requires transaction frequency set to 100");
- }
- if (g.c_txn_rollback_to_stable && config_is_perm("transaction.rollback_to_stable") &&
- g.c_isolation_flag != ISOLATION_SNAPSHOT && config_is_perm("transaction.isolation"))
- testutil_die(EINVAL, "rollback to stable requires snapshot isolation");
/*
- * The permanent configuration has no incompatible settings, adjust the temporary configuration
- * as necessary. Prepare overrides timestamps, overrides isolation, for no reason other than
- * prepare is the least configured and timestamps are the option we want to test the most.
+ * Incompatible permanent configurations have been checked, now turn off any incompatible flags.
+ * The choices are inclined to prepare (it's only rarely configured), then timestamps. Note any
+ * of the options may still be set as required for the run, so we still have to check if that's
+ * the case until we run out of combinations (for example, prepare turns off logging, so by the
+ * time we check logging, logging must have been required by the run if both logging and prepare
+ * are still set, so we can just turn off prepare in that case).
*/
if (g.c_prepare) {
- if (g.c_logging)
+ if (!config_is_perm("logging"))
config_single("logging=off", false);
- if (!g.c_txn_timestamps)
- config_single("transaction.timestamps=on", false);
- if (g.c_isolation_flag != ISOLATION_SNAPSHOT)
- config_single("transaction.isolation=snapshot", false);
- if (g.c_txn_freq != 100)
- config_single("transaction.frequency=100", false);
- }
- if (g.c_txn_rollback_to_stable) {
- if (!g.c_txn_timestamps)
+ if (!config_is_perm("transaction.timestamps"))
config_single("transaction.timestamps=on", false);
}
if (g.c_txn_timestamps) {
- if (g.c_isolation_flag != ISOLATION_SNAPSHOT)
- config_single("transaction.isolation=snapshot", false);
- if (g.c_txn_freq != 100)
- config_single("transaction.frequency=100", false);
- }
- if (g.c_isolation_flag == ISOLATION_NOT_SET) {
- switch (mmrand(NULL, 1, 20)) {
- case 1: /* 5% */
- config_single("transaction.isolation=random", false);
- break;
- case 2: /* 5% */
- config_single("transaction.isolation=read-uncommitted", false);
- break;
- case 3: /* 5% */
- config_single("transaction.isolation=read-committed", false);
- break;
- default: /* 85% */
- config_single("transaction.isolation=snapshot", false);
- break;
- }
- if (g.c_isolation_flag == ISOLATION_SNAPSHOT) {
- if (!g.c_txn_timestamps)
- config_single("transaction.timestamps=on", false);
- if (g.c_txn_freq != 100)
- config_single("transaction.frequency=100", false);
- } else {
- if (g.c_prepare)
- config_single("ops.prepare=off", false);
- if (g.c_txn_timestamps)
- config_single("transaction.timestamps=off", false);
- }
+ if (!config_is_perm("transaction.implicit"))
+ config_single("transaction.implicit=0", false);
+ if (!config_is_perm("ops.salvage"))
+ config_single("ops.salvage=off", false);
}
+ if (g.c_logging)
+ config_single("ops.prepare=off", false);
+ if (g.c_txn_implicit)
+ config_single("transaction.timestamps=off", false);
+ if (g.c_salvage)
+ config_single("transaction.timestamps=off", false);
}
/*
@@ -1175,9 +1113,6 @@ config_reset(void)
{
CONFIG *cp;
- if (!config_is_perm("transaction.isolation"))
- g.c_isolation_flag = ISOLATION_NOT_SET;
-
/* Clear temporary allocated configuration data. */
for (cp = c; cp->name != NULL; ++cp) {
F_CLR(cp, C_TEMP);
@@ -1289,9 +1224,6 @@ config_single(const char *s, bool perm)
} else if (strncmp(s, "runs.type", strlen("runs.type")) == 0) {
config_map_file_type(equalp, &g.type);
*cp->vstr = dstrdup(config_file_type(g.type));
- } else if (strncmp(s, "transaction.isolation", strlen("transaction.isolation")) == 0) {
- config_map_isolation(equalp, &g.c_isolation_flag);
- *cp->vstr = dstrdup(equalp);
} else if (strncmp(s, "logging.compression", strlen("logging.compression")) == 0) {
config_map_compression(equalp, &g.c_logging_compression_flag);
*cp->vstr = dstrdup(equalp);
@@ -1475,25 +1407,6 @@ config_map_encryption(const char *s, u_int *vp)
}
/*
- * config_map_isolation --
- * Map an isolation configuration to a flag.
- */
-static void
-config_map_isolation(const char *s, u_int *vp)
-{
- if (strcmp(s, "random") == 0)
- *vp = ISOLATION_RANDOM;
- else if (strcmp(s, "read-uncommitted") == 0)
- *vp = ISOLATION_READ_UNCOMMITTED;
- else if (strcmp(s, "read-committed") == 0)
- *vp = ISOLATION_READ_COMMITTED;
- else if (strcmp(s, "snapshot") == 0)
- *vp = ISOLATION_SNAPSHOT;
- else
- testutil_die(EINVAL, "illegal isolation configuration: %s", s);
-}
-
-/*
* config_is_perm
* Return if a specific configuration entry was permanently set.
*/
diff --git a/src/third_party/wiredtiger/test/format/config.h b/src/third_party/wiredtiger/test/format/config.h
index a06509b0dba..0feb22f202c 100644
--- a/src/third_party/wiredtiger/test/format/config.h
+++ b/src/third_party/wiredtiger/test/format/config.h
@@ -60,14 +60,14 @@ typedef struct {
#define COMPRESSION_LIST " (none | lz4 | snappy | zlib | zstd)"
static CONFIG c[] = {
- /* 5% */
- {"assert.commit_timestamp", "assert commit_timestamp", C_BOOL, 5, 0, 0,
- &g.c_assert_commit_timestamp, NULL},
-
- /* 5% */
- {"assert.read_timestamp", "assert read_timestamp", C_BOOL, 5, 0, 0, &g.c_assert_read_timestamp,
+ /* 2% */
+ {"assert.read_timestamp", "assert read_timestamp", C_BOOL, 2, 0, 0, &g.c_assert_read_timestamp,
NULL},
+ /* 2% */
+ {"assert.write_timestamp", "set write_timestamp_usage and assert write_timestamp", C_BOOL, 2, 0,
+ 0, &g.c_assert_write_timestamp, NULL},
+
/* 20% */
{"backup", "configure backups", C_BOOL, 20, 0, 0, &g.c_backups, NULL},
@@ -98,8 +98,6 @@ static CONFIG c[] = {
{"btree.internal_page_max", "btree internal node maximum size", 0x0, 9, 17, 27,
&g.c_intl_page_max, NULL},
- {"btree.key_gap", "btree page instantiated key gap", 0x0, 0, 20, 20, &g.c_key_gap, NULL},
-
{"btree.key_max", "maximum key size", 0x0, 20, 128, MEGABYTE(10), &g.c_key_max, NULL},
/*
@@ -113,6 +111,8 @@ static CONFIG c[] = {
{"btree.memory_page_max", "maximum cache page size", 0x0, 1, 10, 128, &g.c_memory_page_max, NULL},
+ {"btree.prefix", "common key prefix", C_BOOL, 3, 0, 0, &g.c_prefix, NULL},
+
/* 80% */
{"btree.prefix_compression", "configure prefix compressed keys", C_BOOL, 80, 0, 0,
&g.c_prefix_compression, NULL},
@@ -184,8 +184,8 @@ static CONFIG c[] = {
/*
* 0%
- * FIXME-WT-7418 and FIXME-WT-7416: Temporarily disable import until WT_ROLLBACK error and
- * interaction with backup thread is fixed. Should be 20%
+ * FIXME-WT-7418 and FIXME-WT-7510: Temporarily disable import until WT_ROLLBACK error and
+ * wt_copy_and_sync error is fixed. It should be (C_BOOL, 20, 0, 0).
*/
{"import", "import table from newly created database", C_BOOL, 0, 0, 0, &g.c_import, NULL},
@@ -340,19 +340,11 @@ static CONFIG c[] = {
/* 2% */
{"stress.split_8", "stress splits (#8)", C_BOOL, 2, 0, 0, &g.c_timing_stress_split_8, NULL},
- {"transaction.frequency", "operations inside an explicit transaction (percentage)", 0x0, 1, 100,
- 100, &g.c_txn_freq, NULL},
-
- {"transaction.isolation",
- "isolation level (random | read-uncommitted | read-committed | snapshot)", C_IGNORE | C_STRING,
- 0, 0, 0, NULL, &g.c_isolation},
-
- /* 0% - By default, turned off until fallout has been debugged. */
- {"transaction.rollback_to_stable", "configure rollback_to_stable", C_BOOL, 0, 0, 0,
- &g.c_txn_rollback_to_stable, NULL},
+ {"transaction.implicit", "implicit, without timestamps, transactions (percentage)", 0x0, 0, 100,
+ 100, &g.c_txn_implicit, NULL},
/* 70% */
- {"transaction.timestamps", "configure transaction timestamps", C_BOOL, 70, 0, 0,
+ {"transaction.timestamps", "all transactions (or none), have timestamps", C_BOOL, 80, 0, 0,
&g.c_txn_timestamps, NULL},
{"wiredtiger.config", "wiredtiger_open API configuration string", C_IGNORE | C_STRING, 0, 0, 0,
diff --git a/src/third_party/wiredtiger/test/format/config_compat.c b/src/third_party/wiredtiger/test/format/config_compat.c
index 4a5789bf854..2926d54ca4b 100644
--- a/src/third_party/wiredtiger/test/format/config_compat.c
+++ b/src/third_party/wiredtiger/test/format/config_compat.c
@@ -101,10 +101,6 @@ static const char *list[] = {
"btree.internal_key_truncation",
"internal_page_max=",
"btree.internal_page_max",
- "isolation=",
- "transaction.isolation",
- "key_gap=",
- "btree.key_gap",
"key_max=",
"btree.key_max",
"key_min=",
diff --git a/src/third_party/wiredtiger/test/format/config_compat.sed b/src/third_party/wiredtiger/test/format/config_compat.sed
index 0f43b19fc6a..b90b21332e8 100644
--- a/src/third_party/wiredtiger/test/format/config_compat.sed
+++ b/src/third_party/wiredtiger/test/format/config_compat.sed
@@ -8,7 +8,6 @@ s/^btree.dictionary=/dictionary=/
s/^btree.huffman_value=/huffman_value=/
s/^btree.internal_key_truncation=/internal_key_truncation=/
s/^btree.internal_page_max=/internal_page_max=/
-s/^btree.key_gap=/key_gap=/
s/^btree.key_max=/key_max=/
s/^btree.key_min=/key_min=/
s/^btree.leaf_page_max=/leaf_page_max=/
diff --git a/src/third_party/wiredtiger/test/format/format.h b/src/third_party/wiredtiger/test/format/format.h
index 7aefc071396..cd46f43a781 100644
--- a/src/third_party/wiredtiger/test/format/format.h
+++ b/src/third_party/wiredtiger/test/format/format.h
@@ -140,8 +140,8 @@ typedef struct {
uint32_t c_abort; /* Config values */
uint32_t c_alter;
- uint32_t c_assert_commit_timestamp;
uint32_t c_assert_read_timestamp;
+ uint32_t c_assert_write_timestamp;
uint32_t c_auto_throttle;
char *c_backup_incremental;
uint32_t c_backup_incr_granularity;
@@ -178,8 +178,6 @@ typedef struct {
uint32_t c_insert_pct;
uint32_t c_internal_key_truncation;
uint32_t c_intl_page_max;
- char *c_isolation;
- uint32_t c_key_gap;
uint32_t c_key_max;
uint32_t c_key_min;
uint32_t c_leaf_page_max;
@@ -197,6 +195,7 @@ typedef struct {
uint32_t c_mmap_all;
uint32_t c_modify_pct;
uint32_t c_ops;
+ uint32_t c_prefix;
uint32_t c_prefix_compression;
uint32_t c_prefix_compression_min;
uint32_t c_prepare;
@@ -228,8 +227,7 @@ typedef struct {
uint32_t c_timing_stress_split_7;
uint32_t c_timing_stress_split_8;
uint32_t c_truncate;
- uint32_t c_txn_freq;
- uint32_t c_txn_rollback_to_stable;
+ uint32_t c_txn_implicit;
uint32_t c_txn_timestamps;
uint32_t c_value_max;
uint32_t c_value_min;
@@ -270,13 +268,6 @@ typedef struct {
#define ENCRYPT_ROTN_7 2
u_int c_encryption_flag; /* Encryption flag value */
-#define ISOLATION_NOT_SET 0
-#define ISOLATION_RANDOM 1
-#define ISOLATION_READ_UNCOMMITTED 2
-#define ISOLATION_READ_COMMITTED 3
-#define ISOLATION_SNAPSHOT 4
- u_int c_isolation_flag; /* Isolation flag value */
-
/* The page must be a multiple of the allocation size, and 512 always works. */
#define BLOCK_ALLOCATION_SIZE 512
uint32_t intl_page_max; /* Maximum page sizes */
@@ -284,6 +275,7 @@ typedef struct {
uint64_t rows; /* Total rows */
+ uint32_t prefix_len; /* Common key prefix length */
uint32_t key_rand_len[1031]; /* Key lengths */
} GLOBAL;
extern GLOBAL g;
diff --git a/src/third_party/wiredtiger/test/format/format.sh b/src/third_party/wiredtiger/test/format/format.sh
index 9d462aed0df..a2fcc71c93e 100755
--- a/src/third_party/wiredtiger/test/format/format.sh
+++ b/src/third_party/wiredtiger/test/format/format.sh
@@ -256,6 +256,37 @@ skip_known_errors()
return 1
}
+# Categorize the failures
+# $1 Log file
+categorize_failure()
+{
+ log=$1
+
+ # Add any important configs to be picked from the detailed failed configuration.
+ configs=("backup=" "runs.source" "runs.type" "transaction.isolation" "transaction.rollback_to_stable"
+ "ops.prepare" "transaction.timestamps")
+ count=${#configs[@]}
+
+ search_string=""
+
+ # now loop through the config array
+ for ((i=0; i<$count; i++))
+ do
+ if [ $i == $(($count - 1)) ]
+ then
+ search_string+=${configs[i]}
+ else
+ search_string+="${configs[i]}|"
+ fi
+ done
+
+ echo "############################################"
+ echo "test/format run configuration highlights"
+ echo "############################################"
+ grep -E "$search_string" $log
+ echo "############################################"
+}
+
# Report a failure.
# $1 directory name
report_failure()
@@ -288,6 +319,8 @@ report_failure()
echo "$name: $dir/CONFIG:"
sed 's/^/ /' < $dir/CONFIG
+ categorize_failure $log
+
echo "$name: failure status reported" > $dir/$status
}
diff --git a/src/third_party/wiredtiger/test/format/kv.c b/src/third_party/wiredtiger/test/format/kv.c
index 04e9e0fc46c..32788b86ffb 100644
--- a/src/third_party/wiredtiger/test/format/kv.c
+++ b/src/third_party/wiredtiger/test/format/kv.c
@@ -75,6 +75,10 @@ key_init(void)
for (i = 0; i < WT_ELEMENTS(g.key_rand_len); ++i)
fprintf(fp, "%" PRIu32 "\n", g.key_rand_len[i]);
fclose_and_clear(&fp);
+
+ /* Fill in the common key prefix length (which is added to the key min/max). */
+ if (g.c_prefix != 0)
+ g.prefix_len = mmrand(NULL, 15, 80);
}
/*
@@ -87,7 +91,7 @@ key_gen_init(WT_ITEM *key)
size_t i, len;
char *p;
- len = WT_MAX(KILOBYTE(100), g.c_key_max);
+ len = WT_MAX(KILOBYTE(100), g.c_key_max + g.prefix_len);
p = dmalloc(len);
for (i = 0; i < len; ++i)
p[i] = "abcdefghijklmnopqrstuvwxyz"[i % 26];
@@ -111,45 +115,62 @@ key_gen_teardown(WT_ITEM *key)
/*
* key_gen_common --
- * Key generation code shared between normal and insert key generation.
+ * Row-store key generation code shared between normal and insert key generation.
*/
void
key_gen_common(WT_ITEM *key, uint64_t keyno, const char *const suffix)
{
- int len;
+ uint64_t n;
char *p;
+ const char *bucket;
+
+ testutil_assert(g.type == ROW);
p = key->mem;
/*
- * The key always starts with a 10-digit string (the specified row) followed by two digits, a
- * random number between 1 and 15 if it's an insert, otherwise 00.
+ * The workload we're trying to mimic with a prefix is a long common prefix followed by a record
+ * number, the tricks are creating a prefix that won't re-order keys, and to change the prefix
+ * with some regularity to test prefix boundaries. Split the key space into power-of-2 buckets:
+ * that results in tiny runs of prefix strings at the beginning of the tree, and increasingly
+ * large common prefixes as the tree grows (with a testing sweet spot in the middle). After the
+ * bucket value, append a string of common bytes. The standard, zero-padded key itself sorts
+ * lexicographically, meaning the common key prefix will grow and shrink by a few bytes as the
+ * number increments, which is a good thing for testing.
+ */
+ if (g.prefix_len > 0) {
+ bucket = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
+ for (n = keyno; n > 0; n >>= 1) {
+ if (*bucket == 'z')
+ break;
+ ++bucket;
+ }
+ p[0] = *bucket;
+ memset(p + 1, 'C', g.prefix_len - 1);
+ p += g.prefix_len;
+ }
+
+ /*
+ * After any common prefix, the key starts with a 10-digit string (the specified row) followed
+ * by two digits (a random number between 1 and 15 if it's an insert, otherwise 00).
*/
- u64_to_string_zf(keyno, key->mem, 11);
+ u64_to_string_zf(keyno, p, 11);
p[10] = '.';
p[11] = suffix[0];
p[12] = suffix[1];
- len = 13;
+ p[13] = '/';
/*
- * In a column-store, the key isn't used, it doesn't need a random length.
+ * Because we're doing table lookup for key sizes, we can't set overflow key sizes in the table,
+ * the table isn't big enough to keep our hash from selecting too many big keys and blowing out
+ * the cache. Handle that here, use a really big key 1 in 2500 times.
*/
- if (g.type == ROW) {
- p[len] = '/';
-
- /*
- * Because we're doing table lookup for key sizes, we weren't able to set really big keys
- * sizes in the table, the table isn't big enough to keep our hash from selecting too many
- * big keys and blowing out the cache. Handle that here, use a really big key 1 in 2500
- * times.
- */
- len = keyno % 2500 == 0 && g.c_key_max < KILOBYTE(80) ?
- KILOBYTE(80) :
- (int)g.key_rand_len[keyno % WT_ELEMENTS(g.key_rand_len)];
- }
-
key->data = key->mem;
- key->size = (size_t)len;
+ key->size = g.prefix_len;
+ key->size += keyno % 2500 == 0 && g.c_key_max < KILOBYTE(80) ?
+ KILOBYTE(80) :
+ g.key_rand_len[keyno % WT_ELEMENTS(g.key_rand_len)];
+ testutil_assert(key->size <= key->memsize);
}
static char *val_base; /* Base/original value */
diff --git a/src/third_party/wiredtiger/test/format/ops.c b/src/third_party/wiredtiger/test/format/ops.c
index 0e5f8a30422..3fd5706efad 100644
--- a/src/third_party/wiredtiger/test/format/ops.c
+++ b/src/third_party/wiredtiger/test/format/ops.c
@@ -189,35 +189,20 @@ tinfo_teardown(void)
}
/*
- * Command used before rollback to stable to save the interesting files so we can replay the command
- * as necessary.
- *
- * Redirect the "cd" command to /dev/null so chatty cd implementations don't add the new working
- * directory to our output.
- */
-#define ROLLBACK_STABLE_COPY_CMD \
- "cd %s > /dev/null && " \
- "rm -rf ROLLBACK.copy && mkdir ROLLBACK.copy && " \
- "cp WiredTiger* wt* ROLLBACK.copy/"
-
-/*
- * tinfo_rollback_to_stable_and_check --
- * Do a rollback to stable, then check that changes are correct from what we know in the worker
- * thread structures.
+ * tinfo_rollback_to_stable --
+ * Do a rollback to stable and verify operations.
*/
static void
-tinfo_rollback_to_stable_and_check(WT_SESSION *session)
+tinfo_rollback_to_stable(WT_SESSION *session)
{
WT_CURSOR *cursor;
- WT_DECL_RET;
- char cmd[512];
- testutil_check(__wt_snprintf(cmd, sizeof(cmd), ROLLBACK_STABLE_COPY_CMD, g.home));
- if ((ret = system(cmd)) != 0)
- testutil_die(ret, "rollback to stable copy (\"%s\") failed", cmd);
- trace_msg("%-10s ts=%" PRIu64, "rts", g.stable_timestamp);
+ /* Rollback-to-stable only makes sense for timestamps and on-disk stores. */
+ if (g.c_txn_timestamps == 0 || g.c_in_memory != 0)
+ return;
- g.wts_conn->rollback_to_stable(g.wts_conn, NULL);
+ trace_msg("%-10s ts=%" PRIu64, "rts", g.stable_timestamp);
+ testutil_check(g.wts_conn->rollback_to_stable(g.wts_conn, NULL));
/* Check the saved snap operations for consistency. */
testutil_check(session->open_cursor(session, g.uri, NULL, NULL, &cursor));
@@ -402,8 +387,13 @@ operations(u_int ops_seconds, bool lastrun)
trace_msg("%s", "=============== thread ops stop");
- if (g.c_txn_rollback_to_stable)
- tinfo_rollback_to_stable_and_check(session);
+ /*
+ * The system should be quiescent at this point, call rollback to stable. Generally, we expect
+ * applications to do rollback-to-stable as part of the database open, but calling it outside of
+ * the open path is expected in the case of applications that are "restarting" but skipping the
+ * close/re-open pair.
+ */
+ tinfo_rollback_to_stable(session);
if (lastrun) {
tinfo_teardown();
@@ -418,20 +408,16 @@ operations(u_int ops_seconds, bool lastrun)
* Begin a timestamped transaction.
*/
static void
-begin_transaction_ts(TINFO *tinfo, u_int *iso_configp)
+begin_transaction_ts(TINFO *tinfo)
{
TINFO **tlp;
WT_DECL_RET;
WT_SESSION *session;
uint64_t ts;
- const char *config;
char buf[64];
session = tinfo->session;
- config = "isolation=snapshot";
- *iso_configp = ISOLATION_SNAPSHOT;
-
/*
* Transaction reads are normally repeatable, but WiredTiger timestamps allow rewriting commits,
* that is, applications can specify at commit time the timestamp at which the commit happens.
@@ -444,7 +430,7 @@ begin_transaction_ts(TINFO *tinfo, u_int *iso_configp)
for (ts = UINT64_MAX, tlp = tinfo_list; *tlp != NULL; ++tlp)
ts = WT_MIN(ts, (*tlp)->commit_ts);
if (ts != 0) {
- wiredtiger_begin_transaction(session, config);
+ wiredtiger_begin_transaction(session, NULL);
/*
* If the timestamp has aged out of the system, we'll get EINVAL when we try and set it.
@@ -463,7 +449,7 @@ begin_transaction_ts(TINFO *tinfo, u_int *iso_configp)
testutil_check(session->rollback_transaction(session, NULL));
}
- wiredtiger_begin_transaction(session, config);
+ wiredtiger_begin_transaction(session, NULL);
/*
* Otherwise, pick a current timestamp.
@@ -487,40 +473,19 @@ begin_transaction_ts(TINFO *tinfo, u_int *iso_configp)
/*
* begin_transaction --
- * Choose an isolation configuration and begin a transaction.
+ * Begin a non-timestamp transaction.
*/
static void
-begin_transaction(TINFO *tinfo, u_int *iso_configp)
+begin_transaction(TINFO *tinfo, const char *iso_config)
{
WT_SESSION *session;
- u_int v;
- const char *config;
session = tinfo->session;
- if ((v = g.c_isolation_flag) == ISOLATION_RANDOM)
- v = mmrand(&tinfo->rnd, 1, 3);
- switch (v) {
- case 1:
- v = ISOLATION_READ_UNCOMMITTED;
- config = "isolation=read-uncommitted";
- break;
- case 2:
- v = ISOLATION_READ_COMMITTED;
- config = "isolation=read-committed";
- break;
- case 3:
- default:
- v = ISOLATION_SNAPSHOT;
- config = "isolation=snapshot";
- break;
- }
- *iso_configp = v;
-
- wiredtiger_begin_transaction(session, config);
+ wiredtiger_begin_transaction(session, iso_config);
snap_op_init(tinfo, WT_TS_NONE, false);
- trace_op(tinfo, "begin %s", config);
+ trace_op(tinfo, "begin %s", iso_config);
}
/*
@@ -641,7 +606,7 @@ prepare_transaction(TINFO *tinfo)
#define OP_FAILED(notfound_ok) \
do { \
positioned = false; \
- if (intxn && (ret == WT_CACHE_FULL || ret == WT_ROLLBACK || ret == WT_CACHE_FULL)) \
+ if (intxn && (ret == WT_CACHE_FULL || ret == WT_ROLLBACK)) \
goto rollback; \
testutil_assert( \
(notfound_ok && ret == WT_NOTFOUND) || ret == WT_CACHE_FULL || ret == WT_ROLLBACK); \
@@ -660,16 +625,6 @@ prepare_transaction(TINFO *tinfo)
} while (0)
/*
- * When in a transaction on the live table with snapshot isolation, track operations for later
- * repetition.
- */
-#define SNAP_TRACK(tinfo, op) \
- do { \
- if (intxn && iso_config == ISOLATION_SNAPSHOT) \
- snap_track(tinfo, op); \
- } while (0)
-
-/*
* ops_open_session --
* Create a new session/cursor pair for the thread.
*/
@@ -702,6 +657,21 @@ ops_open_session(TINFO *tinfo)
tinfo->cursor = cursor;
}
+/* Isolation configuration. */
+typedef enum {
+ ISOLATION_READ_COMMITTED,
+ ISOLATION_READ_UNCOMMITTED,
+ ISOLATION_SNAPSHOT
+} iso_level_t;
+
+/* When in an explicit snapshot isolation transaction, track operations for later
+ * repetition. */
+#define SNAP_TRACK(tinfo, op) \
+ do { \
+ if (intxn && iso_level == ISOLATION_SNAPSHOT) \
+ snap_track(tinfo, op); \
+ } while (0)
+
/*
* ops --
* Per-thread operations.
@@ -713,10 +683,12 @@ ops(void *arg)
WT_CURSOR *cursor;
WT_DECL_RET;
WT_SESSION *session;
+ iso_level_t iso_level;
thread_op op;
uint64_t reset_op, session_op, truncate_op;
uint32_t range, rnd;
- u_int i, j, iso_config;
+ u_int i, j;
+ const char *iso_config;
bool greater_than, intxn, next, positioned, prepared;
tinfo = arg;
@@ -733,7 +705,7 @@ ops(void *arg)
else
__wt_random_init(&tinfo->rnd);
- iso_config = ISOLATION_RANDOM; /* -Wconditional-uninitialized */
+ iso_level = ISOLATION_SNAPSHOT; /* -Wconditional-uninitialized */
/* Set the first operation where we'll create sessions and cursors. */
cursor = NULL;
@@ -769,9 +741,9 @@ ops(void *arg)
}
/*
- * If not in a transaction, reset the session now and then, just to make sure that operation
- * gets tested. The test is not for equality, we have to do the reset outside of a
- * transaction so we aren't likely to get an exact match.
+ * If not in a transaction, reset the session periodically to make sure that operation is
+ * tested. The test is not for equality, resets must be done outside of transactions so we
+ * aren't likely to get an exact match.
*/
if (!intxn && tinfo->ops > reset_op) {
testutil_check(session->reset(session));
@@ -781,42 +753,66 @@ ops(void *arg)
}
/*
- * If not in a transaction, have a live handle and running in a timestamp world,
- * occasionally repeat a timestamped operation.
+ * If not in a transaction and in a timestamp world, occasionally repeat a timestamped
+ * operation.
*/
if (!intxn && g.c_txn_timestamps && mmrand(&tinfo->rnd, 1, 15) == 1) {
++tinfo->search;
snap_repeat_single(cursor, tinfo);
}
+ /* If not in a transaction and in a timestamp world, start a transaction. */
+ if (!intxn && g.c_txn_timestamps) {
+ iso_level = ISOLATION_SNAPSHOT;
+ begin_transaction_ts(tinfo);
+ intxn = true;
+ }
+
/*
- * If not in a transaction and have a live handle, choose an isolation level and start a
- * transaction some percentage of the time.
+ * If not in a transaction and not in a timestamp world, start a transaction some percentage
+ * of the time.
*/
- if (!intxn && (g.c_txn_timestamps || mmrand(&tinfo->rnd, 1, 100) <= g.c_txn_freq)) {
- if (g.c_txn_timestamps)
- begin_transaction_ts(tinfo, &iso_config);
- else
- begin_transaction(tinfo, &iso_config);
+ if (!intxn && mmrand(&tinfo->rnd, 1, 100) < g.c_txn_implicit) {
+ iso_level = ISOLATION_SNAPSHOT;
+ iso_config = "isolation=snapshot";
+
+ /* Occasionally do reads at an isolation level lower than snapshot. */
+ switch (mmrand(NULL, 1, 20)) {
+ case 1:
+ iso_level = ISOLATION_READ_COMMITTED; /* 5% */
+ iso_config = "isolation=read-committed";
+ break;
+ case 2:
+ iso_level = ISOLATION_READ_UNCOMMITTED; /* 5% */
+ iso_config = "isolation=read-uncommitted";
+ break;
+ }
+
+ begin_transaction(tinfo, iso_config);
intxn = true;
}
- /* Select an operation. */
+ /*
+ * Select an operation: all updates must be in snapshot isolation, modify must be in an
+ * explicit transaction.
+ */
op = READ;
- i = mmrand(&tinfo->rnd, 1, 100);
- if (i < g.c_delete_pct && tinfo->ops > truncate_op) {
- op = TRUNCATE;
-
- /* Pick the next truncate operation. */
- truncate_op += mmrand(&tinfo->rnd, 20000, 100000);
- } else if (i < g.c_delete_pct)
- op = REMOVE;
- else if (i < g.c_delete_pct + g.c_insert_pct)
- op = INSERT;
- else if (i < g.c_delete_pct + g.c_insert_pct + g.c_modify_pct)
- op = MODIFY;
- else if (i < g.c_delete_pct + g.c_insert_pct + g.c_modify_pct + g.c_write_pct)
- op = UPDATE;
+ if (iso_level == ISOLATION_SNAPSHOT) {
+ i = mmrand(&tinfo->rnd, 1, 100);
+ if (i < g.c_delete_pct && tinfo->ops > truncate_op) {
+ op = TRUNCATE;
+
+ /* Pick the next truncate operation. */
+ truncate_op += mmrand(&tinfo->rnd, 20000, 100000);
+ } else if (i < g.c_delete_pct)
+ op = REMOVE;
+ else if (i < g.c_delete_pct + g.c_insert_pct)
+ op = INSERT;
+ else if (intxn && i < g.c_delete_pct + g.c_insert_pct + g.c_modify_pct)
+ op = MODIFY;
+ else if (i < g.c_delete_pct + g.c_insert_pct + g.c_modify_pct + g.c_write_pct)
+ op = UPDATE;
+ }
/* Select a row. */
tinfo->keyno = mmrand(&tinfo->rnd, 1, (u_int)g.rows);
@@ -838,10 +834,10 @@ ops(void *arg)
}
/*
- * Optionally reserve a row. Reserving a row before a read isn't all that sensible, but not
- * unexpected, either.
+ * Optionally reserve a row, it's an update so it requires snapshot isolation. Reserving a
+ * row before a read isn't all that sensible, but not unexpected, either.
*/
- if (intxn && mmrand(&tinfo->rnd, 0, 20) == 1) {
+ if (intxn && iso_level == ISOLATION_SNAPSHOT && mmrand(&tinfo->rnd, 0, 20) == 1) {
switch (g.type) {
case ROW:
ret = row_reserve(tinfo, cursor, positioned);
@@ -853,8 +849,7 @@ ops(void *arg)
}
if (ret == 0) {
positioned = true;
-
- __wt_yield(); /* Let other threads proceed. */
+ __wt_yield(); /* Encourage races */
} else
WRITE_OP_FAILED(true);
}
@@ -888,13 +883,6 @@ ops(void *arg)
WRITE_OP_FAILED(false);
break;
case MODIFY:
- /*
- * Change modify into update if not part of a snapshot isolation transaction, modify
- * isn't supported in those cases.
- */
- if (!intxn || iso_config != ISOLATION_SNAPSHOT)
- goto update_instead_of_chosen_op;
-
++tinfo->update;
switch (g.type) {
case ROW:
@@ -1050,17 +1038,17 @@ update_instead_of_chosen_op:
testutil_check(cursor->reset(cursor));
/*
- * Continue if not in a transaction, else add more operations to the transaction half the
- * time.
+ * No post-operation work is needed outside of a transaction. If in a transaction, add more
+ * operations to the transaction half the time.
*/
if (!intxn || (rnd = mmrand(&tinfo->rnd, 1, 10)) > 5)
continue;
/*
- * Ending a transaction. If on a live handle and the transaction was configured for snapshot
- * isolation, repeat the operations and confirm the results are unchanged.
+ * Ending a transaction. If the transaction was configured for snapshot isolation, repeat
+ * the operations and confirm the results are unchanged.
*/
- if (intxn && iso_config == ISOLATION_SNAPSHOT) {
+ if (intxn && iso_level == ISOLATION_SNAPSHOT) {
__wt_yield(); /* Encourage races */
ret = snap_repeat_txn(cursor, tinfo);
@@ -1069,13 +1057,10 @@ update_instead_of_chosen_op:
goto rollback;
}
- /*
- * If prepare configured, prepare the transaction 10% of the time.
- */
+ /* If prepare configured, prepare the transaction 10% of the time. */
prepared = false;
if (g.c_prepare && mmrand(&tinfo->rnd, 1, 10) == 1) {
- ret = prepare_transaction(tinfo);
- if (ret != 0)
+ if ((ret = prepare_transaction(tinfo)) != 0)
WRITE_OP_FAILED(false);
__wt_yield(); /* Encourage races */
@@ -1083,7 +1068,8 @@ update_instead_of_chosen_op:
}
/*
- * If we're in a transaction, commit 40% of the time and rollback 10% of the time.
+ * If we're in a transaction, commit 40% of the time and rollback 10% of the time (we
+ * continued to add operations to the transaction the remaining 50% of the time).
*/
switch (rnd) {
case 1:
@@ -1351,8 +1337,8 @@ order_error_col:
* to the row's key.) Keys are strings with terminating '/' values, so absent key
* corruption, we can simply do the underlying string conversion on the key string.
*/
- keyno_prev = strtoul(tinfo->key->data, NULL, 10);
- keyno = strtoul(key.data, NULL, 10);
+ keyno_prev = strtoul((char *)tinfo->key->data + g.prefix_len, NULL, 10);
+ keyno = strtoul((char *)key.data + g.prefix_len, NULL, 10);
if (incrementing) {
if (keyno_prev != keyno && keyno_prev + 1 != keyno)
goto order_error_row;
diff --git a/src/third_party/wiredtiger/test/format/snap.c b/src/third_party/wiredtiger/test/format/snap.c
index bc2b58b2f3c..1c934a5d187 100644
--- a/src/third_party/wiredtiger/test/format/snap.c
+++ b/src/third_party/wiredtiger/test/format/snap.c
@@ -29,11 +29,6 @@
#include "format.h"
/*
- * Issue a warning when there enough consecutive unsuccessful checks for rollback to stable.
- */
-#define WARN_RTS_NO_CHECK 5
-
-/*
* snap_init --
* Initialize the repeatable operation tracking.
*/
@@ -41,14 +36,16 @@ void
snap_init(TINFO *tinfo)
{
/*
- * We maintain two snap lists. The current one is indicated by tinfo->s, and keeps the most
- * recent operations. The other one is used when we are running with rollback_to_stable. When
- * each thread notices that the stable timestamp has changed, it stashes the current snap list
- * and starts fresh with the other snap list. After we've completed a rollback_to_stable, we can
- * the secondary snap list to see the state of keys/values seen and updated at the time of the
- * rollback.
+ * We maintain two snap lists, where the current one is indicated by tinfo->s, and keeps the
+ * most recent operations.
+ *
+ * The other one is used when we are running timestamp transactions with rollback_to_stable.
+ * When each thread notices that the stable timestamp has changed, it stashes the current snap
+ * list and starts fresh with the other snap list. After we've completed a rollback_to_stable,
+ * we can use the secondary snap list to see the state of keys/values seen and updated at the time
+ * of the rollback.
*/
- if (g.c_txn_rollback_to_stable) {
+ if (g.c_txn_timestamps) {
tinfo->s = &tinfo->snap_states[1];
tinfo->snap_list = dcalloc(SNAP_LIST_SIZE, sizeof(SNAP_OPS));
tinfo->snap_end = &tinfo->snap_list[SNAP_LIST_SIZE];
@@ -113,7 +110,7 @@ snap_op_init(TINFO *tinfo, uint64_t read_ts, bool repeatable_reads)
++tinfo->opid;
- if (g.c_txn_rollback_to_stable) {
+ if (g.c_txn_timestamps) {
/*
* If the stable timestamp has changed and we've advanced beyond it, preserve the current
* snapshot history up to this point, we'll use it verify rollback_to_stable. Switch our
@@ -528,40 +525,45 @@ snap_repeat_update(TINFO *tinfo, bool committed)
* Repeat one operation.
*/
static void
-snap_repeat(WT_CURSOR *cursor, TINFO *tinfo, SNAP_OPS *snap, bool rollback_allowed)
+snap_repeat(WT_CURSOR *cursor, TINFO *tinfo, SNAP_OPS *snap)
{
WT_DECL_RET;
WT_SESSION *session;
+#define MAX_RETRY_ON_ROLLBACK 1000
+ u_int max_retry;
char buf[64];
session = cursor->session;
- /*
- * Start a new transaction. Set the read timestamp. Verify the record. Discard the transaction.
- */
- wiredtiger_begin_transaction(session, "isolation=snapshot");
+ trace_op(tinfo, "repeat %" PRIu64 " ts=%" PRIu64 " {%s}", snap->keyno, snap->ts,
+ trace_bytes(tinfo, snap->vdata, snap->vsize));
- /*
- * If the timestamp has aged out of the system, we'll get EINVAL when we try and set it.
- */
+ /* Start a transaction with a read-timestamp and verify the record. */
testutil_check(__wt_snprintf(buf, sizeof(buf), "read_timestamp=%" PRIx64, snap->ts));
- ret = session->timestamp_transaction(session, buf);
- if (ret == 0) {
- trace_op(tinfo, "repeat %" PRIu64 " ts=%" PRIu64 " {%s}", snap->keyno, snap->ts,
- trace_bytes(tinfo, snap->vdata, snap->vsize));
-
- /* The only expected error is rollback. */
- ret = snap_verify(cursor, tinfo, snap);
+ for (max_retry = 0; max_retry < MAX_RETRY_ON_ROLLBACK; ++max_retry, __wt_yield()) {
+ wiredtiger_begin_transaction(session, "isolation=snapshot");
- if (ret != 0 && (!rollback_allowed || (ret != WT_ROLLBACK && ret != WT_CACHE_FULL)))
- testutil_check(ret);
- } else if (ret == EINVAL)
- snap_ts_clear(tinfo, snap->ts);
- else
+ /* EINVAL means the timestamp has aged out of the system. */
+ if ((ret = session->timestamp_transaction(session, buf)) == EINVAL) {
+ snap_ts_clear(tinfo, snap->ts);
+ break;
+ }
testutil_check(ret);
- /* Discard the transaction. */
+ /*
+ * The only expected error is rollback (as a read-only transaction, cache-full shouldn't
+ * matter to us). Keep retrying after a rollback: as a repeatable read we should eventually
+ * succeed; yield to let eviction catch up.
+ */
+ if ((ret = snap_verify(cursor, tinfo, snap)) == 0)
+ break;
+ testutil_assert(ret == WT_ROLLBACK);
+
+ testutil_check(session->rollback_transaction(session, NULL));
+ }
+ testutil_assert(max_retry < MAX_RETRY_ON_ROLLBACK);
+
testutil_check(session->rollback_transaction(session, NULL));
}
@@ -593,7 +595,7 @@ snap_repeat_single(WT_CURSOR *cursor, TINFO *tinfo)
if (count == 0)
return;
- snap_repeat(cursor, tinfo, snap, true);
+ snap_repeat(cursor, tinfo, snap);
}
/*
@@ -626,9 +628,8 @@ snap_repeat_rollback(WT_CURSOR *cursor, TINFO **tinfo_array, size_t tinfo_count)
for (statenum = 0; statenum < WT_ELEMENTS(tinfo->snap_states); statenum++) {
state = &tinfo->snap_states[statenum];
for (snap = state->snap_state_list; snap < state->snap_state_end; ++snap) {
- if (snap->repeatable && snap->ts <= g.stable_timestamp &&
- snap->ts >= g.oldest_timestamp) {
- snap_repeat(cursor, tinfo, snap, false);
+ if (snap->repeatable && snap->ts <= g.stable_timestamp) {
+ snap_repeat(cursor, tinfo, snap);
++count;
if (count % 100 == 0) {
testutil_check(__wt_snprintf(
@@ -646,6 +647,7 @@ snap_repeat_rollback(WT_CURSOR *cursor, TINFO **tinfo_array, size_t tinfo_count)
__wt_snprintf(buf, sizeof(buf), "rollback_to_stable: %" PRIu32 " ops repeated", count));
track(buf, 0ULL, NULL);
if (count == 0) {
+#define WARN_RTS_NO_CHECK 5
if (++g.rts_no_check >= WARN_RTS_NO_CHECK)
fprintf(stderr,
"Warning: %" PRIu32 " consecutive runs with no rollback_to_stable checking\n", count);
diff --git a/src/third_party/wiredtiger/test/format/util.c b/src/third_party/wiredtiger/test/format/util.c
index f35b2a8416c..8c5efd007ee 100644
--- a/src/third_party/wiredtiger/test/format/util.c
+++ b/src/third_party/wiredtiger/test/format/util.c
@@ -282,7 +282,7 @@ timestamp_once(bool allow_lag, bool final)
/*
* If a lag is permitted, move the oldest timestamp half the way to the current
- * "all_durable" timestamp. Move the stable timestamp to "all_durable".
+ * "all_durable" timestamp. Move the stable timestamp to "all_durable".
*/
if (allow_lag)
g.oldest_timestamp = (all_durable + g.oldest_timestamp) / 2;
diff --git a/src/third_party/wiredtiger/test/format/wts.c b/src/third_party/wiredtiger/test/format/wts.c
index 3b37b3a43d1..f95d7903c94 100644
--- a/src/third_party/wiredtiger/test/format/wts.c
+++ b/src/third_party/wiredtiger/test/format/wts.c
@@ -319,10 +319,8 @@ create_object(WT_CONNECTION *conn)
CONFIG_APPEND(p, ",value_format=%" PRIu32 "t", g.c_bitcnt);
break;
case ROW:
- if (g.c_prefix_compression)
- CONFIG_APPEND(p, ",prefix_compression_min=%" PRIu32, g.c_prefix_compression_min);
- else
- CONFIG_APPEND(p, ",prefix_compression=false");
+ CONFIG_APPEND(p, ",prefix_compression=%s,prefix_compression_min=%" PRIu32,
+ g.c_prefix_compression == 0 ? "false" : "true", g.c_prefix_compression_min);
if (g.c_reverse)
CONFIG_APPEND(p, ",collator=reverse");
/* FALLTHROUGH */
@@ -351,22 +349,16 @@ create_object(WT_CONNECTION *conn)
if (g.c_compression_flag != COMPRESS_NONE)
CONFIG_APPEND(p, ",block_compressor=\"%s\"", compressor(g.c_compression_flag));
- /* Configure Btree internal key truncation. */
+ /* Configure Btree. */
CONFIG_APPEND(p, ",internal_key_truncate=%s", g.c_internal_key_truncation ? "true" : "false");
-
- /* Configure Btree page key gap. */
- CONFIG_APPEND(p, ",key_gap=%" PRIu32, g.c_key_gap);
-
- /* Configure Btree split page percentage. */
CONFIG_APPEND(p, ",split_pct=%" PRIu32, g.c_split_pct);
- /*
- * Assertions. Assertions slow down the code for additional diagnostic checking.
- */
- if (g.c_txn_timestamps && g.c_assert_commit_timestamp)
- CONFIG_APPEND(p, ",write_timestamp_usage=key_consistent,assert=(write_timestamp=on)");
- if (g.c_txn_timestamps && g.c_assert_read_timestamp)
- CONFIG_APPEND(p, ",assert=(read_timestamp=always)");
+ /* Assertions: assertions slow down the code for additional diagnostic checking. */
+ if (g.c_assert_read_timestamp)
+ CONFIG_APPEND(p, ",assert=(read_timestamp=%s)", g.c_txn_timestamps ? "always" : "never");
+ if (g.c_assert_write_timestamp)
+ CONFIG_APPEND(p, ",assert=(write_timestamp=on),write_timestamp_usage=%s",
+ g.c_txn_timestamps ? "always" : "never");
/* Configure LSM. */
if (DATASOURCE("lsm")) {
diff --git a/src/third_party/wiredtiger/test/suite/test_backup22.py b/src/third_party/wiredtiger/test/suite/test_backup22.py
new file mode 100644
index 00000000000..06d1a81ef7c
--- /dev/null
+++ b/src/third_party/wiredtiger/test/suite/test_backup22.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+#
+# Public Domain 2014-present MongoDB, Inc.
+# Public Domain 2008-2014 WiredTiger, Inc.
+#
+# This is free and unencumbered software released into the public domain.
+#
+# Anyone is free to copy, modify, publish, use, compile, sell, or
+# distribute this software, either in source code form or as a compiled
+# binary, for any purpose, commercial or non-commercial, and by any
+# means.
+#
+# In jurisdictions that recognize copyright laws, the author or authors
+# of this software dedicate any and all copyright interest in the
+# software to the public domain. We make this dedication for the benefit
+# of the public at large and to the detriment of our heirs and
+# successors. We intend this dedication to be an overt act of
+# relinquishment in perpetuity of all present and future rights to this
+# software under copyright law.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+import wiredtiger, os
+from wtscenario import make_scenarios
+from wtbackup import backup_base
+
+# test_backup22.py
+# Test interaction between import and incremental backup.
+# Test the functionality of importing dropped tables in incremental backup.
+#
+class test_backup22(backup_base):
+ create_config = 'allocation_size=512,key_format=i,value_format=i'
+ # Backup directory name
+ dir='backup.dir'
+ incr_dir = 'incr_backup.dir'
+ uri = 'test_backup22'
+ scenarios = make_scenarios([
+ ('import_with_metadata', dict(repair=False,checkpoint=False)),
+ ('import_repair', dict(repair=True,checkpoint=False)),
+ ('import_with_metadata_ckpt', dict(repair=False,checkpoint=True)),
+ ('import_repair_ckpt', dict(repair=True,checkpoint=True)),
+ ])
+
+ def test_import_with_open_backup_cursor(self):
+ os.mkdir(self.dir)
+ os.mkdir(self.incr_dir)
+
+ # Create and populate the table.
+ table_uri = 'table:' + self.uri
+ self.session.create(table_uri, self.create_config)
+ cursor = self.session.open_cursor(table_uri)
+ for i in range(1, 1000):
+ cursor[i] = i
+ cursor.close()
+ self.session.checkpoint()
+
+ # Export the metadata for the file.
+ file_uri = 'file:' + self.uri + '.wt'
+ c = self.session.open_cursor('metadata:', None, None)
+ original_db_table_config = c[table_uri]
+ original_db_file_config = c[file_uri]
+ c.close()
+
+ config = 'incremental=(enabled,granularity=4k,this_id="ID1")'
+ bkup_c = self.session.open_cursor('backup:', None, config)
+ self.take_full_backup(self.dir, bkup_c)
+ bkup_c.close()
+ self.session.drop(table_uri, 'remove_files=false')
+
+ # First construct the config string for the default or repair import scenario,
+ # then call create to import the table.
+ if self.repair:
+ import_config = 'import=(enabled,repair=true)'
+ else:
+ import_config = '{},import=(enabled,repair=false,file_metadata=({}))'.format(
+ original_db_table_config, original_db_file_config)
+ self.session.create(table_uri, import_config)
+
+ if self.checkpoint:
+ self.session.checkpoint()
+ # Perform incremental backup with id 2 on an empty directory. We want an empty
+ # directory because we expect all files to be copied over in their entirety.
+ self.take_incr_backup(self.incr_dir, 2)
+ self.compare_backups(self.uri, self.dir, self.incr_dir)
+
+if __name__ == '__main__':
+ wttest.run()
diff --git a/src/third_party/wiredtiger/test/suite/test_prepare15.py b/src/third_party/wiredtiger/test/suite/test_prepare15.py
new file mode 100644
index 00000000000..4c4ba49a182
--- /dev/null
+++ b/src/third_party/wiredtiger/test/suite/test_prepare15.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python
+#
+# Public Domain 2014-present MongoDB, Inc.
+# Public Domain 2008-2014 WiredTiger, Inc.
+#
+# This is free and unencumbered software released into the public domain.
+#
+# Anyone is free to copy, modify, publish, use, compile, sell, or
+# distribute this software, either in source code form or as a compiled
+# binary, for any purpose, commercial or non-commercial, and by any
+# means.
+#
+# In jurisdictions that recognize copyright laws, the author or authors
+# of this software dedicate any and all copyright interest in the
+# software to the public domain. We make this dedication for the benefit
+# of the public at large and to the detriment of our heirs and
+# successors. We intend this dedication to be an overt act of
+# relinquishment in perpetuity of all present and future rights to this
+# software under copyright law.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+import wttest
+from wiredtiger import WT_NOTFOUND
+from wtscenario import make_scenarios
+
+def timestamp_str(t):
+ return '%x' % t
+
+# test_prepare15.py
+# Test that rolling back a prepared transaction removes the on-disk key
+# or replaces it with the history store update, and that committing retains
+# the changes, when both insert and remove operations are from the same transaction.
+class test_prepare15(wttest.WiredTigerTestCase):
+ in_memory_values = [
+ ('no_inmem', dict(in_memory=False)),
+ ('inmem', dict(in_memory=True))
+ ]
+
+ key_format_values = [
+ ('column', dict(key_format='r')),
+ ('integer_row', dict(key_format='i')),
+ ]
+
+ txn_end_values = [
+ ('commit', dict(commit=True)),
+ ('rollback', dict(commit=False)),
+ ]
+
+ scenarios = make_scenarios(in_memory_values, key_format_values, txn_end_values)
+
+ def conn_config(self):
+ config = 'cache_size=50MB'
+ if self.in_memory:
+ config += ',in_memory=true'
+ else:
+ config += ',in_memory=false'
+ return config
+
+ def test_prepare_restore_hs_update(self):
+ # Prepare transactions for column store table is not yet supported.
+ if self.key_format == 'r':
+ self.skipTest('Prepare transactions for column store table is not yet supported')
+
+ # Create a table without logging.
+ uri = "table:prepare15"
+ create_config = 'allocation_size=512,key_format=S,value_format=S'
+ self.session.create(uri, create_config)
+
+ # Pin oldest and stable timestamps to 10.
+ self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
+ ',stable_timestamp=' + timestamp_str(10))
+
+ valuea = 'a'
+ valueb = 'a'
+
+ # Perform an update and remove.
+ cursor = self.session.open_cursor(uri)
+ self.session.begin_transaction()
+ cursor[str(0)] = valuea
+ self.session.commit_transaction('commit_timestamp=' + timestamp_str(20))
+
+ self.session.begin_transaction()
+ cursor.set_key(str(0))
+ cursor.remove()
+ self.session.commit_transaction('commit_timestamp=' + timestamp_str(30))
+ cursor.close()
+
+ # Perform an update and remove.
+ s = self.conn.open_session()
+ cursor = s.open_cursor(uri)
+ s.begin_transaction()
+ cursor[str(0)] = valueb
+ cursor.set_key(str(0))
+ cursor.remove()
+ cursor.close()
+ s.prepare_transaction('prepare_timestamp=' + timestamp_str(40))
+
+ # Configure debug behavior on a cursor to evict the page positioned on when the reset API is used.
+ evict_cursor = self.session.open_cursor(uri, None, "debug=(release_evict)")
+
+ # Search for the key so we position our cursor on the page that we want to evict.
+ self.session.begin_transaction('ignore_prepare = true')
+ evict_cursor.set_key(str(0))
+ self.assertEquals(evict_cursor.search(), WT_NOTFOUND)
+ evict_cursor.reset()
+ evict_cursor.close()
+ self.session.commit_transaction()
+
+ if self.commit:
+ # Commit the prepared transaction
+ s.timestamp_transaction('commit_timestamp=' + timestamp_str(50))
+ s.timestamp_transaction('durable_timestamp=' + timestamp_str(60))
+ s.commit_transaction()
+ else:
+ # Rollback the prepared transaction
+ s.rollback_transaction()
+
+ # Configure debug behavior on a cursor to evict the page positioned on when the reset API is used.
+ evict_cursor = self.session.open_cursor(uri, None, "debug=(release_evict)")
+
+ # Search for the key so we position our cursor on the page that we want to evict.
+ self.session.begin_transaction()
+ evict_cursor.set_key(str(0))
+ self.assertEquals(evict_cursor.search(), WT_NOTFOUND)
+ evict_cursor.reset()
+ evict_cursor.close()
+ self.session.commit_transaction()
+
+ self.session.begin_transaction('read_timestamp=' + timestamp_str(20))
+ cursor2 = self.session.open_cursor(uri)
+ cursor2.set_key(str(0))
+ self.assertEquals(cursor2.search(), 0)
+ self.assertEqual(cursor2.get_value(), valuea)
+ self.session.commit_transaction()
+
+ def test_prepare_not_found(self):
+ # Prepare transactions for column store table is not yet supported.
+ if self.key_format == 'r':
+ self.skipTest('Prepare transactions for column store table is not yet supported')
+
+ # Create a table without logging.
+ uri = "table:prepare15"
+ create_config = 'allocation_size=512,key_format=S,value_format=S'
+ self.session.create(uri, create_config)
+
+ # Pin oldest and stable timestamps to 10.
+ self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
+ ',stable_timestamp=' + timestamp_str(10))
+
+ value = 'a'
+
+ # Perform an update and remove.
+ s = self.conn.open_session()
+ cursor = s.open_cursor(uri)
+ s.begin_transaction()
+ cursor[str(0)] = value
+ cursor.set_key(str(0))
+ cursor.remove()
+ cursor.close()
+ s.prepare_transaction('prepare_timestamp=' + timestamp_str(20))
+
+ # Configure debug behavior on a cursor to evict the page positioned on when the reset API is used.
+ evict_cursor = self.session.open_cursor(uri, None, "debug=(release_evict)")
+
+ # Search for the key so we position our cursor on the page that we want to evict.
+ self.session.begin_transaction("ignore_prepare = true")
+ evict_cursor.set_key(str(0))
+ self.assertEquals(evict_cursor.search(), WT_NOTFOUND)
+ evict_cursor.reset()
+ evict_cursor.close()
+ self.session.commit_transaction()
+
+ if self.commit:
+ # Commit the prepared transaction
+ s.timestamp_transaction('commit_timestamp=' + timestamp_str(30))
+ s.timestamp_transaction('durable_timestamp=' + timestamp_str(40))
+ s.commit_transaction()
+ else:
+ # Rollback the prepared transaction
+ s.rollback_transaction()
+
+ # Configure debug behavior on a cursor to evict the page positioned on when the reset API is used.
+ evict_cursor = self.session.open_cursor(uri, None, "debug=(release_evict)")
+
+ # Search for the key so we position our cursor on the page that we want to evict.
+ self.session.begin_transaction()
+ evict_cursor.set_key(str(0))
+ self.assertEquals(evict_cursor.search(), WT_NOTFOUND)
+ evict_cursor.reset()
+ evict_cursor.close()
+ self.session.commit_transaction()
+
+ self.session.begin_transaction()
+ cursor2 = self.session.open_cursor(uri)
+ cursor2.set_key(str(0))
+ self.assertEquals(cursor2.search(), WT_NOTFOUND)
+ self.session.commit_transaction()
diff --git a/src/third_party/wiredtiger/test/suite/test_tiered04.py b/src/third_party/wiredtiger/test/suite/test_tiered04.py
index 0347647031f..75d4fac1b19 100755
--- a/src/third_party/wiredtiger/test/suite/test_tiered04.py
+++ b/src/third_party/wiredtiger/test/suite/test_tiered04.py
@@ -35,7 +35,7 @@ StorageSource = wiredtiger.StorageSource # easy access to constants
class test_tiered04(wttest.WiredTigerTestCase):
# If the 'uri' changes all the other names must change with it.
- fileuri = 'file:test_tiered04-0000000001.wt'
+ fileuri = 'file:test_tiered04-0000000001.wtobj'
objuri = 'object:test_tiered04-0000000001.wtobj'
tiereduri = "tiered:test_tiered04"
uri = "table:test_tiered04"
diff --git a/src/third_party/wiredtiger/test/suite/test_tiered06.py b/src/third_party/wiredtiger/test/suite/test_tiered06.py
index e0614cd8c1b..c797936a82b 100755
--- a/src/third_party/wiredtiger/test/suite/test_tiered06.py
+++ b/src/third_party/wiredtiger/test/suite/test_tiered06.py
@@ -64,7 +64,7 @@ class test_tiered06(wttest.WiredTigerTestCase):
local = self.get_local_storage_source()
os.mkdir("objects")
- fs = local.ss_customize_file_system(session, "./objects", "cluster1-", "Secret", None)
+ fs = local.ss_customize_file_system(session, "./objects", "Secret", None)
# The object doesn't exist yet.
self.assertFalse(fs.fs_exist(session, 'foobar'))
@@ -95,33 +95,29 @@ class test_tiered06(wttest.WiredTigerTestCase):
fh.fh_lock(session, False)
fh.close(session)
- self.assertEquals(fs.fs_directory_list(session, '', ''), ['foobar'])
+ # Nothing is in the directory list until a flush.
+ self.assertEquals(fs.fs_directory_list(session, '', ''), [])
- # Newly created objects are in the list.
fh = fs.fs_open_file(session, 'zzz', FileSystem.open_file_type_data, FileSystem.open_create)
- # TODO: tiered: the newly created file should be visible, but it is not yet.
- # self.assertEquals(sorted(fs.fs_directory_list(session, '', '')), ['foobar', 'zzz' ])
-
# Sync merely syncs to the local disk.
fh.fh_sync(session)
fh.close(session) # zero length
- self.assertEquals(sorted(fs.fs_directory_list(session, '', '')), ['foobar', 'zzz' ])
+ self.assertEquals(sorted(fs.fs_directory_list(session, '', '')), [])
# See that we can rename objects.
fs.fs_rename(session, 'zzz', 'yyy', 0)
- self.assertEquals(sorted(fs.fs_directory_list(session, '', '')), ['foobar', 'yyy' ])
+ self.assertEquals(sorted(fs.fs_directory_list(session, '', '')), [])
# See that we can remove objects.
fs.fs_remove(session, 'yyy', 0)
- self.assertEquals(fs.fs_directory_list(session, '', ''), ['foobar'])
- # TODO: tiered: flush tests disabled, as the interface
- # for flushing will be changed.
- return
+ # Nothing is in the directory list until a flush.
+ self.assertEquals(fs.fs_directory_list(session, '', ''), [])
- # Flushing doesn't do anything that's visible.
- local.ss_flush(session, fs, None, '')
+ # Flushing moves the file.
+ local.ss_flush(session, fs, 'foobar', 'foobar', None)
+ local.ss_flush_finish(session, fs, 'foobar', 'foobar', None)
self.assertEquals(fs.fs_directory_list(session, '', ''), ['foobar'])
# Files that have been flushed cannot be manipulated.
@@ -145,7 +141,7 @@ class test_tiered06(wttest.WiredTigerTestCase):
local = self.get_local_storage_source()
os.mkdir("objects")
- fs = local.ss_customize_file_system(session, "./objects", "cluster1-", "Secret", None)
+ fs = local.ss_customize_file_system(session, "./objects", "Secret", None)
# We call these 4K chunks of data "blocks" for this test, but that doesn't
# necessarily relate to WT block sizing.
@@ -208,11 +204,23 @@ class test_tiered06(wttest.WiredTigerTestCase):
cachedir1 = "./cache1"
cachedir2 = "./cache2"
- def check(self, fs, prefix, expect):
+ # Add a suffix to each in a list
+ def suffix(self, lst, sfx):
+ return [x + '.' + sfx for x in lst]
+
+ def check_dirlist(self, fs, prefix, expect):
# We don't require any sorted output for directory lists,
# so we'll sort before comparing.'
got = sorted(fs.fs_directory_list(self.session, '', prefix))
- expect = sorted(expect)
+ expect = sorted(self.suffix(expect, 'wtobj'))
+ self.assertEquals(got, expect)
+
+ # Check for data files in the WiredTiger home directory.
+ def check_home(self, expect):
+ # Get list of all .wt files in home, prune out the WiredTiger produced ones
+ got = sorted(list(os.listdir(self.home)))
+ got = [x for x in got if not x.startswith('WiredTiger') and x.endswith('.wt')]
+ expect = sorted(self.suffix(expect, 'wt'))
self.assertEquals(got, expect)
# Check that objects are "in the cloud" after a flush.
@@ -220,12 +228,25 @@ class test_tiered06(wttest.WiredTigerTestCase):
# objectdir1 or objectdir2
def check_objects(self, expect1, expect2):
got = sorted(list(os.listdir(self.objectdir1)))
- expect = sorted(expect1)
+ expect = sorted(self.suffix(expect1, 'wtobj'))
self.assertEquals(got, expect)
got = sorted(list(os.listdir(self.objectdir2)))
- expect = sorted(expect2)
+ expect = sorted(self.suffix(expect2, 'wtobj'))
self.assertEquals(got, expect)
+ # Check that objects are in the cache directory after flush_finish.
+ def check_caches(self, expect1, expect2):
+ got = sorted(list(os.listdir(self.cachedir1)))
+ expect = sorted(self.suffix(expect1, 'wtobj'))
+ self.assertEquals(got, expect)
+ got = sorted(list(os.listdir(self.cachedir2)))
+ expect = sorted(self.suffix(expect2, 'wtobj'))
+ self.assertEquals(got, expect)
+
+ def create_wt_file(self, name):
+ with open(name + '.wt', 'w') as f:
+ f.write('hello')
+
def test_local_file_systems(self):
# Test using various buckets, hosts
@@ -244,11 +265,11 @@ class test_tiered06(wttest.WiredTigerTestCase):
errmsg = '/No such file or directory/'
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
lambda: local.ss_customize_file_system(
- session, "./objects1", "pre1-", "k1", bad_config), errmsg)
+ session, "./objects1", "k1", bad_config), errmsg)
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
lambda: local.ss_customize_file_system(
- session, "./objects_BAD", "pre1-", "k1", config1), errmsg)
+ session, "./objects_BAD", "k1", config1), errmsg)
# Create an empty file, try to use it as a directory.
with open("some_file", "w"):
@@ -256,143 +277,75 @@ class test_tiered06(wttest.WiredTigerTestCase):
errmsg = '/Invalid argument/'
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
lambda: local.ss_customize_file_system(
- session, "some_file", "pre1-", "k1", config1), errmsg)
+ session, "some_file", "k1", config1), errmsg)
# Now create some file systems that should succeed.
# Use either different bucket directories or different prefixes,
# so activity that happens in the various file systems should be independent.
- fs1 = local.ss_customize_file_system(session, "./objects1", "pre1-", "k1", config1)
- fs2 = local.ss_customize_file_system(session, "./objects2", "pre1-", "k2", config2)
- fs3 = local.ss_customize_file_system(session, "./objects1", "pre2-", "k3", config1)
- fs4 = local.ss_customize_file_system(session, "./objects2", "pre2-", "k4", config2)
-
- # Create files in the file systems with some name overlap
- self.create_with_fs(fs1, 'alpaca')
- self.create_with_fs(fs2, 'bear')
- self.create_with_fs(fs3, 'crab')
- self.create_with_fs(fs4, 'deer')
+ fs1 = local.ss_customize_file_system(session, "./objects1", "k1", config1)
+ fs2 = local.ss_customize_file_system(session, "./objects2", "k2", config2)
+
+ # Create files in the wt home directory.
for a in ['beagle', 'bird', 'bison', 'bat']:
- self.create_with_fs(fs1, a)
- for a in ['bird', 'bison', 'bat', 'badger']:
- self.create_with_fs(fs2, a)
- for a in ['bison', 'bat', 'badger', 'baboon']:
- self.create_with_fs(fs3, a)
- for a in ['bat', 'badger', 'baboon', 'beagle']:
- self.create_with_fs(fs4, a)
-
- # Make sure we see the expected file names
- self.check(fs1, '', ['alpaca', 'beagle', 'bird', 'bison', 'bat'])
- self.check(fs1, 'a', ['alpaca'])
- self.check(fs1, 'b', ['beagle', 'bird', 'bison', 'bat'])
- self.check(fs1, 'c', [])
- self.check(fs1, 'd', [])
-
- self.check(fs2, '', ['bear', 'bird', 'bison', 'bat', 'badger'])
- self.check(fs2, 'a', [])
- self.check(fs2, 'b', ['bear', 'bird', 'bison', 'bat', 'badger'])
- self.check(fs2, 'c', [])
- self.check(fs2, 'd', [])
-
- self.check(fs3, '', ['crab', 'bison', 'bat', 'badger', 'baboon'])
- self.check(fs3, 'a', [])
- self.check(fs3, 'b', ['bison', 'bat', 'badger', 'baboon'])
- self.check(fs3, 'c', ['crab'])
- self.check(fs3, 'd', [])
-
- self.check(fs4, '', ['deer', 'bat', 'badger', 'baboon', 'beagle'])
- self.check(fs4, 'a', [])
- self.check(fs4, 'b', ['bat', 'badger', 'baboon', 'beagle'])
- self.check(fs4, 'c', [])
- self.check(fs4, 'd', ['deer'])
-
- # Flushing copies files to one of the subdirectories:
- # "./objects1" (for fs1 and fs3)
- # "./objects2" (for fs2 and fs4)
- #
- # After every flush, we'll check that the right objects appear in the right directory.
- # check_objects takes two lists: objects expected to be in ./objects1,
- # and objects expected to be in ./objects2 .
+ self.create_wt_file(a)
+ for a in ['cat', 'cougar', 'coyote', 'cub']:
+ self.create_wt_file(a)
+
+ # Everything is in wt home, nothing in the file system yet.
+ self.check_home(['beagle', 'bird', 'bison', 'bat', 'cat', 'cougar', 'coyote', 'cub'])
+ self.check_dirlist(fs1, '', [])
+ self.check_dirlist(fs2, '', [])
+ self.check_caches([], [])
self.check_objects([], [])
- # TODO: tiered: flush tests disabled, as the interface
- # for flushing will be changed.
- enable_fs_flush_tests = False
- if enable_fs_flush_tests:
- local.ss_flush(session, fs4, None, '')
- self.check_objects([], ['pre2-deer', 'pre2-bat', 'pre2-badger', 'pre2-baboon', 'pre2-beagle'])
-
- local.ss_flush(session, fs3, 'badger', '')
- self.check_objects(['pre2-badger'],
- ['pre2-deer', 'pre2-bat', 'pre2-badger', 'pre2-baboon', 'pre2-beagle'])
-
- #local.ss_flush(session, fs3, 'c', '') # make sure we don't flush prefixes
- self.check_objects(['pre2-badger'],
- ['pre2-deer', 'pre2-bat', 'pre2-badger', 'pre2-baboon', 'pre2-beagle'])
-
- local.ss_flush(session, fs3, 'b', '') # or suffixes
- self.check_objects(['pre2-badger'],
- ['pre2-deer', 'pre2-bat', 'pre2-badger', 'pre2-baboon', 'pre2-beagle'])
-
- local.ss_flush(session, fs3, 'crab', '')
- self.check_objects(['pre2-crab', 'pre2-badger'],
- ['pre2-deer', 'pre2-bat', 'pre2-badger', 'pre2-baboon', 'pre2-beagle'])
-
- local.ss_flush(session, fs3, 'crab', '') # should do nothing
- self.check_objects(['pre2-crab', 'pre2-badger'],
- ['pre2-deer', 'pre2-bat', 'pre2-badger', 'pre2-baboon', 'pre2-beagle'])
-
- local.ss_flush(session, None, None, '') # flush everything else
- self.check_objects(['pre1-alpaca', 'pre1-beagle', 'pre1-bird', 'pre1-bison', 'pre1-bat',
- 'pre2-crab', 'pre2-bison', 'pre2-bat', 'pre2-badger', 'pre2-baboon'],
- ['pre1-bear', 'pre1-bird', 'pre1-bison', 'pre1-bat', 'pre1-badger',
- 'pre2-deer', 'pre2-bat', 'pre2-badger', 'pre2-baboon', 'pre2-beagle'])
-
- local.ss_flush(session, None, None, '') # should do nothing
- self.check_objects(['pre1-alpaca', 'pre1-beagle', 'pre1-bird', 'pre1-bison', 'pre1-bat',
- 'pre2-crab', 'pre2-bison', 'pre2-bat', 'pre2-badger', 'pre2-baboon'],
- ['pre1-bear', 'pre1-bird', 'pre1-bison', 'pre1-bat', 'pre1-badger',
- 'pre2-deer', 'pre2-bat', 'pre2-badger', 'pre2-baboon', 'pre2-beagle'])
-
- self.create_with_fs(fs4, 'zebra') # should do nothing in the objects directories
- self.create_with_fs(fs4, 'yeti') # should do nothing in the objects directories
- self.check_objects(['pre1-alpaca', 'pre1-beagle', 'pre1-bird', 'pre1-bison', 'pre1-bat',
- 'pre2-crab', 'pre2-bison', 'pre2-bat', 'pre2-badger', 'pre2-baboon'],
- ['pre1-bear', 'pre1-bird', 'pre1-bison', 'pre1-bat', 'pre1-badger',
- 'pre2-deer', 'pre2-bat', 'pre2-badger', 'pre2-baboon', 'pre2-beagle'])
-
- # Try remove and rename, should be possible until we flush
- self.check(fs4, '', ['deer', 'bat', 'badger', 'baboon', 'beagle', 'yeti', 'zebra'])
- fs4.fs_remove(session, 'yeti', 0)
- self.check(fs4, '', ['deer', 'bat', 'badger', 'baboon', 'beagle', 'zebra'])
- fs4.fs_rename(session, 'zebra', 'okapi', 0)
- self.check(fs4, '', ['deer', 'bat', 'badger', 'baboon', 'beagle', 'okapi'])
- local.ss_flush(session, None, None, '')
- self.check(fs4, '', ['deer', 'bat', 'badger', 'baboon', 'beagle', 'okapi'])
- self.check_objects(['pre1-alpaca', 'pre1-beagle', 'pre1-bird', 'pre1-bison', 'pre1-bat',
- 'pre2-crab', 'pre2-bison', 'pre2-bat', 'pre2-badger', 'pre2-baboon'],
- ['pre1-bear', 'pre1-bird', 'pre1-bison', 'pre1-bat', 'pre1-badger',
- 'pre2-deer', 'pre2-bat', 'pre2-badger', 'pre2-baboon', 'pre2-beagle',
- 'pre2-okapi'])
-
- errmsg = '/rename of flushed file not allowed/'
- self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
- lambda: fs4.fs_rename(session, 'okapi', 'zebra', 0), errmsg)
-
- # XXX
- # At the moment, removal of flushed files is not allowed - as flushed files are immutable.
- # We may need to explicitly evict flushed files from cache directory via the API, if so,
- # the API to do that might be on the local store object, not the file system.
- errmsg = '/remove of flushed file not allowed/'
- self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
- lambda: fs4.fs_remove(session, 'okapi', 0), errmsg)
-
- # No change since last time.
- self.check(fs4, '', ['deer', 'bat', 'badger', 'baboon', 'beagle', 'okapi'])
- self.check_objects(['pre1-alpaca', 'pre1-beagle', 'pre1-bird', 'pre1-bison', 'pre1-bat',
- 'pre2-crab', 'pre2-bison', 'pre2-bat', 'pre2-badger', 'pre2-baboon'],
- ['pre1-bear', 'pre1-bird', 'pre1-bison', 'pre1-bat', 'pre1-badger',
- 'pre2-deer', 'pre2-bat', 'pre2-badger', 'pre2-baboon', 'pre2-beagle',
- 'pre2-okapi'])
+ # A flush copies to the cloud, nothing is removed.
+ local.ss_flush(session, fs1, 'beagle.wt', 'beagle.wtobj')
+ self.check_home(['beagle', 'bird', 'bison', 'bat', 'cat', 'cougar', 'coyote', 'cub'])
+ self.check_dirlist(fs1, '', [])
+ self.check_dirlist(fs2, '', [])
+ self.check_caches([], [])
+ self.check_objects(['beagle'], [])
+
+ # Bad file to flush
+ errmsg = '/No such file/'
+ self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
+ lambda: local.ss_flush(session, fs1, 'bad.wt', 'bad.wtobj'), errmsg)
+
+ # It's okay to flush again, nothing changes
+ local.ss_flush(session, fs1, 'beagle.wt', 'beagle.wtobj')
+ self.check_home(['beagle', 'bird', 'bison', 'bat', 'cat', 'cougar', 'coyote', 'cub'])
+ self.check_dirlist(fs1, '', [])
+ self.check_dirlist(fs2, '', [])
+ self.check_caches([], [])
+ self.check_objects(['beagle'], [])
+
+ # When we flush_finish, the local file will move to the cache directory
+ local.ss_flush_finish(session, fs1, 'beagle.wt', 'beagle.wtobj')
+ self.check_home(['bird', 'bison', 'bat', 'cat', 'cougar', 'coyote', 'cub'])
+ self.check_dirlist(fs1, '', ['beagle'])
+ self.check_dirlist(fs2, '', [])
+ self.check_caches(['beagle'], [])
+ self.check_objects(['beagle'], [])
+
+        # Do some more in each file system
+ local.ss_flush(session, fs1, 'bison.wt', 'bison.wtobj')
+ local.ss_flush(session, fs2, 'cat.wt', 'cat.wtobj')
+ local.ss_flush(session, fs1, 'bat.wt', 'bat.wtobj')
+ local.ss_flush_finish(session, fs2, 'cat.wt', 'cat.wtobj')
+ local.ss_flush(session, fs2, 'cub.wt', 'cub.wtobj')
+ local.ss_flush_finish(session, fs1, 'bat.wt', 'bat.wtobj')
+
+ self.check_home(['bird', 'bison', 'cougar', 'coyote', 'cub'])
+ self.check_dirlist(fs1, '', ['beagle', 'bat'])
+ self.check_dirlist(fs2, '', ['cat'])
+ self.check_caches(['beagle', 'bat'], ['cat'])
+ self.check_objects(['beagle', 'bat', 'bison'], ['cat', 'cub'])
+
+ # Test directory listing prefixes
+ self.check_dirlist(fs1, '', ['beagle', 'bat'])
+ self.check_dirlist(fs1, 'ba', ['bat'])
+ self.check_dirlist(fs1, 'be', ['beagle'])
+ self.check_dirlist(fs1, 'x', [])
if __name__ == '__main__':
wttest.run()