author     Luke Chen <luke.chen@mongodb.com>                    2020-06-15 17:27:50 +1000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>     2020-06-15 08:24:01 +0000
commit     cac9d7cc518bea3da884fb84aef9f559e91c76bf (patch)
tree       8fa52d3c4f0c7ba81de45f7a5f2562b138644374
parent     b2f840c9efca29820fb48a89741fa18ace8a33b1 (diff)
download   mongo-cac9d7cc518bea3da884fb84aef9f559e91c76bf.tar.gz
Import wiredtiger: 930bbacc3761a10483875585dbd4ecb58271d57e from branch mongodb-4.4
ref: f650b1124b..930bbacc37 for: 4.5.1

WT-6344 Clean-up timestamped updates to cater for globally visible full updates
WT-6408 test/format bulk load can set an incorrect row count
WT-6413 Remove globally visible check in __wt_checkpoint_close
WT-6414 Block running rebalance with timestamp set in test format
WT-6418 Account for aborted updates when doing the first scan of updates before inserting them to the history store
WT-6419 Make sure we dump core on Evergreen PPC machines
-rw-r--r--  src/third_party/wiredtiger/import.data             2
-rw-r--r--  src/third_party/wiredtiger/src/history/hs.c       24
-rw-r--r--  src/third_party/wiredtiger/src/txn/txn_ckpt.c     12
-rwxr-xr-x  src/third_party/wiredtiger/test/evergreen.yml      5
-rw-r--r--  src/third_party/wiredtiger/test/format/bulk.c     45
-rw-r--r--  src/third_party/wiredtiger/test/format/config.c    6
-rwxr-xr-x  src/third_party/wiredtiger/test/format/smoke.sh    3
7 files changed, 58 insertions(+), 39 deletions(-)
diff --git a/src/third_party/wiredtiger/import.data b/src/third_party/wiredtiger/import.data
index c657e8107df..3c04510d191 100644
--- a/src/third_party/wiredtiger/import.data
+++ b/src/third_party/wiredtiger/import.data
@@ -2,5 +2,5 @@
"vendor": "wiredtiger",
"github": "wiredtiger/wiredtiger.git",
"branch": "mongodb-4.4",
- "commit": "f650b1124b18cb4bccd61ca822ed19157206cc7e"
+ "commit": "930bbacc3761a10483875585dbd4ecb58271d57e"
}
diff --git a/src/third_party/wiredtiger/src/history/hs.c b/src/third_party/wiredtiger/src/history/hs.c
index 8dd46f6889e..03d36bf5d07 100644
--- a/src/third_party/wiredtiger/src/history/hs.c
+++ b/src/third_party/wiredtiger/src/history/hs.c
@@ -615,7 +615,7 @@ __wt_hs_insert_updates(WT_SESSION_IMPL *session, WT_PAGE *page, WT_MULTI *multi)
WT_MODIFY entries[MAX_REVERSE_MODIFY_NUM];
WT_MODIFY_VECTOR modifies;
WT_SAVE_UPD *list;
- WT_UPDATE *first_non_ts_upd, *oldest_upd, *prev_upd, *upd;
+ WT_UPDATE *first_non_ts_upd, *non_aborted_upd, *oldest_upd, *prev_upd, *upd;
WT_HS_TIME_POINT stop_time_point;
wt_off_t hs_size;
uint64_t insert_cnt, max_hs_size;
@@ -689,7 +689,7 @@ __wt_hs_insert_updates(WT_SESSION_IMPL *session, WT_PAGE *page, WT_MULTI *multi)
* newer than a TOMBSTONE must be a full update.
*
* The algorithm walks from the oldest update, or the most recently inserted into history
- * store update. To the newest update and build full updates along the way. It sets the stop
+ * store update, to the newest update and build full updates along the way. It sets the stop
* time point of the update to the start time point of the next update, squashes the updates
* that are from the same transaction and of the same start timestamp, calculates reverse
* modification if prev_upd is a MODIFY, and inserts the update to the history store.
@@ -707,10 +707,13 @@ __wt_hs_insert_updates(WT_SESSION_IMPL *session, WT_PAGE *page, WT_MULTI *multi)
* tombstone.
* 4) We have a single tombstone on the chain, it is simply ignored.
*/
- for (prev_upd = NULL; upd != NULL; prev_upd = upd, upd = upd->next) {
+ for (non_aborted_upd = prev_upd = NULL; upd != NULL;
+ prev_upd = non_aborted_upd, upd = upd->next) {
if (upd->txnid == WT_TXN_ABORTED)
continue;
+ non_aborted_upd = upd;
+
WT_ERR(__wt_modify_vector_push(&modifies, upd));
/*
@@ -752,6 +755,21 @@ __wt_hs_insert_updates(WT_SESSION_IMPL *session, WT_PAGE *page, WT_MULTI *multi)
prev_upd = upd = NULL;
+ /*
+ * Trim from the end until there is a full update. We need this if we are dealing with
+ * updates without timestamps, and there are timestamped modify updates at the end of update
+ * chain that are not relevant due to newer full updates without timestamps.
+ */
+ for (; modifies.size > 0;) {
+ __wt_modify_vector_peek(&modifies, &upd);
+ if (upd->type == WT_UPDATE_MODIFY) {
+ WT_ASSERT(session, F_ISSET(upd, WT_UPDATE_MASKED_BY_NON_TS_UPDATE));
+ __wt_modify_vector_pop(&modifies, &upd);
+ } else
+ break;
+ }
+ upd = NULL;
+
/* Construct the oldest full update. */
WT_ASSERT(session, modifies.size > 0);
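
Taken together, the hs.c hunks above do two things: the walk now remembers the previous non-aborted update (so an aborted update is never used for squashing or stop time points), and trailing timestamped MODIFY updates masked by a newer full update without a timestamp are trimmed off the vector before the oldest full update is constructed. The sketch below illustrates only that control flow; the struct, the ABORTED sentinel, and the array-based stack are simplified stand-ins for WT_UPDATE, WT_TXN_ABORTED and the __wt_modify_vector_* helpers, not WiredTiger code.

#include <stddef.h>
#include <stdint.h>

#define ABORTED UINT64_MAX

struct upd {
    uint64_t txnid;
    int is_modify;              /* stand-in for type == WT_UPDATE_MODIFY */
    struct upd *next;
};

static void
walk_and_trim(struct upd *chain, struct upd **stack, size_t cap, size_t *np)
{
    struct upd *non_aborted, *prev, *upd;
    size_t n;

    n = 0;

    /* An aborted update is skipped and never becomes the "previous" update. */
    for (non_aborted = prev = NULL, upd = chain; upd != NULL;
         prev = non_aborted, upd = upd->next) {
        if (upd->txnid == ABORTED)
            continue;
        non_aborted = upd;
        if (n < cap)
            stack[n++] = upd;   /* push, like __wt_modify_vector_push */
    }
    (void)prev;                 /* prev drives squashing/stop points in the real code */

    /* Trim trailing MODIFYs until a full update is on top of the stack. */
    while (n > 0 && stack[n - 1]->is_modify)
        --n;                    /* pop, like __wt_modify_vector_pop */

    *np = n;
}
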
diff --git a/src/third_party/wiredtiger/src/txn/txn_ckpt.c b/src/third_party/wiredtiger/src/txn/txn_ckpt.c
index f8914a33dac..50df011eb0b 100644
--- a/src/third_party/wiredtiger/src/txn/txn_ckpt.c
+++ b/src/third_party/wiredtiger/src/txn/txn_ckpt.c
@@ -1831,15 +1831,9 @@ __wt_checkpoint_close(WT_SESSION_IMPL *session, bool final)
if (final && !metadata)
return (__wt_evict_file(session, WT_SYNC_DISCARD));
- /*
- * If closing an unmodified file, check that no update is required for active readers.
- */
- if (!btree->modified && !bulk) {
- WT_RET(__wt_txn_update_oldest(session, WT_TXN_OLDEST_STRICT | WT_TXN_OLDEST_WAIT));
- return (__wt_txn_visible_all(session, btree->rec_max_txn, btree->rec_max_timestamp) ?
- __wt_evict_file(session, WT_SYNC_DISCARD) :
- EBUSY);
- }
+ /* Closing an unmodified file. */
+ if (!btree->modified && !bulk)
+ return (__wt_evict_file(session, WT_SYNC_DISCARD));
/*
* Don't flush data from modified trees independent of system-wide checkpoint when either there
diff --git a/src/third_party/wiredtiger/test/evergreen.yml b/src/third_party/wiredtiger/test/evergreen.yml
index b3b16040e9d..9bf45be39dd 100755
--- a/src/third_party/wiredtiger/test/evergreen.yml
+++ b/src/third_party/wiredtiger/test/evergreen.yml
@@ -171,8 +171,8 @@ functions:
script: |
set -o errexit
set -o verbose
+ ${format_test_setting|}
for i in $(seq ${times|1}); do
- ${format_test_setting|}
${test_env_vars|} ./format.sh ${smp_command|} ${format_test_script_args|} 2>&1
done
"many dbs test":
@@ -2204,8 +2204,6 @@ tasks:
- func: "compile wiredtiger with builtins"
- func: "format test script"
vars:
- # Make sure we dump core on failure
- format_test_setting: ulimit -c unlimited
#run for 2 hours ( 2 * 60 = 120 minutes), use default config
format_test_script_args: -e "SEGFAULT_SIGNALS=all" -b "catchsegv ./t" -t 120
@@ -2536,6 +2534,7 @@ buildvariants:
run_on:
- ubuntu1804-power8-test
expansions:
+ format_test_setting: ulimit -c unlimited
smp_command: -j $(grep -c ^processor /proc/cpuinfo)
make_command: PATH=/opt/mongodbtoolchain/v3/bin:$PATH make
test_env_vars:
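
In the evergreen.yml hunks, `ulimit -c unlimited` is now issued once before the loop and supplied through the PPC build variant's expansions, so a crashing format run on those machines leaves a core file behind. For readers unfamiliar with the setting, the snippet below shows the process-level equivalent via setrlimit(2); it is illustrative only and not part of this change.

#include <stdio.h>
#include <sys/resource.h>

int
main(void)
{
    struct rlimit rl;

    /* Equivalent of `ulimit -c unlimited`: allow core dumps of any size. */
    rl.rlim_cur = RLIM_INFINITY;
    rl.rlim_max = RLIM_INFINITY;
    if (setrlimit(RLIMIT_CORE, &rl) != 0) {
        perror("setrlimit(RLIMIT_CORE)");
        return (1);
    }
    return (0);
}
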
diff --git a/src/third_party/wiredtiger/test/format/bulk.c b/src/third_party/wiredtiger/test/format/bulk.c
index d11d0060664..89e0406c3cb 100644
--- a/src/third_party/wiredtiger/test/format/bulk.c
+++ b/src/third_party/wiredtiger/test/format/bulk.c
@@ -80,7 +80,7 @@ wts_load(void)
WT_DECL_RET;
WT_ITEM key, value;
WT_SESSION *session;
- uint32_t keyno;
+ uint32_t committed_keyno, keyno, v;
bool is_bulk;
conn = g.wts_conn;
@@ -112,19 +112,7 @@ wts_load(void)
if (g.c_txn_timestamps)
bulk_begin_transaction(session);
- for (keyno = 0; ++keyno <= g.c_rows;) {
- /* Do some checking every 10K operations. */
- if (keyno % 10000 == 0) {
- /* Report on progress. */
- track("bulk load", keyno, NULL);
-
- /* Restart the enclosing transaction so we don't overflow the cache. */
- if (g.c_txn_timestamps) {
- bulk_commit_transaction(session);
- bulk_begin_transaction(session);
- }
- }
-
+ for (committed_keyno = keyno = 0; ++keyno <= g.c_rows;) {
key_gen(&key, keyno);
val_gen(NULL, &value, keyno);
@@ -176,21 +164,34 @@ wts_load(void)
g.c_delete_pct += g.c_insert_pct - 5;
g.c_insert_pct = 5;
}
- g.c_delete_pct += g.c_write_pct / 2;
- g.c_write_pct = g.c_write_pct / 2;
+ v = g.c_write_pct / 2;
+ g.c_delete_pct += v;
+ g.c_write_pct -= v;
break;
}
+
+ /* Restart the enclosing transaction every 5K operations so we don't overflow the cache. */
+ if (keyno % 5000 == 0) {
+ /* Report on progress. */
+ track("bulk load", keyno, NULL);
+
+ if (g.c_txn_timestamps) {
+ bulk_commit_transaction(session);
+ committed_keyno = keyno;
+ bulk_begin_transaction(session);
+ }
+ }
}
/*
- * We may have exited the loop early, reset our counters to match our insert count. If the count
- * changed, rewrite the CONFIG file so reopens aren't surprised.
+ * Ideally, the insert loop runs until the number of rows plus one, in which case row counts are
+ * correct. If the loop exited early, reset the counters and rewrite the CONFIG file (so reopens
+ * aren't surprised).
*/
- --keyno;
- if (g.rows != keyno) {
- g.rows = keyno;
- g.c_rows = (uint32_t)keyno;
+ if (keyno != g.c_rows + 1) {
+ g.rows = committed_keyno;
+ g.c_rows = (uint32_t)committed_keyno;
config_print(false);
}
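
The bulk.c hunks move the progress check to the bottom of the loop, commit every 5,000 rows instead of every 10,000, and record the last committed key number so that an early exit (WT-6408) resets the row count to the committed prefix rather than to an uncommitted value. A minimal sketch of that pattern follows; begin_txn, commit_txn, rollback_txn and insert_row are hypothetical stand-ins for the test harness calls, and the early-exit handling is simplified relative to the real code.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the harness's transaction and insert calls. */
extern void begin_txn(void);
extern void commit_txn(void);
extern void rollback_txn(void);
extern bool insert_row(uint32_t keyno);    /* false: stop loading early */

static uint32_t
bulk_load(uint32_t rows)
{
    uint32_t committed_keyno, keyno;

    begin_txn();
    for (committed_keyno = keyno = 0; ++keyno <= rows;) {
        if (!insert_row(keyno)) {
            /* Early exit: discard the uncommitted tail of the load. */
            rollback_txn();
            return (committed_keyno);
        }

        /* Restart the enclosing transaction every 5K rows to bound cache use. */
        if (keyno % 5000 == 0) {
            commit_txn();
            committed_keyno = keyno;       /* rows up to here are durable */
            begin_txn();
        }
    }
    commit_txn();

    /* The loop ran to completion: keyno is rows + 1 and the full count stands. */
    return (rows);
}
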
diff --git a/src/third_party/wiredtiger/test/format/config.c b/src/third_party/wiredtiger/test/format/config.c
index e2f933526bf..1db9cc5854c 100644
--- a/src/third_party/wiredtiger/test/format/config.c
+++ b/src/third_party/wiredtiger/test/format/config.c
@@ -930,6 +930,12 @@ config_transaction(void)
if (g.c_txn_freq != 100 && config_is_perm("transaction.frequency"))
testutil_die(EINVAL, "timestamps require transaction frequency set to 100");
}
+ /* FIXME-WT-6410: temporarily disable rebalance with timestamps. */
+ if (g.c_txn_timestamps && g.c_rebalance) {
+ if (config_is_perm("ops.rebalance"))
+ testutil_die(EINVAL, "rebalance cannot run with timestamps");
+ config_single("ops.rebalance=off", false);
+ }
if (g.c_isolation_flag == ISOLATION_SNAPSHOT && config_is_perm("transaction.isolation")) {
if (!g.c_txn_timestamps && config_is_perm("transaction.timestamps"))
testutil_die(EINVAL, "snapshot isolation requires timestamps");
diff --git a/src/third_party/wiredtiger/test/format/smoke.sh b/src/third_party/wiredtiger/test/format/smoke.sh
index 067ef8e2589..82162874d9e 100755
--- a/src/third_party/wiredtiger/test/format/smoke.sh
+++ b/src/third_party/wiredtiger/test/format/smoke.sh
@@ -18,4 +18,5 @@ args="$args runs.threads=4 "
$TEST_WRAPPER ./t $args runs.type=row
# Force a rebalance to occur with statistics logging to test the utility
-$TEST_WRAPPER ./t $args runs.type=row statistics.server=1 ops.rebalance=1
+# FIXME-WT-6410: temporarily disable running rebalance with timestamps
+# $TEST_WRAPPER ./t $args runs.type=row statistics.server=1 ops.rebalance=1