author    A. Jesse Jiryu Davis <jesse@mongodb.com>  2019-03-29 17:48:58 -0400
committer A. Jesse Jiryu Davis <jesse@mongodb.com>  2019-04-08 11:44:59 -0400
commit    f8f872e029ba3b1f32d8499c912756d48dc1a03b (patch)
tree      8e024a7746bfbf12f296b0bcb45ab73a17779922 /jstests
parent    612457dcfc3ba939dc8474f227f9166520a4a583 (diff)
SERVER-36494 Test that active txn entries aren't truncated
Add tests for initial sync, recovery, and the inMemory storage engine. Also, avoid taking a global X lock in replSetReconfig; an IX lock is sufficient.
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/core/txns/libs/prepare_helpers.js                 66
-rw-r--r--  jstests/replsets/initial_sync_preserves_active_txns.js    76
-rw-r--r--  jstests/replsets/inmemory_preserves_active_txns.js       110
-rw-r--r--  jstests/replsets/recovery_preserves_active_txns.js        89
-rw-r--r--  jstests/replsets/rollover_preserves_active_txns.js        53
5 files changed, 352 insertions(+), 42 deletions(-)
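
All of the new and updated tests exercise the same bound: the oplog may only be truncated up to the oldest active transaction's startOpTime, which the server derives from config.transactions. For orientation, the sketch below shows how that bound relates to the collection the tests inspect; the startOpTime.ts field is the one the tests assert on, but the query itself is illustrative and not the server's internal calculation.

// Illustrative sketch (not part of this patch): the oldest "startOpTime" among
// config.transactions entries that still have one marks the earliest oplog
// entry that truncation must preserve while those transactions remain active.
function oldestActiveTxnTimestamp(conn) {
    let oldest = null;
    conn.getDB("config").transactions.find({startOpTime: {$exists: true}}).forEach((doc) => {
        const ts = doc.startOpTime.ts;
        if (oldest === null || bsonWoCompare({ts: ts}, {ts: oldest}) < 0) {
            oldest = ts;
        }
    });
    return oldest;  // null when no active transaction is holding back truncation
}

While a transaction is prepared, this value would be expected to equal its prepareTimestamp, which is exactly what the tests assert against config.transactions.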
diff --git a/jstests/core/txns/libs/prepare_helpers.js b/jstests/core/txns/libs/prepare_helpers.js
index 4d87fd9ee07..95efbc8bbea 100644
--- a/jstests/core/txns/libs/prepare_helpers.js
+++ b/jstests/core/txns/libs/prepare_helpers.js
@@ -62,9 +62,75 @@ const PrepareHelpers = (function() {
return session;
}
+ const oplogSizeMB = 1;
+ const oplogSizeBytes = oplogSizeMB * 1024 * 1024;
+ const tenKB = new Array(10 * 1024).join("a");
+
+ /**
+ * Writes until the oplog exceeds its configured maximum, proving that the node keeps as much
+ * oplog as necessary to preserve entries for the oldest active transaction.
+ */
+ function growOplogPastMaxSize(replSet) {
+ const primary = replSet.getPrimary();
+ const oplog = primary.getDB("local").oplog.rs;
+ assert.lte(oplog.dataSize(), oplogSizeBytes);
+ const coll = primary.getDB("growOplogPastMaxSize").growOplogPastMaxSize;
+ const numNodes = replSet.nodeList().length;
+ while (oplog.dataSize() <= 2 * oplogSizeBytes) {
+ assert.commandWorked(coll.insert({tenKB: tenKB}, {writeConcern: {w: numNodes}}));
+ }
+
+ print(`Oplog on ${primary} dataSize = ${oplog.dataSize()}`);
+ }
+
+ /**
+ * Waits for the oplog to be truncated, proving that once a transaction finishes its oplog
+ * entries can be reclaimed.
+ */
+ function awaitOplogTruncation(replSet) {
+ print(`Waiting for oplog to shrink to ${oplogSizeMB} MB`);
+ const primary = replSet.getPrimary();
+ const primaryOplog = primary.getDB("local").oplog.rs;
+ const secondary = replSet.getSecondary();
+ const secondaryOplog = secondary.getDB("local").oplog.rs;
+
+ // Old entries are reclaimed when oplog size reaches new milestone. With a 1MB oplog,
+ // milestones are every 0.1 MB (see WiredTigerRecordStore::OplogStones::OplogStones) so
+ // write about 0.2 MB to be certain.
+ print("Add writes after transaction finished to trigger oplog reclamation");
+ const tenKB = new Array(10 * 1024).join("a");
+ const coll = primary.getDB("awaitOplogTruncation").awaitOplogTruncation;
+ const numNodes = replSet.nodeList().length;
+ for (let i = 0; i < 20; i++) {
+ assert.commandWorked(coll.insert({tenKB: tenKB}, {writeConcern: {w: numNodes}}));
+ }
+
+ for (let [nodeName, oplog] of[["primary", primaryOplog], ["secondary", secondaryOplog]]) {
+ assert.soon(function() {
+ const dataSize = oplog.dataSize();
+ const prepareEntryRemoved = (oplog.findOne({prepare: true}) === null);
+ print(`${nodeName} oplog dataSize: ${dataSize},` +
+ ` prepare entry removed: ${prepareEntryRemoved}`);
+
+ // The oplog milestone system allows the oplog to grow to 110% its max size.
+ if (dataSize < 1.1 * oplogSizeBytes && prepareEntryRemoved) {
+ return true;
+ }
+
+ assert.commandWorked(coll.insert({tenKB: tenKB}, {writeConcern: {w: numNodes}}));
+ return false;
+ }, `waiting for ${nodeName} oplog reclamation`, ReplSetTest.kDefaultTimeoutMS, 1000);
+ }
+ }
+
return {
prepareTransaction: prepareTransaction,
commitTransaction: commitTransaction,
createSessionWithGivenId: createSessionWithGivenId,
+ oplogSizeMB: oplogSizeMB,
+ oplogSizeBytes: oplogSizeBytes,
+ replSetStartSetOptions: {oplogSize: oplogSizeMB},
+ growOplogPastMaxSize: growOplogPastMaxSize,
+ awaitOplogTruncation: awaitOplogTruncation
};
})();
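
Taken together, the new helpers are meant to be used in the pattern the tests below follow: start a small-oplog replica set, prepare a transaction, grow the oplog past its cap, finish the transaction, then wait for reclamation. A minimal sketch of that flow, assuming a two-node set like the rollover test's (the exact topology and options here are illustrative):

load("jstests/core/txns/libs/prepare_helpers.js");

const replSet = new ReplSetTest({
    nodeOptions: {syncdelay: 1},
    nodes: [{}, {rsConfig: {priority: 0, votes: 0}}],
});
replSet.startSet(PrepareHelpers.replSetStartSetOptions);  // 1 MB oplog
replSet.initiate();
const primary = replSet.getPrimary();

const session = primary.startSession();
session.startTransaction();
assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
const prepareTimestamp = PrepareHelpers.prepareTransaction(session);

// While the transaction stays prepared, the oplog may grow well past its 1 MB
// cap, and the prepare entry at prepareTimestamp must survive truncation.
PrepareHelpers.growOplogPastMaxSize(replSet);
assert.eq(primary.getDB("local").oplog.rs.findOne({prepare: true}).ts, prepareTimestamp);

// Once the transaction finishes, milestone-based reclamation (0.1 MB stones on
// a 1 MB oplog, with a 110% allowance) shrinks the oplog back under its cap.
session.abortTransaction_forTesting();
PrepareHelpers.awaitOplogTruncation(replSet);

replSet.stopSet();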
diff --git a/jstests/replsets/initial_sync_preserves_active_txns.js b/jstests/replsets/initial_sync_preserves_active_txns.js
new file mode 100644
index 00000000000..4e9f1ad1dcb
--- /dev/null
+++ b/jstests/replsets/initial_sync_preserves_active_txns.js
@@ -0,0 +1,76 @@
+/**
+ * When the oplog size grows during initial sync to exceed the configured maximum, the node must
+ * truncate the oplog only up to the oldest active transaction timestamp at the time of the last
+ * stable checkpoint. The first oplog entry that belongs to an active transaction is preserved, and
+ * all entries after it.
+ *
+ * This tests the oldestActiveTransactionTimestamp, which is calculated from the "startOpTime"
+ * field of documents in the config.transactions collection.
+ *
+ * @tags: [uses_transactions, uses_prepare_transaction]
+ */
+
+(function() {
+ "use strict";
+ load("jstests/core/txns/libs/prepare_helpers.js");
+
+ const replSet = new ReplSetTest({
+ // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
+ nodeOptions: {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
+ nodes: 1
+ });
+
+ replSet.startSet(PrepareHelpers.replSetStartSetOptions);
+ replSet.initiate();
+ const primary = replSet.getPrimary();
+ const primaryOplog = primary.getDB("local").oplog.rs;
+ assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+
+ const coll = primary.getDB("test").test;
+ assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
+
+ jsTestLog("Prepare a transaction");
+
+ const session = primary.startSession();
+ session.startTransaction();
+ assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+ const txnEntry = primary.getDB("config").transactions.findOne();
+ assert.eq(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));
+
+ jsTestLog("Insert documents until oplog exceeds oplogSize");
+
+ // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
+ PrepareHelpers.growOplogPastMaxSize(replSet);
+
+ jsTestLog("Find prepare oplog entry");
+
+ const oplogEntry = primaryOplog.findOne({prepare: true});
+ assert.eq(oplogEntry.ts, prepareTimestamp, tojson(oplogEntry));
+
+ jsTestLog("Add a secondary node");
+
+ const secondary = replSet.add({rsConfig: {votes: 0, priority: 0}});
+ replSet.reInitiate();
+
+ jsTestLog("Reinitiated, awaiting secondary node");
+
+ replSet.awaitSecondaryNodes();
+
+ jsTestLog("Checking secondary oplog and config.transactions");
+
+ // Oplog grew past maxSize, and it includes the oldest active transaction's entry.
+ const secondaryOplog = secondary.getDB("local").oplog.rs;
+ assert.gt(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+ const secondaryOplogEntry = secondaryOplog.findOne({prepare: true});
+ assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
+
+ const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
+ assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
+
+ // TODO(SERVER-36492): commit or abort, await oplog truncation
+ // See recovery_preserves_active_txns.js for example.
+ // Until then, skip validation, the stashed transaction's lock prevents validation.
+ TestData.skipCheckDBHashes = true;
+ replSet.stopSet(undefined, undefined, {skipValidation: true});
+})();
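
The check that the prepare oplog entry survived truncation is repeated for the primary and the new secondary here, and again in the tests that follow. A hypothetical helper in the same style (not part of this patch) that captures the pattern:

// Hypothetical: assert that a node still holds the prepare oplog entry for the
// active transaction, i.e. that truncation stopped before it.
function assertPrepareEntryPreserved(conn, prepareTimestamp) {
    const oplog = conn.getDB("local").oplog.rs;
    const entry = oplog.findOne({prepare: true});
    assert.neq(null, entry, "expected the prepare oplog entry to be preserved");
    assert.eq(entry.ts, prepareTimestamp, tojson(entry));
}

// e.g. assertPrepareEntryPreserved(primary, prepareTimestamp);
//      assertPrepareEntryPreserved(secondary, prepareTimestamp);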
diff --git a/jstests/replsets/inmemory_preserves_active_txns.js b/jstests/replsets/inmemory_preserves_active_txns.js
new file mode 100644
index 00000000000..cac6c5e0bf4
--- /dev/null
+++ b/jstests/replsets/inmemory_preserves_active_txns.js
@@ -0,0 +1,110 @@
+/**
+ * When a primary's oplog size exceeds the configured maximum, it must truncate the oplog only up to
+ * the oldest active transaction timestamp at the time of the last stable checkpoint. The first
+ * oplog entry that belongs to an active transaction is preserved, and all entries after it. A
+ * secondary with the inMemory storage engine must treat its own oplog the same.
+ *
+ * This tests the oldestActiveTransactionTimestamp, which is calculated from the "startOpTime"
+ * field of documents in the config.transactions collection.
+ *
+ * @tags: [uses_transactions, uses_prepare_transaction]
+ */
+
+(function() {
+ "use strict";
+ load("jstests/core/txns/libs/prepare_helpers.js");
+
+ // If the test runner passed --storageEngine=inMemory then we know inMemory is compiled into the
+ // server. We'll actually use both inMemory and wiredTiger storage engines.
+ const storageEngine = jsTest.options().storageEngine;
+ if (storageEngine !== 'inMemory') {
+ jsTestLog(`Skip test: storageEngine == "${storageEngine}", not "inMemory"`);
+ return;
+ }
+
+ // A new replica set for both the commit and abort tests to ensure the same clean state.
+ function doTest(commitOrAbort) {
+ const replSet = new ReplSetTest({
+ // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
+ nodeOptions:
+ {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
+ nodes: [
+ {storageEngine: "wiredTiger"},
+ // inMemory node must not be a voter, otherwise lastCommitted never advances
+ {storageEngine: "inMemory", rsConfig: {priority: 0, votes: 0}},
+ ],
+ waitForKeys: false
+ });
+
+ replSet.startSet(PrepareHelpers.replSetStartSetOptions);
+ replSet.initiateWithAnyNodeAsPrimary(
+ null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+
+ const primary = replSet.getPrimary();
+ const secondary = replSet.getSecondary();
+ const primaryOplog = primary.getDB("local").oplog.rs;
+ assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+ const secondaryOplog = secondary.getDB("local").oplog.rs;
+ assert.lte(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+
+ const coll = primary.getDB("test").test;
+ assert.commandWorked(coll.insert({}));
+
+ jsTestLog("Prepare a transaction");
+
+ const session = primary.startSession();
+ session.startTransaction();
+ assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+ jsTestLog("Get transaction entry from config.transactions");
+
+ const txnEntry = primary.getDB("config").transactions.findOne();
+ assert.eq(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));
+
+ assert.soonNoExcept(() => {
+ const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
+ assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
+ return true;
+ });
+
+ jsTestLog("Find prepare oplog entry");
+
+ const oplogEntry = primaryOplog.findOne({prepare: true});
+ assert.eq(oplogEntry.ts, prepareTimestamp, tojson(oplogEntry));
+ // Must already be written on secondary, since the config.transactions entry is.
+ const secondaryOplogEntry = secondaryOplog.findOne({prepare: true});
+ assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
+
+ jsTestLog("Insert documents until oplog exceeds oplogSize");
+
+ // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
+ PrepareHelpers.growOplogPastMaxSize(replSet);
+
+ jsTestLog(
+ `Oplog dataSize = ${primaryOplog.dataSize()}, check the prepare entry still exists`);
+
+ assert.eq(oplogEntry, primaryOplog.findOne({prepare: true}));
+ assert.soon(() => {
+ return secondaryOplog.dataSize() > PrepareHelpers.oplogSizeBytes;
+ });
+ assert.eq(oplogEntry, secondaryOplog.findOne({prepare: true}));
+
+ if (commitOrAbort === "commit") {
+ jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
+ PrepareHelpers.commitTransactionAfterPrepareTS(session, prepareTimestamp);
+ } else if (commitOrAbort === "abort") {
+ jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
+ session.abortTransaction_forTesting();
+ } else {
+ throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
+ }
+
+ PrepareHelpers.awaitOplogTruncation(replSet);
+
+ replSet.stopSet();
+ }
+
+ doTest("commit");
+ doTest("abort");
+})();
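
The skip at the top of this test relies on the test runner's --storageEngine flag. For orientation only, a running node's engine can also be read from serverStatus; this alternative check is an illustration, not something the patch uses:

// Illustrative only: report which storage engine a running node is using.
function nodeStorageEngine(conn) {
    const status = assert.commandWorked(conn.adminCommand({serverStatus: 1}));
    return status.storageEngine.name;  // e.g. "wiredTiger" or "inMemory"
}

// In this test's topology one would expect:
//   nodeStorageEngine(replSet.getPrimary())   === "wiredTiger"
//   nodeStorageEngine(replSet.getSecondary()) === "inMemory"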
diff --git a/jstests/replsets/recovery_preserves_active_txns.js b/jstests/replsets/recovery_preserves_active_txns.js
new file mode 100644
index 00000000000..7c1c35dbcde
--- /dev/null
+++ b/jstests/replsets/recovery_preserves_active_txns.js
@@ -0,0 +1,89 @@
+/**
+ * When the oplog size grows during recovery to exceed the configured maximum, the node must
+ * truncate the oplog only up to the oldest active transaction timestamp at the time of the last
+ * stable checkpoint. The first oplog entry that belongs to an active transaction is preserved, and
+ * all entries after it.
+ *
+ * This tests the oldestActiveTransactionTimestamp, which is calculated from the "startOpTime"
+ * field of documents in the config.transactions collection.
+ *
+ * @tags: [uses_transactions, uses_prepare_transaction]
+ */
+
+(function() {
+ "use strict";
+ load("jstests/core/txns/libs/prepare_helpers.js");
+ load("jstests/libs/check_log.js");
+
+ // A new replica set for both the commit and abort tests to ensure the same clean state.
+ function doTest(commitOrAbort) {
+ const replSet = new ReplSetTest({
+ // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
+ nodeOptions:
+ {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
+ nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
+ });
+
+ replSet.startSet(PrepareHelpers.replSetStartSetOptions);
+ replSet.initiate();
+ const primary = replSet.getPrimary();
+ const primaryOplog = primary.getDB("local").oplog.rs;
+ assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+
+ const coll = primary.getDB("test").test;
+ assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
+
+ jsTestLog("Prepare a transaction");
+
+ const session = primary.startSession();
+ session.startTransaction();
+ assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+ jsTestLog("Insert documents until oplog exceeds oplogSize");
+
+ // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
+ PrepareHelpers.growOplogPastMaxSize(replSet);
+
+ // Oplog grew past maxSize, and it includes the oldest active transaction's entry.
+ var secondary = replSet.getSecondary();
+ function checkSecondaryOplog() {
+ const secondaryOplog = secondary.getDB("local").oplog.rs;
+ assert.soon(() => {
+ return secondaryOplog.dataSize() >= PrepareHelpers.oplogSizeBytes;
+ }, "waiting for secondary oplog to grow", ReplSetTest.kDefaultTimeoutMS);
+ const secondaryOplogEntry = secondaryOplog.findOne({prepare: true});
+ assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
+ }
+ checkSecondaryOplog();
+
+ jsTestLog("Restart the secondary");
+
+ const secondaryId = replSet.getSecondary().nodeId;
+ // Validation can't complete while the active transaction holds a lock.
+ replSet.stop(secondaryId, undefined, {skipValidation: true});
+ secondary = replSet.start(secondaryId, {}, true /* restart */);
+
+ jsTestLog("Restarted");
+
+ replSet.waitForState(replSet.getSecondaries(), ReplSetTest.State.SECONDARY);
+ checkSecondaryOplog();
+
+ if (commitOrAbort === "commit") {
+ jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
+ PrepareHelpers.commitTransactionAfterPrepareTS(session, prepareTimestamp);
+ } else if (commitOrAbort === "abort") {
+ jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
+ session.abortTransaction_forTesting();
+ } else {
+ throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
+ }
+
+ PrepareHelpers.awaitOplogTruncation(replSet);
+
+ // ReplSetTest reacts poorly to restarting a node, end it manually.
+ replSet.stopSet(true, false, {});
+ }
+ doTest("commit");
+ doTest("abort");
+})();
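
The restart-while-prepared sequence in this test (stop the secondary without validation, restart it, wait for SECONDARY, then re-check its oplog) is the part that exercises recovery. A hypothetical wrapper in the same style, shown only to summarize that sequence:

// Hypothetical helper (not part of this patch): restart a secondary while a
// prepared transaction is still active. Validation is skipped because the
// stashed transaction's locks would block it.
function restartSecondaryWithActiveTxn(replSet) {
    const nodeId = replSet.getSecondary().nodeId;
    replSet.stop(nodeId, undefined, {skipValidation: true});
    const restarted = replSet.start(nodeId, {}, true /* restart */);
    replSet.waitForState(restarted, ReplSetTest.State.SECONDARY);
    return restarted;
}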
diff --git a/jstests/replsets/rollover_preserves_active_txns.js b/jstests/replsets/rollover_preserves_active_txns.js
index 8597d1aa3cc..701c77afaa0 100644
--- a/jstests/replsets/rollover_preserves_active_txns.js
+++ b/jstests/replsets/rollover_preserves_active_txns.js
@@ -1,10 +1,9 @@
/**
* When a primary's oplog size exceeds the configured maximum, it must truncate the oplog only up to
* the oldest active transaction timestamp at the time of the last stable checkpoint. The first
- * oplog entry that belongs to a prepared uncommitted transaction is preserved, and all entries
- * after it.
+ * oplog entry that belongs to an active transaction is preserved, and all entries after it.
*
- * This tests the oldestActiveTransactionTimestamp, which is calculated from the "startTimestamp"
+ * This tests the oldestActiveTransactionTimestamp, which is calculated from the "startOpTime"
* field of documents in the config.transactions collection.
*
* @tags: [uses_transactions, uses_prepare_transaction]
@@ -14,27 +13,24 @@
"use strict";
load("jstests/core/txns/libs/prepare_helpers.js");
- const oplogSizeMB = 1;
- const oplogSizeBytes = oplogSizeMB * 1024 * 1024;
- const tenKB = new Array(10 * 1024).join("a");
-
// A new replica set for both the commit and abort tests to ensure the same clean state.
function doTest(commitOrAbort) {
const replSet = new ReplSetTest({
// Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
- nodeOptions: {syncdelay: 1},
- nodes: 2
+ nodeOptions:
+ {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
+ nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
});
- replSet.startSet({oplogSize: oplogSizeMB});
+ replSet.startSet(PrepareHelpers.replSetStartSetOptions);
replSet.initiate();
const primary = replSet.getPrimary();
const secondary = replSet.getSecondary();
const primaryOplog = primary.getDB("local").oplog.rs;
- assert.lte(primaryOplog.dataSize(), oplogSizeBytes);
+ assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
const secondaryOplog = secondary.getDB("local").oplog.rs;
- assert.lte(secondaryOplog.dataSize(), oplogSizeBytes);
+ assert.lte(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
const coll = primary.getDB("test").test;
assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
@@ -68,16 +64,14 @@
jsTestLog("Insert documents until oplog exceeds oplogSize");
// Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
- while (primaryOplog.dataSize() <= 2 * oplogSizeBytes) {
- assert.commandWorked(coll.insert({tenKB: tenKB}));
- }
+ PrepareHelpers.growOplogPastMaxSize(replSet);
jsTestLog(
`Oplog dataSize = ${primaryOplog.dataSize()}, check the prepare entry still exists`);
assert.eq(oplogEntry, primaryOplog.findOne({prepare: true}));
assert.soon(() => {
- return secondaryOplog.dataSize() > oplogSizeBytes;
+ return secondaryOplog.dataSize() > PrepareHelpers.oplogSizeBytes;
});
assert.eq(oplogEntry, secondaryOplog.findOne({prepare: true}));
@@ -91,32 +85,7 @@
throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
}
- jsTestLog("Add writes after transaction finished to trigger oplog reclamation");
-
- // Old entries are reclaimed when oplog size reaches new milestone. With a 1MB oplog,
- // milestones are every 0.1 MB (see WiredTigerRecordStore::OplogStones::OplogStones) so
- // write about 0.2 MB to be certain.
- for (var i = 0; i < 200; i++) {
- assert.commandWorked(coll.insert({tenKB: tenKB}));
- }
-
- jsTestLog("Waiting for oplog to shrink to 1MB");
-
- for (let [nodeName, oplog] of[["primary", primaryOplog], ["secondary", secondaryOplog]]) {
- assert.soon(function() {
- const dataSize = oplog.dataSize();
- const prepareEntryRemoved = (oplog.findOne({prepare: true}) === null);
- print(
- `${nodeName} oplog dataSize: ${dataSize}, prepare entry removed: ${prepareEntryRemoved}`);
- // The oplog milestone system allows the oplog to grow to 110% its max size.
- if (dataSize < 1.1 * oplogSizeBytes && prepareEntryRemoved) {
- return true;
- }
-
- assert.commandWorked(coll.insert({tenKB: tenKB}, {writeConcern: {w: "majority"}}));
- return false;
- }, `waiting for ${nodeName} oplog reclamation`, ReplSetTest.kDefaultTimeoutMS, 1000);
- }
+ PrepareHelpers.awaitOplogTruncation(replSet);
replSet.stopSet();
}