Diffstat (limited to 'jstests/replsets')
-rw-r--r--  jstests/replsets/initial_sync_preserves_active_txns.js   76
-rw-r--r--  jstests/replsets/inmemory_preserves_active_txns.js       110
-rw-r--r--  jstests/replsets/recovery_preserves_active_txns.js        89
-rw-r--r--  jstests/replsets/rollover_preserves_active_txns.js        53
4 files changed, 286 insertions(+), 42 deletions(-)
diff --git a/jstests/replsets/initial_sync_preserves_active_txns.js b/jstests/replsets/initial_sync_preserves_active_txns.js
new file mode 100644
index 00000000000..4e9f1ad1dcb
--- /dev/null
+++ b/jstests/replsets/initial_sync_preserves_active_txns.js
@@ -0,0 +1,76 @@
+/**
+ * When the oplog size grows during initial sync to exceed the configured maximum, the node must
+ * truncate the oplog only up to the oldest active transaction timestamp at the time of the last
+ * stable checkpoint. The first oplog entry that belongs to an active transaction is preserved, and
+ * all entries after it.
+ *
+ * This tests the oldestActiveTransactionTimestamp, which is calculated from the "startOpTime"
+ * field of documents in the config.transactions collection.
+ *
+ * @tags: [uses_transactions, uses_prepare_transaction]
+ */
+
+(function() {
+ "use strict";
+ load("jstests/core/txns/libs/prepare_helpers.js");
+
+ const replSet = new ReplSetTest({
+ // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
+ nodeOptions: {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
+ nodes: 1
+ });
+
+ replSet.startSet(PrepareHelpers.replSetStartSetOptions);
+ replSet.initiate();
+ const primary = replSet.getPrimary();
+ const primaryOplog = primary.getDB("local").oplog.rs;
+ assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+
+ const coll = primary.getDB("test").test;
+ assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
+
+ jsTestLog("Prepare a transaction");
+
+ const session = primary.startSession();
+ session.startTransaction();
+ assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+ const txnEntry = primary.getDB("config").transactions.findOne();
+ assert.eq(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));
+
+ jsTestLog("Insert documents until oplog exceeds oplogSize");
+
+ // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
+ PrepareHelpers.growOplogPastMaxSize(replSet);
+
+ jsTestLog("Find prepare oplog entry");
+
+ const oplogEntry = primaryOplog.findOne({prepare: true});
+ assert.eq(oplogEntry.ts, prepareTimestamp, tojson(oplogEntry));
+
+ jsTestLog("Add a secondary node");
+
+ const secondary = replSet.add({rsConfig: {votes: 0, priority: 0}});
+ replSet.reInitiate();
+
+ jsTestLog("Reinitiated, awaiting secondary node");
+
+ replSet.awaitSecondaryNodes();
+
+ jsTestLog("Checking secondary oplog and config.transactions");
+
+ // Oplog grew past maxSize, and it includes the oldest active transaction's entry.
+ const secondaryOplog = secondary.getDB("local").oplog.rs;
+ assert.gt(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+ const secondaryOplogEntry = secondaryOplog.findOne({prepare: true});
+ assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
+
+ const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
+ assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
+
+ // TODO(SERVER-36492): commit or abort the transaction and await oplog truncation;
+ // see recovery_preserves_active_txns.js for an example.
+ // Until then, skip validation: the stashed transaction's lock prevents it.
+ TestData.skipCheckDBHashes = true;
+ replSet.stopSet(undefined, undefined, {skipValidation: true});
+})();
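
The assertions this first test makes on the primary and then repeats on the new secondary amount to one invariant: the prepared transaction's first oplog entry survives truncation and its timestamp matches the startOpTime recorded in config.transactions. A minimal sketch of that invariant as a standalone shell helper; assertPrepareEntryPreserved is a hypothetical name, not part of the test or of PrepareHelpers.

    // Hypothetical helper: assert that a node's oplog still contains the prepared
    // transaction's entry and that its timestamp matches the session's startOpTime
    // in config.transactions.
    function assertPrepareEntryPreserved(node) {
        const txnEntry = node.getDB("config").transactions.findOne();
        const prepareEntry = node.getDB("local").oplog.rs.findOne({prepare: true});
        assert.neq(null, prepareEntry, "prepare oplog entry was truncated");
        assert.eq(prepareEntry.ts, txnEntry.startOpTime.ts, tojson({txnEntry, prepareEntry}));
    }

    // Usage, mirroring the checks above:
    // assertPrepareEntryPreserved(primary);
    // assertPrepareEntryPreserved(secondary);
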
diff --git a/jstests/replsets/inmemory_preserves_active_txns.js b/jstests/replsets/inmemory_preserves_active_txns.js
new file mode 100644
index 00000000000..cefc3431a3a
--- /dev/null
+++ b/jstests/replsets/inmemory_preserves_active_txns.js
@@ -0,0 +1,110 @@
+/**
+ * When a primary's oplog size exceeds the configured maximum, it must truncate the oplog only up to
+ * the oldest active transaction timestamp at the time of the last stable checkpoint. The first
+ * oplog entry that belongs to an active transaction is preserved, and all entries after it. A
+ * secondary with the inMemory storage engine must treat its own oplog the same.
+ *
+ * This tests the oldestActiveTransactionTimestamp, which is calculated from the "startOpTime"
+ * field of documents in the config.transactions collection.
+ *
+ * @tags: [uses_transactions, uses_prepare_transaction]
+ */
+
+(function() {
+ "use strict";
+ load("jstests/core/txns/libs/prepare_helpers.js");
+
+ // If the test runner passed --storageEngine=inMemory then we know inMemory is compiled into the
+ // server. We'll actually use both inMemory and wiredTiger storage engines.
+ const storageEngine = jsTest.options().storageEngine;
+ if (storageEngine !== 'inMemory') {
+ jsTestLog(`Skip test: storageEngine == "${storageEngine}", not "inMemory"`);
+ return;
+ }
+
+ // A new replica set for both the commit and abort tests to ensure the same clean state.
+ function doTest(commitOrAbort) {
+ const replSet = new ReplSetTest({
+ // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
+ nodeOptions:
+ {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
+ nodes: [
+ {storageEngine: "wiredTiger"},
+ // inMemory node must not be a voter, otherwise lastCommitted never advances
+ {storageEngine: "inMemory", rsConfig: {priority: 0, votes: 0}},
+ ],
+ waitForKeys: false
+ });
+
+ replSet.startSet(PrepareHelpers.replSetStartSetOptions);
+ replSet.initiateWithAnyNodeAsPrimary(
+ null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+
+ const primary = replSet.getPrimary();
+ const secondary = replSet.getSecondary();
+ const primaryOplog = primary.getDB("local").oplog.rs;
+ assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+ const secondaryOplog = secondary.getDB("local").oplog.rs;
+ assert.lte(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+
+ const coll = primary.getDB("test").test;
+ assert.commandWorked(coll.insert({}));
+
+ jsTestLog("Prepare a transaction");
+
+ const session = primary.startSession();
+ session.startTransaction();
+ assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+ jsTestLog("Get transaction entry from config.transactions");
+
+ const txnEntry = primary.getDB("config").transactions.findOne();
+ assert.eq(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));
+
+ assert.soonNoExcept(() => {
+ const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
+ assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
+ return true;
+ });
+
+ jsTestLog("Find prepare oplog entry");
+
+ const oplogEntry = primaryOplog.findOne({prepare: true});
+ assert.eq(oplogEntry.ts, prepareTimestamp, tojson(oplogEntry));
+ // Must already be written on secondary, since the config.transactions entry is.
+ const secondaryOplogEntry = secondaryOplog.findOne({prepare: true});
+ assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
+
+ jsTestLog("Insert documents until oplog exceeds oplogSize");
+
+ // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
+ PrepareHelpers.growOplogPastMaxSize(replSet);
+
+ jsTestLog(
+ `Oplog dataSize = ${primaryOplog.dataSize()}, check the prepare entry still exists`);
+
+ assert.eq(oplogEntry, primaryOplog.findOne({prepare: true}));
+ assert.soon(() => {
+ return secondaryOplog.dataSize() > PrepareHelpers.oplogSizeBytes;
+ });
+ assert.eq(oplogEntry, secondaryOplog.findOne({prepare: true}));
+
+ if (commitOrAbort === "commit") {
+ jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
+ PrepareHelpers.commitTransaction(session, prepareTimestamp);
+ } else if (commitOrAbort === "abort") {
+ jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
+ session.abortTransaction_forTesting();
+ } else {
+ throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
+ }
+
+ PrepareHelpers.awaitOplogTruncation(replSet);
+
+ replSet.stopSet();
+ }
+
+ doTest("commit");
+ doTest("abort");
+})();
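
The commit-or-abort branch at the end of doTest reappears in recovery_preserves_active_txns.js and rollover_preserves_active_txns.js below. A small sketch of how that branch could be factored out; finishTransaction is a hypothetical helper name, not part of PrepareHelpers.

    // Hypothetical shared helper for the commit/abort dispatch repeated in these tests.
    function finishTransaction(session, prepareTimestamp, commitOrAbort) {
        if (commitOrAbort === "commit") {
            // PrepareHelpers.commitTransaction takes the prepare timestamp returned
            // earlier by PrepareHelpers.prepareTransaction.
            PrepareHelpers.commitTransaction(session, prepareTimestamp);
        } else if (commitOrAbort === "abort") {
            session.abortTransaction_forTesting();
        } else {
            throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
        }
    }

    // Usage inside doTest(commitOrAbort):
    // finishTransaction(session, prepareTimestamp, commitOrAbort);
    // PrepareHelpers.awaitOplogTruncation(replSet);
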
diff --git a/jstests/replsets/recovery_preserves_active_txns.js b/jstests/replsets/recovery_preserves_active_txns.js
new file mode 100644
index 00000000000..36ff0b80db2
--- /dev/null
+++ b/jstests/replsets/recovery_preserves_active_txns.js
@@ -0,0 +1,89 @@
+/**
+ * When the oplog size grows during recovery to exceed the configured maximum, the node must
+ * truncate the oplog only up to the oldest active transaction timestamp at the time of the last
+ * stable checkpoint. The first oplog entry that belongs to an active transaction is preserved, and
+ * all entries after it.
+ *
+ * This tests the oldestActiveTransactionTimestamp, which is calculated from the "startOpTime"
+ * field of documents in the config.transactions collection.
+ *
+ * @tags: [uses_transactions, uses_prepare_transaction]
+ */
+
+(function() {
+ "use strict";
+ load("jstests/core/txns/libs/prepare_helpers.js");
+ load("jstests/libs/check_log.js");
+
+ // A new replica set for both the commit and abort tests to ensure the same clean state.
+ function doTest(commitOrAbort) {
+ const replSet = new ReplSetTest({
+ // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
+ nodeOptions:
+ {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
+ nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
+ });
+
+ replSet.startSet(PrepareHelpers.replSetStartSetOptions);
+ replSet.initiate();
+ const primary = replSet.getPrimary();
+ const primaryOplog = primary.getDB("local").oplog.rs;
+ assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+
+ const coll = primary.getDB("test").test;
+ assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
+
+ jsTestLog("Prepare a transaction");
+
+ const session = primary.startSession();
+ session.startTransaction();
+ assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+ jsTestLog("Insert documents until oplog exceeds oplogSize");
+
+ // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
+ PrepareHelpers.growOplogPastMaxSize(replSet);
+
+ // Oplog grew past maxSize, and it includes the oldest active transaction's entry.
+ var secondary = replSet.getSecondary();
+ function checkSecondaryOplog() {
+ const secondaryOplog = secondary.getDB("local").oplog.rs;
+ assert.soon(() => {
+ return secondaryOplog.dataSize() >= PrepareHelpers.oplogSizeBytes;
+ }, "waiting for secondary oplog to grow", ReplSetTest.kDefaultTimeoutMS);
+ const secondaryOplogEntry = secondaryOplog.findOne({prepare: true});
+ assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
+ }
+ checkSecondaryOplog();
+
+ jsTestLog("Restart the secondary");
+
+ const secondaryId = replSet.getSecondary().nodeId;
+ // Validation can't complete while the active transaction holds a lock.
+ replSet.stop(secondaryId, undefined, {skipValidation: true});
+ secondary = replSet.start(secondaryId, {}, true /* restart */);
+
+ jsTestLog("Restarted");
+
+ replSet.waitForState(replSet.getSecondaries(), ReplSetTest.State.SECONDARY);
+ checkSecondaryOplog();
+
+ if (commitOrAbort === "commit") {
+ jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
+ PrepareHelpers.commitTransaction(session, prepareTimestamp);
+ } else if (commitOrAbort === "abort") {
+ jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
+ session.abortTransaction_forTesting();
+ } else {
+ throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
+ }
+
+ PrepareHelpers.awaitOplogTruncation(replSet);
+
+ // ReplSetTest reacts poorly to restarting a node; stop the set manually.
+ replSet.stopSet(true, false, {});
+ }
+ doTest("commit");
+ doTest("abort");
+})();
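
The restart in the middle of this test is the step that exercises startup recovery: checkSecondaryOplog runs again after the node returns to SECONDARY, confirming recovery did not truncate past the prepare entry. A minimal sketch of an additional check one could make to state that directly, assuming the shell's timestampCmp helper and a $natural-order scan; assertTruncationStoppedAtPrepare is a hypothetical name, not part of the test.

    // Hypothetical extra check: the oldest entry remaining in the oplog is no newer than
    // the prepare timestamp, i.e. truncation stopped at or before the oldest active
    // transaction's first oplog entry.
    function assertTruncationStoppedAtPrepare(node, prepareTimestamp) {
        const oplog = node.getDB("local").oplog.rs;
        const oldestEntry = oplog.find().sort({$natural: 1}).limit(1).next();
        assert.lte(timestampCmp(oldestEntry.ts, prepareTimestamp), 0, tojson(oldestEntry));
    }
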
diff --git a/jstests/replsets/rollover_preserves_active_txns.js b/jstests/replsets/rollover_preserves_active_txns.js
index 8597d1aa3cc..701c77afaa0 100644
--- a/jstests/replsets/rollover_preserves_active_txns.js
+++ b/jstests/replsets/rollover_preserves_active_txns.js
@@ -1,10 +1,9 @@
/**
* When a primary's oplog size exceeds the configured maximum, it must truncate the oplog only up to
* the oldest active transaction timestamp at the time of the last stable checkpoint. The first
- * oplog entry that belongs to a prepared uncommitted transaction is preserved, and all entries
- * after it.
+ * oplog entry that belongs to an active transaction is preserved, and all entries after it.
*
- * This tests the oldestActiveTransactionTimestamp, which is calculated from the "startTimestamp"
+ * This tests the oldestActiveTransactionTimestamp, which is calculated from the "startOpTime"
* field of documents in the config.transactions collection.
*
* @tags: [uses_transactions, uses_prepare_transaction]
@@ -14,27 +13,24 @@
"use strict";
load("jstests/core/txns/libs/prepare_helpers.js");
- const oplogSizeMB = 1;
- const oplogSizeBytes = oplogSizeMB * 1024 * 1024;
- const tenKB = new Array(10 * 1024).join("a");
-
// A new replica set for both the commit and abort tests to ensure the same clean state.
function doTest(commitOrAbort) {
const replSet = new ReplSetTest({
// Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
- nodeOptions: {syncdelay: 1},
- nodes: 2
+ nodeOptions:
+ {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
+ nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
});
- replSet.startSet({oplogSize: oplogSizeMB});
+ replSet.startSet(PrepareHelpers.replSetStartSetOptions);
replSet.initiate();
const primary = replSet.getPrimary();
const secondary = replSet.getSecondary();
const primaryOplog = primary.getDB("local").oplog.rs;
- assert.lte(primaryOplog.dataSize(), oplogSizeBytes);
+ assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
const secondaryOplog = secondary.getDB("local").oplog.rs;
- assert.lte(secondaryOplog.dataSize(), oplogSizeBytes);
+ assert.lte(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
const coll = primary.getDB("test").test;
assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
@@ -68,16 +64,14 @@
jsTestLog("Insert documents until oplog exceeds oplogSize");
// Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
- while (primaryOplog.dataSize() <= 2 * oplogSizeBytes) {
- assert.commandWorked(coll.insert({tenKB: tenKB}));
- }
+ PrepareHelpers.growOplogPastMaxSize(replSet);
jsTestLog(
`Oplog dataSize = ${primaryOplog.dataSize()}, check the prepare entry still exists`);
assert.eq(oplogEntry, primaryOplog.findOne({prepare: true}));
assert.soon(() => {
- return secondaryOplog.dataSize() > oplogSizeBytes;
+ return secondaryOplog.dataSize() > PrepareHelpers.oplogSizeBytes;
});
assert.eq(oplogEntry, secondaryOplog.findOne({prepare: true}));
@@ -91,32 +85,7 @@
throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
}
- jsTestLog("Add writes after transaction finished to trigger oplog reclamation");
-
- // Old entries are reclaimed when oplog size reaches new milestone. With a 1MB oplog,
- // milestones are every 0.1 MB (see WiredTigerRecordStore::OplogStones::OplogStones) so
- // write about 0.2 MB to be certain.
- for (var i = 0; i < 200; i++) {
- assert.commandWorked(coll.insert({tenKB: tenKB}));
- }
-
- jsTestLog("Waiting for oplog to shrink to 1MB");
-
- for (let [nodeName, oplog] of[["primary", primaryOplog], ["secondary", secondaryOplog]]) {
- assert.soon(function() {
- const dataSize = oplog.dataSize();
- const prepareEntryRemoved = (oplog.findOne({prepare: true}) === null);
- print(
- `${nodeName} oplog dataSize: ${dataSize}, prepare entry removed: ${prepareEntryRemoved}`);
- // The oplog milestone system allows the oplog to grow to 110% its max size.
- if (dataSize < 1.1 * oplogSizeBytes && prepareEntryRemoved) {
- return true;
- }
-
- assert.commandWorked(coll.insert({tenKB: tenKB}, {writeConcern: {w: "majority"}}));
- return false;
- }, `waiting for ${nodeName} oplog reclamation`, ReplSetTest.kDefaultTimeoutMS, 1000);
- }
+ PrepareHelpers.awaitOplogTruncation(replSet);
replSet.stopSet();
}
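
The inline code removed from rollover_preserves_active_txns.js above shows roughly what the two new PrepareHelpers calls replace. Below is a sketch reconstructed from that removed code, under the assumption that the shared helpers in jstests/core/txns/libs/prepare_helpers.js follow the same approach; the function names only mirror the helpers for illustration and are not their actual source.

    // Sketch of growOplogPastMaxSize: with a prepared transaction pinning its first oplog
    // entry, inserts keep accumulating, so the oplog grows well past its configured cap.
    function growOplogPastMaxSizeSketch(replSet) {
        const primary = replSet.getPrimary();
        const oplog = primary.getDB("local").oplog.rs;
        const coll = primary.getDB("test").test;
        const tenKB = new Array(10 * 1024).join("a");
        while (oplog.dataSize() <= 2 * PrepareHelpers.oplogSizeBytes) {
            assert.commandWorked(coll.insert({tenKB: tenKB}));
        }
    }

    // Sketch of awaitOplogTruncation: once the transaction is committed or aborted, keep
    // writing so the oplog reaches its next reclamation milestone, then wait for each
    // node's oplog to shrink back below ~110% of the cap with the prepare entry gone.
    function awaitOplogTruncationSketch(replSet) {
        const coll = replSet.getPrimary().getDB("test").test;
        const tenKB = new Array(10 * 1024).join("a");
        for (let node of [replSet.getPrimary(), replSet.getSecondary()]) {
            const oplog = node.getDB("local").oplog.rs;
            assert.soon(() => {
                if (oplog.dataSize() < 1.1 * PrepareHelpers.oplogSizeBytes &&
                    oplog.findOne({prepare: true}) === null) {
                    return true;
                }
                // Old entries are only reclaimed when a new size milestone is reached, so
                // trigger that with more writes (as the removed inline loop did).
                assert.commandWorked(coll.insert({tenKB: tenKB}, {writeConcern: {w: "majority"}}));
                return false;
            }, "waiting for oplog reclamation", ReplSetTest.kDefaultTimeoutMS, 1000);
        }
    }
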