author | A. Jesse Jiryu Davis <jesse@mongodb.com> | 2019-04-08 14:02:36 -0400
committer | A. Jesse Jiryu Davis <jesse@mongodb.com> | 2019-04-08 14:19:08 -0400
commit | 02a87ee5b1942d24e1d7a20502c79d36218929fe (patch)
tree | 68219f29d940a1eae531e4cedde40e87aab57389
parent | 2db32e10f3e1e5aedb80703104fae838acfcf696 (diff)
download | mongo-02a87ee5b1942d24e1d7a20502c79d36218929fe.tar.gz
Revert "SERVER-36494 Test that active txn entries aren't truncated"
This reverts commit f8f872e029ba3b1f32d8499c912756d48dc1a03b.
-rw-r--r-- | jstests/core/txns/libs/prepare_helpers.js | 66
-rw-r--r-- | jstests/replsets/initial_sync_preserves_active_txns.js | 76
-rw-r--r-- | jstests/replsets/inmemory_preserves_active_txns.js | 110
-rw-r--r-- | jstests/replsets/recovery_preserves_active_txns.js | 89
-rw-r--r-- | jstests/replsets/rollover_preserves_active_txns.js | 53
-rw-r--r-- | src/mongo/db/repl/repl_set_commands.cpp | 2
6 files changed, 43 insertions(+), 353 deletions(-)
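All of the reverted tests exercise the same mechanism: a prepared, still-active transaction pins oplog truncation at its first oplog entry, and the server derives that pin (the oldestActiveTransactionTimestamp) from the startOpTime field of the transaction's document in config.transactions. The following is a condensed sketch of the check the tests perform, assembled from the deleted test bodies below; it assumes "primary" is a connection to a replica-set primary and that prepare_helpers.js is loadable from the shell's working directory.

    load("jstests/core/txns/libs/prepare_helpers.js");

    // Prepare a transaction and note its prepare timestamp.
    const session = primary.startSession();
    session.startTransaction();
    assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
    const prepareTimestamp = PrepareHelpers.prepareTransaction(session);

    // The transaction's config.transactions document records where its oplog entries begin.
    const txnEntry = primary.getDB("config").transactions.findOne();
    assert.eq(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));

    // That first entry, the prepare entry, must survive truncation while the transaction
    // remains active, no matter how far the oplog grows past its configured maximum.
    const oplogEntry = primary.getDB("local").oplog.rs.findOne({prepare: true});
    assert.eq(oplogEntry.ts, prepareTimestamp, tojson(oplogEntry));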
diff --git a/jstests/core/txns/libs/prepare_helpers.js b/jstests/core/txns/libs/prepare_helpers.js
index 95efbc8bbea..4d87fd9ee07 100644
--- a/jstests/core/txns/libs/prepare_helpers.js
+++ b/jstests/core/txns/libs/prepare_helpers.js
@@ -62,75 +62,9 @@ const PrepareHelpers = (function() {
         return session;
     }

-    const oplogSizeMB = 1;
-    const oplogSizeBytes = oplogSizeMB * 1024 * 1024;
-    const tenKB = new Array(10 * 1024).join("a");
-
-    /**
-     * Writes until the oplog exceeds its configured maximum, proving that the node keeps as much
-     * oplog as necessary to preserve entries for the oldest active transaction.
-     */
-    function growOplogPastMaxSize(replSet) {
-        const primary = replSet.getPrimary();
-        const oplog = primary.getDB("local").oplog.rs;
-        assert.lte(oplog.dataSize(), oplogSizeBytes);
-        const coll = primary.getDB("growOplogPastMaxSize").growOplogPastMaxSize;
-        const numNodes = replSet.nodeList().length;
-        while (oplog.dataSize() <= 2 * oplogSizeBytes) {
-            assert.commandWorked(coll.insert({tenKB: tenKB}, {writeConcern: {w: numNodes}}));
-        }
-
-        print(`Oplog on ${primary} dataSize = ${oplog.dataSize()}`);
-    }
-
-    /**
-     * Waits for the oplog to be truncated, proving that once a transaction finishes its oplog
-     * entries can be reclaimed.
-     */
-    function awaitOplogTruncation(replSet) {
-        print(`Waiting for oplog to shrink to ${oplogSizeMB} MB`);
-        const primary = replSet.getPrimary();
-        const primaryOplog = primary.getDB("local").oplog.rs;
-        const secondary = replSet.getSecondary();
-        const secondaryOplog = secondary.getDB("local").oplog.rs;
-
-        // Old entries are reclaimed when oplog size reaches new milestone. With a 1MB oplog,
-        // milestones are every 0.1 MB (see WiredTigerRecordStore::OplogStones::OplogStones) so
-        // write about 0.2 MB to be certain.
-        print("Add writes after transaction finished to trigger oplog reclamation");
-        const tenKB = new Array(10 * 1024).join("a");
-        const coll = primary.getDB("awaitOplogTruncation").awaitOplogTruncation;
-        const numNodes = replSet.nodeList().length;
-        for (let i = 0; i < 20; i++) {
-            assert.commandWorked(coll.insert({tenKB: tenKB}, {writeConcern: {w: numNodes}}));
-        }
-
-        for (let [nodeName, oplog] of[["primary", primaryOplog], ["secondary", secondaryOplog]]) {
-            assert.soon(function() {
-                const dataSize = oplog.dataSize();
-                const prepareEntryRemoved = (oplog.findOne({prepare: true}) === null);
-                print(`${nodeName} oplog dataSize: ${dataSize},` +
-                      ` prepare entry removed: ${prepareEntryRemoved}`);
-
-                // The oplog milestone system allows the oplog to grow to 110% its max size.
-                if (dataSize < 1.1 * oplogSizeBytes && prepareEntryRemoved) {
-                    return true;
-                }
-
-                assert.commandWorked(coll.insert({tenKB: tenKB}, {writeConcern: {w: numNodes}}));
-                return false;
-            }, `waiting for ${nodeName} oplog reclamation`, ReplSetTest.kDefaultTimeoutMS, 1000);
-        }
-    }
-
     return {
         prepareTransaction: prepareTransaction,
         commitTransaction: commitTransaction,
         createSessionWithGivenId: createSessionWithGivenId,
-        oplogSizeMB: oplogSizeMB,
-        oplogSizeBytes: oplogSizeBytes,
-        replSetStartSetOptions: {oplogSize: oplogSizeMB},
-        growOplogPastMaxSize: growOplogPastMaxSize,
-        awaitOplogTruncation: awaitOplogTruncation
     };
 })();
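The helpers deleted above (growOplogPastMaxSize, awaitOplogTruncation, and the shared replSetStartSetOptions / oplogSizeBytes constants) were the common plumbing for the *_preserves_active_txns.js tests that this commit also removes. For reference, a condensed sketch of how the deleted tests drove them, taken from their test bodies; session and prepareTimestamp are the prepared transaction from the sketch above, and the 1 MB oplog is the helpers' own test constant, not a server requirement.

    // Start a small replica set with the helpers' deliberately tiny (1 MB) oplog.
    const replSet = new ReplSetTest({nodes: 2, nodeOptions: {syncdelay: 1}});
    replSet.startSet(PrepareHelpers.replSetStartSetOptions);  // i.e. {oplogSize: 1}
    replSet.initiate();

    // ... prepare a transaction on the primary, as in the sketch above ...

    // Keep writing until the oplog holds roughly twice its configured maximum; the
    // prepared transaction prevents the excess from being truncated away.
    PrepareHelpers.growOplogPastMaxSize(replSet);

    // Committing (or aborting) releases the pin; further writes then let the oplog
    // shrink back under its maximum and drop the prepare entry.
    PrepareHelpers.commitTransactionAfterPrepareTS(session, prepareTimestamp);
    PrepareHelpers.awaitOplogTruncation(replSet);

    replSet.stopSet();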
diff --git a/jstests/replsets/initial_sync_preserves_active_txns.js b/jstests/replsets/initial_sync_preserves_active_txns.js
deleted file mode 100644
index 4e9f1ad1dcb..00000000000
--- a/jstests/replsets/initial_sync_preserves_active_txns.js
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * When the oplog size grows during initial sync to exceed the configured maximum, the node must
- * truncate the oplog only up to the oldest active transaction timestamp at the time of the last
- * stable checkpoint. The first oplog entry that belongs to an active transaction is preserved, and
- * all entries after it.
- *
- * This tests the oldestActiveTransactionTimestamp, which is calculated from the "startOpTime"
- * field of documents in the config.transactions collection.
- *
- * @tags: [uses_transactions, uses_prepare_transaction]
- */
-
-(function() {
-    "use strict";
-    load("jstests/core/txns/libs/prepare_helpers.js");
-
-    const replSet = new ReplSetTest({
-        // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
-        nodeOptions: {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
-        nodes: 1
-    });
-
-    replSet.startSet(PrepareHelpers.replSetStartSetOptions);
-    replSet.initiate();
-    const primary = replSet.getPrimary();
-    const primaryOplog = primary.getDB("local").oplog.rs;
-    assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
-
-    const coll = primary.getDB("test").test;
-    assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
-
-    jsTestLog("Prepare a transaction");
-
-    const session = primary.startSession();
-    session.startTransaction();
-    assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
-    const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-    const txnEntry = primary.getDB("config").transactions.findOne();
-    assert.eq(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));
-
-    jsTestLog("Insert documents until oplog exceeds oplogSize");
-
-    // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
-    PrepareHelpers.growOplogPastMaxSize(replSet);
-
-    jsTestLog("Find prepare oplog entry");
-
-    const oplogEntry = primaryOplog.findOne({prepare: true});
-    assert.eq(oplogEntry.ts, prepareTimestamp, tojson(oplogEntry));
-
-    jsTestLog("Add a secondary node");
-
-    const secondary = replSet.add({rsConfig: {votes: 0, priority: 0}});
-    replSet.reInitiate();
-
-    jsTestLog("Reinitiated, awaiting secondary node");
-
-    replSet.awaitSecondaryNodes();
-
-    jsTestLog("Checking secondary oplog and config.transactions");
-
-    // Oplog grew past maxSize, and it includes the oldest active transaction's entry.
-    const secondaryOplog = secondary.getDB("local").oplog.rs;
-    assert.gt(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
-    const secondaryOplogEntry = secondaryOplog.findOne({prepare: true});
-    assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
-
-    const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
-    assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
-
-    // TODO(SERVER-36492): commit or abort, await oplog truncation
-    // See recovery_preserves_active_txns.js for example.
-    // Until then, skip validation, the stashed transaction's lock prevents validation.
-    TestData.skipCheckDBHashes = true;
-    replSet.stopSet(undefined, undefined, {skipValidation: true});
-})();
diff --git a/jstests/replsets/inmemory_preserves_active_txns.js b/jstests/replsets/inmemory_preserves_active_txns.js
deleted file mode 100644
index cac6c5e0bf4..00000000000
--- a/jstests/replsets/inmemory_preserves_active_txns.js
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * When a primary's oplog size exceeds the configured maximum, it must truncate the oplog only up to
- * the oldest active transaction timestamp at the time of the last stable checkpoint. The first
- * oplog entry that belongs to an active transaction is preserved, and all entries after it. A
- * secondary with the inMemory storage engine must treat its own oplog the same.
- *
- * This tests the oldestActiveTransactionTimestamp, which is calculated from the "startOpTime"
- * field of documents in the config.transactions collection.
- *
- * @tags: [uses_transactions, uses_prepare_transaction]
- */
-
-(function() {
-    "use strict";
-    load("jstests/core/txns/libs/prepare_helpers.js");
-
-    // If the test runner passed --storageEngine=inMemory then we know inMemory is compiled into the
-    // server. We'll actually use both inMemory and wiredTiger storage engines.
-    const storageEngine = jsTest.options().storageEngine;
-    if (storageEngine !== 'inMemory') {
-        jsTestLog(`Skip test: storageEngine == "${storageEngine}", not "inMemory"`);
-        return;
-    }
-
-    // A new replica set for both the commit and abort tests to ensure the same clean state.
-    function doTest(commitOrAbort) {
-        const replSet = new ReplSetTest({
-            // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
-            nodeOptions:
-                {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
-            nodes: [
-                {storageEngine: "wiredTiger"},
-                // inMemory node must not be a voter, otherwise lastCommitted never advances
-                {storageEngine: "inMemory", rsConfig: {priority: 0, votes: 0}},
-            ],
-            waitForKeys: false
-        });
-
-        replSet.startSet(PrepareHelpers.replSetStartSetOptions);
-        replSet.initiateWithAnyNodeAsPrimary(
-            null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
-
-        const primary = replSet.getPrimary();
-        const secondary = replSet.getSecondary();
-        const primaryOplog = primary.getDB("local").oplog.rs;
-        assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
-        const secondaryOplog = secondary.getDB("local").oplog.rs;
-        assert.lte(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
-
-        const coll = primary.getDB("test").test;
-        assert.commandWorked(coll.insert({}));
-
-        jsTestLog("Prepare a transaction");
-
-        const session = primary.startSession();
-        session.startTransaction();
-        assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
-        const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
-        jsTestLog("Get transaction entry from config.transactions");
-
-        const txnEntry = primary.getDB("config").transactions.findOne();
-        assert.eq(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));
-
-        assert.soonNoExcept(() => {
-            const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
-            assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
-            return true;
-        });
-
-        jsTestLog("Find prepare oplog entry");
-
-        const oplogEntry = primaryOplog.findOne({prepare: true});
-        assert.eq(oplogEntry.ts, prepareTimestamp, tojson(oplogEntry));
-        // Must already be written on secondary, since the config.transactions entry is.
-        const secondaryOplogEntry = secondaryOplog.findOne({prepare: true});
-        assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
-
-        jsTestLog("Insert documents until oplog exceeds oplogSize");
-
-        // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
-        PrepareHelpers.growOplogPastMaxSize(replSet);
-
-        jsTestLog(
-            `Oplog dataSize = ${primaryOplog.dataSize()}, check the prepare entry still exists`);
-
-        assert.eq(oplogEntry, primaryOplog.findOne({prepare: true}));
-        assert.soon(() => {
-            return secondaryOplog.dataSize() > PrepareHelpers.oplogSizeBytes;
-        });
-        assert.eq(oplogEntry, secondaryOplog.findOne({prepare: true}));
-
-        if (commitOrAbort === "commit") {
-            jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
-            PrepareHelpers.commitTransactionAfterPrepareTS(session, prepareTimestamp);
-        } else if (commitOrAbort === "abort") {
-            jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
-            session.abortTransaction_forTesting();
-        } else {
-            throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
-        }
-
-        PrepareHelpers.awaitOplogTruncation(replSet);
-
-        replSet.stopSet();
-    }
-
-    doTest("commit");
-    doTest("abort");
-})();
diff --git a/jstests/replsets/recovery_preserves_active_txns.js b/jstests/replsets/recovery_preserves_active_txns.js
deleted file mode 100644
index 7c1c35dbcde..00000000000
--- a/jstests/replsets/recovery_preserves_active_txns.js
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * When the oplog size grows during recovery to exceed the configured maximum, the node must
- * truncate the oplog only up to the oldest active transaction timestamp at the time of the last
- * stable checkpoint. The first oplog entry that belongs to an active transaction is preserved, and
- * all entries after it.
- *
- * This tests the oldestActiveTransactionTimestamp, which is calculated from the "startOpTime"
- * field of documents in the config.transactions collection.
- *
- * @tags: [uses_transactions, uses_prepare_transaction]
- */
-
-(function() {
-    "use strict";
-    load("jstests/core/txns/libs/prepare_helpers.js");
-    load("jstests/libs/check_log.js");
-
-    // A new replica set for both the commit and abort tests to ensure the same clean state.
-    function doTest(commitOrAbort) {
-        const replSet = new ReplSetTest({
-            // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
-            nodeOptions:
-                {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
-            nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
-        });
-
-        replSet.startSet(PrepareHelpers.replSetStartSetOptions);
-        replSet.initiate();
-        const primary = replSet.getPrimary();
-        const primaryOplog = primary.getDB("local").oplog.rs;
-        assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
-
-        const coll = primary.getDB("test").test;
-        assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
-
-        jsTestLog("Prepare a transaction");
-
-        const session = primary.startSession();
-        session.startTransaction();
-        assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
-        const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
-        jsTestLog("Insert documents until oplog exceeds oplogSize");
-
-        // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
-        PrepareHelpers.growOplogPastMaxSize(replSet);
-
-        // Oplog grew past maxSize, and it includes the oldest active transaction's entry.
-        var secondary = replSet.getSecondary();
-        function checkSecondaryOplog() {
-            const secondaryOplog = secondary.getDB("local").oplog.rs;
-            assert.soon(() => {
-                return secondaryOplog.dataSize() >= PrepareHelpers.oplogSizeBytes;
-            }, "waiting for secondary oplog to grow", ReplSetTest.kDefaultTimeoutMS);
-            const secondaryOplogEntry = secondaryOplog.findOne({prepare: true});
-            assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
-        }
-        checkSecondaryOplog();
-
-        jsTestLog("Restart the secondary");
-
-        const secondaryId = replSet.getSecondary().nodeId;
-        // Validation can't complete while the active transaction holds a lock.
-        replSet.stop(secondaryId, undefined, {skipValidation: true});
-        secondary = replSet.start(secondaryId, {}, true /* restart */);
-
-        jsTestLog("Restarted");
-
-        replSet.waitForState(replSet.getSecondaries(), ReplSetTest.State.SECONDARY);
-        checkSecondaryOplog();
-
-        if (commitOrAbort === "commit") {
-            jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
-            PrepareHelpers.commitTransactionAfterPrepareTS(session, prepareTimestamp);
-        } else if (commitOrAbort === "abort") {
-            jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
-            session.abortTransaction_forTesting();
-        } else {
-            throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
-        }
-
-        PrepareHelpers.awaitOplogTruncation(replSet);
-
-        // ReplSetTest reacts poorly to restarting a node, end it manually.
-        replSet.stopSet(true, false, {});
-    }
-    doTest("commit");
-    doTest("abort");
-})();
diff --git a/jstests/replsets/rollover_preserves_active_txns.js b/jstests/replsets/rollover_preserves_active_txns.js
index 701c77afaa0..8597d1aa3cc 100644
--- a/jstests/replsets/rollover_preserves_active_txns.js
+++ b/jstests/replsets/rollover_preserves_active_txns.js
@@ -1,9 +1,10 @@
 /**
  * When a primary's oplog size exceeds the configured maximum, it must truncate the oplog only up to
  * the oldest active transaction timestamp at the time of the last stable checkpoint. The first
- * oplog entry that belongs to an active transaction is preserved, and all entries after it.
+ * oplog entry that belongs to a prepared uncommitted transaction is preserved, and all entries
+ * after it.
  *
- * This tests the oldestActiveTransactionTimestamp, which is calculated from the "startOpTime"
+ * This tests the oldestActiveTransactionTimestamp, which is calculated from the "startTimestamp"
  * field of documents in the config.transactions collection.
  *
  * @tags: [uses_transactions, uses_prepare_transaction]
@@ -13,24 +14,27 @@
     "use strict";
     load("jstests/core/txns/libs/prepare_helpers.js");

+    const oplogSizeMB = 1;
+    const oplogSizeBytes = oplogSizeMB * 1024 * 1024;
+    const tenKB = new Array(10 * 1024).join("a");
+
     // A new replica set for both the commit and abort tests to ensure the same clean state.
     function doTest(commitOrAbort) {
         const replSet = new ReplSetTest({
             // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
-            nodeOptions:
-                {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
-            nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
+            nodeOptions: {syncdelay: 1},
+            nodes: 2
         });

-        replSet.startSet(PrepareHelpers.replSetStartSetOptions);
+        replSet.startSet({oplogSize: oplogSizeMB});
         replSet.initiate();
         const primary = replSet.getPrimary();
         const secondary = replSet.getSecondary();
         const primaryOplog = primary.getDB("local").oplog.rs;
-        assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+        assert.lte(primaryOplog.dataSize(), oplogSizeBytes);
         const secondaryOplog = secondary.getDB("local").oplog.rs;
-        assert.lte(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+        assert.lte(secondaryOplog.dataSize(), oplogSizeBytes);

         const coll = primary.getDB("test").test;
         assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));

@@ -64,14 +68,16 @@
         jsTestLog("Insert documents until oplog exceeds oplogSize");

         // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
-        PrepareHelpers.growOplogPastMaxSize(replSet);
+        while (primaryOplog.dataSize() <= 2 * oplogSizeBytes) {
+            assert.commandWorked(coll.insert({tenKB: tenKB}));
+        }

         jsTestLog(
             `Oplog dataSize = ${primaryOplog.dataSize()}, check the prepare entry still exists`);

         assert.eq(oplogEntry, primaryOplog.findOne({prepare: true}));
         assert.soon(() => {
-            return secondaryOplog.dataSize() > PrepareHelpers.oplogSizeBytes;
+            return secondaryOplog.dataSize() > oplogSizeBytes;
         });
         assert.eq(oplogEntry, secondaryOplog.findOne({prepare: true}));

@@ -85,7 +91,32 @@
             throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
         }

-        PrepareHelpers.awaitOplogTruncation(replSet);
+        jsTestLog("Add writes after transaction finished to trigger oplog reclamation");
+
+        // Old entries are reclaimed when oplog size reaches new milestone. With a 1MB oplog,
+        // milestones are every 0.1 MB (see WiredTigerRecordStore::OplogStones::OplogStones) so
+        // write about 0.2 MB to be certain.
+        for (var i = 0; i < 200; i++) {
+            assert.commandWorked(coll.insert({tenKB: tenKB}));
+        }
+
+        jsTestLog("Waiting for oplog to shrink to 1MB");
+
+        for (let [nodeName, oplog] of[["primary", primaryOplog], ["secondary", secondaryOplog]]) {
+            assert.soon(function() {
+                const dataSize = oplog.dataSize();
+                const prepareEntryRemoved = (oplog.findOne({prepare: true}) === null);
+                print(
+                    `${nodeName} oplog dataSize: ${dataSize}, prepare entry removed: ${prepareEntryRemoved}`);
+                // The oplog milestone system allows the oplog to grow to 110% its max size.
+                if (dataSize < 1.1 * oplogSizeBytes && prepareEntryRemoved) {
+                    return true;
+                }
+
+                assert.commandWorked(coll.insert({tenKB: tenKB}, {writeConcern: {w: "majority"}}));
+                return false;
+            }, `waiting for ${nodeName} oplog reclamation`, ReplSetTest.kDefaultTimeoutMS, 1000);
+        }

         replSet.stopSet();
     }
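After this revert, rollover_preserves_active_txns.js is the only one of these tests that remains, and it now carries the oplog-growth and reclamation logic inline rather than through the deleted helpers. One detail worth noting in its reclamation loop: truncation only fires when the oplog crosses a size milestone, so the test must keep inserting after the transaction finishes. A rough sketch of that arithmetic, using only the numbers from the comment above (1 MB oplog, milestones every 0.1 MB, roughly 10 KB filler documents); the loop above inserts 200 such documents, comfortably more than the minimum.

    const oplogSizeBytes = 1 * 1024 * 1024;      // the test's 1 MB oplog
    const milestoneBytes = oplogSizeBytes / 10;  // reclamation milestones every ~0.1 MB
    const docBytes = 10 * 1024;                  // each {tenKB: ...} document is ~10 KB

    // Writing about two milestones' worth (~0.2 MB) guarantees at least one milestone
    // boundary is crossed after the transaction commits or aborts.
    const docsNeeded = Math.ceil((2 * milestoneBytes) / docBytes);
    print(`about ${docsNeeded} inserts are enough to trigger reclamation`);  // ~21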
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index 058824de638..6a417ff9a7a 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -383,7 +383,7 @@ public:
         status = ReplicationCoordinator::get(opCtx)->processReplSetReconfig(opCtx,
                                                                             parsedArgs,
                                                                             &result);

-        Lock::GlobalLock globalLock(opCtx, MODE_IX);
+        Lock::GlobalWrite globalWrite(opCtx);
         WriteUnitOfWork wuow(opCtx);
         if (status.isOK() && !parsedArgs.force) {