author | Adam Cooper <adam.cooper@mongodb.com> | 2020-03-26 14:24:22 -0400
---|---|---
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2020-08-13 14:23:14 +0000
commit | 069f443e2d9e62a09b73e9f65ad50f805c8cedaa (patch) |
tree | 791b856744b9d29f9381244fa1f001411577fd3e |
parent | 86d5aa1f6e698d7b89a614cce25479e20cc6ae6c (diff) |
download | mongo-069f443e2d9e62a09b73e9f65ad50f805c8cedaa.tar.gz |
SERVER-45803 mongodecrypt needs a ServiceContext
(cherry picked from commit 4d5fbc34882b3211063277ae80867f23e4eeea06)
(cherry picked from commit ff468c0b65af2cc978aa549d0ec7ed56553abf5a)
-rw-r--r-- | jstests/core/txns/libs/prepare_helpers.js | 180
-rw-r--r-- | jstests/replsets/libs/rollback_files.js | 72
-rw-r--r-- | src/mongo/shell/replsettest.js | 7

3 files changed, 259 insertions, 0 deletions
diff --git a/jstests/core/txns/libs/prepare_helpers.js b/jstests/core/txns/libs/prepare_helpers.js
new file mode 100644
index 00000000000..6c734e56d98
--- /dev/null
+++ b/jstests/core/txns/libs/prepare_helpers.js
@@ -0,0 +1,180 @@
+/**
+ * Helper functions for testing prepared transactions.
+ *
+ * @tags: [uses_transactions]
+ *
+ */
+const PrepareHelpers = (function() {
+    /**
+     * Prepares the active transaction on the session. This expects the 'prepareTransaction'
+     * command to succeed and return a non-null 'prepareTimestamp'.
+     *
+     * @return {Timestamp} the transaction's prepareTimestamp
+     */
+    function prepareTransaction(session, writeConcernOption = {w: "majority"}) {
+        assert(session);
+
+        const res = assert.commandWorked(session.getDatabase('admin').adminCommand(
+            {prepareTransaction: 1, writeConcern: writeConcernOption}));
+        assert(res.prepareTimestamp,
+               "prepareTransaction did not return a 'prepareTimestamp': " + tojson(res));
+        const prepareTimestamp = res.prepareTimestamp;
+        assert(prepareTimestamp instanceof Timestamp,
+               'prepareTimestamp was not a Timestamp: ' + tojson(res));
+        assert.neq(
+            prepareTimestamp, Timestamp(0, 0), "prepareTimestamp cannot be null: " + tojson(res));
+        return prepareTimestamp;
+    }
+
+    /**
+     * Commits the active transaction on the session.
+     *
+     * @return {object} the response to the 'commitTransaction' command.
+     */
+    function commitTransaction(session, commitTimestamp) {
+        assert(session);
+
+        let cmd = {commitTransaction: 1, commitTimestamp: commitTimestamp};
+        const writeConcern = session.getTxnWriteConcern_forTesting();
+        if (writeConcern !== undefined) {
+            cmd.writeConcern = writeConcern;
+        }
+
+        const res = session.getDatabase('admin').adminCommand(cmd);
+
+        // End the transaction on the shell session.
+        if (res.ok) {
+            assert.commandWorked(session.commitTransaction_forTesting());
+        } else {
+            assert.commandWorkedOrFailedWithCode(session.abortTransaction_forTesting(),
+                                                 ErrorCodes.NoSuchTransaction);
+        }
+        return res;
+    }
+
+    /**
+     * Creates a session object on the given connection with the provided 'lsid'.
+     *
+     * @return {session} the session created.
+     */
+    function createSessionWithGivenId(conn, lsid, sessionOptions = {}) {
+        const session = conn.startSession(sessionOptions);
+
+        const oldId = session._serverSession.handle.getId();
+        print("Overriding sessionID " + tojson(oldId) + " with " + tojson(lsid) + " for test.");
+        session._serverSession.handle.getId = () => lsid;
+
+        return session;
+    }
+
+    const oplogSizeMB = 1;
+    const oplogSizeBytes = oplogSizeMB * 1024 * 1024;
+    const tenKB = new Array(10 * 1024).join("a");
+
+    /**
+     * Writes until the oplog exceeds its configured maximum, proving that the node keeps as much
+     * oplog as necessary to preserve entries for the oldest active transaction.
+     */
+    function growOplogPastMaxSize(replSet) {
+        const primary = replSet.getPrimary();
+        const oplog = primary.getDB("local").oplog.rs;
+        assert.lte(oplog.dataSize(), oplogSizeBytes);
+        const coll = primary.getDB("growOplogPastMaxSize").growOplogPastMaxSize;
+        const numNodes = replSet.nodeList().length;
+        while (oplog.dataSize() <= 2 * oplogSizeBytes) {
+            assert.commandWorked(coll.insert({tenKB: tenKB}, {writeConcern: {w: numNodes}}));
+        }
+
+        print(`Oplog on ${primary} dataSize = ${oplog.dataSize()}`);
+    }
+
+    /**
+     * Waits for the oplog to be truncated, proving that once a transaction finishes its oplog
+     * entries can be reclaimed.
+     */
+    function awaitOplogTruncation(replSet) {
+        print(`Waiting for oplog to shrink to ${oplogSizeMB} MB`);
+        const primary = replSet.getPrimary();
+        const primaryOplog = primary.getDB("local").oplog.rs;
+        const secondary = replSet.getSecondary();
+        const secondaryOplog = secondary.getDB("local").oplog.rs;
+
+        // Old entries are reclaimed when oplog size reaches a new milestone. With a 1MB oplog,
+        // milestones are every 0.1 MB (see WiredTigerRecordStore::OplogStones::OplogStones) so
+        // write about 0.2 MB to be certain.
+        print("Add writes after transaction finished to trigger oplog reclamation");
+        const tenKB = new Array(10 * 1024).join("a");
+        const coll = primary.getDB("awaitOplogTruncation").awaitOplogTruncation;
+        const numNodes = replSet.nodeList().length;
+        for (let i = 0; i < 20; i++) {
+            assert.commandWorked(coll.insert({tenKB: tenKB}, {writeConcern: {w: numNodes}}));
+        }
+
+        for (let [nodeName, oplog] of [["primary", primaryOplog], ["secondary", secondaryOplog]]) {
+            assert.soon(function() {
+                const dataSize = oplog.dataSize();
+                const prepareEntryRemoved = (oplog.findOne({prepare: true}) === null);
+                print(`${nodeName} oplog dataSize: ${dataSize},` +
+                      ` prepare entry removed: ${prepareEntryRemoved}`);
+
+                // The oplog milestone system allows the oplog to grow to 110% its max size.
+                if (dataSize < 1.1 * oplogSizeBytes && prepareEntryRemoved) {
+                    return true;
+                }
+
+                assert.commandWorked(coll.insert({tenKB: tenKB}, {writeConcern: {w: numNodes}}));
+                return false;
+            }, `waiting for ${nodeName} oplog reclamation`, ReplSetTest.kDefaultTimeoutMS, 1000);
+        }
+    }
+
+    /**
+     * Waits for the oplog entry of the given timestamp to be majority committed.
+     */
+    function awaitMajorityCommitted(replSet, timestamp) {
+        print(`Waiting for majority commit point to advance past the given timestamp ${timestamp}`);
+        const primary = replSet.getPrimary();
+        assert.soon(() => {
+            const ts = assert.commandWorked(primary.adminCommand({replSetGetStatus: 1}))
+                           .optimes.lastCommittedOpTime.ts;
+            if (timestampCmp(ts, timestamp) >= 0) {
+                print(`Finished awaiting lastCommittedOpTime.ts, now at ${ts}`);
+                return true;
+            } else {
+                print(`Awaiting lastCommittedOpTime.ts, now at ${ts}`);
+                return false;
+            }
+        }, "Timeout waiting for majority commit point", ReplSetTest.kDefaultTimeoutMS, 1000);
+    }
+
+    function findPrepareEntry(oplogColl) {
+        return oplogColl.findOne({op: "c", "o.prepare": true});
+    }
+
+    /**
+     * Retrieves the oldest required timestamp from the serverStatus output.
+     *
+     * @return {Timestamp} oldest required timestamp for crash recovery.
+     */
+    function getOldestRequiredTimestampForCrashRecovery(database) {
+        const res = database.serverStatus().storageEngine;
+        const ts = res.oldestRequiredTimestampForCrashRecovery;
+        assert(ts instanceof Timestamp,
+               'oldestRequiredTimestampForCrashRecovery was not a Timestamp: ' + tojson(res));
+        return ts;
+    }
+
+    return {
+        prepareTransaction: prepareTransaction,
+        commitTransaction: commitTransaction,
+        createSessionWithGivenId: createSessionWithGivenId,
+        oplogSizeMB: oplogSizeMB,
+        oplogSizeBytes: oplogSizeBytes,
+        replSetStartSetOptions: {oplogSize: oplogSizeMB},
+        growOplogPastMaxSize: growOplogPastMaxSize,
+        awaitOplogTruncation: awaitOplogTruncation,
+        awaitMajorityCommitted: awaitMajorityCommitted,
+        findPrepareEntry: findPrepareEntry,
+        getOldestRequiredTimestampForCrashRecovery: getOldestRequiredTimestampForCrashRecovery,
+    };
+})();
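
For context, here is one plausible way a jstest might drive `PrepareHelpers.prepareTransaction` and `PrepareHelpers.commitTransaction` — a minimal sketch, not part of this patch; the single-node replica set and the `test.coll` namespace are illustrative assumptions.

```js
// Hypothetical usage sketch: prepare a transaction, then commit it at its
// prepareTimestamp.
load("jstests/core/txns/libs/prepare_helpers.js");

const rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();

const primary = rst.getPrimary();
// Create the collection up front so the transaction only does a plain insert.
assert.commandWorked(primary.getDB("test").createCollection("coll"));

const session = primary.startSession();
const sessionDB = session.getDatabase("test");
session.startTransaction();
assert.commandWorked(sessionDB.coll.insert({_id: 1}));

// Prepare the transaction; the helper asserts a valid prepareTimestamp came back.
const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
// Commit at the prepareTimestamp; the helper also ends the shell session's transaction.
assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));

rst.stopSet();
```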
diff --git a/jstests/replsets/libs/rollback_files.js b/jstests/replsets/libs/rollback_files.js
new file mode 100644
index 00000000000..b87ab7595d7
--- /dev/null
+++ b/jstests/replsets/libs/rollback_files.js
@@ -0,0 +1,72 @@
+/**
+ * Verifies that the rollback file for a given database path and namespace exists and contains
+ * the 'expectedDocs', in any order. If there are multiple rollback files for the given
+ * collection, chooses one of those files arbitrarily to read data from. Note that a rollback
+ * file is simply a sequence of concatenated BSON objects, which is a format that can be read by
+ * the bsondump tool.
+ */
+function checkRollbackFiles(dbPath, nss, expectedDocs) {
+    // Check the path of the rollback directory.
+    const rollbackDir = dbPath + '/rollback';
+    assert(pathExists(rollbackDir), 'directory for rollback files does not exist: ' + rollbackDir);
+
+    // We try to handle both possible rollback file layouts here. The first layout, used by the
+    // older 'rollbackViaRefetch' algorithm, puts rollback files directly inside the /rollback
+    // directory with a naming scheme of '<db>.<collection>.<timestamp>.bson'. The newer layout,
+    // used by recover-to-timestamp (RTT) rollback, places them inside a
+    // 'rollback/<db>.<collection>' directory with a file naming scheme of
+    // 'removed.<timestamp>.bson'. The data formats of the files themselves should be the same in
+    // both cases, though. These file layouts are documented here:
+    // https://docs.mongodb.com/manual/core/replica-set-rollbacks/#collect-rollback-data.
+
+    function getRollbackViaRefetchRollbackFile() {
+        let files = listFiles(rollbackDir);
+        let rollbackFiles = files.filter(f => !f.isDirectory && f.baseName.startsWith(nss));
+        assert.gte(rollbackFiles.length,
+                   1,
+                   "No rollbackViaRefetch rollback files found for namespace: " + nss);
+        return rollbackFiles[0].name;
+    }
+
+    function getRTTRollbackFile() {
+        let rollbackFiles = listFiles(rollbackDir + "/" + nss);
+        assert.gte(rollbackFiles.length, 1, "No RTT rollback files found for namespace: " + nss);
+        return rollbackFiles[0].name;
+    }
+
+    // If all the objects in the rollback directory are files, not directories, then this implies
+    // the rollback files have been written using the rollbackViaRefetch mechanism. Otherwise, we
+    // assume the files are written using the RTT mechanism.
+    let rollbackFile;
+    if (listFiles(rollbackDir).every(f => !f.isDirectory)) {
+        print("Assuming rollback files written using the 'rollbackViaRefetch' layout.");
+        rollbackFile = getRollbackViaRefetchRollbackFile();
+    } else {
+        print("Assuming rollback files written using the 'RTT' layout.");
+        rollbackFile = getRTTRollbackFile();
+    }
+
+    print("Found rollback file: " + rollbackFile);
+
+    // If the rollback BSON file is encrypted, don't try to check the data contents. Checking its
+    // existence is sufficient.
+    if (rollbackFile.endsWith(".enc")) {
+        print("Bypassing check of rollback file data since it is encrypted.");
+        return;
+    }
+
+    // Windows doesn't always play nice with the bsondump tool and the way we pass arguments to
+    // it. Checking the existence of the rollback directory above should be sufficient on
+    // Windows.
+    if (_isWindows()) {
+        print("Bypassing check of rollback file data on Windows.");
+        return;
+    }
+
+    // Parse the BSON rollback file and check for the right documents. The documents may be
+    // written out in an arbitrary order, so we just check the document set.
+    let tmpJSONFile = rollbackDir + "/rollback_tmp.json";
+    let exitCode =
+        MongoRunner.runMongoTool("bsondump", {outFile: tmpJSONFile, bsonFile: rollbackFile});
+    assert.eq(exitCode, 0, "bsondump failed to parse the rollback file");
+    let docs = cat(tmpJSONFile).split("\n").filter(l => l.length).map(JSON.parse);
+    assert.sameMembers(docs, expectedDocs);
+}
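
`checkRollbackFiles` pairs naturally with the `getDbPath` accessor added to src/mongo/shell/replsettest.js below. A hedged sketch of a call site — assuming a test fixture where `rst` is the `ReplSetTest`, `rollbackNode` is the node that just underwent rollback, and documents `{_id: 0}` and `{_id: 1}` in the illustrative namespace `test.coll` were rolled back:

```js
// Hypothetical usage sketch, not part of this patch.
load("jstests/replsets/libs/rollback_files.js");

// Resolve the rolled-back node's data directory via the new accessor, then
// verify its rollback file contains exactly the rolled-back documents.
const dbPath = rst.getDbPath(rollbackNode);
checkRollbackFiles(dbPath, "test.coll", [{_id: 0}, {_id: 1}]);
```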
\ No newline at end of file
diff --git a/src/mongo/shell/replsettest.js b/src/mongo/shell/replsettest.js
index a2cc63516e6..82654f1aa72 100644
--- a/src/mongo/shell/replsettest.js
+++ b/src/mongo/shell/replsettest.js
@@ -492,6 +492,13 @@ var ReplSetTest = function(opts) {
         return undefined;
     };
 
+    this.getDbPath = function(node) {
+        // Get a replica set node (check for use of bridge).
+        const n = this.getNodeId(node);
+        const replNode = _useBridge ? _unbridgedNodes[n] : this.nodes[n];
+        return replNode.dbpath;
+    };
+
     this.getPort = function(n) {
         var n = this.getNodeId(n);
         return this.ports[n];
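
The new `getDbPath` accessor mirrors the existing `getPort`: it resolves its argument to a node index first, and when the test runs under mongobridge it reads the dbpath from the unbridged node. A minimal sketch of a call site, with illustrative setup:

```js
// Hypothetical usage sketch, not part of this patch.
const rst = new ReplSetTest({nodes: 2});
rst.startSet();
rst.initiate();

// Like getPort, getDbPath accepts either a connection or a node index.
print("secondary dbpath: " + rst.getDbPath(rst.getSecondary()));
print("node 0 dbpath: " + rst.getDbPath(0));

rst.stopSet();
```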