summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAli Mir <ali.mir@mongodb.com>2020-05-14 14:25:09 -0400
committerAli Mir <ali.mir@mongodb.com>2020-05-29 00:06:40 -0400
commit7434646ab0c2f94c4ae1cdefc1b5c3764964eb30 (patch)
tree4f1f4bc529023333ec87cd2bb7d04805a334a19c
parente44e6b4f3c0b7fbd604a452ac8afc59dda3f54fa (diff)
downloadmongo-SERVER-47782.tar.gz
SERVER-47782 Support atClusterTime in dbHash command SERVER-47782
-rw-r--r-- jstests/replsets/dbhash_read_at_cluster_time.js          | 92
-rw-r--r-- jstests/replsets/prepare_conflict_read_concern_behavior.js | 14
-rw-r--r-- jstests/replsets/prepare_transaction_read_at_cluster_time.js | 168
-rw-r--r-- src/mongo/db/commands/dbhash.cpp                        | 16
4 files changed, 227 insertions(+), 63 deletions(-)
diff --git a/jstests/replsets/dbhash_read_at_cluster_time.js b/jstests/replsets/dbhash_read_at_cluster_time.js
index e8c42b4e57f..99a4517f926 100644
--- a/jstests/replsets/dbhash_read_at_cluster_time.js
+++ b/jstests/replsets/dbhash_read_at_cluster_time.js
@@ -1,7 +1,8 @@
/**
- * Tests that "$_internalReadAtClusterTime" is supported by the "dbHash" command.
+ * Tests that "$_internalReadAtClusterTime" and "snapshot" level
+ * read concern are supported by the "dbHash" command.
*
- * @tags: [uses_transactions]
+ * @tags: [uses_transactions, requires_fcv_46, requires_majority_read_concern]
*/
(function() {
"use strict";
@@ -18,7 +19,6 @@ const secondary = rst.getSecondary();
const session = primary.startSession({causalConsistency: false});
const db = session.getDatabase("test");
-let txnNumber = 0;
// We prevent the replica set from advancing oldest_timestamp. This ensures that the snapshot
// associated with 'clusterTime' is retained for the duration of this test.
@@ -38,7 +38,17 @@ let res = assert.commandWorked(db.runCommand({
$_internalReadAtClusterTime: clusterTime,
}));
-const hash1 = {
+const internalAtClusterTimeHashBefore = {
+ collections: res.collections,
+ md5: res.md5
+};
+
+res = assert.commandWorked(db.runCommand({
+ dbHash: 1,
+ readConcern: {level: "snapshot", atClusterTime: clusterTime},
+}));
+
+const atClusterTimeHashBefore = {
collections: res.collections,
md5: res.md5
};
@@ -50,32 +60,65 @@ const hash1 = {
// on the secondary.
assert.commandWorked(db.mycoll.insert({_id: 2}, {writeConcern: {w: "majority"}}));
-// However, using $_internalReadAtClusterTime to read at the opTime of the first insert should
-// return the same md5sum as it did originally.
+// However, using $_internalReadAtClusterTime or snapshot read concern to read at the opTime of the
+// first insert should return the same md5sum as it did originally.
res = assert.commandWorked(db.runCommand({
dbHash: 1,
$_internalReadAtClusterTime: clusterTime,
}));
-const hash2 = {
+const internalAtClusterTimeHashAfter = {
collections: res.collections,
md5: res.md5
};
-assert.eq(hash1, hash2, "primary returned different dbhash after second insert");
+
+res = assert.commandWorked(db.runCommand({
+ dbHash: 1,
+ readConcern: {level: "snapshot", atClusterTime: clusterTime},
+}));
+
+const atClusterTimeHashAfter = {
+ collections: res.collections,
+ md5: res.md5
+};
+
+assert.eq(internalAtClusterTimeHashBefore,
+ internalAtClusterTimeHashAfter,
+ "primary returned different dbhash after " +
+ "second insert while using $_internalReadAtClusterTime");
+assert.eq(atClusterTimeHashBefore,
+ atClusterTimeHashAfter,
+ "primary returned different dbhash after " +
+ "second insert while using \"snapshot\" level read concern");
{
const secondarySession = secondary.startSession({causalConsistency: false});
const secondaryDB = secondarySession.getDatabase("test");
- // Using $_internalReadAtClusterTime to read at the opTime of the first insert should return
- // the same md5sum on the secondary as it did on the primary.
+ // Using $_internalReadAtClusterTime or snapshot read concern to read at the opTime
+ // of the first insert should return the same md5sum on the secondary as it did on the primary.
res = assert.commandWorked(secondaryDB.runCommand({
dbHash: 1,
$_internalReadAtClusterTime: clusterTime,
}));
- const secondaryHash = {collections: res.collections, md5: res.md5};
- assert.eq(hash1, secondaryHash, "primary and secondary have different dbhash");
+ const internalAtClusterTimeSecondaryHash = {collections: res.collections, md5: res.md5};
+
+ res = assert.commandWorked(secondaryDB.runCommand({
+ dbHash: 1,
+ readConcern: {level: "snapshot", atClusterTime: clusterTime},
+ }));
+
+ const atClusterTimeSecondaryHash = {collections: res.collections, md5: res.md5};
+
+ assert.eq(internalAtClusterTimeHashBefore,
+ internalAtClusterTimeSecondaryHash,
+ "primary and secondary have different dbhash " +
+ "while using $_internalReadAtClusterTime.");
+ assert.eq(atClusterTimeHashBefore,
+ atClusterTimeSecondaryHash,
+ "primary returned different dbhash " +
+ "while using \"snapshot\" level read concern");
}
{
@@ -88,19 +131,34 @@ assert.eq(hash1, hash2, "primary returned different dbhash after second insert")
assert.commandWorked(otherDB.mycoll.insert({_id: 3}));
// It should be possible to run the "dbHash" command with "$_internalReadAtClusterTime"
- // concurrently.
+ // or snapshot read concern concurrently.
res = assert.commandWorked(db.runCommand({
dbHash: 1,
$_internalReadAtClusterTime: clusterTime,
}));
- const hash3 = {collections: res.collections, md5: res.md5};
- assert.eq(hash1, hash3, "primary returned different dbhash after third insert");
+ const concurrentInternalAtClusterTimeHash = {collections: res.collections, md5: res.md5};
+
+ res = assert.commandWorked(db.runCommand({
+ dbHash: 1,
+ readConcern: {level: "snapshot", atClusterTime: clusterTime},
+ }));
+
+ const concurrentAtClusterTimeHash = {collections: res.collections, md5: res.md5};
+
+ assert.eq(internalAtClusterTimeHashBefore,
+ concurrentInternalAtClusterTimeHash,
+ "primary returned different dbhash after " +
+ "third insert while using $_internalReadAtClusterTime.");
+ assert.eq(atClusterTimeHashBefore,
+ concurrentAtClusterTimeHash,
+ "primary returned different dbhash after " +
+ "third insert while using \"snapshot\" level read concern");
// However, the "dbHash" command should block behind the transaction if
// "$_internalReadAtClusterTime" wasn't specified.
- res = assert.commandFailedWithCode(db.runCommand({dbHash: 1, maxTimeMS: 1000}),
- ErrorCodes.MaxTimeMSExpired);
+ assert.commandFailedWithCode(db.runCommand({dbHash: 1, maxTimeMS: 1000}),
+ ErrorCodes.MaxTimeMSExpired);
assert.commandWorked(otherSession.abortTransaction_forTesting());
otherSession.endSession();
diff --git a/jstests/replsets/prepare_conflict_read_concern_behavior.js b/jstests/replsets/prepare_conflict_read_concern_behavior.js
index 4ad65f75506..7b092198dc0 100644
--- a/jstests/replsets/prepare_conflict_read_concern_behavior.js
+++ b/jstests/replsets/prepare_conflict_read_concern_behavior.js
@@ -26,7 +26,19 @@
"use strict";
load("jstests/core/txns/libs/prepare_helpers.js");
-const replTest = new ReplSetTest({nodes: 2});
+// Snapshot read concern for the dbHash command is only available when enableTestCommands=true.
+// To test correctly client behavior with dbHash, we set enableTestCommands=false. We modify the
+// values of roleGraphInvalidationIsFatal and authenticationDatabase in order for this test to work
+// on inMemory build variants.
+TestData.enableTestCommands = false;
+TestData.roleGraphInvalidationIsFatal = false;
+TestData.authenticationDatabase = "local";
+const replTest = new ReplSetTest({
+ nodes: {
+ node0: {setParameter: "enableTestCommands=1"},
+ node1: {setParameter: "enableTestCommands=0"}
+ }
+});
replTest.startSet();
replTest.initiate();
diff --git a/jstests/replsets/prepare_transaction_read_at_cluster_time.js b/jstests/replsets/prepare_transaction_read_at_cluster_time.js
index 40f59c90e76..eb3eb2922a7 100644
--- a/jstests/replsets/prepare_transaction_read_at_cluster_time.js
+++ b/jstests/replsets/prepare_transaction_read_at_cluster_time.js
@@ -1,8 +1,8 @@
/**
* Ensures that performing a write in a prepared transaction, followed by a write outside of a
- * transaction, it is possible to specify '$_internalReadAtClusterTime' as the timestamp of
- * the second write for 'find' and 'dbHash'. The commands should block until the prepared
- * transaction is committed or aborted.
+ * transaction, it is possible to specify either '$_internalReadAtClusterTime' or snapshot read
+ * concern with 'atClusterTime' as the timestamp of the second write for 'find' and 'dbHash'. The
+ * commands should block until the prepared transaction is committed or aborted.
*
* @tags: [uses_transactions, uses_prepare_transaction]
*/
@@ -12,16 +12,19 @@
load("jstests/core/txns/libs/prepare_helpers.js");
load("jstests/libs/parallelTester.js");
-const runDBHashFn = (host, dbName, clusterTime) => {
+const runDBHashFn = (host, dbName, cmd) => {
const conn = new Mongo(host);
const db = conn.getDB(dbName);
conn.setSlaveOk();
- let firstHash = assert.commandWorked(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: eval(clusterTime),
- }));
-
+ // When passing the cmd object through a ScopedThread constructor,
+ // the Timestamp value does not serialize correctly. In order to correct this behavior
+ // and provide the correct serialization of Timestamp, we rehydrate using eval().
+ cmd.hasOwnProperty('$_internalReadAtClusterTime')
+ ? cmd.$_internalReadAtClusterTime = eval(cmd.$_internalReadAtClusterTime)
+ : cmd.readConcern.atClusterTime = eval(cmd.readConcern.atClusterTime);
+
+ let firstHash = assert.commandWorked(db.runCommand(cmd));
// This code will execute once the prepared transaction is committed as the call above will
// be blocked until an abort or commit happens. Ensure that running dbHash here yields the
// same result as above.
@@ -33,29 +36,27 @@ const runDBHashFn = (host, dbName, clusterTime) => {
return firstHash;
};
-const runFindFn = (host, dbName, collName, clusterTime) => {
+const runFindFn = (host, dbName, cmd, clusterTime) => {
const conn = new Mongo(host);
const db = conn.getDB(dbName);
conn.setSlaveOk();
- assert.commandWorked(db.getSiblingDB(dbName).runCommand({
- find: collName,
- $_internalReadAtClusterTime: eval(clusterTime),
- }));
+ // When passing the cmd object through a ScopedThread constructor,
+ // the Timestamp value does not serialize correctly. In order to correct this behavior
+ // and provide the correct serialization of Timestamp, we rehydrate using eval().
+ cmd.hasOwnProperty('$_internalReadAtClusterTime')
+ ? cmd.$_internalReadAtClusterTime = eval(clusterTime)
+ : cmd.readConcern.atClusterTime = eval(clusterTime);
+ assert.commandWorked(db.getSiblingDB(dbName).runCommand(cmd));
};
-const assertOpHasPrepareConflict = (db, commandName) => {
+const assertOpHasPrepareConflict = (db, opsObj) => {
assert.soon(
() => {
- const ops = db.currentOp({
- "command.$_internalReadAtClusterTime": {$exists: true},
- ["command." + commandName]: {$exists: true},
- }).inprog;
-
+ const ops = db.currentOp(opsObj).inprog;
if (ops.length === 1) {
return ops[0].prepareReadConflicts > 0;
}
-
return false;
},
() => `Failed to find '${commandName}' command in the ${db.getMongo().host} currentOp()` +
@@ -103,34 +104,99 @@ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
// to the secondary because we're going to read from it at the returned operationTime.
assert.commandWorked(testDB.getCollection(collName).insert({x: 2}, {writeConcern: {w: 2}}));
-// It should be possible to specify '$_internalReadAtClusterTime' as the timestamp of the
+// It should be possible to specify either '$_internalReadAtClusterTime' or snapshot read
+// concern with 'atClusterTime' as the timestamp of the
// second write without an error for dbHash and find.
let clusterTime = testDB.getSession().getOperationTime();
// Run dbHash and find while the prepared transaction has not commit or aborted yet.
// These should block until the prepared transaction commits or aborts if we specify
-// $_internalReadAtClusterTime to be the timestamp of the second write we did, outside of the
-// transaction.
-const dbHashPrimaryThread = new Thread(runDBHashFn, primary.host, dbName, tojson(clusterTime));
-const dbHashSecondaryThread = new Thread(runDBHashFn, secondary.host, dbName, tojson(clusterTime));
+// $_internalReadAtClusterTime or snapshot read concern with 'atClusterTime' to be the timestamp of
+// the second write we did, outside of the transaction.
+let cmd = {dbHash: 1, $_internalReadAtClusterTime: tojson(clusterTime)}
+
+const dbHashInternalClusterTimePrimaryThread = new Thread(runDBHashFn, primary.host, dbName, cmd);
+const dbHashInternalClusterTimeSecondaryThread =
+ new Thread(runDBHashFn, secondary.host, dbName, cmd);
+
+dbHashInternalClusterTimePrimaryThread.start();
+dbHashInternalClusterTimeSecondaryThread.start();
-dbHashPrimaryThread.start();
-dbHashSecondaryThread.start();
+let curOpObj = {
+ "command.$_internalReadAtClusterTime": {$exists: true},
+ "command.dbHash": {$exists: true},
+}
-assertOpHasPrepareConflict(testDB, "dbHash");
-assertOpHasPrepareConflict(testDBSecondary, "dbHash");
+assertOpHasPrepareConflict(testDB, curOpObj);
+assertOpHasPrepareConflict(testDBSecondary, curOpObj);
-// Run 'find' with '$_internalReadAtClusterTime' specified.
-const findPrimaryThread =
- new Thread(runFindFn, primary.host, dbName, collName, tojson(clusterTime));
-const findSecondaryThread =
- new Thread(runFindFn, secondary.host, dbName, collName, tojson(clusterTime));
+cmd = {
+ dbHash: 1,
+ readConcern: {level: "snapshot", atClusterTime: tojson(clusterTime)}
+}
-findPrimaryThread.start();
-findSecondaryThread.start();
+const dbHashClusterTimePrimaryThread =
+ new Thread(runDBHashFn, primary.host, dbName, cmd, tojson(clusterTime));
+const dbHashClusterTimeSecondaryThread =
+ new Thread(runDBHashFn, secondary.host, dbName, cmd, tojson(clusterTime));
-assertOpHasPrepareConflict(testDB, "find");
-assertOpHasPrepareConflict(testDBSecondary, "find");
+dbHashClusterTimePrimaryThread.start();
+dbHashClusterTimeSecondaryThread.start();
+
+curOpObj = {
+ "command.readConcern.atClusterTime": {$exists: true},
+ "command.dbHash": {$exists: true},
+}
+
+assertOpHasPrepareConflict(testDB, curOpObj);
+assertOpHasPrepareConflict(testDBSecondary, curOpObj);
+
+// Run 'find' with '$_internalReadAtClusterTime' and snapshot read concern specified.
+
+cmd = {
+ find: collName,
+ $_internalReadAtClusterTime: eval(clusterTime),
+};
+
+const findInternalClusterTimePrimaryThread =
+ new Thread(runFindFn, primary.host, dbName, cmd, tojson(clusterTime));
+const findInternalClusterTimeSecondaryThread =
+ new Thread(runFindFn, secondary.host, dbName, cmd, tojson(clusterTime));
+
+findInternalClusterTimePrimaryThread.start();
+findInternalClusterTimeSecondaryThread.start();
+
+curOpObj = {
+ "command.$_internalReadAtClusterTime": {$exists: true},
+ "command.find": {$exists: true},
+};
+
+assertOpHasPrepareConflict(testDB, curOpObj);
+assertOpHasPrepareConflict(testDBSecondary, curOpObj);
+
+cmd = {
+ find: collName,
+ readConcern: {
+ level: "snapshot",
+ atClusterTime: eval(clusterTime),
+ }
+};
+
+const findClusterTimePrimaryThread =
+ new Thread(runFindFn, primary.host, dbName, cmd, tojson(clusterTime));
+const findClusterTimeSecondaryThread =
+ new Thread(runFindFn, secondary.host, dbName, cmd, tojson(clusterTime));
+
+findClusterTimePrimaryThread.start();
+findClusterTimeSecondaryThread.start();
+
+curOpObj = {
+ "command.readConcern.atClusterTime": {$exists: true},
+ "command.find": {$exists: true},
+};
+
+assertOpHasPrepareConflict(testDB, curOpObj);
+assertOpHasPrepareConflict(testDBSecondary, curOpObj);
// Run a series of DDL operations which shouldn't block before committing the prepared
// transaction.
@@ -149,18 +215,30 @@ assert.commandWorked(
PrepareHelpers.commitTransaction(session, prepareTimestamp);
session.endSession();
-dbHashPrimaryThread.join();
-dbHashSecondaryThread.join();
+dbHashInternalClusterTimePrimaryThread.join();
+dbHashInternalClusterTimeSecondaryThread.join();
+
+dbHashClusterTimePrimaryThread.join();
+dbHashClusterTimeSecondaryThread.join();
// Ensure the dbHashes across the replica set match.
-const primaryDBHash = dbHashPrimaryThread.returnData();
-const secondaryDBHash = dbHashSecondaryThread.returnData();
+let primaryDBHash = dbHashInternalClusterTimePrimaryThread.returnData();
+let secondaryDBHash = dbHashInternalClusterTimeSecondaryThread.returnData();
assert.eq(primaryDBHash.collections, secondaryDBHash.collections);
assert.eq(primaryDBHash.md5, secondaryDBHash.md5);
-findPrimaryThread.join();
-findSecondaryThread.join();
+primaryDBHash = dbHashClusterTimePrimaryThread.returnData();
+secondaryDBHash = dbHashClusterTimeSecondaryThread.returnData();
+
+assert.eq(primaryDBHash.collections, secondaryDBHash.collections);
+assert.eq(primaryDBHash.md5, secondaryDBHash.md5);
+
+findInternalClusterTimePrimaryThread.join();
+findInternalClusterTimeSecondaryThread.join();
+
+findClusterTimePrimaryThread.join();
+findClusterTimeSecondaryThread.join();
rst.stopSet();
}());
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 2d686835f81..30ac4e43d5c 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -88,6 +88,22 @@ public:
return false;
}
+ ReadConcernSupportResult supportsReadConcern(const BSONObj& cmdObj,
+ repl::ReadConcernLevel level) const final {
+
+ static const Status kReadConcernNotSupported{ErrorCodes::InvalidOptions,
+ "read concern not supported"};
+ static const Status kDefaultReadConcernNotPermitted{ErrorCodes::InvalidOptions,
+ "default read concern not permitted"};
+ // The dbHash command only supports local and snapshot read concern. Additionally, snapshot
+ // read concern is only supported if test commands are enabled.
+ return {{level != repl::ReadConcernLevel::kLocalReadConcern &&
+ (!getTestCommandsEnabled() ||
+ level != repl::ReadConcernLevel::kSnapshotReadConcern),
+ kReadConcernNotSupported},
+ kDefaultReadConcernNotPermitted};
+ }
+
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) const {