Diffstat (limited to 'jstests/noPassthrough/readConcern_atClusterTime_noop_write.js')
-rw-r--r--  jstests/noPassthrough/readConcern_atClusterTime_noop_write.js  195
1 file changed, 96 insertions(+), 99 deletions(-)
diff --git a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
index 0a20621ed3e..c065ae258aa 100644
--- a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
+++ b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
@@ -3,107 +3,104 @@
// as an actual opTime on another shard.
// @tags: [requires_sharding, uses_transactions, uses_atclustertime]
(function() {
- "use strict";
- load("jstests/replsets/rslib.js");
-
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
+"use strict";
+load("jstests/replsets/rslib.js");
+
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
+if (!assert.commandWorked(conn.getDB("test").serverStatus())
+ .storageEngine.supportsSnapshotReadConcern) {
+ MongoRunner.stopMongod(conn);
+ return;
+}
+MongoRunner.stopMongod(conn);
+
+const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
+
+// Create database "test0" on shard 0.
+const testDB0 = st.s.getDB("test0");
+assert.commandWorked(testDB0.adminCommand({enableSharding: testDB0.getName()}));
+st.ensurePrimaryShard(testDB0.getName(), st.shard0.shardName);
+assert.commandWorked(testDB0.createCollection("coll0"));
+
+// Create a database "test1" on shard 1.
+const testDB1 = st.s.getDB("test1");
+assert.commandWorked(testDB1.adminCommand({enableSharding: testDB1.getName()}));
+st.ensurePrimaryShard(testDB1.getName(), st.shard1.shardName);
+assert.commandWorked(testDB1.createCollection("coll1"));
+
+const PropagationPreferenceOptions = Object.freeze({kShard: 0, kConfig: 1});
+
+let testNoopWrite = (fromDbName, fromColl, toRS, toDbName, toColl, propagationPreference) => {
+ const fromDBFromMongos = st.s.getDB(fromDbName);
+ const toDBFromMongos = st.s.getDB(toDbName);
+ const configFromMongos = st.s.getDB("config");
+
+ const oplog = toRS.getPrimary().getCollection("local.oplog.rs");
+ let findRes = oplog.findOne({o: {$eq: {"noop write for afterClusterTime read concern": 1}}});
+ assert(!findRes);
+
+ // Perform a write on the fromDB and get its op time.
+ let res = assert.commandWorked(
+ fromDBFromMongos.runCommand({insert: fromColl, documents: [{_id: 0}]}));
+ assert(res.hasOwnProperty("operationTime"), tojson(res));
+ let clusterTime = res.operationTime;
+
+ // Propagate 'clusterTime' to toRS or the config server. This ensures that its next
+ // write will be at time >= 'clusterTime'. We cannot use toDBFromMongos to propagate
+ // 'clusterTime' to the config server, because mongos only routes to the config server
+ // for the 'config' and 'admin' databases.
+ if (propagationPreference == PropagationPreferenceOptions.kConfig) {
+ configFromMongos.coll1.find().itcount();
+ } else {
+        toDBFromMongos[toColl].find().itcount();
}
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
- if (!assert.commandWorked(conn.getDB("test").serverStatus())
- .storageEngine.supportsSnapshotReadConcern) {
- MongoRunner.stopMongod(conn);
- return;
+ // Attempt a snapshot read at 'clusterTime' on toRS. Test that it performs a noop write
+ // to advance its lastApplied optime past 'clusterTime'. The snapshot read itself may
+ // fail if the noop write advances the node's majority commit point past 'clusterTime'
+ // and it releases that snapshot.
+ const toRSSession =
+        toRS.getPrimary().getDB(toDbName).getMongo().startSession({causalConsistency: false});
+
+ toRSSession.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
+    res = toRSSession.getDatabase(toDbName).runCommand({find: toColl});
+ if (res.ok === 0) {
+ assert.commandFailedWithCode(res, ErrorCodes.SnapshotTooOld);
+ assert.commandFailedWithCode(toRSSession.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ } else {
+ assert.commandWorked(toRSSession.commitTransaction_forTesting());
}
- MongoRunner.stopMongod(conn);
- const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
-
- // Create database "test0" on shard 0.
- const testDB0 = st.s.getDB("test0");
- assert.commandWorked(testDB0.adminCommand({enableSharding: testDB0.getName()}));
- st.ensurePrimaryShard(testDB0.getName(), st.shard0.shardName);
- assert.commandWorked(testDB0.createCollection("coll0"));
-
- // Create a database "test1" on shard 1.
- const testDB1 = st.s.getDB("test1");
- assert.commandWorked(testDB1.adminCommand({enableSharding: testDB1.getName()}));
- st.ensurePrimaryShard(testDB1.getName(), st.shard1.shardName);
- assert.commandWorked(testDB1.createCollection("coll1"));
-
- const PropagationPreferenceOptions = Object.freeze({kShard: 0, kConfig: 1});
-
- let testNoopWrite = (fromDbName, fromColl, toRS, toDbName, toColl, propagationPreference) => {
- const fromDBFromMongos = st.s.getDB(fromDbName);
- const toDBFromMongos = st.s.getDB(toDbName);
- const configFromMongos = st.s.getDB("config");
-
- const oplog = toRS.getPrimary().getCollection("local.oplog.rs");
- let findRes =
- oplog.findOne({o: {$eq: {"noop write for afterClusterTime read concern": 1}}});
- assert(!findRes);
-
- // Perform a write on the fromDB and get its op time.
- let res = assert.commandWorked(
- fromDBFromMongos.runCommand({insert: fromColl, documents: [{_id: 0}]}));
- assert(res.hasOwnProperty("operationTime"), tojson(res));
- let clusterTime = res.operationTime;
-
- // Propagate 'clusterTime' to toRS or the config server. This ensures that its next
- // write will be at time >= 'clusterTime'. We cannot use toDBFromMongos to propagate
- // 'clusterTime' to the config server, because mongos only routes to the config server
- // for the 'config' and 'admin' databases.
- if (propagationPreference == PropagationPreferenceOptions.kConfig) {
- configFromMongos.coll1.find().itcount();
- } else {
- toDBFromMongos.toColl.find().itcount();
- }
-
- // Attempt a snapshot read at 'clusterTime' on toRS. Test that it performs a noop write
- // to advance its lastApplied optime past 'clusterTime'. The snapshot read itself may
- // fail if the noop write advances the node's majority commit point past 'clusterTime'
- // and it releases that snapshot.
- const toRSSession = toRS.getPrimary().getDB(toDBFromMongos).getMongo().startSession({
- causalConsistency: false
- });
-
- toRSSession.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: clusterTime}});
- res = toRSSession.getDatabase(toDBFromMongos).runCommand({find: toColl});
- if (res.ok === 0) {
- assert.commandFailedWithCode(res, ErrorCodes.SnapshotTooOld);
- assert.commandFailedWithCode(toRSSession.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- } else {
- assert.commandWorked(toRSSession.commitTransaction_forTesting());
- }
-
- const toRSOpTime = getLastOpTime(toRS.getPrimary()).ts;
-
- assert.gte(toRSOpTime, clusterTime);
-
- findRes = oplog.findOne({o: {$eq: {"noop write for afterClusterTime read concern": 1}}});
- assert(findRes);
- };
-
- //
- // Test noop write. Read from the destination shard.
- //
-
- testNoopWrite("test0", "coll0", st.rs1, "test1", "coll1", PropagationPreferenceOptions.kShard);
-
- //
- // Test noop write. Read from the config server's primary.
- //
-
- testNoopWrite(
- "test0", "coll2", st.configRS, "test1", "coll3", PropagationPreferenceOptions.kConfig);
-
- st.stop();
+ const toRSOpTime = getLastOpTime(toRS.getPrimary()).ts;
+
+ assert.gte(toRSOpTime, clusterTime);
+
+ findRes = oplog.findOne({o: {$eq: {"noop write for afterClusterTime read concern": 1}}});
+ assert(findRes);
+};
+
+//
+// Test noop write. Read from the destination shard.
+//
+
+testNoopWrite("test0", "coll0", st.rs1, "test1", "coll1", PropagationPreferenceOptions.kShard);
+
+//
+// Test noop write. Read from the config server's primary.
+//
+
+testNoopWrite(
+ "test0", "coll2", st.configRS, "test1", "coll3", PropagationPreferenceOptions.kConfig);
+
+st.stop();
}());
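
For context, the propagation step inside testNoopWrite works because mongos gossips $clusterTime on every command it proxies. A minimal sketch of that interaction in isolation, reusing the st, "test0", and "test1" fixtures from the test above (the variable names and the inserted _id are illustrative, not taken from the test):

// A write on shard 0 returns an operationTime at least as high as the
// opTime assigned to the insert.
const gossipRes = assert.commandWorked(
    st.s.getDB("test0").runCommand({insert: "coll0", documents: [{_id: 42}]}));
const gossipedClusterTime = gossipRes.operationTime;

// Any command routed through the same mongos to shard 1 carries the mongos's
// $clusterTime (>= 'gossipedClusterTime'), so shard 1 learns of a time it has
// not yet applied as an actual opTime in its own oplog.
st.s.getDB("test1").coll1.find().itcount();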
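The noop write that the test's oplog query looks for is, server-side, an oplog note: when a node must serve a read at a cluster time newer than its lastApplied, it effectively runs the appendOplogNote admin command against itself. A sketch of the equivalent manual trigger, assuming the st.rs1 primary from the test above (the data payload is exactly the document the test's oplog query matches):

// Manually write a noop oplog entry like the one the server creates for
// afterClusterTime/atClusterTime reads; afterwards the primary's last opTime
// (getLastOpTime(...).ts) has advanced past any previously gossiped time.
assert.commandWorked(st.rs1.getPrimary().adminCommand({
    appendOplogNote: 1,
    data: {"noop write for afterClusterTime read concern": 1},
}));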
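Finally, the getLastOpTime helper comes from the loaded jstests/replsets/rslib.js. Roughly (a paraphrase for readers without the helper at hand, not the verbatim library code), it reports the node's own optime from replSetGetStatus:

// Rough equivalent of rslib's getLastOpTime(conn): the member entry marked
// 'self' in replSetGetStatus carries the node's last applied optime, whose
// .ts Timestamp is what the test compares against 'clusterTime'.
function getLastOpTimeSketch(conn) {
    const status = assert.commandWorked(
        conn.getDB("admin").runCommand({replSetGetStatus: 1}));
    return status.members.find((m) => m.self).optime;
}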