author     Pavi Vetriselvan <pavithra.vetriselvan@mongodb.com>  2021-01-05 14:42:22 -0500
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-01-14 16:24:05 +0000
commit     3a4dda6f0dc7f32e91310b9256cd3b499f7715d2 (patch)
tree       5af626ca14786bf79c71fb487cf92861b9a1b7a6
parent     aaa6470f8e380579e86db9d0d0f5e81faea7e2fa (diff)
download   mongo-3a4dda6f0dc7f32e91310b9256cd3b499f7715d2.tar.gz
SERVER-53247 Remove EMRC=false targeted jstests/unittests
-rw-r--r--  buildscripts/resmokeconfig/suites/replica_sets_multiversion.yml  1
-rw-r--r--  etc/backports_required_for_multiversion_tests.yml  2
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/downgrade_after_rollback_via_refetch.js  81
-rw-r--r--  jstests/noPassthrough/disable_lock_free_reads_server_parameter.js  1
-rw-r--r--  jstests/noPassthrough/disable_majority_reads_restart.js  82
-rw-r--r--  jstests/noPassthrough/non_transaction_snapshot_reads_without_majority_reads.js  26
-rw-r--r--  jstests/noPassthrough/readConcern_atClusterTime.js  17
-rw-r--r--  jstests/noPassthrough/wt_disable_majority_reads.js  32
-rw-r--r--  jstests/replsets/change_stream_speculative_majority.js  84
-rw-r--r--  jstests/replsets/change_stream_speculative_majority_conflicting_catalog_changes.js  53
-rw-r--r--  jstests/replsets/change_stream_speculative_majority_lastApplied_lag.js  104
-rw-r--r--  jstests/replsets/change_stream_speculative_majority_latest_oplog_timestamp.js  89
-rw-r--r--  jstests/replsets/change_stream_speculative_majority_optimized_wait.js  83
-rw-r--r--  jstests/replsets/change_stream_speculative_majority_rollback.js  103
-rw-r--r--  jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js  74
-rw-r--r--  jstests/replsets/libs/rollback_test.js  2
-rw-r--r--  jstests/replsets/prepare_transaction_fails_without_majority_reads.js  34
-rw-r--r--  jstests/replsets/replsettest_stop_with_default_rwc.js  11
-rw-r--r--  jstests/replsets/rollback_after_disabling_majority_reads.js  45
-rw-r--r--  jstests/replsets/rollback_after_enabling_majority_reads.js  116
-rw-r--r--  jstests/replsets/rollback_crash_before_reaching_minvalid.js  118
-rw-r--r--  jstests/replsets/rollback_via_refetch_anomaly.js  196
-rw-r--r--  jstests/replsets/rollback_via_refetch_survives_nonexistent_collection_drop.js  72
-rw-r--r--  jstests/replsets/rollback_via_refetch_update_rollback_id_before_oplog_truncation.js  66
-rw-r--r--  jstests/replsets/speculative_majority_find.js  161
-rw-r--r--  jstests/replsets/speculative_majority_supported_commands.js  76
-rw-r--r--  jstests/replsets/transactions_after_rollback_via_refetch.js  122
-rw-r--r--  jstests/replsets/unrecoverable_rollback_early_exit.js  72
-rw-r--r--  jstests/sharding/multi_shard_transaction_without_majority_reads.js  39
-rw-r--r--  jstests/sharding/set_default_rwc_before_stop_sharding_test.js  8
-rw-r--r--  jstests/sharding/single_shard_transaction_without_majority_reads_lagged.js  96
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_test.cpp  107
-rw-r--r--  src/mongo/db/repl/replication_recovery_test.cpp  35
33 files changed, 15 insertions, 2193 deletions
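
The tests removed in this commit all target servers started with enableMajorityReadConcern=false (EMRC=false). A minimal sketch of the setup pattern they share, assuming the standard mongo shell jstest helpers (ReplSetTest, assert, ErrorCodes); database and collection names here are illustrative only:

(function() {
"use strict";

// Start a single-node replica set with majority read concern disabled, as the
// removed tests do via nodeOptions.
const rst = new ReplSetTest({nodes: 1, nodeOptions: {enableMajorityReadConcern: "false"}});
rst.startSet();
rst.initiate();

// With enableMajorityReadConcern=false, a majority read is rejected with
// ReadConcernMajorityNotEnabled (see wt_disable_majority_reads.js below).
assert.commandFailedWithCode(
    rst.getPrimary().getDB("test").runCommand(
        {find: "coll", readConcern: {level: "majority"}}),
    ErrorCodes.ReadConcernMajorityNotEnabled);

rst.stopSet();
})();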
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_multiversion.yml b/buildscripts/resmokeconfig/suites/replica_sets_multiversion.yml
index 9df9fbdf159..750c3ceeb46 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_multiversion.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_multiversion.yml
@@ -15,7 +15,6 @@ selector:
- jstests/replsets/initial_sync_drop_collection.js
- jstests/replsets/apply_prepare_txn_write_conflict_robustness.js
- jstests/replsets/invalidate_sessions_on_stepdown.js
- - jstests/replsets/rollback_via_refetch_anomaly.js
- jstests/replsets/initial_sync_fails_unclean_restart.js
# This tag file can be created using ./buildscripts/evergreen_gen_multiversion_tests.py
diff --git a/etc/backports_required_for_multiversion_tests.yml b/etc/backports_required_for_multiversion_tests.yml
index 6d9d70eef66..f908a828e7a 100644
--- a/etc/backports_required_for_multiversion_tests.yml
+++ b/etc/backports_required_for_multiversion_tests.yml
@@ -24,8 +24,6 @@ all:
test_file: jstests/replsets/apply_prepare_txn_write_conflict_robustness.js
- ticket: SERVER-47645
test_file: jstests/replsets/invalidate_sessions_on_stepdown.js
- - ticket: SERVER-48518
- test_file: jstests/replsets/rollback_via_refetch_anomaly.js
- ticket: SERVER-47773
test_file: jstests/core/geo_near_tailable.js
- ticket: SERVER-47469
diff --git a/jstests/multiVersion/genericSetFCVUsage/downgrade_after_rollback_via_refetch.js b/jstests/multiVersion/genericSetFCVUsage/downgrade_after_rollback_via_refetch.js
deleted file mode 100644
index 12b9dd435f3..00000000000
--- a/jstests/multiVersion/genericSetFCVUsage/downgrade_after_rollback_via_refetch.js
+++ /dev/null
@@ -1,81 +0,0 @@
-// When enableMajorityReadConcern=false, a node transitions from ROLLBACK to RECOVERING with an
-// unstable checkpoint with appliedThrough set to the common point. Test that if the node crashes
-// and restarts with the downgraded version before its next stable checkpoint, then oplog entries
-// after the common point are replayed.
-(function() {
-"use strict";
-
-load("jstests/replsets/libs/rollback_test.js");
-
-TestData.rollbackShutdowns = true;
-TestData.allowUncleanShutdowns = true;
-let name = "downgrade_after_rollback_via_refetch";
-let dbName = "test";
-let sourceCollName = "coll";
-
-function testDowngrade(enableMajorityReadConcern, downgradeVersion) {
- jsTest.log("Test downgrade with enableMajorityReadConcern=" + enableMajorityReadConcern +
- " and downgradeVersion: " + downgradeVersion);
- const downgradeFCV = binVersionToFCV(downgradeVersion);
- // Set up Rollback Test.
- let replTest = new ReplSetTest(
- {name, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "false"}});
- replTest.startSet();
- let config = replTest.getReplSetConfig();
- config.members[2].priority = 0;
- config.settings = {chainingAllowed: false};
- replTest.initiateWithHighElectionTimeout(config);
- let rollbackTest = new RollbackTest(name, replTest);
-
- // Set the featureCompatibilityVersion to the downgraded version, so that we can downgrade
- // the rollback node.
- assert.commandWorked(
- rollbackTest.getPrimary().adminCommand({setFeatureCompatibilityVersion: downgradeFCV}));
-
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
-
- // Turn off stable checkpoints on the rollback node.
- assert.commandWorked(
- rollbackNode.adminCommand({configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
-
- // Wait for a rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
-
- // Replicate a new operation to the rollback node. Replication is disabled on the tiebreaker
- // node, so a successful majority write guarantees the write has replicated to the rollback
- // node.
- assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[sourceCollName].insert(
- {_id: 0}, {writeConcern: {w: "majority"}}));
- assert.eq(rollbackNode.getDB(dbName)[sourceCollName].find({_id: 0}).itcount(), 1);
-
- // SERVER-47219: The following unclean shutdown followed by a restart into downgradeVersion is
- // not a legal downgrade scenario. However, this illegal downgrade is only prevented when a
- // change across versions requires it. There exists a patch for this test in v4.4 when illegal
- // downgrades are prevented. The patch for that case however requires demonstrating the illegal
- // downgrade is prevented as expected. Applying that here results in a hang. The testing
- // infrastructure for running mongod processes in sufficiently complex scenarios cannot express
- // both expecting a startup to fail with an error and failing immediately if startup
- // succeeds.
- //
- // If this test starts failing on the restart below due to an illegal downgrade, forward-porting
- // the v4.4 patch for SERVER-47219 should be the first thing to try.
- //
- // Kill the rollback node and restart it on the downgraded version.
- rollbackTest.restartNode(
- 0, 9, {binVersion: downgradeVersion, enableMajorityReadConcern: enableMajorityReadConcern});
- replTest.awaitSecondaryNodes();
-
- // The rollback node should replay the new operation.
- rollbackNode = rollbackTest.getSecondary();
- assert.eq(rollbackNode.getDB(dbName)[sourceCollName].find({_id: 0}).itcount(), 1);
-
- rollbackTest.stop();
-}
-
-testDowngrade("true", "last-lts");
-testDowngrade("false", "last-lts");
-testDowngrade("true", "last-continuous");
-testDowngrade("false", "last-continuous");
-})();
diff --git a/jstests/noPassthrough/disable_lock_free_reads_server_parameter.js b/jstests/noPassthrough/disable_lock_free_reads_server_parameter.js
index 2be59d36013..4e13866a0c1 100644
--- a/jstests/noPassthrough/disable_lock_free_reads_server_parameter.js
+++ b/jstests/noPassthrough/disable_lock_free_reads_server_parameter.js
@@ -101,6 +101,7 @@ assert(conn);
runReadAgainstLock(conn.host, dbName, collName, false);
MongoRunner.stopMongod(conn);
+// TODO SERVER-53247: Remove this test case once emrc defaults to true.
jsTest.log(
"Starting server with featureFlagLockFreeReads=true and enableMajorityReadConcern=false: " +
"this should override the setting to false.");
diff --git a/jstests/noPassthrough/disable_majority_reads_restart.js b/jstests/noPassthrough/disable_majority_reads_restart.js
deleted file mode 100644
index 0d21f0d07f3..00000000000
--- a/jstests/noPassthrough/disable_majority_reads_restart.js
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Tests restarting mongod with 'enableMajorityReadConcern' varying between true and false.
- *
- * @tags: [requires_persistence, requires_replication, requires_majority_read_concern,
- * requires_wiredtiger]
- */
-(function() {
-"use strict";
-
-const dbName = "test";
-const collName = "coll";
-
-const rst = new ReplSetTest({nodes: 1});
-rst.startSet();
-rst.initiate();
-
-// Insert a document and ensure it is in the stable checkpoint by restarting.
-let coll = rst.getPrimary().getDB(dbName)[collName];
-assert.commandWorked(coll.insert({_id: 0}, {writeConcern: {w: "majority"}}));
-rst.stopSet(undefined, true);
-rst.startSet(undefined, true);
-
-// Disable snapshotting on all members of the replica set so that further operations do not
-// enter the majority snapshot.
-assert.commandWorked(
- rst.getPrimary().adminCommand({configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
-
-// Insert a document that will not be in a stable checkpoint.
-coll = rst.getPrimary().getDB(dbName)[collName];
-assert.commandWorked(coll.insert({_id: 1}));
-
-// Restart the node with enableMajorityReadConcern:false.
-rst.stopSet(undefined, true);
-rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
-
-// Both inserts should be reflected in the data and the oplog.
-coll = rst.getPrimary().getDB(dbName)[collName];
-assert.eq([{_id: 0}, {_id: 1}], coll.find().sort({_id: 1}).toArray());
-let oplog = rst.getPrimary().getDB("local").oplog.rs;
-assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
-assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
-
-// Restart the node with enableMajorityReadConcern:false without adding any documents.
-rst.stopSet(undefined, true);
-rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
-
-// Both inserts should still be reflected in the data and the oplog.
-coll = rst.getPrimary().getDB(dbName)[collName];
-assert.eq([{_id: 0}, {_id: 1}], coll.find().sort({_id: 1}).toArray());
-oplog = rst.getPrimary().getDB("local").oplog.rs;
-assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
-assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
-
-// Insert another document.
-assert.commandWorked(coll.insert({_id: 2}, {writeConcern: {w: "majority"}}));
-
-// Restart the node with enableMajorityReadConcern:false.
-rst.stopSet(undefined, true);
-rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
-
-// All three inserts should be reflected in the data and the oplog.
-coll = rst.getPrimary().getDB(dbName)[collName];
-assert.eq([{_id: 0}, {_id: 1}, {_id: 2}], coll.find().sort({_id: 1}).toArray());
-oplog = rst.getPrimary().getDB("local").oplog.rs;
-assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
-assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
-assert.eq(1, oplog.find({o: {_id: 2}}).itcount());
-
-// Restart the node with enableMajorityReadConcern:true.
-rst.stopSet(undefined, true);
-rst.startSet({noCleanData: true, enableMajorityReadConcern: "true"});
-
-// All three inserts should still be reflected in the data and the oplog.
-coll = rst.getPrimary().getDB(dbName)[collName];
-assert.eq([{_id: 0}, {_id: 1}, {_id: 2}], coll.find().sort({_id: 1}).toArray());
-oplog = rst.getPrimary().getDB("local").oplog.rs;
-assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
-assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
-assert.eq(1, oplog.find({o: {_id: 2}}).itcount());
-
-rst.stopSet();
-})();
diff --git a/jstests/noPassthrough/non_transaction_snapshot_reads_without_majority_reads.js b/jstests/noPassthrough/non_transaction_snapshot_reads_without_majority_reads.js
deleted file mode 100644
index f0e45c16a37..00000000000
--- a/jstests/noPassthrough/non_transaction_snapshot_reads_without_majority_reads.js
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Tests readConcern level snapshot outside of transactions is not supported when
- * enableMajorityReadConcern is false.
- *
- * @tags: [
- * requires_fcv_47,
- * requires_persistence,
- * requires_replication,
- * ]
- */
-(function() {
-"use strict";
-
-const replSet = new ReplSetTest({nodes: [{"enableMajorityReadConcern": "false"}]});
-
-replSet.startSet();
-replSet.initiate();
-
-const primary = replSet.getPrimary();
-
-// Tests that snapshot reads return error code ReadConcernMajorityNotEnabled.
-assert.commandFailedWithCode(
- primary.getDB('test').runCommand({find: "foo", readConcern: {level: "snapshot"}}),
- ErrorCodes.ReadConcernMajorityNotEnabled);
-
-replSet.stopSet();
-})();
diff --git a/jstests/noPassthrough/readConcern_atClusterTime.js b/jstests/noPassthrough/readConcern_atClusterTime.js
index b9a772a1162..b57f6830bf0 100644
--- a/jstests/noPassthrough/readConcern_atClusterTime.js
+++ b/jstests/noPassthrough/readConcern_atClusterTime.js
@@ -155,21 +155,4 @@ rst.stopSet();
session.endSession();
rst.stopSet();
}
-
-// readConcern with 'atClusterTime' is not allowed when enableMajorityReadConcern=false.
-{
- let rst = new ReplSetTest({nodes: [{"enableMajorityReadConcern": "false"}]});
- rst.startSet();
- rst.initiate();
- let session =
- rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
- let sessionDb = session.getDatabase(dbName);
- session.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- session.endSession();
- rst.stopSet();
-}
}());
diff --git a/jstests/noPassthrough/wt_disable_majority_reads.js b/jstests/noPassthrough/wt_disable_majority_reads.js
deleted file mode 100644
index 65cba8a8588..00000000000
--- a/jstests/noPassthrough/wt_disable_majority_reads.js
+++ /dev/null
@@ -1,32 +0,0 @@
-// @tags: [requires_wiredtiger, requires_replication]
-(function() {
-"use strict";
-
-var rst = new ReplSetTest({
- nodes: [
- {"enableMajorityReadConcern": ""},
- {"enableMajorityReadConcern": "false"},
- {"enableMajorityReadConcern": "true"}
- ]
-});
-rst.startSet();
-rst.initiate();
-rst.awaitSecondaryNodes();
-
-rst.getPrimary().getDB("test").getCollection("test").insert({});
-rst.awaitReplication();
-
-// Node 0 is using the default, which is `enableMajorityReadConcern: true`. Thus a majority
-// read should succeed.
-assert.commandWorked(
- rst.nodes[0].getDB("test").runCommand({"find": "test", "readConcern": {"level": "majority"}}));
-// Node 1 disables majority reads. Check for the appropriate error code.
-assert.commandFailedWithCode(
- rst.nodes[1].getDB("test").runCommand({"find": "test", "readConcern": {"level": "majority"}}),
- ErrorCodes.ReadConcernMajorityNotEnabled);
-// Same as Node 0.
-assert.commandWorked(
- rst.nodes[2].getDB("test").runCommand({"find": "test", "readConcern": {"level": "majority"}}));
-
-rst.stopSet();
-})();
diff --git a/jstests/replsets/change_stream_speculative_majority.js b/jstests/replsets/change_stream_speculative_majority.js
deleted file mode 100644
index fb37968184e..00000000000
--- a/jstests/replsets/change_stream_speculative_majority.js
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Test basic, steady-state replication change stream functionality with speculative majority reads.
- *
- * @tags: [uses_speculative_majority]
- */
-(function() {
-"use strict";
-
-load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
-
-const name = "change_stream_speculative_majority";
-const replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
-});
-replTest.startSet();
-replTest.initiate();
-
-const dbName = name;
-const collName = "coll";
-
-let primary = replTest.getPrimary();
-let secondary = replTest.getSecondary();
-let primaryDB = primary.getDB(dbName);
-let primaryColl = primaryDB[collName];
-
-// Open a change stream.
-let res = primaryDB.runCommand(
- {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}, maxTimeMS: 5000});
-assert.commandWorked(res);
-let cursorId = res.cursor.id;
-
-// Insert a document on primary and let it majority commit.
-assert.commandWorked(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
-
-// Receive the first change event.
-res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName});
-let changes = res.cursor.nextBatch;
-assert.eq(changes.length, 1);
-assert.eq(changes[0]["fullDocument"], {_id: 1});
-assert.eq(changes[0]["operationType"], "insert");
-
-// Save the resume token.
-let resumeToken = changes[0]["_id"];
-
-// This query should time out waiting for new results and return an empty batch.
-res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName, maxTimeMS: 5000});
-assert.eq(res.cursor.nextBatch, []);
-
-// Pause replication on the secondary so that writes won't majority commit.
-stopServerReplication(secondary);
-
-// Do a new write on primary.
-assert.commandWorked(primaryColl.insert({_id: 2}));
-
-// The change stream query should time out waiting for the new result to majority commit.
-res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName, maxTimeMS: 5000});
-assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
-// An aggregate trying to resume a stream that includes the change should also time out.
-res = primaryDB.runCommand({
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
- cursor: {},
- maxTimeMS: 5000
-});
-assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
-// Resume the stream after restarting replication. We should now be able to see the new event.
-restartServerReplication(secondary);
-replTest.awaitReplication();
-
-// Re-open the stream, and receive the new event.
-res = primaryDB.runCommand(
- {aggregate: collName, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}});
-assert.commandWorked(res);
-changes = res.cursor.firstBatch;
-assert.eq(changes.length, 1);
-assert.eq(changes[0]["fullDocument"], {_id: 2});
-assert.eq(changes[0]["operationType"], "insert");
-
-replTest.stopSet();
-})();
\ No newline at end of file
diff --git a/jstests/replsets/change_stream_speculative_majority_conflicting_catalog_changes.js b/jstests/replsets/change_stream_speculative_majority_conflicting_catalog_changes.js
deleted file mode 100644
index 8b1e9682403..00000000000
--- a/jstests/replsets/change_stream_speculative_majority_conflicting_catalog_changes.js
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Make sure that a speculative majority change stream read on a secondary does not trigger an
- * invariant when there are conflicting catalog changes on the collection.
- *
- * Regression test for SERVER-40706.
- *
- * @tags: [uses_speculative_majority]
- */
-(function() {
-"use strict";
-
-const replTest = new ReplSetTest({
- name: "replset",
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
-});
-replTest.startSet();
-replTest.initiate();
-
-const dbName = "test";
-const collName = "coll";
-
-let primary = replTest.getPrimary();
-let secondary = replTest.getSecondary();
-let primaryDB = primary.getDB(dbName);
-let primaryColl = primaryDB[collName];
-let secondaryDB = secondary.getDB(dbName);
-
-// Insert some documents on the primary that we can index.
-var bulk = primaryColl.initializeUnorderedBulkOp();
-for (var i = 0; i < 1000; i++) {
- let doc = {};
- bulk.insert({a: i, b: i, c: i, d: i, e: i});
-}
-assert.commandWorked(bulk.execute());
-
-// Start several index builds on the primary. This should make it likely that index builds are
-// in progress on the secondary while doing reads below.
-primaryColl.createIndex({a: 1});
-primaryColl.createIndex({b: 1});
-primaryColl.createIndex({c: 1});
-primaryColl.createIndex({d: 1});
-primaryColl.createIndex({e: 1});
-
-// Do a bunch of change stream reads against the secondary. We are not worried about the
-// responses, since we are only verifying that the server doesn't crash.
-for (var i = 0; i < 20; i++) {
- assert.commandWorked(
- secondaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
-}
-
-replTest.stopSet();
-})();
\ No newline at end of file
diff --git a/jstests/replsets/change_stream_speculative_majority_lastApplied_lag.js b/jstests/replsets/change_stream_speculative_majority_lastApplied_lag.js
deleted file mode 100644
index 16c27968f04..00000000000
--- a/jstests/replsets/change_stream_speculative_majority_lastApplied_lag.js
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Test speculative majority change stream reads against a primary when the replication system's
- * 'lastApplied' optime lags behind the timestamp of the newest oplog entry visible in the storage
- * layer. Ensure that we do not return uncommitted data in this case.
- *
- * @tags: [uses_speculative_majority]
- */
-(function() {
-"use strict";
-
-load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
-load("jstests/libs/fail_point_util.js");
-load("jstests/libs/parallelTester.js"); // for Thread.
-
-const name = "change_stream_speculative_majority_lastApplied_lag";
-const replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
-});
-replTest.startSet();
-replTest.initiate();
-
-const dbName = name;
-const collName = "coll";
-
-const primary = replTest.getPrimary();
-const primaryDB = primary.getDB(dbName);
-const primaryColl = primaryDB[collName];
-
-// Do a few operations on the primary and let them both majority commit. Later on we will
-// receive both of these operations in a change stream.
-let res = assert.commandWorked(
- primaryColl.runCommand("insert", {documents: [{_id: 1, v: 0}], writeConcern: {w: "majority"}}));
-assert.commandWorked(primaryColl.update({_id: 1}, {$set: {v: 1}}, {writeConcern: {w: "majority"}}));
-
-// Save this operation time so we can start a change stream from here.
-let startOperTime = res.operationTime;
-
-// Make the primary hang after it has completed a write but before it has advanced lastApplied
-// for that write.
-let failPoint = configureFailPoint(primaryDB, "hangBeforeLogOpAdvancesLastApplied");
-
-// Function which will be used by the background thread to perform an update on the specified
-// host, database, and collection.
-function doUpdate(host, dbName, collName, query, update) {
- let hostDB = (new Mongo(host)).getDB(dbName);
- assert.commandWorked(hostDB[collName].update(query, update));
-}
-
-// Do a document update on primary, but don't wait for it to majority commit. The write should
-// hang due to the enabled failpoint.
-jsTestLog("Starting update on primary.");
-var primaryWrite = new Thread(doUpdate, primary.host, dbName, collName, {_id: 1}, {$set: {v: 2}});
-primaryWrite.start();
-
-// Wait for the fail point to be hit. By the time the primary hits this fail point, the update
- // should be visible. 'lastApplied', however, has not been advanced yet. We check both the
-// document state and the logs to make sure we hit the failpoint for the correct operation.
-assert.soon(() => (primaryColl.findOne({_id: 1}).v === 2));
-failPoint.wait();
-
-// Open a change stream on the primary. The stream should only return the initial insert and the
-// first of the two update events, since the second update is not yet majority-committed.
-// Despite the fact that the effects of the latter update are already visible to local readers,
-// speculative majority will read at min(lastApplied, allCommitted), and so change stream's
-// 'fullDocument' lookup should also *not* return the second update's uncommitted changes.
-jsTestLog("Opening a change stream on the primary.");
-const cst = new ChangeStreamTest(primaryDB);
-let cursor = cst.startWatchingChanges({
- pipeline:
- [{$changeStream: {startAtOperationTime: startOperTime, fullDocument: "updateLookup"}}],
- collection: collName
-});
-
-cst.assertNextChangesEqual({
- cursor: cursor,
- expectedChanges: [
- {
- documentKey: {_id: 1},
- fullDocument: {_id: 1, v: 0},
- ns: {db: dbName, coll: collName},
- operationType: "insert",
- },
- {
- documentKey: {_id: 1},
- fullDocument: {_id: 1, v: 1},
- ns: {db: dbName, coll: collName},
- updateDescription: {removedFields: [], updatedFields: {v: 1}, truncatedArrays: []},
- operationType: "update",
- }
- ]
-});
-
-// Make sure the cursor does not return any more change events.
-cursor = cst.getNextBatch(cursor);
-assert.eq(cursor.nextBatch.length, 0);
-
-// Disable the failpoint to let the test complete.
-failPoint.off();
-
-primaryWrite.join();
-replTest.stopSet();
-})();
diff --git a/jstests/replsets/change_stream_speculative_majority_latest_oplog_timestamp.js b/jstests/replsets/change_stream_speculative_majority_latest_oplog_timestamp.js
deleted file mode 100644
index c70054f756e..00000000000
--- a/jstests/replsets/change_stream_speculative_majority_latest_oplog_timestamp.js
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Test that change streams using speculative majority wait for the latest observed oplog timestamp
- * to majority commit.
- *
- * If a change stream query returns a batch containing oplog entries no newer than timestamp T, the
- * server may still report a high-water-mark postBatchResumeToken representing the latest majority
- * committed oplog timestamp that it observed while scanning the oplog, which may be greater than T.
- * A mongoS will use this PBRT as a guarantee that no new change events will occur at a lesser
- * timestamp. This guarantee is only valid if the timestamp is actually majority committed, so we
- * need to make sure that guarantee holds, even when using speculative majority.
- *
- * @tags: [uses_speculative_majority]
- */
-(function() {
-"use strict";
-
-load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
-
-const name = "change_stream_speculative_majority_latest_oplog_timestamp";
-const replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
-});
-replTest.startSet();
-replTest.initiate();
-
-const dbName = name;
-const collName = "coll";
-const otherCollName = "coll_other";
-
-const primary = replTest.getPrimary();
-const secondary = replTest.getSecondary();
-
-const primaryDB = primary.getDB(dbName);
-const primaryColl = primaryDB[collName];
-
-assert.commandWorked(primaryColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
-
-let res = primaryDB.runCommand(
- {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}, maxTimeMS: 5000});
-
-assert.commandWorked(res);
-let cursorId = res.cursor.id;
-
-// Insert a document on primary and let it majority commit.
-assert.commandWorked(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
-
-// Pause replication on the secondary so that further writes won't majority commit.
-jsTestLog("Stopping replication to secondary.");
-stopServerReplication(secondary);
-
-// Receive the first change event.
-res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName});
-let changes = res.cursor.nextBatch;
-assert.eq(changes.length, 1);
-assert.eq(changes[0]["fullDocument"], {_id: 1});
-assert.eq(changes[0]["operationType"], "insert");
-
-// Extract the postBatchResumeToken from the first batch.
-const initialPostBatchResumeToken = res.cursor.postBatchResumeToken;
-assert.neq(initialPostBatchResumeToken, undefined);
-
-// Do a write on a collection that we are not watching changes for.
-let otherWriteRes = primaryDB.runCommand({insert: otherCollName, documents: [{_id: 1}]});
-let otherWriteOpTime = otherWriteRes.operationTime;
-
-// Replication to the secondary is paused, so the write to 'otherCollName' cannot majority
-// commit. A change stream getMore is expected to return the "latest oplog timestamp" which it
-// scanned and this timestamp must be majority committed. So, this getMore should time out
-// waiting for the previous write to majority commit, even though it's on a collection that is
-// not being watched.
-res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName, maxTimeMS: 5000});
-assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
-jsTestLog("Restarting replication to secondary.");
-restartServerReplication(secondary);
-replTest.awaitReplication();
-
-// Now that writes can replicate again, the previous operation should have majority committed,
-// making it safe to advance the postBatchResumeToken. Note that no further events are returned,
-// indicating that the new PBRT is a high water mark generated at the latest oplog timestamp.
-res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName, maxTimeMS: 5000});
-assert.commandWorked(res);
-assert.eq(res.cursor.nextBatch, []);
-assert.gt(bsonWoCompare(res.cursor.postBatchResumeToken, initialPostBatchResumeToken), 0);
-
-replTest.stopSet();
-})();
\ No newline at end of file
diff --git a/jstests/replsets/change_stream_speculative_majority_optimized_wait.js b/jstests/replsets/change_stream_speculative_majority_optimized_wait.js
deleted file mode 100644
index 65bd4599722..00000000000
--- a/jstests/replsets/change_stream_speculative_majority_optimized_wait.js
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Verify that speculative majority change stream oplog reads only wait on the latest scanned oplog
- * optime, as opposed to the newest system-wide applied optime. This is an optimization to reduce
- * unnecessary waiting on the server.
- *
- * @tags: [uses_speculative_majority]
- */
-(function() {
-"use strict";
-
-load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
-
-const name = "change_stream_speculative_majority";
-const replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
-});
-replTest.startSet();
-replTest.initiate();
-
-const dbName = name;
-const collName = "coll";
-
-let primary = replTest.getPrimary();
-let secondary = replTest.getSecondary();
-let primaryDB = primary.getDB(dbName);
-let primaryColl = primaryDB[collName];
-
-// Receive 1 change to get an initial resume token.
-let res = assert.commandWorked(
- primaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
-let cursorId = res.cursor.id;
-assert.commandWorked(primaryColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
-res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName});
-assert.eq(res.cursor.nextBatch.length, 1);
-let resumeToken = res.cursor.nextBatch[0]["_id"];
-
-// Open a change stream.
-res = assert.commandWorked(
- primaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
-cursorId = res.cursor.id;
-
-// Insert documents to fill one batch and let them majority commit.
-let batchSize = 2;
-assert.commandWorked(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
-assert.commandWorked(primaryColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
-
-// Pause replication on the secondary so that writes won't majority commit.
-stopServerReplication(secondary);
-
-// Do a write on the primary that won't majority commit but will advance the last applied optime.
-assert.commandWorked(primaryColl.insert({_id: 3}));
-
-// Receive one batch of change events. We should be able to read only the majority committed
-// change events and no further in order to generate this batch.
-res = assert.commandWorked(primary.getDB(dbName).runCommand(
- {getMore: cursorId, collection: collName, batchSize: batchSize}));
-let changes = res.cursor.nextBatch;
-assert.eq(changes.length, 2);
-assert.eq(changes[0]["fullDocument"], {_id: 1});
-assert.eq(changes[0]["operationType"], "insert");
-assert.eq(changes[1]["fullDocument"], {_id: 2});
-assert.eq(changes[1]["operationType"], "insert");
-
-// Make sure that 'aggregate' commands also utilize the optimization.
-res = assert.commandWorked(primaryDB.runCommand({
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
- cursor: {batchSize: batchSize}
-}));
-changes = res.cursor.firstBatch;
-assert.eq(changes.length, 2);
-assert.eq(changes[0]["fullDocument"], {_id: 1});
-assert.eq(changes[0]["operationType"], "insert");
-assert.eq(changes[1]["fullDocument"], {_id: 2});
-assert.eq(changes[1]["operationType"], "insert");
-
-// Let the test finish.
-restartServerReplication(secondary);
-
-replTest.stopSet();
-})();
\ No newline at end of file
diff --git a/jstests/replsets/change_stream_speculative_majority_rollback.js b/jstests/replsets/change_stream_speculative_majority_rollback.js
deleted file mode 100644
index e53b65ade88..00000000000
--- a/jstests/replsets/change_stream_speculative_majority_rollback.js
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Test change stream behavior with speculative majority reads in the face of replication rollback.
- *
- * @tags: [uses_speculative_majority]
- */
-(function() {
-'use strict';
-
-load("jstests/replsets/libs/rollback_test.js"); // for RollbackTest.
-
-// Disable implicit sessions so it's easy to run commands from different threads.
-TestData.disableImplicitSessions = true;
-
-const name = "change_stream_speculative_majority_rollback";
-const dbName = name;
-const collName = "coll";
-
-// Set up a replica set for use in RollbackTest. We disable majority reads on all nodes so we
-// will utilize speculative majority reads for change streams.
-const replTest = new ReplSetTest({
- name,
- nodes: 3,
- useBridge: true,
- settings: {chainingAllowed: false},
- nodeOptions: {enableMajorityReadConcern: "false"}
-});
-replTest.startSet();
-let config = replTest.getReplSetConfig();
-config.members[2].priority = 0;
-replTest.initiateWithHighElectionTimeout(config);
-
-const rollbackTest = new RollbackTest(name, replTest);
-const primary = rollbackTest.getPrimary();
-const primaryDB = primary.getDB(dbName);
-let coll = primaryDB[collName];
-
-// Create a collection.
-assert.commandWorked(coll.insert({_id: 0}, {writeConcern: {w: "majority"}}));
-
-// Open a change stream on the initial primary.
-let res = primaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}});
-assert.commandWorked(res);
-let cursorId = res.cursor.id;
-
-// Receive an initial change event and save the resume token.
-assert.commandWorked(coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
-res = primaryDB.runCommand({getMore: cursorId, collection: collName});
-let changes = res.cursor.nextBatch;
-assert.eq(changes.length, 1);
-assert.eq(changes[0]["fullDocument"], {_id: 1});
-assert.eq(changes[0]["operationType"], "insert");
-let resumeToken = changes[0]["_id"];
-
-let rollbackNode = rollbackTest.transitionToRollbackOperations();
-assert.eq(rollbackNode, primary);
-
-// Insert a few items that will be rolled back.
-assert.commandWorked(coll.insert({_id: 2}));
-assert.commandWorked(coll.insert({_id: 3}));
-assert.commandWorked(coll.insert({_id: 4}));
-
-let getChangeEvent = new Thread(function(host, cursorId, dbName, collName) {
- jsTestLog("Trying to receive change event from divergent primary.");
- const nodeDB = new Mongo(host).getDB(dbName);
- try {
- return nodeDB.runCommand({getMore: eval(cursorId), collection: collName});
- } catch (e) {
- return isNetworkError(e);
- }
-}, rollbackNode.host, tojson(cursorId), dbName, collName);
-getChangeEvent.start();
-
-// Make sure the change stream query started.
-assert.soon(() => primaryDB.currentOp({"command.getMore": cursorId}).inprog.length === 1);
-
-// Do some operations on the new primary that we can receive in a resumed stream.
-let syncSource = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
-coll = syncSource.getDB(dbName)[collName];
-assert.commandWorked(coll.insert({_id: 5}));
-assert.commandWorked(coll.insert({_id: 6}));
-assert.commandWorked(coll.insert({_id: 7}));
-
-// Let rollback begin and complete.
-rollbackTest.transitionToSyncSourceOperationsDuringRollback();
-rollbackTest.transitionToSteadyStateOperations();
-
-// The change stream query should have failed when the node entered rollback.
-assert(getChangeEvent.returnData());
-
-jsTestLog("Resuming change stream against new primary.");
-res = syncSource.getDB(dbName).runCommand(
- {aggregate: collName, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}});
-changes = res.cursor.firstBatch;
-assert.eq(changes.length, 3);
-assert.eq(changes[0]["fullDocument"], {_id: 5});
-assert.eq(changes[0]["operationType"], "insert");
-assert.eq(changes[1]["fullDocument"], {_id: 6});
-assert.eq(changes[1]["operationType"], "insert");
-assert.eq(changes[2]["fullDocument"], {_id: 7});
-assert.eq(changes[2]["operationType"], "insert");
-
-rollbackTest.stop();
-})();
diff --git a/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js b/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js
deleted file mode 100644
index ff583286519..00000000000
--- a/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Test speculative majority change stream reads against a secondary while it is applying an oplog
- * batch. Speculative majority change stream reads on secondaries should read from the lastApplied
- * timestamp.
- *
- * @tags: [uses_speculative_majority]
- */
-(function() {
-"use strict";
-
-load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
-
-const name = "speculative_majority_secondary";
-const replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
-});
-replTest.startSet();
-replTest.initiate();
-
-const dbName = name;
-const collName = "coll";
-
-let primary = replTest.getPrimary();
-let secondary = replTest.getSecondary();
-let primaryDB = primary.getDB(dbName);
-let primaryColl = primaryDB[collName];
-let secondaryDB = secondary.getDB(dbName);
-
-// Do a couple writes on primary and save the first operation time, so we can start the
-// secondary change stream from this point.
-let res = assert.commandWorked(primaryColl.runCommand("insert", {documents: [{_id: 0}]}));
-let startTime = res.operationTime;
-assert.commandWorked(primaryColl.update({_id: 0}, {$set: {v: 0}}));
-replTest.awaitLastOpCommitted();
-
-// Make the secondary pause after it has written a batch of entries to the oplog but before it
-// has applied them.
-assert.commandWorked(secondaryDB.adminCommand(
- {configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries", mode: "alwaysOn"}));
-
-// Pause replication so that the secondary will sync and apply the set of writes from the
-// primary in a single batch.
-stopServerReplication(secondary);
-
-jsTestLog("Do some writes on the primary.");
-assert.commandWorked(primaryColl.update({_id: 0}, {$set: {v: 1}}));
-assert.commandWorked(primaryColl.update({_id: 0}, {$set: {v: 2}}));
-assert.commandWorked(primaryColl.update({_id: 0}, {$set: {v: 3}}));
-
-// Restart server replication on secondary and wait for the failpoint to be hit.
-jsTestLog("Restarting server replication on secondary.");
-restartServerReplication(secondary);
-checkLog.contains(secondary, "pauseBatchApplicationAfterWritingOplogEntries fail point enabled");
-
-// Open a change stream on the secondary.
-res = assert.commandWorked(secondaryDB.runCommand({
- aggregate: collName,
- pipeline: [{$changeStream: {startAtOperationTime: startTime}}],
- cursor: {}
-}));
-
-// We should not expect to see any of the ops currently being applied in the secondary batch.
-let changes = res.cursor.firstBatch;
-assert.eq(changes.length, 2);
-assert.eq(changes[0].fullDocument, {_id: 0});
-assert.eq(changes[1].updateDescription.updatedFields, {v: 0});
-
-// Turn off the failpoint and let the test complete.
-assert.commandWorked(secondaryDB.adminCommand(
- {configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries", mode: "off"}));
-replTest.stopSet();
-})();
\ No newline at end of file
diff --git a/jstests/replsets/libs/rollback_test.js b/jstests/replsets/libs/rollback_test.js
index d32f8792ade..60be4f27ce8 100644
--- a/jstests/replsets/libs/rollback_test.js
+++ b/jstests/replsets/libs/rollback_test.js
@@ -533,6 +533,8 @@ function RollbackTest(name = "RollbackTest", replSet) {
lastRBID = assert.commandWorked(curSecondary.adminCommand("replSetGetRBID")).rbid;
+ // TODO SERVER-53247: Once we remove support for emrc=false in the server, remove this
+ // check.
const isMajorityReadConcernEnabledOnRollbackNode =
assert.commandWorked(curSecondary.adminCommand({serverStatus: 1}))
.storageEngine.supportsCommittedReads;
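
The check added in this hunk keys off serverStatus to detect whether the rollback node supports committed reads (i.e. was not started with enableMajorityReadConcern=false). A standalone sketch of the same probe; the helper name is illustrative and not part of the library:

// Returns true when the node reports committed-reads support, which is the
// same serverStatus field rollback_test.js inspects above.
function supportsMajorityReadConcern(conn) {
    const status = assert.commandWorked(conn.adminCommand({serverStatus: 1}));
    return status.storageEngine.supportsCommittedReads;
}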
diff --git a/jstests/replsets/prepare_transaction_fails_without_majority_reads.js b/jstests/replsets/prepare_transaction_fails_without_majority_reads.js
deleted file mode 100644
index f13c4f141e4..00000000000
--- a/jstests/replsets/prepare_transaction_fails_without_majority_reads.js
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Tests that the 'prepareTransaction' command fails against a replica set primary that has
- * 'enableMajorityReadConcern' disabled.
- *
- * @tags: [uses_transactions, uses_prepare_transaction]
- */
-
-(function() {
-"use strict";
-
-const rst = new ReplSetTest({nodes: 1, nodeOptions: {enableMajorityReadConcern: "false"}});
-rst.startSet();
-rst.initiate();
-
-const dbName = "test";
-const collName = "prepare_transaction_fails_without_majority_reads";
-
-const primary = rst.getPrimary();
-const testDB = primary.getDB(dbName);
-
-assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
-const session = primary.startSession({causalConsistency: false});
-const sessionDB = session.getDatabase(dbName);
-const sessionColl = sessionDB.getCollection(collName);
-
-session.startTransaction();
-assert.commandWorked(sessionColl.insert({_id: 42}));
-
-assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}),
- ErrorCodes.ReadConcernMajorityNotEnabled);
-
-rst.stopSet();
-})();
diff --git a/jstests/replsets/replsettest_stop_with_default_rwc.js b/jstests/replsets/replsettest_stop_with_default_rwc.js
index ccf72d0f8ac..2e5289b80ac 100644
--- a/jstests/replsets/replsettest_stop_with_default_rwc.js
+++ b/jstests/replsets/replsettest_stop_with_default_rwc.js
@@ -8,13 +8,14 @@
const name = jsTestName();
// We need to have at least 2 nodes to run the data consistency checks.
-const rst =
- new ReplSetTest({name: name, nodes: 2, nodeOptions: {enableMajorityReadConcern: "false"}});
+const rst = new ReplSetTest({name: name, nodes: 2});
rst.startSet();
rst.initiate();
-// Deliberately set an unsatisfiable default read and write concern so any operations run in the
-// shutdown hooks will fail if they inherit either.
+// Deliberately set a write concern and read concern that are different from the default w:1 and
+// local values.
+// The write concern is unsatisfiable, so any operations run in the shutdown hooks will fail if
+// they inherit it.
assert.commandWorked(rst.getPrimary().adminCommand({
setDefaultRWConcern: 1,
defaultWriteConcern: {w: 42},
@@ -22,6 +23,6 @@ assert.commandWorked(rst.getPrimary().adminCommand({
}));
// It should always be possible to successfully stop the replset (including running consistency
-// checks) even when the default read and write concerns are unsatisfiable.
+// checks) even when the default write concern is unsatisfiable.
rst.stopSet();
})();
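
For reference, the default set by the hunk above can be read back with the getDefaultRWConcern admin command. A minimal sketch, assuming the rst fixture from this test and that the setDefaultRWConcern call has already run; only the write-concern field is checked because the default read concern value is elided from the hunk:

// Read back the cluster-wide defaults and confirm the unsatisfiable w:42
// write concern set above is what the shutdown hooks would inherit.
const defaults = assert.commandWorked(
    rst.getPrimary().adminCommand({getDefaultRWConcern: 1}));
assert.eq(defaults.defaultWriteConcern.w, 42);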
diff --git a/jstests/replsets/rollback_after_disabling_majority_reads.js b/jstests/replsets/rollback_after_disabling_majority_reads.js
deleted file mode 100644
index f1154068ee5..00000000000
--- a/jstests/replsets/rollback_after_disabling_majority_reads.js
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * This test demonstrates that a rollback immediately after disabling majority reads succeeds.
- * @tags: [requires_persistence]
- */
-(function() {
-"use strict";
-
-load("jstests/replsets/libs/rollback_test.js");
-
-TestData.rollbackShutdowns = true;
-const name = "rollback_after_disabling_majority_reads";
-const dbName = "test";
-const collName = "coll";
-
-jsTest.log("Set up a Rollback Test with enableMajorityReadConcern=true");
-const replTest = new ReplSetTest(
- {name, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "true"}});
-replTest.startSet();
-let config = replTest.getReplSetConfig();
-config.members[2].priority = 0;
-config.settings = {
- chainingAllowed: false
-};
-replTest.initiateWithHighElectionTimeout(config);
-const rollbackTest = new RollbackTest(name, replTest);
-
-const rollbackNode = rollbackTest.transitionToRollbackOperations();
-assert.commandWorked(
- rollbackNode.getDB(dbName).runCommand({insert: collName, documents: [{_id: "rollback op"}]}));
-
-jsTest.log("Restart the rollback node with enableMajorityReadConcern=false");
-rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "false"});
-
-rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
-rollbackTest.transitionToSyncSourceOperationsDuringRollback();
-rollbackTest.transitionToSteadyStateOperations();
-
-assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[collName].insert(
- {_id: "steady state op"}, {writeConcern: {w: "majority"}}));
-
-assert.eq(0, rollbackNode.getDB(dbName)[collName].find({_id: "rollback op"}).itcount());
-assert.eq(1, rollbackNode.getDB(dbName)[collName].find({_id: "steady state op"}).itcount());
-
-rollbackTest.stop();
-}());
\ No newline at end of file
diff --git a/jstests/replsets/rollback_after_enabling_majority_reads.js b/jstests/replsets/rollback_after_enabling_majority_reads.js
deleted file mode 100644
index 1a3ed71a133..00000000000
--- a/jstests/replsets/rollback_after_enabling_majority_reads.js
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * This test documents the behavior that rolling back immediately after upgrading
- * enableMajorityReadConcern to true can fassert. If this happens, the user can restart the server
- * with enableMajorityReadConcern=false to complete the rollback, then upgrade again to
- * enableMajorityReadConcern=true.
- * Rollback after restarting with enableMajorityReadConcern=true succeeds if the common point is at
- * least the stable timestamp, i.e. we do not attempt to roll back operations that were included in the stable checkpoint.
- * @tags: [
- * requires_persistence,
- * live_record_incompatible,
- * ]
- */
-(function() {
-"use strict";
-
-load("jstests/replsets/libs/rollback_test.js");
-load("jstests/libs/write_concern_util.js");
-
-TestData.rollbackShutdowns = true;
-const name = "rollback_after_enabling_majority_reads";
-const dbName = "test";
-const collName = "coll";
-
-jsTest.log("Set up a Rollback Test with enableMajorityReadConcern=false");
-const replTest = new ReplSetTest(
- {name, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "false"}});
-replTest.startSet();
-let config = replTest.getReplSetConfig();
-config.members[2].priority = 0;
-config.settings = {
- chainingAllowed: false
-};
-replTest.initiateWithHighElectionTimeout(config);
-let rollbackTest = new RollbackTest(name, replTest);
-
-jsTest.log("Ensure the stable timestamp is ahead of the common point on the rollback node.");
-const rollbackNode = rollbackTest.transitionToRollbackOperations();
-const operationTime = assert
- .commandWorked(rollbackNode.getDB(dbName).runCommand(
- {insert: collName, documents: [{_id: "rollback op"}]}))
- .operationTime;
-
-// Do a clean shutdown to ensure the recovery timestamp is at operationTime.
-jsTest.log("Restart the rollback node with enableMajorityReadConcern=true");
-rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "true"});
-const replSetGetStatusResponse =
- assert.commandWorked(rollbackNode.adminCommand({replSetGetStatus: 1}));
-assert.eq(replSetGetStatusResponse.lastStableRecoveryTimestamp,
- operationTime,
- tojson(replSetGetStatusResponse));
-
-// The rollback crashes because the common point is before the stable timestamp.
-jsTest.log("Attempt to roll back. This will fassert.");
-rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
-
-// The first rollback attempt with EMRC=true will fassert, so we expect the actual rollback to occur
-// with EMRC=false. Before the second rollback (via refetch) occurs, we must ensure that the sync
-// source's lastApplied is greater than the rollback node's. Otherwise, the rollback node will never
-// transition to SECONDARY since the rollback node's lastApplied will be less than the
-// initialDataTS. See SERVER-48518 for a more detailed explanation of this behavior.
-rollbackTest.awaitPrimaryAppliedSurpassesRollbackApplied();
-
-rollbackTest.transitionToSyncSourceOperationsDuringRollback();
-assert.soon(() => {
- return rawMongoProgramOutput().search(/Fatal assertion.+51121/) != -1;
-});
-
-jsTest.log(
- "Restart the rollback node with enableMajorityReadConcern=false. Now the rollback can succeed.");
-const allowedExitCode = 14;
-rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "false"}, allowedExitCode);
-
-// Ensure that the secondary has completed rollback by waiting for its last optime to equal the
-// primary's.
-rollbackTest.awaitReplication();
-
-// Fix counts for "local.startup_log", since they are corrupted by this rollback.
-// transitionToSteadyStateOperations() checks collection counts.
-assert.commandWorked(rollbackNode.getDB("local").runCommand({validate: "startup_log"}));
-rollbackTest.transitionToSteadyStateOperations();
-
-assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[collName].insert(
- {_id: "steady state op"}, {writeConcern: {w: "majority"}}));
-
-assert.eq(0, rollbackNode.getDB(dbName)[collName].find({_id: "rollback op"}).itcount());
-assert.eq(1, rollbackNode.getDB(dbName)[collName].find({_id: "steady state op"}).itcount());
-
-jsTest.log("Restart the rollback node with enableMajorityReadConcern=true.");
-rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "true"});
-
-// Make sure node 0 is the primary.
-let node = replTest.nodes[0];
-jsTestLog("Waiting for node " + node.host + " to become primary.");
-replTest.awaitNodesAgreeOnPrimary();
-// Wait until the node has finished starting up before running replSetStepUp.
-replTest.awaitSecondaryNodes(ReplSetTest.kDefaultTimeoutMS, [node]);
-assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
-replTest.waitForState(node, ReplSetTest.State.PRIMARY);
-assert.eq(replTest.getPrimary(), node, node.host + " was not primary after step up.");
-
-// Restart replication on the tiebreaker node before constructing a new RollbackTest.
-restartServerReplication(replTest.nodes[2]);
-
-// Create a new RollbackTest fixture to execute the final rollback. This will guarantee that the
-// final rollback occurs on the current primary, which should be node 0.
-jsTestLog("Creating a new RollbackTest fixture to execute a final rollback.");
-rollbackTest = new RollbackTest(name, replTest);
-
-jsTest.log("Rollback should succeed since the common point is at least the stable timestamp.");
-rollbackTest.transitionToRollbackOperations();
-rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
-rollbackTest.transitionToSyncSourceOperationsDuringRollback();
-rollbackTest.transitionToSteadyStateOperations();
-
-rollbackTest.stop();
-}());
diff --git a/jstests/replsets/rollback_crash_before_reaching_minvalid.js b/jstests/replsets/rollback_crash_before_reaching_minvalid.js
deleted file mode 100644
index 0a3ff90cacf..00000000000
--- a/jstests/replsets/rollback_crash_before_reaching_minvalid.js
+++ /dev/null
@@ -1,118 +0,0 @@
-/**
- * Test that a node does not take a stable checkpoint at a timestamp earlier than minValid after
- * crashing post rollbackViaRefetch. This test exercises that behavior when run with
- * enableMajorityReadConcern:false.
- *
- * @tags: [requires_persistence]
- */
-(function() {
-"use strict";
-
-load("jstests/replsets/libs/rollback_test.js");
-load("jstests/libs/fail_point_util.js");
-
-TestData.rollbackShutdowns = true;
-TestData.allowUncleanShutdowns = true;
-
-let dbName = "test";
-let sourceCollName = "coll";
-
-let doc1 = {_id: 1, x: "document_of_interest"};
-
-let CommonOps = (node) => {
- // Insert a document that will exist on all nodes.
- assert.commandWorked(node.getDB(dbName)[sourceCollName].insert(doc1));
-};
-
-let SyncSourceOps = (node) => {
- // Insert some documents on the sync source so the rollback node will have a minValid it needs
- // to catch up to.
- assert.commandWorked(node.getDB(dbName)[sourceCollName].insert({x: 1, sync_source: 1}));
- assert.commandWorked(node.getDB(dbName)[sourceCollName].insert({x: 2, sync_source: 1}));
- assert.commandWorked(node.getDB(dbName)[sourceCollName].insert({x: 3, sync_source: 1}));
-};
-
-let RollbackOps = (node) => {
- // Delete the document on the rollback node so it will be refetched from sync source.
- assert.commandWorked(node.getDB(dbName)[sourceCollName].remove(doc1));
-};
-
-const replTest = new ReplSetTest({nodes: 3, useBridge: true});
-replTest.startSet();
-// Speed up the test.
-replTest.nodes.forEach(node => {
- assert.commandWorked(
- node.adminCommand({configureFailPoint: 'setSmallOplogGetMoreMaxTimeMS', mode: 'alwaysOn'}));
-});
-let config = replTest.getReplSetConfig();
-config.members[2].priority = 0;
-config.settings = {
- chainingAllowed: false
-};
-replTest.initiateWithHighElectionTimeout(config);
-let rollbackTest = new RollbackTest("rollback_crash_before_reaching_minvalid", replTest);
-CommonOps(rollbackTest.getPrimary());
-
-let rollbackNode = rollbackTest.transitionToRollbackOperations();
-
-// Have the node hang after rollback has completed but before it starts applying ops again.
-rollbackNode.adminCommand({configureFailPoint: 'bgSyncHangAfterRunRollback', mode: 'alwaysOn'});
-RollbackOps(rollbackNode);
-
-let node = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
-SyncSourceOps(node);
-
-// Let the rollback run.
-rollbackTest.transitionToSyncSourceOperationsDuringRollback();
-
-jsTestLog("Waiting for the rollback node to hit the failpoint.");
-checkLog.contains(rollbackNode, "bgSyncHangAfterRunRollback failpoint is set");
-
-// Kill the rollback node before it has reached minValid. Sending a shutdown signal to the node
-// should cause us to break out of the hung failpoint, so we don't need to explicitly turn the
-// failpoint off.
-jsTestLog("Killing the rollback node.");
-replTest.stop(0, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL}, {forRestart: true});
-replTest.start(
- 0,
- {
- setParameter: {
- // Pause oplog fetching so the node doesn't advance past minValid after restart.
- "failpoint.stopReplProducer": "{'mode':'alwaysOn'}"
- }
- },
- true /* restart */);
-
-// Wait long enough for the initial stable checkpoint to be triggered if it was going to be. We
-// expect that no stable checkpoints are taken. If they are, we expect the test to fail when we
-// restart below and recover from a stable checkpoint.
-//
-// First we wait until the node has a commit point, since learning of one should trigger an update
-// to the stable timestamp. Then, we wait for a bit after this for any potential checkpoint to
-// occur. In the worst case, if the checkpoint was very slow to complete, we might produce a false
-// negative test result (the test would pass even though a bug existed), but we consider this
-// acceptable if it happens rarely.
-assert.soonNoExcept(() => {
- let status = replTest.nodes[0].adminCommand({replSetGetStatus: 1});
- return status.optimes.lastCommittedOpTime.ts !== Timestamp(0, 0);
-});
-sleep(5000);
-
-// Kill and restart the node to test that we don't recover from an inconsistent stable checkpoint
-// taken above.
-replTest.stop(0, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL}, {forRestart: true});
-replTest.start(
- 0,
- {
- setParameter: {
- // Make sure this failpoint is not still enabled in the saved startup options.
- "failpoint.stopReplProducer": "{'mode':'off'}"
- }
- },
- true /* restart */);
-
-rollbackTest.transitionToSteadyStateOperations();
-
-// Check the replica set.
-rollbackTest.stop();
-}()); \ No newline at end of file
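The deleted test above infers "no stable checkpoint was taken" only indirectly, by killing and restarting the node and checking that it still recovers consistently. A more direct way to observe stable-checkpoint progress, not used by the test, is the lastStableRecoveryTimestamp field reported by replSetGetStatus; a minimal sketch under that assumption, with `node` being a connection to the restarted rollback node:

    // Hypothetical check (not part of the original test): while lastApplied is still behind
    // minValid, no stable recovery timestamp past the rollback common point should exist.
    const minValidTs = node.getDB("local").replset.minvalid.findOne().ts;
    const status = assert.commandWorked(node.adminCommand({replSetGetStatus: 1}));
    jsTestLog(`minValid: ${tojson(minValidTs)}, applied: ${tojson(status.optimes.appliedOpTime.ts)},` +
              ` lastStableRecoveryTimestamp: ${tojson(status.lastStableRecoveryTimestamp)}`);
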
diff --git a/jstests/replsets/rollback_via_refetch_anomaly.js b/jstests/replsets/rollback_via_refetch_anomaly.js
deleted file mode 100644
index d155dbc31bd..00000000000
--- a/jstests/replsets/rollback_via_refetch_anomaly.js
+++ /dev/null
@@ -1,196 +0,0 @@
-/**
- * Rolled-back documents must not be visible when querying a recovered secondary.
- *
- * This test attempts to reproduce SERVER-48518. In the following description, 'T' is the tiebreaker
- * node, 'P1' and 'P2' are primaries in terms 1 and 2.
- *
- * - Begin in RollbackTest.kSteadyStateOps with primary P1, all nodes connected:
- *
- * T
- * / \
- * P1 - P2
- * primary secondary
- *
- * - Insert _id 0 into P1 at timestamp 1.
- * - Transition to kRollbackOps by disconnecting P1 from P2:
- *
- * T
- * /
- * P1 P2
- * primary secondary
- *
- * - Insert _id 1 into P1 at timestamp 2.
- *
- * TS 1 TS 2
- * insert 0 insert 1
- * P1 --------------->
- * P2 --->
- *
- * - Isolate P1 from T, connect P2 to T:
- *
- * T
- * \
- * P1 P2
- * primary new primary
- *
- * (Same as RollbackTest.transitionToSyncSourceOperationsBeforeRollback(), except do *not* trigger a
- * stepdown on P1.)
- *
- * - Step up P2, which writes a no-op to its oplog at timestamp 3.
- *
- * TS 1 TS 2
- * insert 0 insert 1
- *
- * P1 --------------->
- * P2 ----*
- * \
- * *-------------------------->
- * no-op
- * TS 3
- *
- * - Delete _id 0 and 1 from P1 at timestamp 4.
- *
- * TS 1 TS 2 TS 4
- * insert 0 insert 1 delete 0 and 1
- *
- * P1 --------------------------------------------------------------->
- * P2 ----*
- * \
- * *-------------------------->
- * no-op
- * TS 3
- *
- * - Reconnect P1 to P2 so it rolls back.
- *
- * T
- * \
- * P1 - P2
- * rollback new primary
- *
- * Rollback via refetch undoes the delete of _id 0 by reinserting _id 0 in P1 with an
- * untimestamped write. (It can't undo the delete of _id 1, since P2 doesn't have _id 1.)
- *
- * Before we fixed SERVER-48518, P1 served queries at lastApplied = top of P2's oplog = TS 3,
- * which includes _id 0, _id 1, and _id 0 again (it was reinserted with an untimestamped write).
- * To fix SERVER-48518, P1 won't transition from ROLLBACK until its lastApplied >= max(P1's oplog
- * top, P2's oplog top) = TS 4.
- *
- * - Write to P2 so it advances >= timestamp 4 and satisfies P1's conditions to finish rollback.
- * - Wait for P1 to finish rollback and transition to SECONDARY.
- * - Query P1 and check that rolled-back records aren't visible.
- *
- * To end the test, RollbackTest.transitionToSteadyStateOperations won't work because we've
- * diverged from the state it expects, so we end the test manually: reconnect P1 to T, re-enable
- * replication on T, and stop the replica set.
- *
- * T
- * / \
- * P1 - P2
- * secondary new primary
- *
- * @tags: [
- * requires_wiredtiger
- * ]
- */
-
-(function() {
-"use strict";
-
-load("jstests/libs/write_concern_util.js");
-load("jstests/replsets/libs/rollback_test.js");
-
-const rst = new ReplSetTest({
- nodes: 3,
- useBridge: true,
- nodeOptions: {
- enableMajorityReadConcern: "false",
- setParameter: {logComponentVerbosity: tojsononeline({replication: 2})}
- }
-});
-
-rst.startSet();
-const config = rst.getReplSetConfig();
-config.members[2].priority = 0;
-config.settings = {
- chainingAllowed: false
-};
-rst.initiateWithHighElectionTimeout(config);
-
-const rollbackTest = new RollbackTest(jsTestName(), rst);
-const P1 = rollbackTest.getPrimary();
-const P2 = rollbackTest.getSecondary();
-const T = rollbackTest.getTieBreaker();
-
-jsTestLog(`Node P1: ${P1.host}, P2: ${P2.host}, T: ${T.host}`);
-
-let testDB = P1.getDB(jsTestName());
-const testColl = testDB.getCollection("test");
-
-let reply = assert.commandWorked(testColl.insert({_id: 0}, {"writeConcern": {"w": "majority"}}));
-jsTestLog(`Inserted _id 0 into P1 ${tojson(reply.operationTime)}`);
-
-rollbackTest.transitionToRollbackOperations();
-reply = assert.commandWorked(testColl.insert({_id: 1}, {"writeConcern": {"w": 1}}));
-jsTestLog(`Inserted _id 1 into P1 ${tojson(reply.operationTime)}`);
-
-jsTestLog("Isolating P1 from tiebreaker");
-P1.disconnect([T]);
-
-jsTestLog("Reconnecting P2 to the tiebreaker");
-P2.reconnect([T]);
-
-jsTestLog("Step up P2");
-assert.soonNoExcept(() => {
- const res = P2.adminCommand({replSetStepUp: 1});
- return res.ok;
-}, "Failed to step up P2", ReplSetTest.kDefaultTimeoutMS);
-// "Transition to primary complete; database writes are now permitted".
-checkLog.containsJson(P2, 21331);
-jsTestLog("P2 stepped up");
-
-// Write to P1 to ensure TS 4 (P1's delete timestamp) > TS 3 (P2's step-up timestamp).
-assert.soon(() => {
- testDB.runCommand({insert: "otherCollection", documents: [{}]});
- function lastApplied(node) {
- const reply = assert.commandWorked(node.adminCommand({replSetGetStatus: 1}));
- return reply.optimes.appliedOpTime.ts;
- }
- const P1applied = lastApplied(P1);
- const P2applied = lastApplied(P2);
- jsTestLog(`P1 lastApplied ${tojson(P1applied)}, P2 lastApplied ${tojson(P2applied)}`);
- return timestampCmp(P1applied, P2applied) > 0;
-}, "P1's lastApplied never surpassed P2's");
-
-reply = assert.commandWorked(testDB.runCommand({delete: "test", deletes: [{q: {}, limit: 0}]}));
-jsTestLog(`Deleted from P1 at ${tojson(reply.operationTime)}.` +
- ` Reconnecting P1 to P2, so P1 rolls back.`);
-
-P1.reconnect([P2]);
-// "Rollback using the 'rollbackViaRefetch' method".
-checkLog.containsJson(P1, 21103);
-// "Finding the Common Point".
-checkLog.containsJson(P1, 21682);
-// "We cannot transition to SECONDARY state because our 'lastApplied' optime is less than the
-// initial data timestamp and enableMajorityReadConcern = false".
-checkLog.containsJson(P1, 4851800);
-
-reply = assert.commandWorked(
- P2.getDB(jsTestName()).runCommand({insert: "anotherCollection", documents: [{}]}));
-jsTestLog(`Inserted into P2 at ${tojson(reply.operationTime)}`);
-
-jsTestLog("Wait for P1 to enter SECONDARY");
-waitForState(P1, ReplSetTest.State.SECONDARY);
-
-// Both counts should be 1. If SERVER-48518 isn't fixed then itCount() = 3: _ids 0, 1, and 0 again!
-jsTestLog("Check collection count");
-let itCount = testColl.find().itcount();
-let fastCount = testColl.count();
-assert.eq(itCount,
- fastCount,
- `count: ${fastCount}, itCount: ${itCount}, data: ${tojson(testColl.find().toArray())}`);
-
-jsTestLog("Check succeeded. Ending test.");
-P1.reconnect([T]);
-restartServerReplication(T);
-rst.stopSet();
-}());
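The condition that the SERVER-48518 fix enforces, as described in the header comment of the deleted test above, can be written down as a single assertion. A minimal sketch, assuming `P1` and `P2` are the connections from the test and that P1 has just transitioned back to SECONDARY:

    // P1 may only leave ROLLBACK once its lastApplied has caught up to at least the newer of the
    // two oplog tops; otherwise the untimestamped refetch writes would be visible at a stale
    // lastApplied read timestamp.
    function lastAppliedTs(node) {
        return assert.commandWorked(node.adminCommand({replSetGetStatus: 1}))
            .optimes.appliedOpTime.ts;
    }
    assert.gte(timestampCmp(lastAppliedTs(P1), lastAppliedTs(P2)), 0);
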
diff --git a/jstests/replsets/rollback_via_refetch_survives_nonexistent_collection_drop.js b/jstests/replsets/rollback_via_refetch_survives_nonexistent_collection_drop.js
deleted file mode 100644
index e9bd8876672..00000000000
--- a/jstests/replsets/rollback_via_refetch_survives_nonexistent_collection_drop.js
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Test that rollbackViaRefetch survives an attempt to drop a collection that does not exist.
- * This test simulates a scenario where a collection is dropped during the first rollback
- * attempt.
- *
- * We use a failpoint that forces rollback to return early with a recoverable error once the
- * collection has been dropped. We then turn this failpoint off so that the second attempt
- * can succeed even though the collection has already been dropped.
- *
- */
-
-(function() {
-"use strict";
-load("jstests/replsets/libs/rollback_test.js");
-
-const dbName = "test";
-const collName = "rollback_via_refetch_survives_nonexistent_collection_drop";
-
-// Provide RollbackTest with custom ReplSetTest so we can set enableMajorityReadConcern.
-const rst = new ReplSetTest(
- {name: collName, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "false"}});
-
-rst.startSet();
-const config = rst.getReplSetConfig();
-config.members[2].priority = 0;
-config.settings = {
- chainingAllowed: false
-};
-rst.initiateWithHighElectionTimeout(config);
-
-const rollbackTest = new RollbackTest(collName, rst);
-
-// Stop replication from the current primary, the rollback node.
-const rollbackNode = rollbackTest.transitionToRollbackOperations();
-const rollbackDB = rollbackNode.getDB(dbName);
-
-jsTestLog("Turning on the rollbackExitEarlyAfterCollectionDrop fail point");
-assert.commandWorked(rollbackDB.adminCommand(
- {configureFailPoint: 'rollbackExitEarlyAfterCollectionDrop', mode: 'alwaysOn'}));
-
-// Create a collection on the rollback node.
-assert.commandWorked(rollbackDB.runCommand({create: collName}));
-
-// Step down the current primary and elect the node that does not have the collection.
-rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
-
-jsTestLog("Attempting to roll back.");
-// Make the old primary rollback against the new primary. This attempt should fail because the
-// rollbackExitEarlyAfterCollectionDrop fail point is set. We fail with a recoverable error
-// so that the rollback will be retried.
-rollbackTest.transitionToSyncSourceOperationsDuringRollback();
-
-// Make sure we exit the rollback early by checking for the correct log messages.
-checkLog.contains(rollbackDB.getMongo(),
- "rollbackExitEarlyAfterCollectionDrop fail point enabled.");
-
-jsTestLog("Turning off the rollbackExitEarlyAfterCollectionDrop fail point");
-// A rollback attempt after turning off the fail point should succeed even if we already
-// dropped the collection.
-assert.commandWorked(rollbackDB.adminCommand(
- {configureFailPoint: 'rollbackExitEarlyAfterCollectionDrop', mode: 'off'}));
-
-// After a successful rollback attempt, we should have seen the following log message, which
-// ensures that we tried to drop a non-existent collection and continued without acquiring a
-// database lock. This check has to run before transitionToSteadyStateOperations() so that the
-// RAM log is not overwritten by oplog fetcher retry errors.
-checkLog.containsJson(rollbackDB.getMongo(), 21696); // This collection does not exist
-
-rollbackTest.transitionToSteadyStateOperations();
-
-rollbackTest.stop();
-}());
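The raw configureFailPoint adminCommand invocations in the deleted test above can also be expressed with the fail_point_util.js helper that other tests touched by this patch use; a minimal sketch, assuming `rollbackNode` is the connection from the test:

    load("jstests/libs/fail_point_util.js");
    const fp = configureFailPoint(rollbackNode, "rollbackExitEarlyAfterCollectionDrop");
    // ... trigger the first rollback attempt, which exits early at the failpoint ...
    fp.wait();  // block until the failpoint has actually been reached
    fp.off();   // turn it off so the retried rollback attempt can run to completion
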
diff --git a/jstests/replsets/rollback_via_refetch_update_rollback_id_before_oplog_truncation.js b/jstests/replsets/rollback_via_refetch_update_rollback_id_before_oplog_truncation.js
deleted file mode 100644
index 0effc3cc1f5..00000000000
--- a/jstests/replsets/rollback_via_refetch_update_rollback_id_before_oplog_truncation.js
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * This test demonstrates that rollback via refetch always increments the rollback id as soon as it
- * resolves the common point and before proceeding with other operations.
- *
- * This is a regression test that makes sure we avoid the scenario where we truncate our oplog (at
- * which point the rollback is effectively finished), then shut down uncleanly before we get a
- * chance to update the rollbackId.
- *
- * @tags: [requires_journaling]
- */
-
-(function() {
-"use strict";
-load("jstests/libs/fail_point_util.js");
-load("jstests/replsets/libs/rollback_test.js");
-load("jstests/replsets/rslib.js");
-
-const name = jsTestName();
-TestData.allowUncleanShutdowns = true;
-
-jsTest.log("Set up a RollbackTest with enableMajorityReadConcern=false");
-const rst = new ReplSetTest({
- name,
- nodes: [{}, {}, {rsConfig: {priority: 0}}],
- useBridge: true,
- nodeOptions: {enableMajorityReadConcern: "false"},
- settings: {chainingAllowed: false}
-});
-
-rst.startSet();
-rst.initiateWithHighElectionTimeout();
-
-const rollbackTest = new RollbackTest(name, rst);
-const rollbackNode = rollbackTest.transitionToRollbackOperations();
-
-const baseRBID = assert.commandWorked(rollbackNode.adminCommand("replSetGetRBID")).rbid;
-
-rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
-
-jsTestLog("Make rollback-via-refetch exit early after truncating the oplog");
-assert.commandWorked(rollbackNode.adminCommand(
- {configureFailPoint: "rollbackExitEarlyAfterCollectionDrop", mode: "alwaysOn"}));
-
-rollbackTest.transitionToSyncSourceOperationsDuringRollback();
-
-jsTestLog("Wait until we hit the failpoint");
-assert.soonNoExcept(function() {
- assert.commandWorked(rollbackNode.adminCommand({
- waitForFailPoint: "rollbackExitEarlyAfterCollectionDrop",
- timesEntered: 1,
- maxTimeMS: kDefaultWaitForFailPointTimeout
- }));
- return true;
-}, "failed to reconnect for waitForFailPoint");
-
-// Check that the RBID has still managed to advance.
-// Looking at the RBID directly is our first line of defense.
-assert.eq(baseRBID + 1, assert.commandWorked(rollbackNode.adminCommand("replSetGetRBID")).rbid);
-
-assert.commandWorked(rollbackNode.adminCommand(
- {configureFailPoint: "rollbackExitEarlyAfterCollectionDrop", mode: "off"}));
-
-// Verify that the node can rejoin the set as normal.
-rollbackTest.transitionToSteadyStateOperations();
-rollbackTest.stop();
-}()); \ No newline at end of file
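For reference, the RBID comparison that the deleted test above relies on is the standard way to detect that a rollback happened between two points in time; a minimal sketch, assuming `node` is a replica set member connection:

    const rbidBefore = assert.commandWorked(node.adminCommand("replSetGetRBID")).rbid;
    // ... operations during which a rollback may or may not occur ...
    const rbidAfter = assert.commandWorked(node.adminCommand("replSetGetRBID")).rbid;
    assert.eq(rbidBefore, rbidAfter, "a rollback occurred in between");
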
diff --git a/jstests/replsets/speculative_majority_find.js b/jstests/replsets/speculative_majority_find.js
deleted file mode 100644
index 6f040d920d9..00000000000
--- a/jstests/replsets/speculative_majority_find.js
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Test speculative majority reads using the 'find' command.
- *
- * Speculative majority reads allow the server to provide "majority" read guarantees without storage
- * engine support for reading from a historical snapshot. Instead of reading historical, majority
- * committed data, we just read the newest data available on a node, and then, before returning to a
- * client, block until we know the data has become majority committed. Currently this is an internal
- * feature used only by change streams.
- *
- * @tags: [uses_speculative_majority]
- */
-(function() {
-"use strict";
-
-load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
-load("jstests/libs/parallelTester.js"); // for Thread.
-
-let name = "speculative_majority_find";
-let replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {
- enableMajorityReadConcern: 'false',
- // Increase log verbosity so we can see all commands that run on the server.
- setParameter: {logComponentVerbosity: tojson({command: 2})}
- }
-});
-replTest.startSet();
-replTest.initiate();
-
-let dbName = name;
-let collName = "coll";
-
-let primary = replTest.getPrimary();
-let secondary = replTest.getSecondary();
-
-let primaryDB = primary.getDB(dbName);
-let secondaryDB = secondary.getDB(dbName);
-let primaryColl = primaryDB[collName];
-// Create a collection.
-assert.commandWorked(primaryColl.insert({}, {writeConcern: {w: "majority"}}));
-
-//
-// Test basic reads with speculative majority.
-//
-
-// Pause replication on the secondary so that writes won't majority commit.
-stopServerReplication(secondary);
-assert.commandWorked(primaryColl.insert({_id: 1}));
-
-jsTestLog("Do a speculative majority read that should time out.");
-let res = primaryDB.runCommand({
- find: collName,
- readConcern: {level: "majority"},
- filter: {_id: 1},
- allowSpeculativeMajorityRead: true,
- maxTimeMS: 5000
-});
-assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
-restartServerReplication(secondary);
-replTest.awaitLastOpCommitted();
-
-jsTestLog("Do a speculative majority read that should succeed.");
-res = primaryDB.runCommand({
- find: collName,
- readConcern: {level: "majority"},
- filter: {_id: 1},
- allowSpeculativeMajorityRead: true
-});
-assert.commandWorked(res);
-assert.eq(res.cursor.firstBatch.length, 1);
-assert.eq(res.cursor.firstBatch[0], {_id: 1});
-
-//
-// Test that blocked reads can succeed when a write majority commits.
-//
-
-// Pause replication on the secondary so that writes won't majority commit.
-stopServerReplication(secondary);
-assert.commandWorked(primaryColl.insert({_id: 2}));
-
-jsTestLog("Do a speculative majority that should block until write commits.");
-let speculativeRead = new Thread(function(host, dbName, collName) {
- const nodeDB = new Mongo(host).getDB(dbName);
- return nodeDB.runCommand({
- find: collName,
- readConcern: {level: "majority"},
- filter: {_id: 2},
- allowSpeculativeMajorityRead: true
- });
-}, primary.host, dbName, collName);
-speculativeRead.start();
-
-// Wait for the read to start on the server.
-assert.soon(() => primaryDB.currentOp({ns: primaryColl.getFullName(), "command.find": collName})
- .inprog.length === 1);
-
-// Let the previous write commit.
-restartServerReplication(secondary);
-assert.commandWorked(
- primaryColl.insert({_id: "commit_last_write"}, {writeConcern: {w: "majority"}}));
-
-// Make sure the read finished and returned correct results.
-speculativeRead.join();
-res = speculativeRead.returnData();
-assert.commandWorked(res);
-assert.eq(res.cursor.firstBatch.length, 1);
-assert.eq(res.cursor.firstBatch[0], {_id: 2});
-
-//
-// Test 'afterClusterTime' reads with speculative majority.
-//
-stopServerReplication(secondary);
-
-// Insert a document on the primary and record the response.
-let writeRes = primaryDB.runCommand({insert: collName, documents: [{_id: 3}]});
-assert.commandWorked(writeRes);
-
-jsTestLog(
- "Do a speculative majority read on primary with 'afterClusterTime' that should time out.");
-res = primaryDB.runCommand({
- find: collName,
- readConcern: {level: "majority", afterClusterTime: writeRes.operationTime},
- filter: {_id: 3},
- $clusterTime: writeRes.$clusterTime,
- allowSpeculativeMajorityRead: true,
- maxTimeMS: 5000
-});
-assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
-jsTestLog(
- "Do a speculative majority read on secondary with 'afterClusterTime' that should time out.");
-res = secondaryDB.runCommand({
- find: collName,
- readConcern: {level: "majority", afterClusterTime: writeRes.operationTime},
- filter: {_id: 3},
- $clusterTime: writeRes.$clusterTime,
- allowSpeculativeMajorityRead: true,
- maxTimeMS: 5000
-});
-assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
-// Let the previous write majority commit.
-restartServerReplication(secondary);
-replTest.awaitLastOpCommitted();
-
-jsTestLog("Do a speculative majority read with 'afterClusterTime' that should succeed.");
-res = primaryDB.runCommand({
- find: collName,
- readConcern: {level: "majority", afterClusterTime: writeRes.operationTime},
- filter: {_id: 3},
- $clusterTime: res.$clusterTime,
- allowSpeculativeMajorityRead: true
-});
-assert.commandWorked(res);
-assert.eq(res.cursor.firstBatch.length, 1);
-assert.eq(res.cursor.firstBatch[0], {_id: 3});
-
-replTest.stopSet();
-})();
diff --git a/jstests/replsets/speculative_majority_supported_commands.js b/jstests/replsets/speculative_majority_supported_commands.js
deleted file mode 100644
index 7c1bbdb2434..00000000000
--- a/jstests/replsets/speculative_majority_supported_commands.js
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Verify that speculative majority reads are only allowed on supported commands.
- *
- * Currently, only change stream aggregation commands and the 'find' command with the
- * 'allowSpeculativeMajorityRead' flag are permitted.
- *
- * @tags: [uses_speculative_majority]
- */
-(function() {
-"use strict";
-
-let name = "speculative_majority_supported_commands";
-let replTest =
- new ReplSetTest({name: name, nodes: 1, nodeOptions: {enableMajorityReadConcern: 'false'}});
-replTest.startSet();
-replTest.initiate();
-
-let dbName = name;
-let collName = "coll";
-
-let primary = replTest.getPrimary();
-let primaryDB = primary.getDB(dbName);
-
-// Create a collection.
-assert.commandWorked(primaryDB[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
-
-/**
- * Allowed commands.
- */
-
-// Change stream aggregation is allowed.
-let res = primaryDB.runCommand({
- aggregate: collName,
- pipeline: [{$changeStream: {}}],
- cursor: {},
- readConcern: {level: "majority"}
-});
-assert.commandWorked(res);
-
-// Find query with speculative flag is allowed.
-res = primaryDB.runCommand(
- {find: collName, readConcern: {level: "majority"}, allowSpeculativeMajorityRead: true});
-assert.commandWorked(res);
-
-/**
- * Disallowed commands.
- */
-
-// A non-change-stream aggregation is not allowed.
-res = primaryDB.runCommand({
- aggregate: collName,
- pipeline: [{$project: {}}],
- cursor: {},
- readConcern: {level: "majority"}
-});
-assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
-
-// The 'find' command without requisite flag is unsupported.
-res = primaryDB.runCommand({find: collName, readConcern: {level: "majority"}});
-assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
-
-res = primaryDB.runCommand(
- {find: collName, readConcern: {level: "majority"}, allowSpeculativeMajorityRead: false});
-assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
-
-// Another basic read command. We don't exhaustively check all commands.
-res = primaryDB.runCommand({count: collName, readConcern: {level: "majority"}});
-assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
-
-// Speculative flag is only allowed on find commands.
-res = primaryDB.runCommand(
- {count: collName, readConcern: {level: "majority"}, allowSpeculativeMajorityRead: true});
-assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
-
-replTest.stopSet();
-})(); \ No newline at end of file
diff --git a/jstests/replsets/transactions_after_rollback_via_refetch.js b/jstests/replsets/transactions_after_rollback_via_refetch.js
deleted file mode 100644
index 80ef4a8ded9..00000000000
--- a/jstests/replsets/transactions_after_rollback_via_refetch.js
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Basic test that transactions are able to run against a node immediately after it has executed a
- * refetch-based rollback of a few basic CRUD and DDL ops. Local writes done during the rollback
- * process are not timestamped, so we want to ensure that transactions can be started against a
- * valid snapshot post-rollback and read data correctly.
- *
- * @tags: [uses_transactions]
- */
-(function() {
-'use strict';
-
-load("jstests/replsets/libs/rollback_test.js");
-
-let name = "transactions_after_rollback_via_refetch";
-let dbName = name;
-let crudCollName = "crudColl";
-let collToDropName = "collToDrop";
-
-let CommonOps = (node) => {
- // Insert a couple of documents that will initially be present on all nodes.
- let crudColl = node.getDB(dbName)[crudCollName];
- assert.commandWorked(crudColl.insert({_id: 0}));
- assert.commandWorked(crudColl.insert({_id: 1}));
-
- // Create a collection so it can be dropped on the rollback node.
- node.getDB(dbName)[collToDropName].insert({_id: 0});
-};
-
-// We want to have the rollback node perform some inserts, updates, and deletes locally
-// during the rollback process, so we can ensure that transactions will read correct data
-// post-rollback, even though these writes will be un-timestamped.
-let RollbackOps = (node) => {
- let crudColl = node.getDB(dbName)[crudCollName];
- // Roll back an update (causes refetch and local update).
- assert.commandWorked(crudColl.update({_id: 0}, {$set: {rollbackNode: 0}}));
- // Roll back a delete (causes refetch and local insert).
- assert.commandWorked(crudColl.remove({_id: 1}));
- // Roll back an insert (causes local delete).
- assert.commandWorked(crudColl.insert({_id: 2}));
-
- // Roll back a drop (re-creates the collection).
- node.getDB(dbName)[collToDropName].drop();
-};
-
-let SyncSourceOps = (node) => {
- let coll = node.getDB(dbName)[crudCollName];
- // Update these docs so the rollback node will refetch them.
- assert.commandWorked(coll.update({_id: 0}, {$set: {syncSource: 0}}));
- assert.commandWorked(coll.update({_id: 1}, {$set: {syncSource: 1}}));
-};
-
-// Set up a replica set for use in RollbackTest. We disable majority reads on all nodes so that
-// they will use the "rollbackViaRefetch" algorithm.
-let replTest = new ReplSetTest({
- name,
- nodes: 3,
- useBridge: true,
- settings: {chainingAllowed: false},
- nodeOptions: {enableMajorityReadConcern: "false"}
-});
-replTest.startSet();
-let config = replTest.getReplSetConfig();
-config.members[2].priority = 0;
-replTest.initiateWithHighElectionTimeout(config);
-
-let rollbackTest = new RollbackTest(name, replTest);
-
-CommonOps(rollbackTest.getPrimary());
-
-let rollbackNode = rollbackTest.transitionToRollbackOperations();
-RollbackOps(rollbackNode);
-
-let syncSourceNode = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
-SyncSourceOps(syncSourceNode);
-
-// Wait for rollback to finish.
-rollbackTest.transitionToSyncSourceOperationsDuringRollback();
-rollbackTest.transitionToSteadyStateOperations();
-
-// Make the rollback node primary so we can run transactions against it.
-rollbackTest.getTestFixture().stepUp(rollbackNode);
-
-jsTestLog("Testing transactions against the node that just rolled back.");
-const sessionOptions = {
- causalConsistency: false
-};
-let session = rollbackNode.getDB(dbName).getMongo().startSession(sessionOptions);
-let sessionDb = session.getDatabase(dbName);
-let sessionColl = sessionDb[crudCollName];
-
-// Make sure we can do basic CRUD ops inside a transaction and read the data back correctly,
-// pre- and post-commit.
-session.startTransaction();
-// Make sure we read from the snapshot correctly.
-assert.docEq(sessionColl.find().sort({_id: 1}).toArray(),
- [{_id: 0, syncSource: 0}, {_id: 1, syncSource: 1}]);
-// Do some basic ops.
-assert.commandWorked(sessionColl.update({_id: 0}, {$set: {inTxn: 1}}));
-assert.commandWorked(sessionColl.remove({_id: 1}));
-assert.commandWorked(sessionColl.insert({_id: 2}));
-// Make sure we read the updated data correctly.
-assert.docEq(sessionColl.find().sort({_id: 1}).toArray(),
- [{_id: 0, syncSource: 0, inTxn: 1}, {_id: 2}]);
-assert.commandWorked(session.commitTransaction_forTesting());
-
-// Make sure data is visible after commit.
-assert.docEq(sessionColl.find().sort({_id: 1}).toArray(),
- [{_id: 0, syncSource: 0, inTxn: 1}, {_id: 2}]);
-
-// Run a transaction that touches the collection that was re-created during rollback.
-sessionColl = sessionDb[collToDropName];
-session.startTransaction();
-assert.docEq(sessionColl.find().sort({_id: 1}).toArray(), [{_id: 0}]);
-assert.commandWorked(sessionColl.update({_id: 0}, {$set: {inTxn: 1}}));
-assert.commandWorked(session.commitTransaction_forTesting());
-
-// Make sure data is visible after commit.
-assert.docEq(sessionColl.find().sort({_id: 1}).toArray(), [{_id: 0, inTxn: 1}]);
-
-// Check the replica set.
-rollbackTest.stop();
-}());
diff --git a/jstests/replsets/unrecoverable_rollback_early_exit.js b/jstests/replsets/unrecoverable_rollback_early_exit.js
deleted file mode 100644
index c709820eaa6..00000000000
--- a/jstests/replsets/unrecoverable_rollback_early_exit.js
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
 - * This test exercises an unrecoverable rollback using rollback_test.js, expecting it to terminate
 - * early and cleanly instead of failing.
- * An unrecoverable rollback can happen with EMRC:false, as it is possible for rollback via refetch
- * to set a minValid based on oplog entries that the sync source may have failed to recover after
- * an unclean shutdown. The rollback node will need to sync and apply oplog entries up to minValid
- * to be consistent, but if those oplog entries no longer exist, then it will be stuck in sync
- * source selection and unable to complete recovery.
 - * This test reproduces the scenario in a simpler form by moving the minValid on the rollback
 - * node far into the future, so we do not have to simulate anything happening to the sync source.
- */
-
-(function() {
-"use strict";
-
-load("jstests/libs/fail_point_util.js");
-load("jstests/replsets/libs/rollback_test.js");
-
-function tsToDate(ts) {
- return new Date(ts.getTime() * 1000);
-}
-
-const testName = jsTestName();
-
-const rst = new ReplSetTest({
- name: testName,
- nodes: [{}, {}, {rsConfig: {priority: 0}}],
- useBridge: true,
- settings: {chainingAllowed: false},
- nodeOptions: {enableMajorityReadConcern: "false"}
-});
-rst.startSet();
-rst.initiateWithHighElectionTimeout();
-
-const rollbackTest = new RollbackTest(testName, rst);
-const rollbackNode = rollbackTest.transitionToRollbackOperations();
-
-rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
-
-const failpoint = configureFailPoint(rollbackNode, "rollbackHangBeforeFinish");
-
-rollbackTest.transitionToSyncSourceOperationsDuringRollback();
-
-assert.soonNoExcept(function() {
- // Rollback will cause the node to close connections.
- failpoint.wait();
- return true;
-});
-
-const farFutureTS = new Timestamp(
- Math.floor(new Date().getTime() / 1000) + (60 * 60 * 24 * 5 /* in five days*/), 0);
-
-jsTest.log("future TS: " + tojson(farFutureTS) + ", date:" + tsToDate(farFutureTS));
-
-const mMinvalid = rollbackNode.getDB("local").getCollection("replset.minvalid");
-
-const minValidUpdate = {
- $set: {ts: farFutureTS}
-};
-jsTestLog("Current minValid is " + tojson(mMinvalid.findOne()));
-jsTestLog("Updating minValid to: " + tojson(minValidUpdate));
-printjson(assert.commandWorked(mMinvalid.update({}, minValidUpdate)));
-
-failpoint.off();
-
-rollbackTest.setAwaitSecondaryNodesForRollbackTimeout(5 * 1000);
-
-// We will detect an unrecoverable rollback here.
-rollbackTest.transitionToSteadyStateOperations();
-
-rollbackTest.stop();
-})(); \ No newline at end of file
diff --git a/jstests/sharding/multi_shard_transaction_without_majority_reads.js b/jstests/sharding/multi_shard_transaction_without_majority_reads.js
deleted file mode 100644
index e694f5d4c7a..00000000000
--- a/jstests/sharding/multi_shard_transaction_without_majority_reads.js
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Test that multi-shard transactions will fail with a non-transient error when run against shards
- * that have majority read concern disabled.
- *
- * @tags: [uses_transactions]
- */
-
-(function() {
-'use strict';
-
-const st = new ShardingTest({shards: 2, rs: {nodes: 1, enableMajorityReadConcern: 'false'}});
-
-assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
-st.ensurePrimaryShard('TestDB', st.shard0.shardName);
-assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
-
-const coll = st.s0.getDB('TestDB').TestColl;
-assert.commandWorked(coll.insert({_id: -1, x: 0}));
-assert.commandWorked(coll.insert({_id: 1, x: 0}));
-assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {_id: 1}}));
-assert.commandWorked(
- st.s0.adminCommand({moveChunk: 'TestDB.TestColl', find: {_id: 1}, to: st.shard1.shardName}));
-
-assert.commandWorked(coll.update({_id: -1}, {$inc: {x: 1}}));
-assert.commandWorked(coll.update({_id: 1}, {$inc: {x: 1}}));
-
-const session = st.s0.startSession();
-const sessionColl = session.getDatabase('TestDB').TestColl;
-
-session.startTransaction();
-
-assert.commandWorked(sessionColl.update({_id: -1}, {$inc: {x: 1}}));
-assert.commandWorked(sessionColl.update({_id: 1}, {$inc: {x: 1}}));
-
-assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.ReadConcernMajorityNotEnabled);
-
-st.stop();
-})();
diff --git a/jstests/sharding/set_default_rwc_before_stop_sharding_test.js b/jstests/sharding/set_default_rwc_before_stop_sharding_test.js
index 7c1bfebd806..33a4947fbc5 100644
--- a/jstests/sharding/set_default_rwc_before_stop_sharding_test.js
+++ b/jstests/sharding/set_default_rwc_before_stop_sharding_test.js
@@ -5,14 +5,16 @@
(function() {
"use strict";
-const st = new ShardingTest({shards: 1, rs: {nodes: 1, enableMajorityReadConcern: "false"}});
+const st = new ShardingTest({shards: 1, rs: {nodes: 1}});
// Create a sharded collection so the index and uuid hooks have something to check.
assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
assert.commandWorked(st.s.adminCommand({shardCollection: "test.foo", key: {_id: 1}}));
-// Deliberately set an unsatisfiable default read and write concern so any operations run in the
-// shutdown hooks will fail if they inherit either.
+// Deliberately set a write concern and read concern that are different from the default w:1 and
+// local values.
+// The write concern is unsatisfiable, so any operations run in the shutdown hooks will fail if
+// they inherit it.
assert.commandWorked(st.s.adminCommand({
setDefaultRWConcern: 1,
defaultWriteConcern: {w: 42},
diff --git a/jstests/sharding/single_shard_transaction_without_majority_reads_lagged.js b/jstests/sharding/single_shard_transaction_without_majority_reads_lagged.js
deleted file mode 100644
index a558b388573..00000000000
--- a/jstests/sharding/single_shard_transaction_without_majority_reads_lagged.js
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * Test that single-shard transactions succeed against replica sets whose primary has
- * 'enableMajorityReadConcern':false and whose secondary is significantly lagged.
- *
- * With majority reads disabled, we are not guaranteed to be able to service reads at the majority
- * commit point. We can only provide reads within a window behind the primary's 'lastApplied'. The
- * size of that window is controlled by 'minSnapshotHistoryWindowInSeconds', which is 5
- * seconds by default. If the commit point lag is greater than that amount, reading at that time
- * fails with a SnapshotTooOld error. Therefore, in order for the transaction to succeed, mongos
- * needs to pick a read timestamp that is not derived from the commit point, but rather from the
- * 'lastApplied' optime on the primary.
- *
- * Requires fcv_47 because the minSnapshotHistoryWindowInSeconds parameter was introduced in 4.7
- *
- * @tags: [
- * uses_transactions,
- * requires_find_command,
- * requires_fcv_47
- * ]
- */
-
-(function() {
-"use strict";
-
-load("jstests/libs/write_concern_util.js"); // for 'stopServerReplication' and
- // 'restartServerReplication'.
-
-const name = "single_shard_transaction_without_majority_reads_lagged";
-const dbName = "test";
-const collName = name;
-
-const shardingTest = new ShardingTest({
- shards: 1,
- rs: {
- nodes: [
- {/* primary */ enableMajorityReadConcern: 'false'},
- {/* secondary */ rsConfig: {priority: 0}}
- ]
- }
-});
-
-const rst = shardingTest.rs0;
-const mongos = shardingTest.s;
-const mongosDB = mongos.getDB(dbName);
-const mongosColl = mongosDB[collName];
-
-// Create and shard collection beforehand.
-assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
-assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
-// This is the last write the secondary will have before the start of the transaction.
-assert.commandWorked(mongosColl.insert({_id: 1, x: 10}, {writeConcern: {w: "majority"}}));
-
-// We want the secondary to lag for an amount generously greater than the history window.
-const secondary = rst.getSecondary();
-const maxWindowResult = assert.commandWorked(secondary.getDB("admin").runCommand(
- {"getParameter": 1, "minSnapshotHistoryWindowInSeconds": 1}));
-stopServerReplication(secondary);
-
-const maxWindowInMS = maxWindowResult.minSnapshotHistoryWindowInSeconds * 1000;
-const lagTimeMS = maxWindowInMS * 2;
-const startTime = Date.now();
-let nextId = 1000;
-
-// Insert a stream of writes to the primary with _ids that are all numbers greater than or equal
-// to 1000 (this makes them easy to distinguish from the earlier write with _id: 1).
-// The secondary cannot replicate them, so this has the effect of making that node lagged.
-// It would also update mongos' notion of the latest clusterTime in the system.
-while (Date.now() - startTime < lagTimeMS) {
- assert.commandWorked(mongosColl.insert({_id: nextId}));
- nextId++;
- sleep(50);
-}
-
-// This is an update only the primary has. The test will explicitly check for it in a few lines.
-assert.commandWorked(mongosColl.update({_id: 1, x: 10}, {_id: 1, x: 20}));
-
-const session = mongos.startSession();
-const sessionDB = session.getDatabase(dbName);
-const sessionColl = sessionDB.getCollection(collName);
-
-// Begin a transaction and make sure its associated read succeeds. To make this test stricter,
-// have the transaction manipulate data that differs between the primary and secondary.
-session.startTransaction();
-assert.commandWorked(sessionColl.update({_id: 1}, {$inc: {x: 1}}));
-
-assert.commandWorked(session.commitTransaction_forTesting());
-
-// Confirm that the results of the transaction are based on what the primary's data was when we
-// started the transaction.
-assert.eq(21, sessionColl.findOne({_id: 1}).x);
-
-restartServerReplication(secondary);
-shardingTest.stop();
-})();
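The SnapshotTooOld failure mode described in the header comment of the deleted test above is deliberately avoided by the test itself, since mongos picks a read timestamp derived from the primary's lastApplied rather than from the lagged commit point. A minimal sketch of how the error would surface if a transaction were instead pinned to a cluster time outside the history window, assuming `mongos` is the router connection and `oldClusterTime` is a timestamp captured more than minSnapshotHistoryWindowInSeconds earlier:

    const session = mongos.startSession();
    session.startTransaction({readConcern: {level: "snapshot", atClusterTime: oldClusterTime}});
    // The storage engine no longer retains history at 'oldClusterTime', so the read fails.
    const res = session.getDatabase("test").runCommand({find: "coll"});
    assert.commandFailedWithCode(res, ErrorCodes.SnapshotTooOld);
    session.abortTransaction_forTesting();
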
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 409bfd98395..2410e113f8e 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -5630,42 +5630,6 @@ TEST_F(StableOpTimeTest, SetMyLastAppliedSetsStableOpTimeForStorage) {
ASSERT_EQUALS(Timestamp(2, 2), stableTimestamp);
}
-TEST_F(StableOpTimeTest, SetMyLastAppliedSetsStableOpTimeForStorageDisableMajorityReadConcern) {
-
- /**
- * Test that 'setMyLastAppliedOpTime' sets the stable timestamp to the last applied when
- * enableMajorityReadConcern=false, even if the last committed optime is unset.
- */
-
- const auto originalEnableMajorityReadConcern = serverGlobalParams.enableMajorityReadConcern;
- serverGlobalParams.enableMajorityReadConcern = false;
- ON_BLOCK_EXIT(
- [&] { serverGlobalParams.enableMajorityReadConcern = originalEnableMajorityReadConcern; });
-
- init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(BSON("_id"
- << "mySet"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234")
- << BSON("_id" << 2 << "host"
- << "test3:1234"))),
- HostAndPort("test2", 1234));
-
- ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
-
- // Initially the stable timestamp is unset.
- ASSERT_EQUALS(Timestamp::min(), getStorageInterface()->getStableTimestamp());
-
- // Check that the stable timestamp is updated for the storage engine when we set the applied
- // optime, even though the last committed optime is unset.
- getStorageInterface()->allDurableTimestamp = Timestamp(1, 1);
- replCoordSetMyLastAppliedOpTime(OpTime({1, 1}, 1), Date_t() + Seconds(100));
- ASSERT_EQUALS(Timestamp(1, 1), getStorageInterface()->getStableTimestamp());
-}
-
TEST_F(StableOpTimeTest, AdvanceCommitPointSetsStableOpTimeForStorage) {
/**
@@ -5722,63 +5686,6 @@ TEST_F(StableOpTimeTest, AdvanceCommitPointSetsStableOpTimeForStorage) {
ASSERT_EQUALS(Timestamp(3, 2), stableTimestamp);
}
-TEST_F(StableOpTimeTest,
- AdvanceCommitPointDoesNotSetStableOpTimeForStorageInRollbackMajorityReadConcernOff) {
-
- const auto originalEnableMajorityReadConcern = serverGlobalParams.enableMajorityReadConcern;
- serverGlobalParams.enableMajorityReadConcern = false;
- ON_BLOCK_EXIT(
- [&] { serverGlobalParams.enableMajorityReadConcern = originalEnableMajorityReadConcern; });
-
- init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(BSON("_id"
- << "mySet"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234")
- << BSON("_id" << 2 << "host"
- << "test3:1234"))),
- HostAndPort("test2", 1234));
-
- ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
-
- // Initially the stable timestamp and commit point are unset.
- ASSERT_EQUALS(Timestamp::min(), getStorageInterface()->getStableTimestamp());
- ASSERT_EQUALS(OpTime(), getReplCoord()->getLastCommittedOpTime());
-
- // Advance the stable timestamp a bit. In this test we simulate a case where timestamp (1,3) is
- // getting rolled back and timestamp (1,2) is the rollback common point. Note that when
- // EMRC=false, the stable timestamp is always advanced to the newest all_durable timestamp,
- // i.e. it is not required to stay behind the majority commit point.
- getStorageInterface()->allDurableTimestamp = Timestamp(1, 3);
- replCoordSetMyLastAppliedOpTime(OpTime({1, 1}, 1), Date_t() + Seconds(1));
- replCoordSetMyLastAppliedOpTime(OpTime({1, 2}, 1), Date_t() + Seconds(2));
- replCoordSetMyLastAppliedOpTime(OpTime({1, 3}, 1), Date_t() + Seconds(3));
- ASSERT_EQUALS(Timestamp(1, 3), getStorageInterface()->getStableTimestamp());
-
- // We must take the RSTL in mode X before transitioning to RS_ROLLBACK.
- const auto opCtx = makeOperationContext();
- ReplicationStateTransitionLockGuard transitionGuard(opCtx.get(), MODE_X);
- ASSERT_OK(getReplCoord()->setFollowerModeRollback(opCtx.get()));
-
- // It is possible that rollback-via-refetch forces the stable timestamp backwards to the common
- // point at the end of rollback.
- getStorageInterface()->setStableTimestamp(getServiceContext(), Timestamp(1, 2));
-
- // Normally, when not in ROLLBACK state, we will update the stable timestamp whenever we hear
- // about a new commit point. We want to verify that in ROLLBACK state, however, the stable
- // timestamp is not altered when learning of a new commit point. The particular value of the
- // commit point isn't important, since it doesn't affect the calculation of the stable timestamp
- // when EMRC=false. We just want it to be newer than our currently known commit point.
- OpTimeAndWallTime commitPoint = makeOpTimeAndWallTime(OpTime({1, 1}, 1), Date_t() + Seconds(1));
- replCoordAdvanceCommitPoint(commitPoint, false);
-
- // Make sure the stable timestamp did not move.
- ASSERT_EQUALS(Timestamp(1, 2), getStorageInterface()->getStableTimestamp());
-}
-
TEST_F(ReplCoordTest, NodeReturnsShutdownInProgressWhenWaitingUntilAnOpTimeDuringShutdown) {
assertStartSuccess(BSON("_id"
<< "mySet"
@@ -6036,7 +5943,7 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredEqualOpTime) {
}
-TEST_F(ReplCoordTest, WaitUntilOpTimeforReadRejectsUnsupportedMajorityReadConcern) {
+TEST_F(ReplCoordTest, WaitUntilOpTimeforReadReturnsImmediatelyForMajorityReadConcern) {
assertStartSuccess(BSON("_id"
<< "mySet"
<< "version" << 2 << "members"
@@ -6050,18 +5957,6 @@ TEST_F(ReplCoordTest, WaitUntilOpTimeforReadRejectsUnsupportedMajorityReadConcer
auto rcArgs = ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern);
auto status = getReplCoord()->waitUntilOpTimeForRead(opCtx.get(), rcArgs);
ASSERT_OK(status);
-
- // Simulate disabling storage support for majority reads.
- disableReadConcernMajoritySupport();
- rcArgs = ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern);
- status = getReplCoord()->waitUntilOpTimeForRead(opCtx.get(), rcArgs);
- ASSERT_EQ(status, ErrorCodes::ReadConcernMajorityNotEnabled);
-
- // Even without storage engine support, speculative majority reads should be allowed.
- rcArgs = ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern);
- rcArgs.setMajorityReadMechanism(ReadConcernArgs::MajorityReadMechanism::kSpeculative);
- status = getReplCoord()->waitUntilOpTimeForRead(opCtx.get(), rcArgs);
- ASSERT_OK(status);
}
TEST_F(ReplCoordTest, DoNotIgnoreTheContentsOfMetadataWhenItsConfigVersionDoesNotMatchOurs) {
diff --git a/src/mongo/db/repl/replication_recovery_test.cpp b/src/mongo/db/repl/replication_recovery_test.cpp
index c8fdbfb4154..3b13fae1c6d 100644
--- a/src/mongo/db/repl/replication_recovery_test.cpp
+++ b/src/mongo/db/repl/replication_recovery_test.cpp
@@ -1078,41 +1078,6 @@ TEST_F(ReplicationRecoveryTest, AbortTransactionOplogEntryCorrectlyUpdatesConfig
ASSERT_EQ(getConsistencyMarkers()->getAppliedThrough(opCtx), OpTime(Timestamp(3, 0), 1));
}
-DEATH_TEST_REGEX_F(ReplicationRecoveryTest,
- RecoveryFailsWithPrepareAndEnableReadConcernMajorityFalse,
- "Fatal assertion.*51146") {
- ReplicationRecoveryImpl recovery(getStorageInterface(), getConsistencyMarkers());
- auto opCtx = getOperationContext();
-
- const auto appliedThrough = OpTime(Timestamp(1, 1), 1);
- getStorageInterfaceRecovery()->setSupportsRecoverToStableTimestamp(true);
- getStorageInterfaceRecovery()->setRecoveryTimestamp(appliedThrough.getTimestamp());
- getConsistencyMarkers()->setAppliedThrough(opCtx, appliedThrough);
- _setUpOplog(opCtx, getStorageInterface(), {1});
-
- const auto sessionId = makeLogicalSessionIdForTest();
- OperationSessionInfo sessionInfo;
- sessionInfo.setSessionId(sessionId);
- sessionInfo.setTxnNumber(3);
-
- const auto lastDate = Date_t::now();
- const auto prepareOp =
- _makeTransactionOplogEntry({Timestamp(2, 0), 1},
- repl::OpTypeEnum::kCommand,
- BSON("applyOps" << BSONArray() << "prepare" << true),
- OpTime(Timestamp(0, 0), -1),
- 0,
- sessionInfo,
- lastDate);
-
- ASSERT_OK(getStorageInterface()->insertDocument(
- opCtx, oplogNs, {prepareOp.getEntry().toBSON(), Timestamp(2, 0)}, 1));
-
- serverGlobalParams.enableMajorityReadConcern = false;
-
- recovery.recoverFromOplog(opCtx, boost::none);
-}
-
TEST_F(ReplicationRecoveryTest, CommitTransactionOplogEntryCorrectlyUpdatesConfigTransactions) {
ReplicationRecoveryImpl recovery(getStorageInterface(), getConsistencyMarkers());
auto opCtx = getOperationContext();