author     Judah Schvimer <judah@mongodb.com>  2020-06-08 23:20:37 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-07-09 22:39:43 +0000
commit     64fcdabe8c7cc38188f31fd606379f935c94555a (patch)
tree       8dbc2fc938b52f9c7f410abff410d30beaeaeceb
parent     915402884c52da861b1660cd6a7172c552ce1806 (diff)
download   mongo-64fcdabe8c7cc38188f31fd606379f935c94555a.tar.gz
SERVER-46541 enable automatic reconfigs for initial sync semantics by default
-rw-r--r--  buildscripts/resmokeconfig/suites/replica_sets_auth.yml  2
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/replicaset.py  92
-rw-r--r--  jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js  12
-rw-r--r--  jstests/replsets/atlas_initialsync_workflows.js  3
-rw-r--r--  jstests/replsets/auth1.js  4
-rw-r--r--  jstests/replsets/auto_reconfig_remove_newly_added_and_stepup.js  14
-rw-r--r--  jstests/replsets/catchup.js  2
-rw-r--r--  jstests/replsets/catchup_takeover_one_high_priority.js  4
-rw-r--r--  jstests/replsets/currentOp_during_automatic_reconfig.js  14
-rw-r--r--  jstests/replsets/disallow_adding_initialized_node1.js  77
-rw-r--r--  jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js  4
-rw-r--r--  jstests/replsets/dont_read_oplog_hole_on_step_up.js  10
-rw-r--r--  jstests/replsets/election_candidate_and_participant_metrics.js  2
-rw-r--r--  jstests/replsets/force_reconfig_sets_newly_added_field_correctly.js  10
-rw-r--r--  jstests/replsets/initial_sync1.js  28
-rw-r--r--  jstests/replsets/initial_sync_fcv_downgrade.js  8
-rw-r--r--  jstests/replsets/initial_sync_fcv_upgrade.js  7
-rw-r--r--  jstests/replsets/initial_sync_nodes_contribute_to_liveness_majorities.js  9
-rw-r--r--  jstests/replsets/libs/rename_across_dbs.js  1
-rw-r--r--  jstests/replsets/newly_added_member_id_vs_index.js  7
-rw-r--r--  jstests/replsets/newly_added_two_nodes_simultaneous.js  15
-rw-r--r--  jstests/replsets/newly_added_user_reconfig_while_exiting_initial_sync.js  6
-rw-r--r--  jstests/replsets/newly_added_with_user_reconfig.js  16
-rw-r--r--  jstests/replsets/no_progress_updates_during_initial_sync.js  9
-rw-r--r--  jstests/replsets/reconfig_avoids_diverging_configs.js  4
-rw-r--r--  jstests/replsets/reconfig_waits_for_oplog_commitment_condition.js  12
-rw-r--r--  jstests/replsets/reconfig_waits_for_oplog_commitment_condition_when_leaving_force.js  2
-rw-r--r--  jstests/replsets/remove1.js  12
-rw-r--r--  jstests/replsets/remove_newly_added_field_after_finishing_initial_sync.js  25
-rw-r--r--  jstests/replsets/remove_newly_added_field_votes_zero.js  9
-rw-r--r--  jstests/replsets/remove_newly_added_member_index_swap_concurrent.js  15
-rw-r--r--  jstests/replsets/rslib.js  134
-rw-r--r--  jstests/replsets/test_only_repl_commands.js  42
-rw-r--r--  jstests/sharding/refine_collection_shard_key_abort_on_stepup.js  2
-rw-r--r--  jstests/ssl/repl_ssl_split_horizon.js  5
-rw-r--r--  src/mongo/db/repl/member_config_test.cpp  40
-rw-r--r--  src/mongo/db/repl/repl_server_parameters.idl  2
-rw-r--r--  src/mongo/db/repl/repl_set_commands.cpp  18
-rw-r--r--  src/mongo/db/repl/repl_set_config_checks_test.cpp  54
-rw-r--r--  src/mongo/db/repl/repl_set_config_test.cpp  35
-rw-r--r--  src/mongo/db/repl/replication_coordinator.h  6
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp  9
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.h  3
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp  113
-rw-r--r--  src/mongo/db/repl/replication_coordinator_mock.cpp  3
-rw-r--r--  src/mongo/db/repl/replication_coordinator_mock.h  4
-rw-r--r--  src/mongo/db/repl/replication_coordinator_noop.cpp  3
-rw-r--r--  src/mongo/db/repl/replication_coordinator_noop.h  4
-rw-r--r--  src/mongo/db/repl/replication_coordinator_test_fixture.h  4
-rw-r--r--  src/mongo/embedded/replication_coordinator_embedded.cpp  3
-rw-r--r--  src/mongo/embedded/replication_coordinator_embedded.h  4
-rw-r--r--  src/mongo/shell/assert.js  5
-rw-r--r--  src/mongo/shell/replsettest.js  197
53 files changed, 603 insertions, 522 deletions
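
At a high level, this change makes the automatic reconfigs that add and later remove a 'newlyAdded' field on newly added voting members the default behavior, so tests stop passing the enableAutomaticReconfig server parameter and instead wait explicitly for the removal reconfigs. The snippet below is a minimal illustrative sketch, not part of this patch, of the pattern the updated tests follow, using the ReplSetTest helpers touched here.

const rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();

// Adding a voting member triggers the automatic reconfig cycle: the member joins with
// 'newlyAdded' set and a later reconfig drops the field once initial sync finishes.
rst.add({rsConfig: {priority: 0}});
rst.reInitiate();

// Helpers touched by this patch: block until every 'newlyAdded' field has been removed
// and the resulting config has replicated to all nodes.
rst.waitForAllNewlyAddedRemovals();
rst.waitForConfigReplication(rst.getPrimary());

rst.stopSet();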
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_auth.yml b/buildscripts/resmokeconfig/suites/replica_sets_auth.yml
index 904256d3302..c1b2375e461 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_auth.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_auth.yml
@@ -15,6 +15,8 @@ selector:
- jstests/replsets/interrupted_batch_insert.js
- jstests/replsets/transactions_reaped_with_tickets_exhausted.js
- jstests/replsets/transactions_committed_with_tickets_exhausted.js
+ # This test disables test commands which is incompatible with this suite.
+ - jstests/replsets/test_only_repl_commands.js
executor:
config:
diff --git a/buildscripts/resmokelib/testing/fixtures/replicaset.py b/buildscripts/resmokelib/testing/fixtures/replicaset.py
index e5bd42521ee..7f9cb751e00 100644
--- a/buildscripts/resmokelib/testing/fixtures/replicaset.py
+++ b/buildscripts/resmokelib/testing/fixtures/replicaset.py
@@ -20,8 +20,12 @@ from buildscripts.resmokelib.testing.fixtures import standalone
class ReplicaSetFixture(interface.ReplFixture): # pylint: disable=too-many-instance-attributes
"""Fixture which provides JSTests with a replica set to run against."""
- # Error response codes copied from mongo/base/error_codes.err.
+ # Error response codes copied from mongo/base/error_codes.yml.
_NODE_NOT_FOUND = 74
+ _NEW_REPLICA_SET_CONFIGURATION_INCOMPATIBLE = 103
+ _CONFIGURATION_IN_PROGRESS = 109
+ _CURRENT_CONFIG_NOT_COMMITTED_YET = 308
+ _INTERRUPTED_DUE_TO_REPL_STATE_CHANGE = 11602
def __init__( # pylint: disable=too-many-arguments, too-many-locals
self, logger, job_num, mongod_options=None, dbpath_prefix=None, preserve_dbpath=False,
@@ -108,7 +112,7 @@ class ReplicaSetFixture(interface.ReplFixture): # pylint: disable=too-many-inst
self.initial_sync_node = None
self.initial_sync_node_idx = -1
- def setup(self): # pylint: disable=too-many-branches,too-many-statements
+ def setup(self): # pylint: disable=too-many-branches,too-many-statements,too-many-locals
"""Set up the replica set."""
self.replset_name = self.mongod_options.get("replSet", "rs")
if not self.nodes:
@@ -232,18 +236,44 @@ class ReplicaSetFixture(interface.ReplFixture): # pylint: disable=too-many-inst
node.await_ready()
# Add in the members one at a time, since non force reconfigs can only add/remove a
# single voting member at a time.
- repl_config["version"] = client.admin.command(
- {"replSetGetConfig": 1})['config']['version']
for ind in range(2, len(members) + 1):
- repl_config["version"] = repl_config["version"] + 1
- repl_config["members"] = members[:ind]
- self.logger.info("Issuing replSetReconfig command: %s", repl_config)
- self._configure_repl_set(
- client, {
- "replSetReconfig": repl_config,
- "maxTimeMS": self.AWAIT_REPL_TIMEOUT_MINS * 60 * 1000
- })
+ self.logger.info("Adding in node %d: %s", ind, members[ind - 1])
+ while True:
+ try:
+ # 'newlyAdded' removal reconfigs could bump the version.
+ # Get the current version to be safe.
+ curr_version = client.admin.command(
+ {"replSetGetConfig": 1})['config']['version']
+ repl_config["version"] = curr_version + 1
+ repl_config["members"] = members[:ind]
+ self.logger.info("Issuing replSetReconfig command: %s", repl_config)
+ self._configure_repl_set(
+ client, {
+ "replSetReconfig": repl_config,
+ "maxTimeMS": self.AWAIT_REPL_TIMEOUT_MINS * 60 * 1000
+ })
+ break
+ except pymongo.errors.OperationFailure as err:
+ # These error codes may be transient, and so we retry the reconfig with a
+ # (potentially) higher config version. We should not receive these codes
+ # indefinitely.
+ if (err.code !=
+ ReplicaSetFixture._NEW_REPLICA_SET_CONFIGURATION_INCOMPATIBLE
+ and err.code != ReplicaSetFixture._CURRENT_CONFIG_NOT_COMMITTED_YET
+ and err.code != ReplicaSetFixture._CONFIGURATION_IN_PROGRESS
+ and err.code != ReplicaSetFixture._NODE_NOT_FOUND and err.code !=
+ ReplicaSetFixture._INTERRUPTED_DUE_TO_REPL_STATE_CHANGE):
+ msg = ("Operation failure while setting up the "
+ "replica set fixture: {}").format(err)
+ self.logger.error(msg)
+ raise errors.ServerFailure(msg)
+
+ msg = ("Retrying failed attempt to add new node to fixture: {}").format(err)
+ self.logger.error(msg)
+ time.sleep(0.1) # Wait a little bit before trying again.
+
self._await_secondaries()
+ self._await_newly_added_removals()
def pids(self):
""":return: all pids owned by this fixture if any."""
@@ -407,6 +437,44 @@ class ReplicaSetFixture(interface.ReplFixture): # pylint: disable=too-many-inst
break
time.sleep(0.1) # Wait a little bit before trying again.
+ def _should_await_newly_added_removals_longer(self, client):
+ """
+ Return whether the current replica set config has any 'newlyAdded' fields.
+
+ Return true if the current config is not committed.
+ """
+
+ get_config_res = client.admin.command(
+ {"replSetGetConfig": 1, "commitmentStatus": True, "$_internalIncludeNewlyAdded": True})
+ for member in get_config_res["config"]["members"]:
+ if "newlyAdded" in member:
+ self.logger.info(
+ "Waiting longer for 'newlyAdded' removals, " +
+ "member %d is still 'newlyAdded'", member["_id"])
+ return True
+ if not get_config_res["commitmentStatus"]:
+ self.logger.info("Waiting longer for 'newlyAdded' removals, " +
+ "config is not yet committed")
+ return True
+
+ return False
+
+ def _await_newly_added_removals(self):
+ """
+ Wait for all 'newlyAdded' fields to be removed from the replica set config.
+
+ Additionally, wait for that config to be committed, and for the in-memory
+ and on-disk configs to match.
+ """
+
+ self.logger.info("Waiting to remove all 'newlyAdded' fields")
+ primary = self.get_primary()
+ client = primary.mongo_client()
+ self.auth(client, self.auth_options)
+ while self._should_await_newly_added_removals_longer(client):
+ time.sleep(0.1) # Wait a little bit before trying again.
+ self.logger.info("All 'newlyAdded' fields removed")
+
def _setup_cwrwc_defaults(self):
"""Set up the cluster-wide read/write concern defaults."""
if self.default_read_concern is None and self.default_write_concern is None:
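
The fixture's new wait loop polls replSetGetConfig with the commitmentStatus and $_internalIncludeNewlyAdded options until no member carries 'newlyAdded' and the current config is committed. As a rough shell-side equivalent of the same condition (an illustrative sketch only, assuming a running ReplSetTest named 'rst'):

const primary = rst.getPrimary();
assert.soon(function() {
    const res = assert.commandWorked(primary.adminCommand(
        {replSetGetConfig: 1, commitmentStatus: true, $_internalIncludeNewlyAdded: true}));
    const anyNewlyAdded = res.config.members.some((m) => m.hasOwnProperty("newlyAdded"));
    // Done once no member is 'newlyAdded' and the current config has been committed.
    return !anyNewlyAdded && res.commitmentStatus;
});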
diff --git a/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js b/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
index 98b9641ed55..a861aa00aff 100644
--- a/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
+++ b/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
@@ -45,11 +45,13 @@ if (storageEngine !== "wiredTiger") {
conf.members[1].slaveDelay = 24 * 60 * 60;
rst.startSet();
- // We cannot wait for a stable recovery timestamp or replication due to the slaveDelay.
- rst.initiateWithAnyNodeAsPrimary(
- conf,
- "replSetInitiate",
- {doNotWaitForStableRecoveryTimestamp: true, doNotWaitForReplication: true});
+ // We cannot wait for a stable recovery timestamp, oplog replication, or config replication due
+ // to the slaveDelay.
+ rst.initiateWithAnyNodeAsPrimary(conf, "replSetInitiate", {
+ doNotWaitForStableRecoveryTimestamp: true,
+ doNotWaitForReplication: true,
+ doNotWaitForNewlyAddedRemovals: true
+ });
var master = rst.getPrimary(); // Waits for PRIMARY state.
// Reconfigure primary with a small cache size so less data needs to be
diff --git a/jstests/replsets/atlas_initialsync_workflows.js b/jstests/replsets/atlas_initialsync_workflows.js
index afecb7ee47e..1d960b2ccef 100644
--- a/jstests/replsets/atlas_initialsync_workflows.js
+++ b/jstests/replsets/atlas_initialsync_workflows.js
@@ -96,6 +96,9 @@ function testAddWithInitialSync(secondariesDown) {
}
function testReplaceWithInitialSync(secondariesDown) {
+ // Wait for existing reconfigs to complete.
+ rst.waitForAllNewlyAddedRemovals();
+
const nodeToBeReplaced = rst.getSecondaries()[2];
secondariesDown = secondariesDown || 0;
const useForce = secondariesDown > 1;
diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js
index 4d17af8aeac..b931524352e 100644
--- a/jstests/replsets/auth1.js
+++ b/jstests/replsets/auth1.js
@@ -203,7 +203,9 @@ assert.soon(function() {
rs.nodes[i].setSlaveOk();
rs.nodes[i].getDB("admin").auth("foo", "bar");
config = rs.nodes[i].getDB("local").system.replset.findOne();
- if (config.version != 2) {
+ // We expect the config version to be 3 due to the initial config and then the
+ // 'newlyAdded' removal reconfig.
+ if (config.version !== 3) {
return false;
}
}
diff --git a/jstests/replsets/auto_reconfig_remove_newly_added_and_stepup.js b/jstests/replsets/auto_reconfig_remove_newly_added_and_stepup.js
index e5c6a90c966..6760153ae7c 100644
--- a/jstests/replsets/auto_reconfig_remove_newly_added_and_stepup.js
+++ b/jstests/replsets/auto_reconfig_remove_newly_added_and_stepup.js
@@ -18,13 +18,8 @@ const testName = jsTestName();
const dbName = "testdb";
const collName = "testcoll";
-const rst = new ReplSetTest({
- name: testName,
- nodes: 1,
- nodeOptions: {setParameter: {enableAutomaticReconfig: true}},
- settings: {chainingAllowed: false},
- useBridge: true
-});
+const rst = new ReplSetTest(
+ {name: testName, nodes: 1, settings: {chainingAllowed: false}, useBridge: true});
rst.startSet();
rst.initiateWithHighElectionTimeout();
@@ -32,10 +27,6 @@ const primary = rst.getPrimary();
const primaryDb = primary.getDB(dbName);
const primaryColl = primaryDb.getCollection(collName);
-// TODO (SERVER-46808): Move this into ReplSetTest.initiate
-waitForNewlyAddedRemovalForNodeToBeCommitted(primary, 0);
-waitForConfigReplication(primary, rst.nodes);
-
assert.commandWorked(primaryColl.insert({"starting": "doc"}));
let hangBeforeNewlyAddedRemovalFP = configureFailPoint(primaryDb, "hangDuringAutomaticReconfig");
@@ -44,7 +35,6 @@ const secondary = rst.add({
rsConfig: {priority: 0},
setParameter: {
'numInitialSyncAttempts': 1,
- 'enableAutomaticReconfig': true,
}
});
rst.reInitiate();
diff --git a/jstests/replsets/catchup.js b/jstests/replsets/catchup.js
index 7425fa0b52e..5bcf7edd220 100644
--- a/jstests/replsets/catchup.js
+++ b/jstests/replsets/catchup.js
@@ -47,7 +47,7 @@ function reconfigElectionAndCatchUpTimeout(electionTimeout, catchupTimeout) {
// Reconnect all nodes to make sure reconfig succeeds.
rst.nodes.forEach(reconnect);
// Wait for the config with the new term to propagate.
- waitForConfigReplication(rst.getPrimary());
+ rst.waitForConfigReplication(rst.getPrimary());
// Reconfigure replica set to decrease catchup timeout.
var newConfig = rst.getReplSetConfigFromNode();
newConfig.version++;
diff --git a/jstests/replsets/catchup_takeover_one_high_priority.js b/jstests/replsets/catchup_takeover_one_high_priority.js
index 738312d1f58..2a1b0d249e2 100644
--- a/jstests/replsets/catchup_takeover_one_high_priority.js
+++ b/jstests/replsets/catchup_takeover_one_high_priority.js
@@ -37,7 +37,7 @@ replSet.waitForState(2, ReplSetTest.State.PRIMARY, replSet.kDefaultTimeoutMS);
jsTestLog('node 2 is now primary');
replSet.awaitReplication();
-waitForConfigReplication(nodes[2]);
+replSet.waitForConfigReplication(nodes[2]);
// Stop replication and disconnect node 2 so that it cannot do a priority takeover.
stopServerReplication(nodes[2]);
@@ -71,7 +71,7 @@ nodes[2].reconnect(nodes[0]);
nodes[2].reconnect(nodes[1]);
// Wait until nodes have learned the latest config.
-waitForConfigReplication(nodes[1], [nodes[1], nodes[2]]);
+replSet.waitForConfigReplication(nodes[1], [nodes[1], nodes[2]]);
// Step up a lagged node.
assert.commandWorked(nodes[1].adminCommand({replSetStepUp: 1}));
diff --git a/jstests/replsets/currentOp_during_automatic_reconfig.js b/jstests/replsets/currentOp_during_automatic_reconfig.js
index 846ab3d0f50..df107eca587 100644
--- a/jstests/replsets/currentOp_during_automatic_reconfig.js
+++ b/jstests/replsets/currentOp_during_automatic_reconfig.js
@@ -16,13 +16,8 @@ const testName = jsTestName();
const dbName = "testdb";
const collName = "testcoll";
-const rst = new ReplSetTest({
- name: testName,
- nodes: [{}],
- nodeOptions: {setParameter: {enableAutomaticReconfig: true}},
- settings: {chainingAllowed: false},
- useBridge: true
-});
+const rst = new ReplSetTest(
+ {name: testName, nodes: [{}], settings: {chainingAllowed: false}, useBridge: true});
rst.startSet();
rst.initiateWithHighElectionTimeout();
@@ -30,10 +25,6 @@ const primary = rst.getPrimary();
const primaryDb = primary.getDB(dbName);
const primaryColl = primaryDb.getCollection(collName);
-// TODO (SERVER-46808): Move this into ReplSetTest.initiate
-waitForNewlyAddedRemovalForNodeToBeCommitted(primary, 0);
-waitForConfigReplication(primary, rst.nodes);
-
assert.commandWorked(primaryColl.insert({"starting": "doc"}));
jsTestLog("Adding a new node to the replica set");
@@ -42,7 +33,6 @@ const secondary = rst.add({
setParameter: {
'failpoint.initialSyncHangBeforeFinish': tojson({mode: 'alwaysOn'}),
'numInitialSyncAttempts': 1,
- 'enableAutomaticReconfig': true,
}
});
rst.reInitiate();
diff --git a/jstests/replsets/disallow_adding_initialized_node1.js b/jstests/replsets/disallow_adding_initialized_node1.js
index 7034805b480..bc6d3ed764a 100644
--- a/jstests/replsets/disallow_adding_initialized_node1.js
+++ b/jstests/replsets/disallow_adding_initialized_node1.js
@@ -28,6 +28,8 @@ replSetB.initiate();
var primaryA = replSetA.getPrimary();
var primaryB = replSetB.getPrimary();
+assert.commandWorked(primaryA.getDB('foo').bar.insert({a: 1}));
+assert.commandWorked(primaryB.getDB('foo').bar.insert({b: 1}));
jsTestLog('Before merging: primary A = ' + primaryA.host + '; primary B = ' + primaryB.host);
var configA = assert.commandWorked(primaryA.adminCommand({replSetGetConfig: 1})).config;
@@ -43,9 +45,57 @@ assert.neq(configA.settings.replicaSetId, configB.settings.replicaSetId);
configA.version++;
assert.commandWorked(primaryA.adminCommand({replSetReconfig: configA}));
-jsTestLog("Adding replica set B's primary " + primaryB.host + " to replica set A's config");
+// We add B's primary with 0 votes so no 'newlyAdded' field is added, so a user initiated reconfig
+// to give it 1 vote will fail, which is what we'd like to test. Since B's primary has 0 votes,
+// it is not considered part of the reconfig quorum and does not block the reconfig from succeeding.
+jsTestLog("Adding replica set B's primary " + primaryB.host +
+ " to replica set A's config with 0 votes");
configA.version++;
-configA.members.push({_id: 11, host: primaryB.host});
+configA.members.push({_id: 11, host: primaryB.host, votes: 0, priority: 0});
+assert.commandWorked(primaryA.adminCommand({replSetReconfig: configA}));
+
+// Wait for primary A to report primary B down. B should reject all heartbeats from A due to a
+// replset name mismatch, leading A to consider it down.
+assert.soon(function() {
+ const statusA = assert.commandWorked(primaryA.adminCommand({replSetGetStatus: 1}));
+ if (statusA.members.length !== 2) {
+ return false;
+ }
+ return statusA.members[1].state === ReplSetTest.State.DOWN;
+});
+
+// Confirm that each set still has the correct primary.
+let newPrimaryA = replSetA.getPrimary();
+let newPrimaryB = replSetB.getPrimary();
+jsTestLog('After merging with 0 votes: primary A = ' + newPrimaryA.host +
+ '; primary B = ' + newPrimaryB.host);
+assert.eq(primaryA, newPrimaryA);
+assert.eq(primaryB, newPrimaryB);
+
+// Replica set A's config should include primary B and consider it DOWN.
+let statusA = assert.commandWorked(primaryA.adminCommand({replSetGetStatus: 1}));
+jsTestLog('After merging with 0 votes: replica set status A = ' + tojson(statusA));
+assert.eq(2, statusA.members.length);
+assert.eq(10, statusA.members[0]._id);
+assert.eq(primaryA.host, statusA.members[0].name);
+assert.eq(ReplSetTest.State.PRIMARY, statusA.members[0].state);
+assert.eq(11, statusA.members[1]._id);
+assert.eq(primaryB.host, statusA.members[1].name);
+assert.eq(ReplSetTest.State.DOWN, statusA.members[1].state);
+
+// Replica set B's config should remain unchanged.
+let statusB = assert.commandWorked(primaryB.adminCommand({replSetGetStatus: 1}));
+jsTestLog('After merging with 0 votes: replica set status B = ' + tojson(statusB));
+assert.eq(1, statusB.members.length);
+assert.eq(20, statusB.members[0]._id);
+assert.eq(primaryB.host, statusB.members[0].name);
+assert.eq(ReplSetTest.State.PRIMARY, statusB.members[0].state);
+
+// This reconfig should fail since B's primary is now part of the reconfig quorum and should reject
+// it.
+jsTestLog("Giving replica set B's primary " + primaryB.host + " 1 vote in replica set A's config");
+configA.version++;
+configA.members[1].votes = 1;
var reconfigResult =
assert.commandFailedWithCode(primaryA.adminCommand({replSetReconfig: configA}),
ErrorCodes.NewReplicaSetConfigurationIncompatible);
@@ -54,8 +104,8 @@ var msgA = 'Our replica set ID did not match that of our request target, replSet
', requestTargetReplSetId: ' + configB.settings.replicaSetId;
assert.neq(-1, reconfigResult.errmsg.indexOf(msgA));
-var newPrimaryA = replSetA.getPrimary();
-var newPrimaryB = replSetB.getPrimary();
+newPrimaryA = replSetA.getPrimary();
+newPrimaryB = replSetB.getPrimary();
jsTestLog('After merging: primary A = ' + newPrimaryA.host + '; primary B = ' + newPrimaryB.host);
assert.eq(primaryA, newPrimaryA);
assert.eq(primaryB, newPrimaryB);
@@ -65,23 +115,30 @@ var msgB = "replica set IDs do not match, ours: " + configB.settings.replicaSetI
"; remote node's: " + configA.settings.replicaSetId;
checkLog.contains(primaryB, msgB);
-var statusA = assert.commandWorked(primaryA.adminCommand({replSetGetStatus: 1}));
-var statusB = assert.commandWorked(primaryB.adminCommand({replSetGetStatus: 1}));
+// Confirm primary B is still DOWN.
+statusA = assert.commandWorked(primaryA.adminCommand({replSetGetStatus: 1}));
jsTestLog('After merging: replica set status A = ' + tojson(statusA));
-jsTestLog('After merging: replica set status B = ' + tojson(statusB));
-
-// Replica set A's config should remain unchanged due to failed replSetReconfig command.
-assert.eq(1, statusA.members.length);
+assert.eq(2, statusA.members.length);
assert.eq(10, statusA.members[0]._id);
assert.eq(primaryA.host, statusA.members[0].name);
assert.eq(ReplSetTest.State.PRIMARY, statusA.members[0].state);
+assert.eq(11, statusA.members[1]._id);
+assert.eq(primaryB.host, statusA.members[1].name);
+assert.eq(ReplSetTest.State.DOWN, statusA.members[1].state);
// Replica set B's config should remain unchanged.
+statusB = assert.commandWorked(primaryB.adminCommand({replSetGetStatus: 1}));
+jsTestLog('After merging: replica set status B = ' + tojson(statusB));
assert.eq(1, statusB.members.length);
assert.eq(20, statusB.members[0]._id);
assert.eq(primaryB.host, statusB.members[0].name);
assert.eq(ReplSetTest.State.PRIMARY, statusB.members[0].state);
+assert.eq(1, primaryA.getDB('foo').bar.find({a: 1}).itcount());
+assert.eq(0, primaryA.getDB('foo').bar.find({b: 1}).itcount());
+assert.eq(0, primaryB.getDB('foo').bar.find({a: 1}).itcount());
+assert.eq(1, primaryB.getDB('foo').bar.find({b: 1}).itcount());
+
replSetB.stopSet();
replSetA.stopSet();
})();
diff --git a/jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js b/jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js
index 94bbfb9a356..4b296ea4db7 100644
--- a/jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js
+++ b/jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js
@@ -38,7 +38,7 @@ jsTest.log("Node A is primary in term 1. Node E is delayed.");
// D: [1]
// E:
assert.eq(nodeA, rst.getPrimary());
-waitForConfigReplication(nodeA);
+rst.waitForConfigReplication(nodeA);
nodeE.disconnect([nodeA, nodeB, nodeC, nodeD]);
assert.commandWorked(nodeA.getDB(dbName)[collName].insert({term: 1}));
rst.awaitReplication(undefined, undefined, [nodeB, nodeC, nodeD]);
@@ -54,7 +54,7 @@ assert.commandWorked(nodeB.adminCommand({replSetStepUp: 1}));
rst.waitForState(nodeA, ReplSetTest.State.SECONDARY);
assert.eq(nodeB, rst.getPrimary());
assert.commandWorked(nodeB.getDB(dbName)[collName].insert({term: 2}));
-waitForConfigReplication(nodeB, [nodeA, nodeB, nodeC, nodeD]);
+rst.waitForConfigReplication(nodeB, [nodeA, nodeB, nodeC, nodeD]);
jsTest.log("Node A steps up again in term 3 with votes from A, C, and D and commits a write.");
// A: [1] [3]
diff --git a/jstests/replsets/dont_read_oplog_hole_on_step_up.js b/jstests/replsets/dont_read_oplog_hole_on_step_up.js
index 4d42eb3c299..84562213bbe 100644
--- a/jstests/replsets/dont_read_oplog_hole_on_step_up.js
+++ b/jstests/replsets/dont_read_oplog_hole_on_step_up.js
@@ -21,13 +21,17 @@ var rst = new ReplSetTest({
nodes: [
{},
{},
- {rsConfig: {priority: 0}},
],
});
const nodes = rst.startSet();
+// Initiate in two steps so that the first two nodes finish initial sync before the third begins
+// its initial sync. This prevents the long getMore timeout from causing the first initial sync to
+// take so much time that the second cannot succeed.
+rst.initiate();
+
const oldPrimary = nodes[0];
const newPrimary = nodes[1];
-const secondary = nodes[2];
+const secondary = rst.add({rsConfig: {priority: 0}});
// Make sure this secondary syncs only from the node bound to be the new primary.
assert.commandWorked(secondary.adminCommand({
@@ -35,7 +39,7 @@ assert.commandWorked(secondary.adminCommand({
mode: "alwaysOn",
data: {hostAndPort: newPrimary.host}
}));
-rst.initiate();
+rst.reInitiate();
// Make sure when the original primary syncs, it's only from the secondary; this avoids spurious log
// messages.
diff --git a/jstests/replsets/election_candidate_and_participant_metrics.js b/jstests/replsets/election_candidate_and_participant_metrics.js
index cdd3499b8dd..20c75437c61 100644
--- a/jstests/replsets/election_candidate_and_participant_metrics.js
+++ b/jstests/replsets/election_candidate_and_participant_metrics.js
@@ -217,7 +217,7 @@ assert.commandWorked(originalPrimary.adminCommand(
// The new primary might still be processing the reconfig via heartbeat from the original primary's
// reconfig on step up. Wait for config replication first so it doesn't interfere with the step up
// on the new primary below.
-waitForConfigReplication(originalPrimary);
+rst.waitForConfigReplication(originalPrimary);
// Attempt to step up the new primary a second time. Due to the failpoint, the current primary
// should vote no, and as a result the election should fail.
diff --git a/jstests/replsets/force_reconfig_sets_newly_added_field_correctly.js b/jstests/replsets/force_reconfig_sets_newly_added_field_correctly.js
index 6f30c1c1277..3fbe912712b 100644
--- a/jstests/replsets/force_reconfig_sets_newly_added_field_correctly.js
+++ b/jstests/replsets/force_reconfig_sets_newly_added_field_correctly.js
@@ -9,8 +9,7 @@
load("jstests/replsets/rslib.js");
load('jstests/libs/fail_point_util.js');
-const rst = new ReplSetTest(
- {name: jsTestName(), nodes: 1, nodeOptions: {setParameter: {enableAutomaticReconfig: true}}});
+const rst = new ReplSetTest({name: jsTestName(), nodes: 1});
rst.startSet();
rst.initiateWithHighElectionTimeout();
@@ -21,10 +20,6 @@ const collName = "testcoll";
const primaryDb = primary.getDB(dbName);
const primaryColl = primaryDb.getCollection(collName);
-// TODO (SERVER-46808): Move this into ReplSetTest.initiate
-waitForNewlyAddedRemovalForNodeToBeCommitted(primary, 0);
-assert.eq(false, isMemberNewlyAdded(primary, 0));
-
assert.commandWorked(primaryColl.insert({"starting": "doc"}));
assertVoteCount(primary, {
@@ -46,7 +41,6 @@ const addNode = (id, {newlyAdded, force, shouldSucceed, failureCode} = {}) => {
tojson({mode: 'alwaysOn', data: {"hostAndPort": primary.host}}),
'failpoint.initialSyncHangBeforeFinish': tojson({mode: 'alwaysOn'}),
'numInitialSyncAttempts': 1,
- 'enableAutomaticReconfig': true
}
});
@@ -76,7 +70,7 @@ const addNode = (id, {newlyAdded, force, shouldSucceed, failureCode} = {}) => {
jsTestLog("Running reconfig with valid config " + tojsononeline(config));
assert(!failureCode);
assert.commandWorked(primary.adminCommand({replSetReconfig: config, force: force}));
- waitForConfigReplication(primary, rst.nodes);
+ rst.waitForConfigReplication(primary, rst.nodes);
assert.commandWorked(newNode.adminCommand({
waitForFailPoint: "initialSyncHangBeforeFinish",
diff --git a/jstests/replsets/initial_sync1.js b/jstests/replsets/initial_sync1.js
index 0a536b4d601..c01e6ff51d7 100644
--- a/jstests/replsets/initial_sync1.js
+++ b/jstests/replsets/initial_sync1.js
@@ -11,6 +11,11 @@
* 9. Bring #2 back up
* 10. Insert some stuff
* 11. Everyone happy eventually
+ *
+ * This test assumes a 'newlyAdded' removal.
+ * @tags: [
+ * requires_fcv_46,
+ * ]
*/
load("jstests/replsets/rslib.js");
@@ -66,16 +71,19 @@ try {
}
reconnect(slave1);
reconnect(slave2);
-
-wait(function() {
- var config2 = local_s1.system.replset.findOne();
- var config3 = local_s2.system.replset.findOne();
-
- printjson(config2);
- printjson(config3);
-
- return config2.version == config.version && (config3 && config3.version == config.version);
-});
+replTest.waitForAllNewlyAddedRemovals();
+
+print("Config 1: " + tojsononeline(config));
+var config2 = local_s1.system.replset.findOne();
+print("Config 2: " + tojsononeline(config2));
+assert(config2);
+// Add one to config.version to account for the 'newlyAdded' removal.
+assert.eq(config2.version, (config.version + 1));
+
+var config3 = local_s2.system.replset.findOne();
+print("Config 3: " + tojsononeline(config3));
+assert(config3);
+assert.eq(config3.version, (config.version + 1));
replTest.waitForState(slave2, [ReplSetTest.State.SECONDARY, ReplSetTest.State.RECOVERING]);
diff --git a/jstests/replsets/initial_sync_fcv_downgrade.js b/jstests/replsets/initial_sync_fcv_downgrade.js
index c16b1a35046..4d53969765c 100644
--- a/jstests/replsets/initial_sync_fcv_downgrade.js
+++ b/jstests/replsets/initial_sync_fcv_downgrade.js
@@ -18,11 +18,7 @@ load("jstests/libs/parallel_shell_helpers.js"); // for funWithArgs()
// Start a single node replica set.
// Disable Chaining so that initial sync nodes always sync from primary.
-const rst = new ReplSetTest({
- nodes: 1,
- nodeOptions: {setParameter: {enableAutomaticReconfig: true}},
- settings: {chainingAllowed: false}
-});
+const rst = new ReplSetTest({nodes: 1, settings: {chainingAllowed: false}});
rst.startSet();
rst.initiateWithHighElectionTimeout();
@@ -60,8 +56,6 @@ function checkFCV({version, targetVersion}) {
}
function addNewVotingNode({parallel: parallel = false, startupParams: startupParams = {}}) {
- startupParams['enableAutomaticReconfig'] = true;
-
const conn = rst.add({rsConfig: {priority: 0, votes: 1}, setParameter: startupParams});
jsTestLog("Adding a new voting node {" + conn.host + "} to the replica set");
diff --git a/jstests/replsets/initial_sync_fcv_upgrade.js b/jstests/replsets/initial_sync_fcv_upgrade.js
index c251055da86..c8178d8610f 100644
--- a/jstests/replsets/initial_sync_fcv_upgrade.js
+++ b/jstests/replsets/initial_sync_fcv_upgrade.js
@@ -12,11 +12,7 @@ load('jstests/replsets/rslib.js');
// Start a single node replica set.
// Disable Chaining so that initial sync nodes always sync from primary.
-const rst = new ReplSetTest({
- nodes: 1,
- nodeOptions: {setParameter: {enableAutomaticReconfig: true}},
- settings: {chainingAllowed: false}
-});
+const rst = new ReplSetTest({nodes: 1, settings: {chainingAllowed: false}});
rst.startSet();
rst.initiateWithHighElectionTimeout();
@@ -34,7 +30,6 @@ assert.commandWorked(primary.adminCommand({clearLog: "global"}));
let startupParams = {};
startupParams['failpoint.initialSyncHangAfterDataCloning'] = tojson({mode: 'alwaysOn'});
-startupParams['enableAutomaticReconfig'] = true;
jsTestLog("Upgrade FCV to " + latestFCV);
assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
diff --git a/jstests/replsets/initial_sync_nodes_contribute_to_liveness_majorities.js b/jstests/replsets/initial_sync_nodes_contribute_to_liveness_majorities.js
index a5787e445ac..821012ad21f 100644
--- a/jstests/replsets/initial_sync_nodes_contribute_to_liveness_majorities.js
+++ b/jstests/replsets/initial_sync_nodes_contribute_to_liveness_majorities.js
@@ -28,11 +28,18 @@ const primary = rst.getPrimary();
const secondary = rst.getSecondaries()[0];
const initialSyncSecondary = rst.add({
- rsConfig: {priority: 0},
+ rsConfig: {priority: 0, votes: 0},
setParameter: {'failpoint.initialSyncHangBeforeFinish': tojson({mode: 'alwaysOn'})},
});
rst.reInitiate();
+
+// Add the new node with votes:0 and then give it votes:1 to avoid 'newlyAdded' and mimic a resync,
+// where a node is in initial sync with 1 vote.
+let nextConfig = rst.getReplSetConfigFromNode(0);
+nextConfig.members[2].votes = 1;
+reconfig(rst, nextConfig, false /* force */, true /* wait */);
+
assert.commandWorked(initialSyncSecondary.adminCommand({
waitForFailPoint: "initialSyncHangBeforeFinish",
timesEntered: 1,
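
Several of the updated tests rely on the same trick used above: add the member with votes:0 so no 'newlyAdded' field is attached, then grant it a vote with a regular reconfig to mimic a resync of a voting node. A condensed, illustrative sketch of that pattern, assuming a two-member ReplSetTest named 'rst' with rslib.js loaded for reconfig():

// With votes:0 the automatic 'newlyAdded' machinery never applies to this member.
const newNode = rst.add({rsConfig: {priority: 0, votes: 0}});
rst.reInitiate();

// Grant the vote once the node is in the config; index 2 assumes two existing members.
let nextConfig = rst.getReplSetConfigFromNode(0);
nextConfig.members[2].votes = 1;
reconfig(rst, nextConfig, false /* force */, true /* wait */);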
diff --git a/jstests/replsets/libs/rename_across_dbs.js b/jstests/replsets/libs/rename_across_dbs.js
index d32d6a11627..a602b51326f 100644
--- a/jstests/replsets/libs/rename_across_dbs.js
+++ b/jstests/replsets/libs/rename_across_dbs.js
@@ -58,6 +58,7 @@ var RenameAcrossDatabasesTest = function(options) {
for (let i = 1; i < nodes.length; ++i) {
replTest.add(nodes[i]);
}
+ replTest.waitForAllNewlyAddedRemovals();
const conns = replTest.nodes;
const hosts = replTest.nodeList();
diff --git a/jstests/replsets/newly_added_member_id_vs_index.js b/jstests/replsets/newly_added_member_id_vs_index.js
index 1e8e644307e..1464da17c78 100644
--- a/jstests/replsets/newly_added_member_id_vs_index.js
+++ b/jstests/replsets/newly_added_member_id_vs_index.js
@@ -20,7 +20,6 @@ const collName = "testcoll";
const rst = new ReplSetTest({
name: testName,
nodes: 1,
- nodeOptions: {setParameter: {enableAutomaticReconfig: true}},
settings: {chainingAllowed: false},
});
rst.startSet();
@@ -30,10 +29,6 @@ const primary = rst.getPrimary();
const primaryDb = primary.getDB(dbName);
const primaryColl = primaryDb.getCollection(collName);
-// TODO (SERVER-46808): Move this into ReplSetTest.initiate
-waitForNewlyAddedRemovalForNodeToBeCommitted(primary, 0);
-waitForConfigReplication(primary, rst.nodes);
-
assert.commandWorked(primaryColl.insert({"starting": "doc"}));
jsTestName("Adding two members with alternating memberIds and memberIndex fields");
@@ -44,7 +39,6 @@ const newNodeOne = rst.add({
setParameter: {
'failpoint.initialSyncHangBeforeFinish': tojson({mode: 'alwaysOn'}),
'numInitialSyncAttempts': 1,
- 'enableAutomaticReconfig': true,
}
});
@@ -54,7 +48,6 @@ const newNodeTwo = rst.add({
setParameter: {
'failpoint.initialSyncHangBeforeFinish': tojson({mode: 'alwaysOn'}),
'numInitialSyncAttempts': 1,
- 'enableAutomaticReconfig': true,
}
});
diff --git a/jstests/replsets/newly_added_two_nodes_simultaneous.js b/jstests/replsets/newly_added_two_nodes_simultaneous.js
index 09205b9c27f..8352d99ac99 100644
--- a/jstests/replsets/newly_added_two_nodes_simultaneous.js
+++ b/jstests/replsets/newly_added_two_nodes_simultaneous.js
@@ -16,13 +16,8 @@ const testName = jsTestName();
const dbName = "testdb";
const collName = "testcoll";
-const rst = new ReplSetTest({
- name: testName,
- nodes: 1,
- nodeOptions: {setParameter: {enableAutomaticReconfig: true}},
- settings: {chainingAllowed: false},
- useBridge: true
-});
+const rst = new ReplSetTest(
+ {name: testName, nodes: 1, settings: {chainingAllowed: false}, useBridge: true});
rst.startSet();
rst.initiateWithHighElectionTimeout();
@@ -30,10 +25,6 @@ const primary = rst.getPrimary();
const primaryDb = primary.getDB(dbName);
const primaryColl = primaryDb.getCollection(collName);
-// TODO (SERVER-46808): Move this into ReplSetTest.initiate
-waitForNewlyAddedRemovalForNodeToBeCommitted(primary, 0);
-waitForConfigReplication(primary, rst.nodes);
-
assert.commandWorked(primaryColl.insert({"starting": "doc"}));
const newNodeOne = rst.add({
@@ -41,7 +32,6 @@ const newNodeOne = rst.add({
setParameter: {
'failpoint.initialSyncHangBeforeFinish': tojson({mode: 'alwaysOn'}),
'numInitialSyncAttempts': 1,
- 'enableAutomaticReconfig': true,
}
});
@@ -50,7 +40,6 @@ const newNodeTwo = rst.add({
setParameter: {
'failpoint.initialSyncHangBeforeFinish': tojson({mode: 'alwaysOn'}),
'numInitialSyncAttempts': 1,
- 'enableAutomaticReconfig': true,
}
});
diff --git a/jstests/replsets/newly_added_user_reconfig_while_exiting_initial_sync.js b/jstests/replsets/newly_added_user_reconfig_while_exiting_initial_sync.js
index 104e25c85cf..d2b2567ed7b 100644
--- a/jstests/replsets/newly_added_user_reconfig_while_exiting_initial_sync.js
+++ b/jstests/replsets/newly_added_user_reconfig_while_exiting_initial_sync.js
@@ -20,7 +20,6 @@ const collName = "testcoll";
const rst = new ReplSetTest({
name: testName,
nodes: 1,
- nodeOptions: {setParameter: {enableAutomaticReconfig: true}},
settings: {chainingAllowed: false},
});
rst.startSet();
@@ -30,10 +29,6 @@ const primary = rst.getPrimary();
const primaryDb = primary.getDB(dbName);
const primaryColl = primaryDb.getCollection(collName);
-// TODO (SERVER-46808): Move this into ReplSetTest.initiate
-waitForNewlyAddedRemovalForNodeToBeCommitted(primary, 0);
-waitForConfigReplication(primary, rst.nodes);
-
assert.commandWorked(primaryColl.insert({"starting": "doc"}));
jsTestLog("Adding a new node to the replica set");
@@ -42,7 +37,6 @@ const secondary = rst.add({
setParameter: {
'failpoint.initialSyncHangBeforeFinish': tojson({mode: 'alwaysOn'}),
'numInitialSyncAttempts': 1,
- 'enableAutomaticReconfig': true,
}
});
rst.reInitiate();
diff --git a/jstests/replsets/newly_added_with_user_reconfig.js b/jstests/replsets/newly_added_with_user_reconfig.js
index d2d092110d4..b5530812a4d 100644
--- a/jstests/replsets/newly_added_with_user_reconfig.js
+++ b/jstests/replsets/newly_added_with_user_reconfig.js
@@ -16,13 +16,8 @@ const testName = jsTestName();
const dbName = "testdb";
const collName = "testcoll";
-const rst = new ReplSetTest({
- name: testName,
- nodes: 1,
- nodeOptions: {setParameter: {enableAutomaticReconfig: true}},
- settings: {chainingAllowed: false},
- useBridge: true
-});
+const rst = new ReplSetTest(
+ {name: testName, nodes: 1, settings: {chainingAllowed: false}, useBridge: true});
rst.startSet();
rst.initiateWithHighElectionTimeout();
@@ -30,10 +25,6 @@ const primary = rst.getPrimary();
const primaryDb = primary.getDB(dbName);
const primaryColl = primaryDb.getCollection(collName);
-// TODO (SERVER-46808): Move this into ReplSetTest.initiate
-waitForNewlyAddedRemovalForNodeToBeCommitted(primary, 0);
-waitForConfigReplication(primary, rst.nodes);
-
assert.commandWorked(primaryColl.insert({"starting": "doc"}));
jsTestLog("Adding a new node to the replica set");
@@ -42,7 +33,6 @@ const secondary = rst.add({
setParameter: {
'failpoint.initialSyncHangBeforeFinish': tojson({mode: 'alwaysOn'}),
'numInitialSyncAttempts': 1,
- 'enableAutomaticReconfig': true,
}
});
rst.reInitiate();
@@ -143,11 +133,11 @@ assertVoteCount(primary, {
// Now try removing the member we added above.
jsTestLog("[4] Member removal, after initial sync");
+rst.remove(2);
config = rst.getReplSetConfigFromNode();
const twoNodeConfig = Object.assign({}, config);
twoNodeConfig.members = twoNodeConfig.members.slice(0, 2); // Remove the last node.
reconfig(rst, twoNodeConfig);
-rst.remove(2);
// Check 'newlyAdded' and vote counts.
assert(isMemberNewlyAdded(primary, 1));
diff --git a/jstests/replsets/no_progress_updates_during_initial_sync.js b/jstests/replsets/no_progress_updates_during_initial_sync.js
index 5622f6524cd..4f2a13113d8 100644
--- a/jstests/replsets/no_progress_updates_during_initial_sync.js
+++ b/jstests/replsets/no_progress_updates_during_initial_sync.js
@@ -11,6 +11,7 @@
load("jstests/libs/write_concern_util.js");
load("jstests/libs/fail_point_util.js");
+load('jstests/replsets/rslib.js');
const testName = jsTestName();
const rst = new ReplSetTest({name: testName, nodes: [{}, {rsConfig: {priority: 0}}]});
@@ -24,7 +25,7 @@ assert.commandWorked(primaryDb.test.insert({"starting": "doc"}, {writeConcern: {
jsTestLog("Adding a new node to the replica set");
const secondary = rst.add({
- rsConfig: {priority: 0},
+ rsConfig: {priority: 0, votes: 0},
setParameter: {
'failpoint.forceSyncSourceCandidate':
tojson({mode: 'alwaysOn', data: {"hostAndPort": primary.host}}),
@@ -37,6 +38,12 @@ const secondary = rst.add({
rst.reInitiate();
rst.waitForState(secondary, ReplSetTest.State.STARTUP_2);
+// Add the new node with votes:0 and then give it votes:1 to avoid 'newlyAdded' and mimic a resync,
+// where a node is in initial sync with 1 vote.
+let nextConfig = rst.getReplSetConfigFromNode(0);
+nextConfig.members[2].votes = 1;
+reconfig(rst, nextConfig, false /* force */, true /* wait */);
+
// Shut down the steady-state secondary so that it cannot participate in the majority.
rst.stop(1);
diff --git a/jstests/replsets/reconfig_avoids_diverging_configs.js b/jstests/replsets/reconfig_avoids_diverging_configs.js
index 0b0522d0be4..c4cb9aa31a7 100644
--- a/jstests/replsets/reconfig_avoids_diverging_configs.js
+++ b/jstests/replsets/reconfig_avoids_diverging_configs.js
@@ -41,7 +41,7 @@ C1.members = C1.members.slice(0, 3); // Remove the last node.
// Increase the C1 version by a high number to ensure the following config
// C2 will win the propagation by having a higher term.
C1.version = C1.version + 1000;
-waitForConfigReplication(node0);
+rst.waitForConfigReplication(node0);
rst.awaitReplication();
jsTestLog("Disconnecting the primary from other nodes");
@@ -78,7 +78,7 @@ node0.reconnect([node1, node2, node3]);
// step down from being primary. The reconfig command issued to this node, C1, will fail.
rst.waitForState(node0, ReplSetTest.State.SECONDARY);
rst.awaitNodesAgreeOnPrimary(rst.kDefaultTimeoutMS, [node0, node1, node3]);
-waitForConfigReplication(node1);
+rst.waitForConfigReplication(node1);
assert.eq(C2, rst.getReplSetConfigFromNode());
// The new config is now {node0, node1, node2, node3}.
diff --git a/jstests/replsets/reconfig_waits_for_oplog_commitment_condition.js b/jstests/replsets/reconfig_waits_for_oplog_commitment_condition.js
index 3079b0d1c37..a28ed9c1501 100644
--- a/jstests/replsets/reconfig_waits_for_oplog_commitment_condition.js
+++ b/jstests/replsets/reconfig_waits_for_oplog_commitment_condition.js
@@ -57,7 +57,7 @@ C2.version = C1.version + 1;
// {n0, n1}
let C3 = Object.assign({}, origConfig);
-C3.version = C2.version + 1;
+C3.version = C2.version + 2; // Leave one for the 'newlyAdded' automatic reconfig
jsTestLog("Do a write on primary and commit it in the current config.");
assert.commandWorked(coll.insert({x: 1}, {writeConcern: {w: "majority"}}));
@@ -66,11 +66,17 @@ jsTestLog("Reconfig to add the secondary back in.");
// We expect this to succeed but the last committed op from C1 cannot become
// committed in C2, so the new config is not committed.
assert.commandWorked(primary.adminCommand({replSetReconfig: C2}));
+
+jsTestLog("Waiting for member 1 to no longer be 'newlyAdded'");
+assert.soonNoExcept(function() {
+ return !isMemberNewlyAdded(primary, 1, false /* force */);
+}, () => tojson(primary.getDB("local").system.replset.findOne()));
+
assert.eq(isConfigCommitted(primary), false);
// Wait until the config has propagated to the secondary and the primary has learned of it, so that
// the config replication check is satisfied.
-waitForConfigReplication(primary);
+rst.waitForConfigReplication(primary);
// Reconfig should time out since we have not committed the last committed op from C1 in C2.
assert.commandFailedWithCode(primary.adminCommand({replSetReconfig: C3, maxTimeMS: 1000}),
@@ -120,7 +126,7 @@ assert.eq(primary, rst.getPrimary());
assert.eq(isConfigCommitted(primary), false);
// Wait for the config with the new term to propagate.
-waitForConfigReplication(primary);
+rst.waitForConfigReplication(primary);
// Even though the current config has been replicated to all nodes, reconfig should still fail since
// the primary has not yet committed an op in its term.
diff --git a/jstests/replsets/reconfig_waits_for_oplog_commitment_condition_when_leaving_force.js b/jstests/replsets/reconfig_waits_for_oplog_commitment_condition_when_leaving_force.js
index a986180852f..1880c4cdef6 100644
--- a/jstests/replsets/reconfig_waits_for_oplog_commitment_condition_when_leaving_force.js
+++ b/jstests/replsets/reconfig_waits_for_oplog_commitment_condition_when_leaving_force.js
@@ -51,7 +51,7 @@ assert.commandWorked(primary.adminCommand({replSetReconfig: twoNodeConfig, force
// Wait until the config has propagated to the secondary and the primary has learned of it, so that
// the config replication check is satisfied.
-waitForConfigReplication(primary);
+rst.waitForConfigReplication(primary);
// Reconfig should succeed even if we have not committed the last committed op in the current
// config because the current config is from a force reconfig.
diff --git a/jstests/replsets/remove1.js b/jstests/replsets/remove1.js
index 42f61d1665c..352cb874663 100644
--- a/jstests/replsets/remove1.js
+++ b/jstests/replsets/remove1.js
@@ -6,6 +6,11 @@
* Bring secondary back up
* Add it back as secondary
* Make sure both nodes are either primary or secondary
+ *
+ * This test assumes 'newlyAdded' fields are enabled, so blacklist from multiversion tests in 4.6.
+ * @tags: [
+ * requires_fcv_46,
+ * ]
*/
load("jstests/replsets/rslib.js");
@@ -25,7 +30,7 @@ master.getDB("foo").bar.baz.insert({x: 1});
replTest.awaitReplication();
print("Remove secondary");
-var config = replTest.getReplSetConfig();
+var config = replTest.getReplSetConfigFromNode(0);
for (var i = 0; i < config.members.length; i++) {
if (config.members[i].host == secondary.host) {
config.members.splice(i, 1);
@@ -81,6 +86,11 @@ assert.soon(function() {
}
});
master = replTest.getPrimary();
+
+// Wait and account for 'newlyAdded' automatic reconfig.
+nextVersion++;
+replTest.waitForAllNewlyAddedRemovals();
+
secondary = replTest.getSecondary();
printjson(master.getDB("admin").runCommand({replSetGetStatus: 1}));
var newConfig = master.getDB("local").system.replset.findOne();
diff --git a/jstests/replsets/remove_newly_added_field_after_finishing_initial_sync.js b/jstests/replsets/remove_newly_added_field_after_finishing_initial_sync.js
index bc9a4f048e0..a8198a1bd84 100644
--- a/jstests/replsets/remove_newly_added_field_after_finishing_initial_sync.js
+++ b/jstests/replsets/remove_newly_added_field_after_finishing_initial_sync.js
@@ -22,7 +22,6 @@ const collName = "testcoll";
const rst = new ReplSetTest({
name: testName,
nodes: [{}, {}, {rsConfig: {priority: 0}}],
- nodeOptions: {setParameter: {enableAutomaticReconfig: true}},
settings: {chainingAllowed: false},
useBridge: true
});
@@ -33,12 +32,6 @@ const primary = rst.getPrimary();
const primaryDb = primary.getDB(dbName);
const primaryColl = primaryDb.getCollection(collName);
-// TODO (SERVER-46808): Move this into ReplSetTest.initiate
-waitForNewlyAddedRemovalForNodeToBeCommitted(primary, 0);
-waitForNewlyAddedRemovalForNodeToBeCommitted(primary, 1);
-waitForNewlyAddedRemovalForNodeToBeCommitted(primary, 2);
-waitForConfigReplication(primary, rst.nodes);
-
// We did two automatic reconfigs to remove 'newlyAdded' fields (for members 1 and 2).
const replMetricsAtStart = primaryDb.serverStatus().metrics.repl;
assert(replMetricsAtStart.hasOwnProperty("reconfig"));
@@ -55,7 +48,6 @@ const secondary = rst.add({
setParameter: {
'failpoint.initialSyncHangBeforeFinish': tojson({mode: 'alwaysOn'}),
'numInitialSyncAttempts': 1,
- 'enableAutomaticReconfig': true,
}
});
rst.reInitiate();
@@ -73,6 +65,11 @@ let getConfigRes = assert.commandWorked(primary.adminCommand({replSetGetConfig:
let newNodeRes = getConfigRes.members[3];
assert.eq(false, newNodeRes.hasOwnProperty("newlyAdded"), getConfigRes);
+jsTestLog("Making sure the 'newlyAdded' field is visible in replSetGetConfig with test param");
+getConfigRes = getConfigWithNewlyAdded(primary).config;
+newNodeRes = getConfigRes.members[3];
+assert.eq(true, newNodeRes.hasOwnProperty("newlyAdded"), getConfigRes);
+
jsTestLog("Checking behavior with 'newlyAdded' field set, during initial sync");
assertVoteCount(primary, {
votingMembersCount: 3,
@@ -97,13 +94,13 @@ assert.commandWorked(primaryColl.insert({a: 3}, {writeConcern: {w: "majority"}})
// Only two nodes are needed for an election (0 and 1).
assert.commandWorked(rst.nodes[1].adminCommand({replSetStepUp: 1}));
assert.eq(rst.getPrimary(), rst.nodes[1]);
-waitForConfigReplication(rst.nodes[1], [rst.nodes[0], rst.nodes[1], rst.nodes[3]]);
+rst.waitForConfigReplication(rst.nodes[1], [rst.nodes[0], rst.nodes[1], rst.nodes[3]]);
// Reset node 0 to be primary.
rst.awaitReplication(null, null, [rst.nodes[0], rst.nodes[1]]);
assert.commandWorked(rst.nodes[0].adminCommand({replSetStepUp: 1}));
assert.eq(rst.getPrimary(), rst.nodes[0]);
-waitForConfigReplication(rst.nodes[0], [rst.nodes[0], rst.nodes[1], rst.nodes[3]]);
+rst.waitForConfigReplication(rst.nodes[0], [rst.nodes[0], rst.nodes[1], rst.nodes[3]]);
// Initial syncing nodes do not acknowledge replication.
rst.nodes[1].disconnect(rst.nodes);
@@ -153,13 +150,13 @@ assert.commandWorked(primaryColl.insert({a: 6}, {writeConcern: {w: "majority"}})
// Only two nodes are needed for an election (0 and 1).
assert.commandWorked(rst.nodes[1].adminCommand({replSetStepUp: 1}));
assert.eq(rst.getPrimary(), rst.nodes[1]);
-waitForConfigReplication(rst.nodes[1], [rst.nodes[0], rst.nodes[1]]);
+rst.waitForConfigReplication(rst.nodes[1], [rst.nodes[0], rst.nodes[1]]);
// Reset node 0 to be primary.
rst.awaitReplication(null, null, [rst.nodes[0], rst.nodes[1]]);
assert.commandWorked(rst.nodes[0].adminCommand({replSetStepUp: 1}));
assert.eq(rst.getPrimary(), rst.nodes[0]);
-waitForConfigReplication(rst.nodes[0], [rst.nodes[0], rst.nodes[1]]);
+rst.waitForConfigReplication(rst.nodes[0], [rst.nodes[0], rst.nodes[1]]);
// 'newlyAdded' nodes cannot be one of the two nodes to satisfy w:majority.
rst.nodes[3].reconnect(rst.nodes);
@@ -217,13 +214,13 @@ assert.commandWorked(primaryColl.insert({a: 8}, {writeConcern: {w: "majority"}})
// Only three nodes are needed for an election (0, 1, and 3).
assert.commandWorked(rst.nodes[1].adminCommand({replSetStepUp: 1}));
assert.eq(rst.getPrimary(), rst.nodes[1]);
-waitForConfigReplication(rst.nodes[1], [rst.nodes[0], rst.nodes[1], rst.nodes[3]]);
+rst.waitForConfigReplication(rst.nodes[1], [rst.nodes[0], rst.nodes[1], rst.nodes[3]]);
// Reset node 0 to be primary.
rst.awaitReplication(null, null, [rst.nodes[0], rst.nodes[1]]);
assert.commandWorked(rst.nodes[0].adminCommand({replSetStepUp: 1}));
assert.eq(rst.getPrimary(), rst.nodes[0]);
-waitForConfigReplication(rst.nodes[0], [rst.nodes[0], rst.nodes[1], rst.nodes[3]]);
+rst.waitForConfigReplication(rst.nodes[0], [rst.nodes[0], rst.nodes[1], rst.nodes[3]]);
// 3 nodes are needed for a w:majority write.
rst.nodes[3].disconnect(rst.nodes);
diff --git a/jstests/replsets/remove_newly_added_field_votes_zero.js b/jstests/replsets/remove_newly_added_field_votes_zero.js
index cc443439580..5423462afc5 100644
--- a/jstests/replsets/remove_newly_added_field_votes_zero.js
+++ b/jstests/replsets/remove_newly_added_field_votes_zero.js
@@ -17,8 +17,7 @@ const testName = jsTestName();
const dbName = "testdb";
const collName = "testcoll";
-const rst = new ReplSetTest(
- {name: testName, nodes: 1, nodeOptions: {setParameter: {enableAutomaticReconfig: true}}});
+const rst = new ReplSetTest({name: testName, nodes: 1});
rst.startSet();
rst.initiateWithHighElectionTimeout();
@@ -26,10 +25,6 @@ const primary = rst.getPrimary();
const primaryDb = primary.getDB(dbName);
const primaryColl = primaryDb.getCollection(collName);
-// TODO(SERVER-46808): Move this into ReplSetTest.initiate
-waitForNewlyAddedRemovalForNodeToBeCommitted(primary, 0);
-waitForConfigReplication(primary, rst.nodes);
-
assert.commandWorked(primaryColl.insert({a: 1}));
jsTestLog("Adding a new non-voting node to the replica set");
@@ -38,7 +33,6 @@ const secondary0 = rst.add({
setParameter: {
'failpoint.initialSyncHangBeforeFinish': tojson({mode: 'alwaysOn'}),
'numInitialSyncAttempts': 1,
- 'enableAutomaticReconfig': true,
}
});
rst.reInitiate();
@@ -85,7 +79,6 @@ const secondary1 = rst.add({
setParameter: {
'failpoint.initialSyncHangBeforeFinish': tojson({mode: 'alwaysOn'}),
'numInitialSyncAttempts': 1,
- 'enableAutomaticReconfig': true,
}
});
rst.reInitiate();
diff --git a/jstests/replsets/remove_newly_added_member_index_swap_concurrent.js b/jstests/replsets/remove_newly_added_member_index_swap_concurrent.js
index cdedbc8e659..3e9884386ac 100644
--- a/jstests/replsets/remove_newly_added_member_index_swap_concurrent.js
+++ b/jstests/replsets/remove_newly_added_member_index_swap_concurrent.js
@@ -18,12 +18,7 @@ const testName = jsTestName();
const dbName = "testdb";
const collName = "testcoll";
-const rst = new ReplSetTest({
- name: testName,
- nodes: 1,
- nodeOptions: {setParameter: {enableAutomaticReconfig: true}},
- settings: {chainingAllowed: false}
-});
+const rst = new ReplSetTest({name: testName, nodes: 1, settings: {chainingAllowed: false}});
rst.startSet();
rst.initiateWithHighElectionTimeout();
@@ -31,10 +26,6 @@ const primary = rst.getPrimary();
const primaryDb = primary.getDB(dbName);
const primaryColl = primaryDb.getCollection(collName);
-// TODO (SERVER-46808): Move this into ReplSetTest.initiate
-waitForNewlyAddedRemovalForNodeToBeCommitted(primary, 0);
-waitForConfigReplication(primary, rst.nodes);
-
assert.commandWorked(primaryColl.insert({"starting": "doc"}));
const newNodeOne = rst.add({
@@ -42,7 +33,6 @@ const newNodeOne = rst.add({
setParameter: {
'failpoint.initialSyncHangBeforeFinish': tojson({mode: 'alwaysOn'}),
'numInitialSyncAttempts': 1,
- 'enableAutomaticReconfig': true,
}
});
@@ -51,7 +41,6 @@ const newNodeTwo = rst.add({
setParameter: {
'failpoint.initialSyncHangBeforeFinish': tojson({mode: 'alwaysOn'}),
'numInitialSyncAttempts': 1,
- 'enableAutomaticReconfig': true,
}
});
@@ -101,7 +90,7 @@ config.members[1] = config.members[2];
config.members[2] = tempMemberOne;
config.version++;
assert.commandWorked(primary.adminCommand({replSetReconfig: config}));
-waitForConfigReplication(primary);
+rst.waitForConfigReplication(primary);
jsTestLog("Making sure the config now has the ids and indexes flipped");
configOnDisk = primary.getDB("local").system.replset.findOne();
diff --git a/jstests/replsets/rslib.js b/jstests/replsets/rslib.js
index 6a6bae32f3c..8f4f200f877 100644
--- a/jstests/replsets/rslib.js
+++ b/jstests/replsets/rslib.js
@@ -18,8 +18,8 @@ var stopReplicationAndEnforceNewPrimaryToCatchUp;
var setFailPoint;
var clearFailPoint;
var isConfigCommitted;
-var waitForConfigReplication;
var assertSameConfigContent;
+var getConfigWithNewlyAdded;
var isMemberNewlyAdded;
var replConfigHasNewlyAddedMembers;
var waitForNewlyAddedRemovalForNodeToBeCommitted;
@@ -199,25 +199,59 @@ waitForAllMembers = function(master, timeout) {
};
/**
- * Run a 'replSetReconfig' command with one retry.
+ * Run a 'replSetReconfig' command with one retry on NodeNotFound and multiple retries on
+ * ConfigurationInProgress, CurrentConfigNotCommittedYet, and
+ * NewReplicaSetConfigurationIncompatible.
*/
function reconfigWithRetry(primary, config, force) {
- var admin = primary.getDB("admin");
+ const admin = primary.getDB("admin");
force = force || false;
- var reconfigCommand = {
+ let reconfigCommand = {
replSetReconfig: config,
force: force,
maxTimeMS: ReplSetTest.kDefaultTimeoutMS
};
- var res = admin.runCommand(reconfigCommand);
- // Retry reconfig if quorum check failed because not enough voting nodes responded.
- if (!res.ok && res.code === ErrorCodes.NodeNotFound) {
- print("Replset reconfig failed because quorum check failed. Retry reconfig once. " +
- "Error: " + tojson(res));
- res = admin.runCommand(reconfigCommand);
- }
- assert.commandWorked(res);
+ assert.soon(function() {
+ const newVersion =
+ assert.commandWorked(admin.runCommand({replSetGetConfig: 1})).config.version + 1;
+ reconfigCommand.replSetReconfig.version = newVersion;
+ const res = admin.runCommand(reconfigCommand);
+
+ // Retry reconfig if quorum check failed because not enough voting nodes responded. One
+ // reason for this is if the connections used for heartbeats get closed on the destination
+ // node.
+ if (!res.ok && res.code === ErrorCodes.NodeNotFound) {
+ print("Replset reconfig failed because quorum check failed. Retry reconfig once. " +
+ "Error: " + tojson(res));
+ res = admin.runCommand(reconfigCommand);
+ }
+
+ // Always retry on these errors, even if we already retried on NodeNotFound.
+ if (!res.ok &&
+ (res.code === ErrorCodes.ConfigurationInProgress ||
+ res.code === ErrorCodes.CurrentConfigNotCommittedYet)) {
+ print("Replset reconfig failed since another configuration is in progress. Retry.");
+ // Retry.
+ return false;
+ }
+
+ // Always retry on NewReplicaSetConfigurationIncompatible, if the current config version is
+ // higher than the requested one.
+ if (!res.ok && res.code === ErrorCodes.NewReplicaSetConfigurationIncompatible) {
+ const curVersion =
+ assert.commandWorked(admin.runCommand({replSetGetConfig: 1})).config.version;
+ if (curVersion >= newVersion) {
+ print("Replset reconfig failed since the config version was too low. Retry. " +
+ "Error: " + tojson(res));
+ // Retry.
+ return false;
+ }
+ }
+
+ assert.commandWorked(res);
+ return true;
+ });
}
/**
@@ -681,27 +715,6 @@ isConfigCommitted = function(node) {
};
/**
- * Wait until the config on the primary becomes committed.
- */
-waitForConfigReplication = function(primary, nodes) {
- const nodeHosts = nodes == null ? "all nodes" : tojson(nodes.map((n) => n.host));
- jsTestLog("Waiting for the config on " + primary.host + " to replicate to " + nodeHosts);
- assert.soon(function() {
- const res = primary.adminCommand({replSetGetStatus: 1});
- const primaryMember = res.members.find((m) => m.self);
- function hasSameConfig(member) {
- return member.configVersion === primaryMember.configVersion &&
- member.configTerm === primaryMember.configTerm;
- }
- let members = res.members;
- if (nodes != null) {
- members = res.members.filter((m) => nodes.some((node) => m.name === node.host));
- }
- return members.every((m) => hasSameConfig(m));
- });
-};
-
-/**
* Asserts that replica set config A is the same as replica set config B ignoring the 'version' and
* 'term' field.
*/
@@ -722,56 +735,37 @@ assertSameConfigContent = function(configA, configB) {
};
/**
+ * Returns the result of 'replSetGetConfig' run with the test-only parameter that indicates
+ * 'newlyAdded' fields should be included.
+ */
+getConfigWithNewlyAdded = function(node) {
+ return assert.commandWorked(
+ node.adminCommand({replSetGetConfig: 1, $_internalIncludeNewlyAdded: true}));
+};
+
+/**
* @param memberIndex is optional. If not provided, then it will return true even if
* a single member in the replSet config has "newlyAdded" field.
*/
-isMemberNewlyAdded = function(node, memberIndex, force = false) {
- // The in-memory config will not include the 'newlyAdded' field, so we must consult the on-disk
- // version. However, the in-memory config is updated after the config is persisted to disk, so
- // we must confirm that the in-memory config agrees with the on-disk config, before returning
- // true or false.
- const configInMemory = assert.commandWorked(node.adminCommand({replSetGetConfig: 1})).config;
-
- const versionSetInMemory = configInMemory.hasOwnProperty("version");
- const termSetInMemory = configInMemory.hasOwnProperty("term");
-
- // Since the term is not set in a force reconfig, we skip the check for the term if
- // 'force=true'.
- if (!versionSetInMemory || (!termSetInMemory && !force)) {
- throw new Error("isMemberNewlyAdded: in-memory config has no version or term: " +
- tojsononeline(configInMemory));
- }
-
- const configOnDisk = node.getDB("local").system.replset.findOne();
- const termSetOnDisk = configOnDisk.hasOwnProperty("term");
-
- const isVersionSetCorrectly = (configOnDisk.version === configInMemory.version);
- const isTermSetCorrectly =
- ((!termSetInMemory && !termSetOnDisk) || (configOnDisk.term === configInMemory.term));
-
- if (!isVersionSetCorrectly || !isTermSetCorrectly) {
- throw new error(
- "isMemberNewlyAdded: in-memory config version/term does not match on-disk config." +
- " in-memory: " + tojsononeline(configInMemory) +
- ", on-disk: " + tojsononeline(configOnDisk));
- }
+isMemberNewlyAdded = function(node, memberIndex) {
+ const config = getConfigWithNewlyAdded(node).config;
const allMembers = (memberIndex === undefined);
- assert(allMembers || (memberIndex >= 0 && memberIndex < configOnDisk.members.length),
- "memberIndex should be between 0 and " + (configOnDisk.members.length - 1) +
+ assert(allMembers || (memberIndex >= 0 && memberIndex < config.members.length),
+ "memberIndex should be between 0 and " + (config.members.length - 1) +
", but memberIndex is " + memberIndex);
var hasNewlyAdded = (index) => {
- const memberConfigOnDisk = configOnDisk.members[index];
- if (memberConfigOnDisk.hasOwnProperty("newlyAdded")) {
- assert(memberConfigOnDisk["newlyAdded"] === true, () => tojson(configOnDisk));
+ const memberConfig = config.members[index];
+ if (memberConfig.hasOwnProperty("newlyAdded")) {
+ assert(memberConfig["newlyAdded"] === true, config);
return true;
}
return false;
};
if (allMembers) {
- for (let i = 0; i < configOnDisk.members.length; i++) {
+ for (let i = 0; i < config.members.length; i++) {
if (hasNewlyAdded(i))
return true;
}
@@ -790,7 +784,7 @@ waitForNewlyAddedRemovalForNodeToBeCommitted = function(node, memberIndex, force
jsTestLog("Waiting for member " + memberIndex + " to no longer be 'newlyAdded'");
assert.soonNoExcept(function() {
return !isMemberNewlyAdded(node, memberIndex, force) && isConfigCommitted(node);
- }, () => tojson(node.getDB("local").system.replset.findOne()));
+ }, getConfigWithNewlyAdded(node).config);
};
assertVoteCount = function(node, {
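For reference, a minimal sketch of how a test might drive the updated rslib.js helpers, assuming rslib.js has been loaded and 'rst' is an already-initiated ReplSetTest (both assumptions, not part of this patch):

    load("jstests/replsets/rslib.js");

    const primary = rst.getPrimary();

    // reconfigWithRetry now picks the next config version itself and retries on the
    // transient reconfig errors listed in its comment above.
    let config = rst.getReplSetConfigFromNode();
    config.members[1].priority = 0;
    reconfigWithRetry(primary, config);

    // Inspect 'newlyAdded' fields through the test-only replSetGetConfig option.
    const cfgWithNewlyAdded = getConfigWithNewlyAdded(primary).config;
    jsTestLog("Config including 'newlyAdded': " + tojson(cfgWithNewlyAdded));

    // Wait for the automatic reconfig that strips 'newlyAdded' from member 1, then for
    // the resulting config to reach every node.
    if (isMemberNewlyAdded(primary, 1)) {
        waitForNewlyAddedRemovalForNodeToBeCommitted(primary, 1);
    }
    rst.waitForConfigReplication(primary);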
diff --git a/jstests/replsets/test_only_repl_commands.js b/jstests/replsets/test_only_repl_commands.js
new file mode 100644
index 00000000000..afabfa904b3
--- /dev/null
+++ b/jstests/replsets/test_only_repl_commands.js
@@ -0,0 +1,42 @@
+/**
+ * Tests that test-only, replica-set-only commands are truly test-only.
+ *
+ * @tags: [
+ * requires_fcv_46,
+ * ]
+ */
+
+(function() {
+"use strict";
+
+const cmdList = [
+ {'replSetGetConfig': 1, '$_internalIncludeNewlyAdded': true},
+ {'replSetGetConfig': 1, '$_internalIncludeNewlyAdded': false}
+];
+
+TestData.enableTestCommands = false;
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiateWithAnyNodeAsPrimary(null, "replSetInitiate", {doNotWaitForNewlyAddedRemovals: true});
+
+let primary = rst.getPrimary();
+for (let cmd of cmdList) {
+ assert.commandFailedWithCode(primary.adminCommand(cmd), ErrorCodes.InvalidOptions);
+}
+
+rst.awaitReplication();
+rst.stopSet();
+
+TestData.enableTestCommands = true;
+rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiateWithAnyNodeAsPrimary(null, "replSetInitiate", {doNotWaitForNewlyAddedRemovals: true});
+
+primary = rst.getPrimary();
+for (let cmd of cmdList) {
+ assert.commandWorked(primary.adminCommand(cmd));
+}
+
+rst.awaitReplication();
+rst.stopSet();
+})();
diff --git a/jstests/sharding/refine_collection_shard_key_abort_on_stepup.js b/jstests/sharding/refine_collection_shard_key_abort_on_stepup.js
index 4946ed83974..ab40f5c1522 100644
--- a/jstests/sharding/refine_collection_shard_key_abort_on_stepup.js
+++ b/jstests/sharding/refine_collection_shard_key_abort_on_stepup.js
@@ -38,7 +38,7 @@ cfg.settings.electionTimeoutMillis = csrs.kDefaultTimeoutMS;
cfg.settings.catchUpTimeoutMillis = 0;
cfg.settings.chainingAllowed = false;
reconfig(csrs, cfg, true);
-waitForConfigReplication(csrs.getPrimary());
+csrs.waitForConfigReplication(csrs.getPrimary());
csrs.awaitReplication();
const kDbName = jsTestName();
diff --git a/jstests/ssl/repl_ssl_split_horizon.js b/jstests/ssl/repl_ssl_split_horizon.js
index 198c4d4d6f9..bc6411510ec 100644
--- a/jstests/ssl/repl_ssl_split_horizon.js
+++ b/jstests/ssl/repl_ssl_split_horizon.js
@@ -165,8 +165,9 @@ assert.eq(checkExpectedHorizon(horizonMissingURL, 1, node1localHostname),
0,
"does not return localhost as expected");
-// Check so we can replSetReconfig to add another horizon
-config.version += 1;
+// Check so we can replSetReconfig to add another horizon.
+// Add 2 to the config version to account for the 'newlyAdded' removal reconfig.
+config.version += 2;
config.members[0].horizons.other_horizon_name = node0horizonMissingHostname;
config.members[1].horizons.other_horizon_name = node1horizonMissingHostname;
diff --git a/src/mongo/db/repl/member_config_test.cpp b/src/mongo/db/repl/member_config_test.cpp
index 8b52a238b16..00c9fb36765 100644
--- a/src/mongo/db/repl/member_config_test.cpp
+++ b/src/mongo/db/repl/member_config_test.cpp
@@ -235,11 +235,6 @@ void setNewlyAdded_ForTest(MemberConfig* mc, boost::optional<bool> newlyAdded) {
namespace {
TEST(MemberConfig, ParseAndSetNewlyAddedField) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
ReplSetTagConfig tagConfig;
{
MemberConfig mc(BSON("_id" << 0 << "host"
@@ -261,11 +256,6 @@ TEST(MemberConfig, ParseAndSetNewlyAddedField) {
}
TEST(MemberConfig, NewlyAddedSetToFalseShouldThrow) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
ReplSetTagConfig tagConfig;
ASSERT_THROWS(MemberConfig(BSON("_id" << 0 << "host"
<< "h"
@@ -275,11 +265,6 @@ TEST(MemberConfig, NewlyAddedSetToFalseShouldThrow) {
}
TEST(MemberConfig, VotingNodeWithNewlyAddedFieldShouldStillHaveVoteAfterToBSON) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
ReplSetTagConfig tagConfig;
// Create a member with 'newlyAdded: true'.
@@ -306,11 +291,6 @@ TEST(MemberConfig, VotingNodeWithNewlyAddedFieldShouldStillHaveVoteAfterToBSON)
}
TEST(MemberConfig, NonVotingNodesWithNewlyAddedFieldShouldStillHaveZeroVotesAfterToBSON) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
ReplSetTagConfig tagConfig;
MemberConfig mc(BSON("_id" << 0 << "host"
@@ -374,11 +354,6 @@ TEST(MemberConfig, VotingNodesShouldStillHaveVoteAfterToBSON) {
}
TEST(MemberConfig, NodeWithNewlyAddedFieldShouldStillHavePriorityAfterToBSON) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
ReplSetTagConfig tagConfig;
// Create a member with 'newlyAdded: true' and 'priority: 3'.
@@ -403,11 +378,6 @@ TEST(MemberConfig, NodeWithNewlyAddedFieldShouldStillHavePriorityAfterToBSON) {
}
TEST(MemberConfig, PriorityZeroNodeWithNewlyAddedFieldShouldStillHaveZeroPriorityAfterToBSON) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
ReplSetTagConfig tagConfig;
// Create a member with 'newlyAdded: true' and 'priority: 0'.
@@ -484,11 +454,6 @@ TEST(MemberConfig, NodeShouldStillHavePriorityAfterToBSON) {
}
TEST(MemberConfig, CanOmitNewlyAddedFieldInBSONViaToBSONWithoutNewlyAdded) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
ReplSetTagConfig tagConfig;
// Create a member with 'newlyAdded: true' and 'priority: 0'.
@@ -514,11 +479,6 @@ TEST(MemberConfig, CanOmitNewlyAddedFieldInBSONViaToBSONWithoutNewlyAdded) {
}
TEST(MemberConfig, ArbiterCannotHaveNewlyAddedFieldSet) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
ReplSetTagConfig tagConfig;
// Verify that an exception is thrown when we try to create an arbiter with 'newlyAdded' field.
diff --git a/src/mongo/db/repl/repl_server_parameters.idl b/src/mongo/db/repl/repl_server_parameters.idl
index 8e345c8fc92..f1b801ee1f4 100644
--- a/src/mongo/db/repl/repl_server_parameters.idl
+++ b/src/mongo/db/repl/repl_server_parameters.idl
@@ -276,7 +276,7 @@ server_parameters:
test_only: true
cpp_vartype: bool
cpp_varname: enableAutomaticReconfig
- default: false
+ default: true
oplogApplicationEnforcesSteadyStateConstraints:
description: >-
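Because 'enableAutomaticReconfig' stays test_only, it can normally only be overridden where test commands are enabled. A hypothetical fixture setup that opts back out of automatic reconfigs, mirroring the setParameter usage removed from the jstests above (illustrative only, not part of this patch):

    // Start a two-node set with automatic 'newlyAdded' reconfigs disabled.
    const rst = new ReplSetTest({
        nodes: 2,
        nodeOptions: {setParameter: {enableAutomaticReconfig: false}}
    });
    rst.startSet();
    rst.initiate();
    // ... run the test ...
    rst.stopSet();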
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index de4411d1488..22a42f2da65 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -77,7 +77,7 @@ namespace repl {
using std::string;
using std::stringstream;
-static const std::string kReplSetReconfigNss = "local.replset.reconfig";
+constexpr StringData kInternalIncludeNewlyAddedFieldName = "$_internalIncludeNewlyAdded"_sd;
class ReplExecutorSSM : public ServerStatusMetric {
public:
@@ -206,7 +206,21 @@ public:
bool wantCommitmentStatus;
uassertStatusOK(bsonExtractBooleanFieldWithDefault(
cmdObj, "commitmentStatus", false, &wantCommitmentStatus));
- ReplicationCoordinator::get(opCtx)->processReplSetGetConfig(&result, wantCommitmentStatus);
+
+ if (cmdObj[kInternalIncludeNewlyAddedFieldName]) {
+ uassert(ErrorCodes::InvalidOptions,
+ "The '$_internalIncludeNewlyAdded' option is only supported when testing"
+ " commands are enabled",
+ getTestCommandsEnabled());
+ }
+
+ bool includeNewlyAdded;
+ uassertStatusOK(bsonExtractBooleanFieldWithDefault(
+ cmdObj, kInternalIncludeNewlyAddedFieldName, false, &includeNewlyAdded));
+
+ ReplicationCoordinator::get(opCtx)->processReplSetGetConfig(
+ &result, wantCommitmentStatus, includeNewlyAdded);
+
return true;
}
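From the shell, the new option can be combined with 'commitmentStatus' in the same way replsettest.js does further down; a sketch assuming 'primary' is a connection to the current primary and test commands are enabled:

    const res = assert.commandWorked(primary.adminCommand({
        replSetGetConfig: 1,
        commitmentStatus: true,
        $_internalIncludeNewlyAdded: true
    }));
    // res.config retains any 'newlyAdded' member fields; res.commitmentStatus reports
    // whether the current config is committed.
    jsTestLog("commitmentStatus: " + res.commitmentStatus + ", config: " + tojson(res.config));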
diff --git a/src/mongo/db/repl/repl_set_config_checks_test.cpp b/src/mongo/db/repl/repl_set_config_checks_test.cpp
index 6a98744db44..ae90eb20f83 100644
--- a/src/mongo/db/repl/repl_set_config_checks_test.cpp
+++ b/src/mongo/db/repl/repl_set_config_checks_test.cpp
@@ -233,10 +233,6 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_ArbiterPriorityMustBeZeroOr
}
TEST_F(ServiceContextTest, ValidateConfigForInitiate_NewlyAddedFieldNotAllowed) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
ReplSetConfig firstNewlyAdded;
ReplSetConfig lastNewlyAdded;
OID newReplSetId = OID::gen();
@@ -1076,22 +1072,12 @@ TEST_F(ServiceContextTest, ValidateForReconfig_MultiNodeAdditionOfArbitersDisall
}
TEST_F(ServiceContextTest, ValidateForReconfig_SingleNodeAdditionOfNewlyAddedAllowed) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
BSONArray oldMembers = BSON_ARRAY(m1 << m2);
BSONArray newMembers = BSON_ARRAY(m1 << m2 << m3_NewlyAdded); // add 1 'newlyAdded' node.
ASSERT_OK(validateMemberReconfig(oldMembers, newMembers, m1));
}
TEST_F(ServiceContextTest, ValidateForReconfig_MultiNodeAdditionOfNewlyAddedAllowed) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
BSONArray oldMembers = BSON_ARRAY(m1 << m2);
BSONArray newMembers =
BSON_ARRAY(m1 << m2 << m3_NewlyAdded << m4_NewlyAdded); // add 2 'newlyAdded' nodes.
@@ -1099,22 +1085,12 @@ TEST_F(ServiceContextTest, ValidateForReconfig_MultiNodeAdditionOfNewlyAddedAllo
}
TEST_F(ServiceContextTest, ValidateForReconfig_MultiNodeRemovalOfNewlyAddedAllowed) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
BSONArray oldMembers = BSON_ARRAY(m1 << m2_NewlyAdded << m3_NewlyAdded);
BSONArray newMembers = BSON_ARRAY(m1); // Remove 2 'newlyAdded' nodes.
ASSERT_OK(validateMemberReconfig(oldMembers, newMembers, m1));
}
TEST_F(ServiceContextTest, ValidateForReconfig_SimultaneousAddAndRemoveOfNewlyAddedAllowed) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
BSONArray oldMembers = BSON_ARRAY(m1 << m2_NewlyAdded);
BSONArray newMembers =
BSON_ARRAY(m1 << m3_NewlyAdded); // Remove 'newlyAdded' 2, add 'newlyAdded' 3.
@@ -1122,22 +1098,12 @@ TEST_F(ServiceContextTest, ValidateForReconfig_SimultaneousAddAndRemoveOfNewlyAd
}
TEST_F(ServiceContextTest, ValidateForReconfig_SingleAutoReconfigAllowed) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
BSONArray oldMembers = BSON_ARRAY(m1 << m2_NewlyAdded);
BSONArray newMembers = BSON_ARRAY(m1 << m2); // Remove 'newlyAdded' 2, add voting node 2.
ASSERT_OK(validateMemberReconfig(oldMembers, newMembers, m1));
}
TEST_F(ServiceContextTest, ValidateForReconfig_MultiAutoReconfigDisallowed) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
BSONArray oldMembers = BSON_ARRAY(m1 << m2_NewlyAdded << m3_NewlyAdded);
BSONArray newMembers =
BSON_ARRAY(m1 << m2 << m3); // Remove 'newlyAdded' 2 & 3, add voting node 2 & 3.
@@ -1147,11 +1113,6 @@ TEST_F(ServiceContextTest, ValidateForReconfig_MultiAutoReconfigDisallowed) {
TEST_F(ServiceContextTest,
ValidateForReconfig_SimultaneousAutoReconfigAndAdditionOfVoterNodeDisallowed) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
BSONArray oldMembers = BSON_ARRAY(m1 << m2_NewlyAdded);
BSONArray newMembers =
BSON_ARRAY(m1 << m2 << m3); // Remove 'newlyAdded' 2, add voting node 2 & 3.
@@ -1161,11 +1122,6 @@ TEST_F(ServiceContextTest,
TEST_F(ServiceContextTest,
ValidateForReconfig_SimultaneousAutoReconfigAndRemovalOfVoterNodeDisallowed) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
BSONArray oldMembers = BSON_ARRAY(m1 << m2_NewlyAdded << m3);
BSONArray newMembers =
BSON_ARRAY(m1 << m2); // Remove 'newlyAdded' 2 and voter node 3, add voting node 2.
@@ -1175,11 +1131,6 @@ TEST_F(ServiceContextTest,
TEST_F(ServiceContextTest,
ValidateForReconfig_SimultaneousAutoReconfigAndAdditionOfNewlyAddedAllowed) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
BSONArray oldMembers = BSON_ARRAY(m1 << m2_NewlyAdded);
BSONArray newMembers = BSON_ARRAY(
m1 << m2 << m3_NewlyAdded); // Remove 'newlyAdded' 2, add voting node 2 & 'newlyAdded' 3.
@@ -1188,11 +1139,6 @@ TEST_F(ServiceContextTest,
TEST_F(ServiceContextTest,
ValidateForReconfig_SimultaneousAutoReconfigAndRemovalOfNewlyAddedAllowed) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
BSONArray oldMembers = BSON_ARRAY(m1 << m2_NewlyAdded << m3_NewlyAdded);
BSONArray newMembers = BSON_ARRAY(m1 << m2); // Remove 'newlyAdded' 2 & 3, add voting node 2.
ASSERT_OK(validateMemberReconfig(oldMembers, newMembers, m1));
diff --git a/src/mongo/db/repl/repl_set_config_test.cpp b/src/mongo/db/repl/repl_set_config_test.cpp
index 20d7bb4270d..16618f56cbf 100644
--- a/src/mongo/db/repl/repl_set_config_test.cpp
+++ b/src/mongo/db/repl/repl_set_config_test.cpp
@@ -891,11 +891,6 @@ TEST(ReplSetConfig, ConfigServerField) {
}
TEST(ReplSetConfig, SetNewlyAddedFieldForMemberConfig) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
ReplSetConfig config(
ReplSetConfig::parse(BSON("_id"
<< "rs0"
@@ -938,11 +933,6 @@ TEST(ReplSetConfig, SetNewlyAddedFieldForMemberConfig) {
}
TEST(ReplSetConfig, RemoveNewlyAddedFieldForMemberConfig) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
ReplSetConfig config(
ReplSetConfig::parse(BSON("_id"
<< "rs0"
@@ -986,11 +976,6 @@ TEST(ReplSetConfig, RemoveNewlyAddedFieldForMemberConfig) {
}
TEST(ReplSetConfig, ParsingNewlyAddedSetsFieldToTrueCorrectly) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
ReplSetConfig config(
ReplSetConfig::parse(BSON("_id"
<< "rs0"
@@ -1004,11 +989,6 @@ TEST(ReplSetConfig, ParsingNewlyAddedSetsFieldToTrueCorrectly) {
}
TEST(ReplSetConfig, ParseFailsWithNewlyAddedSetToFalse) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
ReplSetConfig config;
ASSERT_THROWS(ReplSetConfig::parse(BSON("_id"
<< "rs0"
@@ -1020,11 +1000,6 @@ TEST(ReplSetConfig, ParseFailsWithNewlyAddedSetToFalse) {
}
TEST(ReplSetConfig, NodeWithNewlyAddedFieldHasVotesZero) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
// Create a config for a three-node set with one arbiter and one node with 'newlyAdded: true'.
ReplSetConfig config(
ReplSetConfig::parse(BSON("_id"
@@ -1052,11 +1027,6 @@ TEST(ReplSetConfig, NodeWithNewlyAddedFieldHasVotesZero) {
}
TEST(ReplSetConfig, ToBSONWithoutNewlyAdded) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
// Create a config for a three-node set with one arbiter and one node with 'newlyAdded: true'.
ReplSetConfig config(
ReplSetConfig::parse(BSON("_id"
@@ -1383,11 +1353,6 @@ TEST(ReplSetConfig, toBSONRoundTripAbilityWithHorizon) {
}
TEST(ReplSetConfig, toBSONRoundTripAbilityLarge) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
ReplSetConfig configA;
ReplSetConfig configB;
configA = ReplSetConfig::parse(BSON(
diff --git a/src/mongo/db/repl/replication_coordinator.h b/src/mongo/db/repl/replication_coordinator.h
index 0a8547d5d98..f997fb16771 100644
--- a/src/mongo/db/repl/replication_coordinator.h
+++ b/src/mongo/db/repl/replication_coordinator.h
@@ -634,8 +634,12 @@ public:
*
* If commitmentStatus is true, adds a boolean 'commitmentStatus' field to 'result' indicating
* whether the current config is committed.
+ *
+ * If includeNewlyAdded is true, does not omit 'newlyAdded' fields from the config.
*/
- virtual void processReplSetGetConfig(BSONObjBuilder* result, bool commitmentStatus = false) = 0;
+ virtual void processReplSetGetConfig(BSONObjBuilder* result,
+ bool commitmentStatus = false,
+ bool includeNewlyAdded = false) = 0;
/**
* Processes the ReplSetMetadata returned from a command run against another
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 85f26da6e00..027a75ba474 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -3074,9 +3074,14 @@ WriteConcernOptions ReplicationCoordinatorImpl::_getConfigReplicationWriteConcer
}
void ReplicationCoordinatorImpl::processReplSetGetConfig(BSONObjBuilder* result,
- bool commitmentStatus) {
+ bool commitmentStatus,
+ bool includeNewlyAdded) {
stdx::lock_guard<Latch> lock(_mutex);
- result->append("config", _rsConfig.toBSONWithoutNewlyAdded());
+ if (includeNewlyAdded) {
+ result->append("config", _rsConfig.toBSON());
+ } else {
+ result->append("config", _rsConfig.toBSONWithoutNewlyAdded());
+ }
if (commitmentStatus) {
uassert(ErrorCodes::NotMaster,
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index 354304e2a5f..71b485b8379 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -228,7 +228,8 @@ public:
virtual ReplSetConfig getConfig() const override;
virtual void processReplSetGetConfig(BSONObjBuilder* result,
- bool commitmentStatus = false) override;
+ bool commitmentStatus = false,
+ bool includeNewlyAdded = false) override;
virtual void processReplSetMetadata(const rpc::ReplSetMetadata& replMetadata) override;
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index 6fc714b3304..419b28b86f4 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -998,8 +998,10 @@ TEST_F(ReplCoordReconfigTest,
// Start up as a secondary.
init();
assertStartSuccess(
- configWithMembers(
- 1, 0, BSON_ARRAY(member(1, "n1:1") << member(2, "n2:1") << member(3, "n3:1"))),
+ configWithMembers(1,
+ 0,
+ BSON_ARRAY(member(1, "n1:1") << member(2, "n2:1") << member(3, "n3:1")
+ << member(4, "n4:1", 0))),
HostAndPort("n1", 1));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -1136,14 +1138,14 @@ TEST_F(ReplCoordReconfigTest, WaitForConfigCommitmentTimesOutIfConfigIsNotCommit
// Start out in a non-initial config version.
init();
auto configVersion = 2;
- auto Ca_members = BSON_ARRAY(member(1, "n1:1"));
+ auto Ca_members = BSON_ARRAY(member(1, "n1:1") << member(2, "n2:1", 0));
auto Cb_members = BSON_ARRAY(member(1, "n1:1") << member(2, "n2:1"));
// Startup, simulate application of one oplog entry and get elected.
assertStartSuccess(configWithMembers(configVersion, 0, Ca_members), HostAndPort("n1", 1));
+ ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedAndDurableOpTime(OpTime(Timestamp(1, 1), 0));
- const auto opCtx = makeOperationContext();
- runSingleNodeElection(opCtx.get());
+ simulateSuccessfulV1Election();
ASSERT_EQ(getReplCoord()->getMemberState(), MemberState::RS_PRIMARY);
ASSERT_EQ(getReplCoord()->getTerm(), 1);
@@ -1154,6 +1156,7 @@ TEST_F(ReplCoordReconfigTest, WaitForConfigCommitmentTimesOutIfConfigIsNotCommit
respondToAllHeartbeats();
// Do a first reconfig that should succeed since the current config is committed.
+ const auto opCtx = makeOperationContext();
Status status(ErrorCodes::InternalError, "Not Set");
configVersion = 3;
ASSERT_OK(doSafeReconfig(opCtx.get(), configVersion, Cb_members, 1 /* quorumHbs */));
@@ -1175,14 +1178,14 @@ TEST_F(ReplCoordReconfigTest, WaitForConfigCommitmentReturnsOKIfConfigIsCommitte
// Start out in a non-initial config version.
init();
auto configVersion = 2;
- auto Ca_members = BSON_ARRAY(member(1, "n1:1"));
+ auto Ca_members = BSON_ARRAY(member(1, "n1:1") << member(2, "n2:1", 0));
auto Cb_members = BSON_ARRAY(member(1, "n1:1") << member(2, "n2:1"));
// Startup, simulate application of one oplog entry and get elected.
assertStartSuccess(configWithMembers(configVersion, 0, Ca_members), HostAndPort("n1", 1));
+ ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedAndDurableOpTime(OpTime(Timestamp(1, 1), 0));
- const auto opCtx = makeOperationContext();
- runSingleNodeElection(opCtx.get());
+ simulateSuccessfulV1Election();
ASSERT_EQ(getReplCoord()->getMemberState(), MemberState::RS_PRIMARY);
ASSERT_EQ(getReplCoord()->getTerm(), 1);
@@ -1193,6 +1196,7 @@ TEST_F(ReplCoordReconfigTest, WaitForConfigCommitmentReturnsOKIfConfigIsCommitte
respondToAllHeartbeats();
// Do a first reconfig that should succeed since the current config is committed.
+ const auto opCtx = makeOperationContext();
configVersion = 3;
ASSERT_OK(doSafeReconfig(opCtx.get(), configVersion, Cb_members, 1 /* quorumHbs */));
@@ -1206,18 +1210,19 @@ TEST_F(ReplCoordReconfigTest,
// Start out in a non-initial config version.
init();
auto configVersion = 2;
- auto Ca_members = BSON_ARRAY(member(1, "n1:1"));
+ auto Ca_members = BSON_ARRAY(member(1, "n1:1") << member(2, "n2:1", 0));
auto Cb_members = BSON_ARRAY(member(1, "n1:1") << member(2, "n2:1"));
// Startup, simulate application of one oplog entry and get elected.
assertStartSuccess(configWithMembers(configVersion, 0, Ca_members), HostAndPort("n1", 1));
+ ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedAndDurableOpTime(OpTime(Timestamp(1, 1), 0));
- const auto opCtx = makeOperationContext();
- runSingleNodeElection(opCtx.get());
+ simulateSuccessfulV1Election();
ASSERT_EQ(getReplCoord()->getMemberState(), MemberState::RS_PRIMARY);
ASSERT_EQ(getReplCoord()->getTerm(), 1);
// Write and commit one new oplog entry, and consume any heartbeats.
+ const auto opCtx = makeOperationContext();
auto commitPoint = OpTime(Timestamp(2, 1), 1);
replCoordSetMyLastAppliedAndDurableOpTime(commitPoint);
ASSERT_EQ(getReplCoord()->getLastCommittedOpTime(), commitPoint);
@@ -1242,7 +1247,8 @@ TEST_F(ReplCoordReconfigTest,
// Start out in a non-initial config version.
init();
auto configVersion = 2;
- auto Ca_members = BSON_ARRAY(member(1, "n1:1") << member(2, "n2:1") << member(3, "n3:1"));
+ auto Ca_members = BSON_ARRAY(member(1, "n1:1")
+ << member(2, "n2:1") << member(3, "n3:1") << member(4, "n4:1", 0));
auto Cb_members = BSON_ARRAY(member(1, "n1:1")
<< member(2, "n2:1") << member(3, "n3:1") << member(4, "n4:1"));
@@ -1402,9 +1408,11 @@ TEST_F(ReplCoordReconfigTest, StepdownShouldInterruptConfigWrite) {
// Start out in a non-initial config version.
init();
auto configVersion = 2;
- assertStartSuccess(
- configWithMembers(configVersion, 0, BSON_ARRAY(member(1, "n1:1") << member(2, "n2:1"))),
- HostAndPort("n1", 1));
+ assertStartSuccess(configWithMembers(configVersion,
+ 0,
+ BSON_ARRAY(member(1, "n1:1")
+ << member(2, "n2:1") << member(3, "n3:1", 0))),
+ HostAndPort("n1", 1));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
// Simulate application of one oplog entry.
@@ -1490,11 +1498,6 @@ TEST_F(ReplCoordReconfigTest, StartElectionOnReconfigToSingleNode) {
}
TEST_F(ReplCoordReconfigTest, NewlyAddedFieldIsTrueForNewMembersInReconfig) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
setUpNewlyAddedFieldTest();
auto opCtx = makeOperationContext();
@@ -1520,11 +1523,6 @@ TEST_F(ReplCoordReconfigTest, NewlyAddedFieldIsTrueForNewMembersInReconfig) {
}
TEST_F(ReplCoordReconfigTest, NewlyAddedFieldIsNotPresentForNodesWithVotesZero) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
setUpNewlyAddedFieldTest();
auto opCtx = makeOperationContext();
@@ -1554,11 +1552,6 @@ TEST_F(ReplCoordReconfigTest, NewlyAddedFieldIsNotPresentForNodesWithVotesZero)
}
TEST_F(ReplCoordReconfigTest, NewlyAddedFieldIsNotPresentForNodesWithModifiedHostName) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
setUpNewlyAddedFieldTest();
auto opCtx = makeOperationContext();
@@ -1583,11 +1576,6 @@ TEST_F(ReplCoordReconfigTest, NewlyAddedFieldIsNotPresentForNodesWithModifiedHos
}
TEST_F(ReplCoordReconfigTest, NewlyAddedFieldIsNotPresentForNodesWithDifferentIndexButSameID) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
setUpNewlyAddedFieldTest();
auto opCtx = makeOperationContext();
@@ -1611,11 +1599,6 @@ TEST_F(ReplCoordReconfigTest, NewlyAddedFieldIsNotPresentForNodesWithDifferentIn
}
TEST_F(ReplCoordReconfigTest, ForceReconfigDoesNotPersistNewlyAddedFieldFromOldNodes) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
setUpNewlyAddedFieldTest();
auto opCtx = makeOperationContext();
@@ -1674,11 +1657,6 @@ TEST_F(ReplCoordReconfigTest, ForceReconfigDoesNotPersistNewlyAddedFieldFromOldN
}
TEST_F(ReplCoordReconfigTest, ForceReconfigDoesNotAppendNewlyAddedFieldToNewNodes) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
setUpNewlyAddedFieldTest();
auto opCtx = makeOperationContext();
@@ -1705,11 +1683,6 @@ TEST_F(ReplCoordReconfigTest, ForceReconfigDoesNotAppendNewlyAddedFieldToNewNode
}
TEST_F(ReplCoordReconfigTest, ForceReconfigFailsWhenNewlyAddedFieldIsSetToTrue) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
setUpNewlyAddedFieldTest();
auto opCtx = makeOperationContext();
@@ -1730,11 +1703,6 @@ TEST_F(ReplCoordReconfigTest, ForceReconfigFailsWhenNewlyAddedFieldIsSetToTrue)
}
TEST_F(ReplCoordReconfigTest, ForceReconfigFailsWhenNewlyAddedFieldSetToFalse) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
setUpNewlyAddedFieldTest();
auto opCtx = makeOperationContext();
@@ -1755,11 +1723,6 @@ TEST_F(ReplCoordReconfigTest, ForceReconfigFailsWhenNewlyAddedFieldSetToFalse) {
}
TEST_F(ReplCoordReconfigTest, ParseFailedIfUserProvidesNewlyAddedFieldDuringSafeReconfig) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
setUpNewlyAddedFieldTest();
auto opCtx = makeOperationContext();
@@ -1789,11 +1752,6 @@ TEST_F(ReplCoordReconfigTest, ParseFailedIfUserProvidesNewlyAddedFieldDuringSafe
}
TEST_F(ReplCoordReconfigTest, ReconfigNeverModifiesExistingNewlyAddedFieldForMember) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
setUpNewlyAddedFieldTest();
auto opCtx = makeOperationContext();
@@ -1840,11 +1798,6 @@ TEST_F(ReplCoordReconfigTest, ReconfigNeverModifiesExistingNewlyAddedFieldForMem
}
TEST_F(ReplCoordReconfigTest, ReconfigNeverModifiesExistingNewlyAddedFieldForPreviouslyAddedNodes) {
- // Set the flag to add the 'newlyAdded' field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
setUpNewlyAddedFieldTest();
auto opCtx = makeOperationContext();
@@ -1895,11 +1848,6 @@ TEST_F(ReplCoordReconfigTest, ReconfigNeverModifiesExistingNewlyAddedFieldForPre
}
TEST_F(ReplCoordReconfigTest, NodesWithNewlyAddedFieldSetAreTreatedAsVotesZero) {
- // Set the flag to add the `newlyAdded` field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
setUpNewlyAddedFieldTest();
auto opCtx = makeOperationContext();
@@ -1928,11 +1876,6 @@ TEST_F(ReplCoordReconfigTest, NodesWithNewlyAddedFieldSetAreTreatedAsVotesZero)
}
TEST_F(ReplCoordReconfigTest, NodesWithNewlyAddedFieldSetHavePriorityZero) {
- // Set the flag to add the `newlyAdded` field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
setUpNewlyAddedFieldTest();
auto opCtx = makeOperationContext();
@@ -1997,11 +1940,6 @@ TEST_F(ReplCoordReconfigTest, NodesWithNewlyAddedFieldSetHavePriorityZero) {
}
TEST_F(ReplCoordReconfigTest, ArbiterNodesShouldNeverHaveNewlyAddedField) {
- // Set the flag to add the `newlyAdded` field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
setUpNewlyAddedFieldTest();
auto opCtx = makeOperationContext();
@@ -2031,11 +1969,6 @@ TEST_F(ReplCoordReconfigTest, ArbiterNodesShouldNeverHaveNewlyAddedField) {
}
TEST_F(ReplCoordReconfigTest, ForceReconfigShouldThrowIfArbiterNodesHaveNewlyAddedField) {
- // Set the flag to add the `newlyAdded` field to MemberConfigs.
- enableAutomaticReconfig = true;
- // Set the flag back to false after this test exits.
- ON_BLOCK_EXIT([] { enableAutomaticReconfig = false; });
-
setUpNewlyAddedFieldTest();
auto opCtx = makeOperationContext();
diff --git a/src/mongo/db/repl/replication_coordinator_mock.cpp b/src/mongo/db/repl/replication_coordinator_mock.cpp
index a531cb41da2..be452167de0 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_mock.cpp
@@ -378,7 +378,8 @@ void ReplicationCoordinatorMock::setGetConfigReturnValue(ReplSetConfig returnVal
}
void ReplicationCoordinatorMock::processReplSetGetConfig(BSONObjBuilder* result,
- bool commitmentStatus) {
+ bool commitmentStatus,
+ bool includeNewlyAdded) {
// TODO
}
diff --git a/src/mongo/db/repl/replication_coordinator_mock.h b/src/mongo/db/repl/replication_coordinator_mock.h
index cafb302372a..ebd848e62d8 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_mock.h
@@ -195,7 +195,9 @@ public:
virtual ReplSetConfig getConfig() const;
- virtual void processReplSetGetConfig(BSONObjBuilder* result, bool commitmentStatus = false);
+ virtual void processReplSetGetConfig(BSONObjBuilder* result,
+ bool commitmentStatus = false,
+ bool includeNewlyAdded = false);
virtual void processReplSetMetadata(const rpc::ReplSetMetadata& replMetadata) override;
diff --git a/src/mongo/db/repl/replication_coordinator_noop.cpp b/src/mongo/db/repl/replication_coordinator_noop.cpp
index a85689d66e3..5d703311b4a 100644
--- a/src/mongo/db/repl/replication_coordinator_noop.cpp
+++ b/src/mongo/db/repl/replication_coordinator_noop.cpp
@@ -299,7 +299,8 @@ ReplSetConfig ReplicationCoordinatorNoOp::getConfig() const {
}
void ReplicationCoordinatorNoOp::processReplSetGetConfig(BSONObjBuilder* result,
- bool commitmentStatus) {
+ bool commitmentStatus,
+ bool includeNewlyAdded) {
MONGO_UNREACHABLE;
}
diff --git a/src/mongo/db/repl/replication_coordinator_noop.h b/src/mongo/db/repl/replication_coordinator_noop.h
index cef32c185e2..fb3a5ec64d2 100644
--- a/src/mongo/db/repl/replication_coordinator_noop.h
+++ b/src/mongo/db/repl/replication_coordinator_noop.h
@@ -173,7 +173,9 @@ public:
ReplSetConfig getConfig() const final;
- void processReplSetGetConfig(BSONObjBuilder*, bool commitmentStatus = false) final;
+ void processReplSetGetConfig(BSONObjBuilder*,
+ bool commitmentStatus = false,
+ bool includeNewlyAdded = false) final;
void processReplSetMetadata(const rpc::ReplSetMetadata&) final;
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.h b/src/mongo/db/repl/replication_coordinator_test_fixture.h
index e74e89bc6ee..a29fecdaf70 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.h
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.h
@@ -79,8 +79,8 @@ public:
/**
* Helpers to construct a config.
*/
- static BSONObj member(int id, std::string host) {
- return BSON("_id" << id << "host" << host);
+ static BSONObj member(int id, std::string host, int votes = 1) {
+ return BSON("_id" << id << "host" << host << "votes" << votes << "priority" << votes);
}
static BSONObj configWithMembers(int version, long long term, BSONArray members) {
diff --git a/src/mongo/embedded/replication_coordinator_embedded.cpp b/src/mongo/embedded/replication_coordinator_embedded.cpp
index 9d08d1519de..fd12817508c 100644
--- a/src/mongo/embedded/replication_coordinator_embedded.cpp
+++ b/src/mongo/embedded/replication_coordinator_embedded.cpp
@@ -324,7 +324,8 @@ ReplSetConfig ReplicationCoordinatorEmbedded::getConfig() const {
}
void ReplicationCoordinatorEmbedded::processReplSetGetConfig(BSONObjBuilder*,
- bool commitmentStatus) {
+ bool commitmentStatus,
+ bool includeNewlyAdded) {
UASSERT_NOT_IMPLEMENTED;
}
diff --git a/src/mongo/embedded/replication_coordinator_embedded.h b/src/mongo/embedded/replication_coordinator_embedded.h
index 4201903d3a0..e7a2de3b9a8 100644
--- a/src/mongo/embedded/replication_coordinator_embedded.h
+++ b/src/mongo/embedded/replication_coordinator_embedded.h
@@ -179,7 +179,9 @@ public:
repl::ReplSetConfig getConfig() const override;
- void processReplSetGetConfig(BSONObjBuilder*, bool commitmentStatus = false) override;
+ void processReplSetGetConfig(BSONObjBuilder*,
+ bool commitmentStatus = false,
+ bool includeNewlyAdded = false) override;
void processReplSetMetadata(const rpc::ReplSetMetadata&) override;
diff --git a/src/mongo/shell/assert.js b/src/mongo/shell/assert.js
index f50ba594a66..b9ba9fdc7d2 100644
--- a/src/mongo/shell/assert.js
+++ b/src/mongo/shell/assert.js
@@ -419,7 +419,7 @@ assert = (function() {
// Used up all attempts
msg = _buildAssertionMessage(msg);
if (runHangAnalyzer) {
- msg = msg + "The hang analyzer is automatically called in assert.retry functions. " +
+ msg = msg + " The hang analyzer is automatically called in assert.retry functions. " +
"If you are *expecting* assert.soon to possibly fail, call assert.retry " +
"with {runHangAnalyzer: false} as the fifth argument " +
"(you can fill unused arguments with `undefined`).";
@@ -481,7 +481,8 @@ assert = (function() {
"assert.time failed timeout " + timeout + "ms took " + diff + "ms : " + f + ", msg";
msg = _buildAssertionMessage(msg, msgPrefix);
if (runHangAnalyzer) {
- msg = msg + "The hang analyzer is automatically called in assert.time functions. " +
+ msg = msg +
+ " The hang analyzer is automatically called in assert.time functions. " +
"If you are *expecting* assert.soon to possibly fail, call assert.time " +
"with {runHangAnalyzer: false} as the fourth argument " +
"(you can fill unused arguments with `undefined`).";
diff --git a/src/mongo/shell/replsettest.js b/src/mongo/shell/replsettest.js
index 7fa584c6008..d9f2aa0b650 100644
--- a/src/mongo/shell/replsettest.js
+++ b/src/mongo/shell/replsettest.js
@@ -1146,7 +1146,7 @@ var ReplSetTest = function(opts) {
};
function replSetCommandWithRetry(master, cmd) {
- printjson(cmd);
+ print("Running command with retry: " + tojson(cmd));
const cmdName = Object.keys(cmd)[0];
const errorMsg = `${cmdName} during initiate failed`;
assert.retry(() => {
@@ -1155,7 +1155,9 @@ var ReplSetTest = function(opts) {
[
ErrorCodes.NodeNotFound,
ErrorCodes.NewReplicaSetConfigurationIncompatible,
- ErrorCodes.InterruptedDueToReplStateChange
+ ErrorCodes.InterruptedDueToReplStateChange,
+ ErrorCodes.ConfigurationInProgress,
+ ErrorCodes.CurrentConfigNotCommittedYet
],
errorMsg);
return result.ok;
@@ -1163,6 +1165,90 @@ var ReplSetTest = function(opts) {
}
/**
+     * Wait until the config on the primary has replicated to the specified nodes (or to all
+     * nodes if none are given). Callers specify the primary in case this must be called while
+     * two nodes are expected to be concurrently primary.
+ */
+ this.waitForConfigReplication = function(primary, nodes) {
+ const nodeHosts = nodes ? tojson(nodes.map((n) => n.host)) : "all nodes";
+ print("waitForConfigReplication: Waiting for the config on " + primary.host +
+ " to replicate to " + nodeHosts);
+
+ let configVersion = -2;
+ let configTerm = -2;
+ assert.soon(function() {
+ const res = assert.commandWorked(primary.adminCommand({replSetGetStatus: 1}));
+ const primaryMember = res.members.find((m) => m.self);
+ configVersion = primaryMember.configVersion;
+ configTerm = primaryMember.configTerm;
+ function hasSameConfig(member) {
+ return member.configVersion === primaryMember.configVersion &&
+ member.configTerm === primaryMember.configTerm;
+ }
+ let members = res.members;
+ if (nodes) {
+ members = res.members.filter((m) => nodes.some((node) => m.name === node.host));
+ }
+ return members.every((m) => hasSameConfig(m));
+ });
+
+ print("waitForConfigReplication: config on " + primary.host +
+ " replicated successfully to " + nodeHosts + " with version " + configVersion +
+ " and term " + configTerm);
+ };
+
+ /**
+ * Waits for all 'newlyAdded' fields to be removed, for that config to be committed, and for
+ * the in-memory and on-disk configs to match.
+ */
+ this.waitForAllNewlyAddedRemovals = function(timeout) {
+ timeout = timeout || self.kDefaultTimeoutMS;
+ print("waitForAllNewlyAddedRemovals: starting for set " + self.name);
+ const primary = self.getPrimary();
+
+ // Shadow 'db' so that we can call the function on the primary without a separate shell when
+ // x509 auth is not needed.
+ let db = primary.getDB('admin');
+ runFnWithAuthOnPrimary(function() {
+ assert.soon(function() {
+ const getConfigRes = assert.commandWorked(db.adminCommand({
+ replSetGetConfig: 1,
+ commitmentStatus: true,
+ $_internalIncludeNewlyAdded: true
+ }));
+ const config = getConfigRes.config;
+ for (let i = 0; i < config.members.length; i++) {
+ const memberConfig = config.members[i];
+ if (memberConfig.hasOwnProperty("newlyAdded")) {
+ assert(memberConfig["newlyAdded"] === true, config);
+ print("waitForAllNewlyAddedRemovals: Retrying because memberIndex " + i +
+ " is still 'newlyAdded'");
+ return false;
+ }
+ }
+ if (!getConfigRes.hasOwnProperty("commitmentStatus")) {
+ print(
+ "waitForAllNewlyAddedRemovals: Skipping wait due to no commitmentStatus." +
+ " Assuming this is an older version.");
+ return true;
+ }
+
+ if (!getConfigRes.commitmentStatus) {
+ print("waitForAllNewlyAddedRemovals: " +
+ "Retrying because primary's config isn't committed. " +
+ "Version: " + config.version + ", Term: " + config.term);
+ return false;
+ }
+
+ return true;
+ });
+ }, "waitForAllNewlyAddedRemovals");
+
+ self.waitForConfigReplication(primary);
+
+ print("waitForAllNewlyAddedRemovals: finished for set " + this.name);
+ };
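For illustration, how a test might use the two new ReplSetTest helpers once initiation has completed (assuming an initiated ReplSetTest named 'rst'; not part of this patch):

    // Block until no member carries a 'newlyAdded' field and that config is committed.
    rst.waitForAllNewlyAddedRemovals();

    // Then confirm the committed config has reached every node, or just a subset.
    const primary = rst.getPrimary();
    rst.waitForConfigReplication(primary);
    rst.waitForConfigReplication(primary, rst.getSecondaries());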
+
+ /**
* Runs replSetInitiate on the first node of the replica set.
* Ensures that a primary is elected (not necessarily node 0).
* initiate() should be preferred instead of this, but this is useful when the connections
@@ -1171,7 +1257,8 @@ var ReplSetTest = function(opts) {
*/
this.initiateWithAnyNodeAsPrimary = function(cfg, initCmd, {
doNotWaitForStableRecoveryTimestamp: doNotWaitForStableRecoveryTimestamp = false,
- doNotWaitForReplication: doNotWaitForReplication = false
+ doNotWaitForReplication: doNotWaitForReplication = false,
+ doNotWaitForNewlyAddedRemovals: doNotWaitForNewlyAddedRemovals = false
} = {}) {
let startTime = new Date(); // Measure the execution time of this function.
var master = this.nodes[0].getDB("admin");
@@ -1357,11 +1444,30 @@ var ReplSetTest = function(opts) {
print("Reconfiguring replica set to add in other nodes");
for (let i = 2; i <= originalMembers.length; i++) {
print("ReplSetTest adding in node " + i);
- config.members = originalMembers.slice(0, i);
- // Set a maxTimeMS so reconfig fails if it times out.
- replSetCommandWithRetry(
- master, {replSetReconfig: config, maxTimeMS: ReplSetTest.kDefaultTimeoutMS});
- config.version++;
+ assert.soon(function() {
+ const statusRes =
+ assert.commandWorked(master.adminCommand({replSetGetStatus: 1}));
+ const primaryMember = statusRes.members.find((m) => m.self);
+ config.version = primaryMember.configVersion + 1;
+
+ config.members = originalMembers.slice(0, i);
+                const cmd = {replSetReconfig: config, maxTimeMS: ReplSetTest.kDefaultTimeoutMS};
+ print("Running reconfig command: " + tojsononeline(cmd));
+ const reconfigRes = master.adminCommand(cmd);
+ const retryableReconfigCodes = [
+ ErrorCodes.NodeNotFound,
+ ErrorCodes.NewReplicaSetConfigurationIncompatible,
+ ErrorCodes.InterruptedDueToReplStateChange,
+ ErrorCodes.ConfigurationInProgress,
+ ErrorCodes.CurrentConfigNotCommittedYet
+ ];
+ if (retryableReconfigCodes.includes(reconfigRes.code)) {
+ print("Retrying reconfig due to " + tojsononeline(reconfigRes));
+ return false;
+ }
+ assert.commandWorked(reconfigRes);
+ return true;
+ }, "reconfig for fixture set up failed", ReplSetTest.kDefaultTimeoutMS, 1000);
}
}
@@ -1375,6 +1481,13 @@ var ReplSetTest = function(opts) {
// detect initial sync completion more quickly.
this.awaitSecondaryNodes(
self.kDefaultTimeoutMS, null /* slaves */, 25 /* retryIntervalMS */);
+
+ // If test commands are not enabled, we cannot wait for 'newlyAdded' removals. Tests that
+ // disable test commands must ensure 'newlyAdded' removals mid-test are acceptable.
+ if (!doNotWaitForNewlyAddedRemovals && jsTest.options().enableTestCommands) {
+ self.waitForAllNewlyAddedRemovals();
+ }
+
print("ReplSetTest initiate reconfig and awaitSecondaryNodes took " +
(new Date() - reconfigStart) + "ms for " + this.nodes.length + " nodes in set '" +
this.name + "'");
@@ -1633,6 +1746,41 @@ var ReplSetTest = function(opts) {
return masterOpTime;
};
+ // TODO(SERVER-14017): Remove this extra sub-shell in favor of a cleaner authentication
+ // solution.
+ function runFnWithAuthOnPrimary(fn, fnName) {
+ const primary = self.getPrimary();
+ const primaryId = "n" + self.getNodeId(primary);
+ const primaryOptions = self.nodeOptions[primaryId] || {};
+ const options =
+ (primaryOptions === {} || !self.startOptions) ? primaryOptions : self.startOptions;
+ const authMode = options.clusterAuthMode;
+ if (authMode === "x509") {
+ print(fnName + ": authenticating on separate shell with x509 for " + self.name);
+ const caFile = options.sslCAFile ? options.sslCAFile : options.tlsCAFile;
+ const keyFile =
+ options.sslPEMKeyFile ? options.sslPEMKeyFile : options.tlsCertificateKeyFile;
+ const subShellArgs = [
+ 'mongo',
+ '--ssl',
+ '--sslCAFile=' + caFile,
+ '--sslPEMKeyFile=' + keyFile,
+ '--sslAllowInvalidHostnames',
+ '--authenticationDatabase=$external',
+ '--authenticationMechanism=MONGODB-X509',
+ primary.host,
+ '--eval',
+ `(${fn.toString()})();`
+ ];
+
+ const retVal = _runMongoProgram(...subShellArgs);
+ assert.eq(retVal, 0, 'mongo shell did not succeed with exit code 0');
+ } else {
+ print(fnName + ": authenticating with authMode '" + authMode + "' for " + self.name);
+ asCluster(primary, fn, primaryOptions.keyFile);
+ }
+ }
+
/**
* This function performs some writes and then waits for all nodes in this replica set to
* establish a stable recovery timestamp. The writes are necessary to prompt storage engines to
@@ -1654,7 +1802,8 @@ var ReplSetTest = function(opts) {
// propagate to all members and trigger a stable checkpoint on all persisted storage engines
// nodes.
function advanceCommitPoint(master) {
- // Shadow 'db' so that we can call 'advanceCommitPoint' directly on the primary node.
+ // Shadow 'db' so that we can call the function on the primary without a separate shell
+ // when x509 auth is not needed.
let db = master.getDB('admin');
const appendOplogNoteFn = function() {
assert.commandWorked(db.adminCommand({
@@ -1664,35 +1813,7 @@ var ReplSetTest = function(opts) {
}));
};
- // TODO(SERVER-14017): Remove this extra sub-shell in favor of a cleaner authentication
- // solution.
- const masterId = "n" + rst.getNodeId(master);
- const masterOptions = rst.nodeOptions[masterId] || {};
- if (masterOptions.clusterAuthMode === "x509") {
- print("AwaitLastStableRecoveryTimestamp: authenticating on separate shell " +
- "with x509 for " + id);
- const subShellArgs = [
- 'mongo',
- '--ssl',
- '--sslCAFile=' + masterOptions.sslCAFile,
- '--sslPEMKeyFile=' + masterOptions.sslPEMKeyFile,
- '--sslAllowInvalidHostnames',
- '--authenticationDatabase=$external',
- '--authenticationMechanism=MONGODB-X509',
- master.host,
- '--eval',
- `(${appendOplogNoteFn.toString()})();`
- ];
-
- const retVal = _runMongoProgram(...subShellArgs);
- assert.eq(retVal, 0, 'mongo shell did not succeed with exit code 0');
- } else {
- if (masterOptions.clusterAuthMode) {
- print("AwaitLastStableRecoveryTimestamp: authenticating with " +
- masterOptions.clusterAuthMode + " for " + id);
- }
- asCluster(master, appendOplogNoteFn, masterOptions.keyFile);
- }
+ runFnWithAuthOnPrimary(appendOplogNoteFn, "AwaitLastStableRecoveryTimestamp");
}
print("AwaitLastStableRecoveryTimestamp: Beginning for " + id);