author     Ali Mir <ali.mir@mongodb.com>  2020-09-16 19:33:30 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-09-28 18:05:55 +0000
commit     93cff71bfc5b28d0724e9c19f52041f249fb854c (patch)
tree       ea3de0eecbf90d8f84db5dfe71d5c4138c0224f4
parent     f509116d2db0aa550d05c77f385402cd0d1406b4 (diff)
download   mongo-93cff71bfc5b28d0724e9c19f52041f249fb854c.tar.gz
SERVER-50651 Replace occurrences of isMaster command with hello in replsets jstests
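For orientation: hello is the modern name for the legacy isMaster/ismaster command, and its reply reports isWritablePrimary where the legacy reply reported ismaster, which is why the assertions below switch field names as well as command names. A minimal shell sketch of that equivalence, illustrative only and not part of the patch:

// Run from the mongo shell against any replica set member.
const legacyRes = db.runCommand({isMaster: 1});  // legacy reply exposes 'ismaster'
const helloRes = db.runCommand({hello: 1});      // hello reply exposes 'isWritablePrimary'
assert.eq(legacyRes.ismaster, helloRes.isWritablePrimary);
assert(helloRes.hasOwnProperty("topologyVersion"));  // both replies include a topologyVersion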
-rw-r--r--  jstests/replsets/apply_batch_only_goes_forward.js  2
-rw-r--r--  jstests/replsets/awaitable_ismaster_errors_on_horizon_change.js  180
-rw-r--r--  jstests/replsets/awaitable_ismaster_fcv_change.js  104
-rw-r--r--  jstests/replsets/awaitable_ismaster_metrics_on_state_change.js  98
-rw-r--r--  jstests/replsets/awaitable_ismaster_on_nodes_with_invalid_configs.js  76
-rw-r--r--  jstests/replsets/awaitable_ismaster_stepdown_stepup.js  78
-rw-r--r--  jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js  2
-rw-r--r--  jstests/replsets/drain.js  4
-rw-r--r--  jstests/replsets/linearizable_read_concern.js  4
-rw-r--r--  jstests/replsets/localhost1.js  2
-rw-r--r--  jstests/replsets/localhost2.js  2
-rw-r--r--  jstests/replsets/localhost3.js  2
-rw-r--r--  jstests/replsets/maintenance.js  14
-rw-r--r--  jstests/replsets/maintenance_non-blocking.js  12
-rw-r--r--  jstests/replsets/minimum_visible_with_cluster_time.js  2
-rw-r--r--  jstests/replsets/not_master_unacknowledged_write.js  12
-rw-r--r--  jstests/replsets/quiesce_mode.js  44
-rw-r--r--  jstests/replsets/read_committed_after_rollback.js  7
-rw-r--r--  jstests/replsets/read_committed_stale_history.js  2
-rw-r--r--  jstests/replsets/read_concern_uninitated_set.js  2
-rw-r--r--  jstests/replsets/reconfig_add_remove_arbiter.js  2
-rw-r--r--  jstests/replsets/reconfig_avoids_diverging_configs.js  2
-rw-r--r--  jstests/replsets/reconfig_tags.js  8
-rw-r--r--  jstests/replsets/remove1.js  30
-rw-r--r--  jstests/replsets/replset1.js  4
-rw-r--r--  jstests/replsets/replset4.js  4
-rw-r--r--  jstests/replsets/replsetarb2.js  2
-rw-r--r--  jstests/replsets/replsetfreeze.js  48
-rw-r--r--  jstests/replsets/rollback_auth.js  20
-rw-r--r--  jstests/replsets/rollback_crud_op_sequences.js  20
-rw-r--r--  jstests/replsets/rollback_ddl_op_sequences.js  18
-rw-r--r--  jstests/replsets/slavedelay1.js  2
-rw-r--r--  jstests/replsets/split_horizon_hostname_validation.js  2
-rw-r--r--  jstests/replsets/step_down_during_draining.js  4
-rw-r--r--  jstests/replsets/step_down_during_draining2.js  12
-rw-r--r--  jstests/replsets/step_down_during_draining3.js  6
-rw-r--r--  jstests/replsets/stepdown.js  10
-rw-r--r--  jstests/replsets/temp_namespace.js  73
-rw-r--r--  jstests/replsets/unconditional_step_down.js  2
-rw-r--r--  jstests/replsets/write_concern_after_stepdown.js  6
-rw-r--r--  jstests/replsets/write_concern_after_stepdown_and_stepup.js  6
41 files changed, 469 insertions, 461 deletions
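Most of the rewritten tests below exercise the awaitable form of the command: after an initial reply, the client resends hello (or one of its aliases) with the returned topologyVersion and a maxAwaitTimeMS, and the server holds the request until a topology change occurs or the timeout elapses. A condensed sketch of that pattern as it appears in these tests, assuming a connected shell db; illustrative only, not part of the patch:

// Prime with a plain hello to capture the current topologyVersion.
const first = assert.commandWorked(db.runCommand({hello: 1}));
// Resend with the captured topologyVersion; the server parks the request until
// a topology change happens or maxAwaitTimeMS elapses.
const next = assert.commandWorked(db.runCommand({
    hello: 1,
    topologyVersion: first.topologyVersion,
    maxAwaitTimeMS: 99999999,
}));
// After a topology change, the counter advances by one.
assert.eq(first.topologyVersion.counter + 1, next.topologyVersion.counter);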
diff --git a/jstests/replsets/apply_batch_only_goes_forward.js b/jstests/replsets/apply_batch_only_goes_forward.js
index 9f7faae5b08..b7b1e0b81da 100644
--- a/jstests/replsets/apply_batch_only_goes_forward.js
+++ b/jstests/replsets/apply_batch_only_goes_forward.js
@@ -73,7 +73,7 @@ jsTest.log('Restarting primary ' + master.host +
'Secondary ' + slave.host + ' will become new primary.');
clearRawMongoProgramOutput();
replTest.restart(master);
-printjson(sLocal.adminCommand("isMaster"));
+printjson(sLocal.adminCommand("hello"));
replTest.waitForState(master, ReplSetTest.State.RECOVERING);
replTest.awaitNodesAgreeOnPrimary();
diff --git a/jstests/replsets/awaitable_ismaster_errors_on_horizon_change.js b/jstests/replsets/awaitable_ismaster_errors_on_horizon_change.js
index e2ac2eda372..6189ef0ca04 100644
--- a/jstests/replsets/awaitable_ismaster_errors_on_horizon_change.js
+++ b/jstests/replsets/awaitable_ismaster_errors_on_horizon_change.js
@@ -1,6 +1,6 @@
/**
* Tests that doing a reconfig that changes the SplitHorizon will cause the server to disconnect
- * from clients with waiting isMaster requests.
+ * from clients with waiting hello/isMaster requests.
*/
(function() {
"use strict";
@@ -14,15 +14,15 @@ const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}
replTest.startSet();
replTest.initiate();
-const dbName = "awaitable_ismaster_horizon_change";
+const dbName = "awaitable_command_horizon_change";
const primary = replTest.getPrimary();
const primaryDB = primary.getDB(dbName);
const secondary = replTest.getSecondary();
const secondaryDB = secondary.getDB(dbName);
-function runAwaitableIsMasterBeforeHorizonChange(topologyVersionField) {
+function runAwaitableCmdBeforeHorizonChange(cmd, topologyVersionField) {
let res = assert.throws(() => db.runCommand({
- isMaster: 1,
+ [cmd]: 1,
topologyVersion: topologyVersionField,
maxAwaitTimeMS: 99999999,
}));
@@ -32,95 +32,105 @@ function runAwaitableIsMasterBeforeHorizonChange(topologyVersionField) {
assert.commandWorked(db.adminCommand({ping: 1}));
}
-function runAwaitableIsMaster(topologyVersionField) {
+function runAwaitableCmd(cmd, topologyVersionField) {
const result = assert.commandWorked(db.runCommand({
- isMaster: 1,
+ [cmd]: 1,
topologyVersion: topologyVersionField,
maxAwaitTimeMS: 99999999,
}));
assert.eq(topologyVersionField.counter + 1, result.topologyVersion.counter);
}
-const primaryFirstResponse = assert.commandWorked(primaryDB.runCommand({isMaster: 1}));
-const primaryTopologyVersion = primaryFirstResponse.topologyVersion;
-
-const secondaryFirstResponse = assert.commandWorked(secondaryDB.runCommand({isMaster: 1}));
-const secondaryTopologyVersion = secondaryFirstResponse.topologyVersion;
-
-// A failpoint signalling that the server has received the isMaster request and is waiting for a
-// topology change.
-let primaryFailPoint = configureFailPoint(primary, "waitForIsMasterResponse");
-let secondaryFailPoint = configureFailPoint(secondary, "waitForIsMasterResponse");
-// Send an awaitable isMaster request. This will block until there is a topology change.
-const awaitIsMasterHorizonChangeOnPrimary = startParallelShell(
- funWithArgs(runAwaitableIsMasterBeforeHorizonChange, primaryTopologyVersion), primary.port);
-const awaitIsMasterHorizonChangeOnSecondary = startParallelShell(
- funWithArgs(runAwaitableIsMasterBeforeHorizonChange, secondaryTopologyVersion), secondary.port);
-primaryFailPoint.wait();
-secondaryFailPoint.wait();
-
-// Each node has one isMaster request waiting on a topology change.
-let numAwaitingTopologyChangeOnPrimary =
- primaryDB.serverStatus().connections.awaitingTopologyChanges;
-let numAwaitingTopologyChangeOnSecondary =
- secondaryDB.serverStatus().connections.awaitingTopologyChanges;
-assert.eq(1, numAwaitingTopologyChangeOnPrimary);
-assert.eq(1, numAwaitingTopologyChangeOnSecondary);
-
-// Doing a reconfig that changes the horizon should respond to all waiting isMasters with an error.
-let rsConfig = primary.getDB("local").system.replset.findOne();
let idx = 0;
-rsConfig.members.forEach(function(member) {
- member.horizons = {specialHorizon: 'horizon.com:100' + idx};
- idx++;
-});
-rsConfig.version++;
-
-jsTest.log('Calling replSetReconfig with config: ' + tojson(rsConfig));
-assert.commandWorked(primary.adminCommand({replSetReconfig: rsConfig}));
-awaitIsMasterHorizonChangeOnPrimary();
-awaitIsMasterHorizonChangeOnSecondary();
-
-// All isMaster requests should have been responded to after the reconfig.
-numAwaitingTopologyChangeOnPrimary = primaryDB.serverStatus().connections.awaitingTopologyChanges;
-numAwaitingTopologyChangeOnSecondary =
- secondaryDB.serverStatus().connections.awaitingTopologyChanges;
-assert.eq(0, numAwaitingTopologyChangeOnPrimary);
-assert.eq(0, numAwaitingTopologyChangeOnSecondary);
-
-const primaryRespAfterHorizonChange = assert.commandWorked(primaryDB.runCommand({isMaster: 1}));
-const secondaryRespAfterHorizonChange = assert.commandWorked(secondaryDB.runCommand({isMaster: 1}));
-const primaryTopVersionAfterHorizonChange = primaryRespAfterHorizonChange.topologyVersion;
-const secondaryTopVersionAfterHorizonChange = secondaryRespAfterHorizonChange.topologyVersion;
-
-// Doing a reconfig that doesn't change the horizon should increment the topologyVersion and reply
-// to waiting isMasters with a successful response.
-rsConfig = primary.getDB("local").system.replset.findOne();
-rsConfig.members.forEach(function(member) {
- if (member.host == primary.host) {
- member.tags = {dc: 'ny'};
- } else {
- member.tags = {dc: 'sf'};
- }
-});
-rsConfig.version++;
-
-// Reconfigure the failpoint to refresh the number of times the failpoint has been entered.
-primaryFailPoint = configureFailPoint(primary, "waitForIsMasterResponse");
-secondaryFailPoint = configureFailPoint(primary, "waitForIsMasterResponse");
-// Send an awaitable isMaster request. This will block until maxAwaitTimeMS has elapsed or a
-// topology change happens.
-let primaryAwaitIsMasterBeforeAddingTags = startParallelShell(
- funWithArgs(runAwaitableIsMaster, primaryTopVersionAfterHorizonChange), primary.port);
-let secondaryAaitIsMasterBeforeAddingTags = startParallelShell(
- funWithArgs(runAwaitableIsMaster, secondaryTopVersionAfterHorizonChange), secondary.port);
-primaryFailPoint.wait();
-secondaryFailPoint.wait();
-
-jsTest.log('Calling replSetReconfig with config: ' + tojson(rsConfig));
-assert.commandWorked(primary.adminCommand({replSetReconfig: rsConfig}));
-primaryAwaitIsMasterBeforeAddingTags();
-secondaryAaitIsMasterBeforeAddingTags();
+// runTest takes in the hello command or its aliases, isMaster and ismaster.
+function runTest(cmd) {
+ const primaryFirstResponse = assert.commandWorked(primaryDB.runCommand({[cmd]: 1}));
+ const primaryTopologyVersion = primaryFirstResponse.topologyVersion;
+
+ const secondaryFirstResponse = assert.commandWorked(secondaryDB.runCommand({[cmd]: 1}));
+ const secondaryTopologyVersion = secondaryFirstResponse.topologyVersion;
+
+ // A failpoint signalling that the server has received the hello/isMaster request and is waiting
+ // for a topology change.
+ let primaryFailPoint = configureFailPoint(primary, "waitForIsMasterResponse");
+ let secondaryFailPoint = configureFailPoint(secondary, "waitForIsMasterResponse");
+ // Send an awaitable hello/isMaster request. This will block until there is a topology change.
+ const awaitCmdHorizonChangeOnPrimary = startParallelShell(
+ funWithArgs(runAwaitableCmdBeforeHorizonChange, cmd, primaryTopologyVersion), primary.port);
+ const awaitCmdHorizonChangeOnSecondary = startParallelShell(
+ funWithArgs(runAwaitableCmdBeforeHorizonChange, cmd, secondaryTopologyVersion),
+ secondary.port);
+ primaryFailPoint.wait();
+ secondaryFailPoint.wait();
+
+ // Each node has one hello/isMaster request waiting on a topology change.
+ let numAwaitingTopologyChangeOnPrimary =
+ primaryDB.serverStatus().connections.awaitingTopologyChanges;
+ let numAwaitingTopologyChangeOnSecondary =
+ secondaryDB.serverStatus().connections.awaitingTopologyChanges;
+ assert.eq(1, numAwaitingTopologyChangeOnPrimary);
+ assert.eq(1, numAwaitingTopologyChangeOnSecondary);
+
+ // Doing a reconfig that changes the horizon should respond to all waiting hello/isMaster
+ // requests with an error.
+ let rsConfig = primary.getDB("local").system.replset.findOne();
+ rsConfig.members.forEach(function(member) {
+ member.horizons = {specialHorizon: 'horizon.com:100' + idx};
+ idx++;
+ });
+ rsConfig.version++;
+
+ jsTest.log('Calling replSetReconfig with config: ' + tojson(rsConfig));
+ assert.commandWorked(primary.adminCommand({replSetReconfig: rsConfig}));
+ awaitCmdHorizonChangeOnPrimary();
+ awaitCmdHorizonChangeOnSecondary();
+
+ // All hello/isMaster requests should have been responded to after the reconfig.
+ numAwaitingTopologyChangeOnPrimary =
+ primaryDB.serverStatus().connections.awaitingTopologyChanges;
+ numAwaitingTopologyChangeOnSecondary =
+ secondaryDB.serverStatus().connections.awaitingTopologyChanges;
+ assert.eq(0, numAwaitingTopologyChangeOnPrimary);
+ assert.eq(0, numAwaitingTopologyChangeOnSecondary);
+
+ const primaryRespAfterHorizonChange = assert.commandWorked(primaryDB.runCommand({[cmd]: 1}));
+ const secondaryRespAfterHorizonChange =
+ assert.commandWorked(secondaryDB.runCommand({[cmd]: 1}));
+ const primaryTopVersionAfterHorizonChange = primaryRespAfterHorizonChange.topologyVersion;
+ const secondaryTopVersionAfterHorizonChange = secondaryRespAfterHorizonChange.topologyVersion;
+
+ // Doing a reconfig that doesn't change the horizon should increment the topologyVersion and
+ // reply to waiting hello/isMaster requests with a successful response.
+ rsConfig = primary.getDB("local").system.replset.findOne();
+ rsConfig.members.forEach(function(member) {
+ if (member.host == primary.host) {
+ member.tags = {dc: 'ny'};
+ } else {
+ member.tags = {dc: 'sf'};
+ }
+ });
+ rsConfig.version++;
+
+ // Reconfigure the failpoint to refresh the number of times the failpoint has been entered.
+ primaryFailPoint = configureFailPoint(primary, "waitForIsMasterResponse");
+ secondaryFailPoint = configureFailPoint(primary, "waitForIsMasterResponse");
+ // Send an awaitable hello/isMaster request. This will block until maxAwaitTimeMS has elapsed or
+ // a topology change happens.
+ let primaryAwaitCmdBeforeAddingTags = startParallelShell(
+ funWithArgs(runAwaitableCmd, cmd, primaryTopVersionAfterHorizonChange), primary.port);
+ let secondaryAwaitCmdBeforeAddingTags = startParallelShell(
+ funWithArgs(runAwaitableCmd, cmd, secondaryTopVersionAfterHorizonChange), secondary.port);
+ primaryFailPoint.wait();
+ secondaryFailPoint.wait();
+
+ jsTest.log('Calling replSetReconfig with config: ' + tojson(rsConfig));
+ assert.commandWorked(primary.adminCommand({replSetReconfig: rsConfig}));
+ primaryAwaitCmdBeforeAddingTags();
+ secondaryAwaitCmdBeforeAddingTags();
+}
+runTest("hello");
+runTest("isMaster");
+runTest("ismaster");
replTest.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/awaitable_ismaster_fcv_change.js b/jstests/replsets/awaitable_ismaster_fcv_change.js
index df2175a93fb..39e0e0ad64d 100644
--- a/jstests/replsets/awaitable_ismaster_fcv_change.js
+++ b/jstests/replsets/awaitable_ismaster_fcv_change.js
@@ -1,5 +1,5 @@
/**
- * This tests that upgrading and downgrading FCV will unblock and reply to waiting isMaster
+ * This tests that upgrading and downgrading FCV will unblock and reply to waiting hello
* requests.
*
* @tags: [multiversion_incompatible]
@@ -19,11 +19,11 @@ const secondary = rst.getSecondary();
const primaryAdminDB = primary.getDB("admin");
const secondaryAdminDB = secondary.getDB("admin");
-function runAwaitableIsMasterBeforeFCVChange(
+function runAwaitableHelloBeforeFCVChange(
topologyVersionField, targetFCV, isPrimary, prevMinWireVersion, serverMaxWireVersion) {
db.getMongo().setSecondaryOk();
let response = assert.commandWorked(db.runCommand({
- isMaster: 1,
+ hello: 1,
topologyVersion: topologyVersionField,
maxAwaitTimeMS: 99999999,
internalClient:
@@ -35,10 +35,10 @@ function runAwaitableIsMasterBeforeFCVChange(
// 1. Setting featureCompatibilityVersion from downgrading to fullyDowngraded.
// 2. Setting featureCompatibilityVersion from fullyDowngraded to upgrading.
assert.eq(topologyVersionField.counter + 1, response.topologyVersion.counter, response);
- const expectedIsMasterValue = isPrimary;
+ const expectedHelloValue = isPrimary;
const expectedSecondaryValue = !isPrimary;
- assert.eq(expectedIsMasterValue, response.ismaster, response);
+ assert.eq(expectedHelloValue, response.isWritablePrimary, response);
assert.eq(expectedSecondaryValue, response.secondary, response);
const minWireVersion = response.minWireVersion;
@@ -57,23 +57,23 @@ function runAwaitableIsMasterBeforeFCVChange(
function runTest(downgradeFCV) {
jsTestLog("Running test with downgradeFCV: " + downgradeFCV);
- // This test manually runs isMaster with the 'internalClient' field, which means that to the
+ // This test manually runs hello with the 'internalClient' field, which means that to the
// mongod, the connection appears to be from another server. This makes mongod to return an
- // isMaster response with a real 'minWireVersion' for internal clients instead of 0.
+ // hello response with a real 'minWireVersion' for internal clients instead of 0.
//
- // The value of 'internalClient.maxWireVersion' in the isMaster command does not matter for the
- // purpose of this test and the isMaster command will succeed regardless because this is run
+ // The value of 'internalClient.maxWireVersion' in the hello command does not matter for the
+ // purpose of this test and the hello command will succeed regardless because this is run
// through the shell and the shell is always compatible talking to the server. In reality
// though, a real internal client with a lower binary version is expected to hang up immediately
- // after receiving the response to the isMaster command from a latest server with an upgraded
+ // after receiving the response to the hello command from a latest server with an upgraded
// FCV.
//
// And we need to use a side connection to do so in order to prevent the test connection from
// being closed on FCV changes.
- function isMasterAsInternalClient() {
+ function helloAsInternalClient() {
let connInternal = new Mongo(primary.host);
const res = assert.commandWorked(connInternal.adminCommand({
- isMaster: 1,
+ hello: 1,
internalClient: {minWireVersion: NumberInt(0), maxWireVersion: NumberInt(9)}
}));
connInternal.close();
@@ -81,14 +81,14 @@ function runTest(downgradeFCV) {
}
// Get the server topologyVersion, minWireVersion, and maxWireversion.
- const primaryResult = isMasterAsInternalClient();
+ const primaryResult = helloAsInternalClient();
assert(primaryResult.hasOwnProperty("topologyVersion"), tojson(primaryResult));
const maxWireVersion = primaryResult.maxWireVersion;
const initMinWireVersion = primaryResult.minWireVersion;
assert.eq(maxWireVersion, initMinWireVersion);
- const secondaryResult = assert.commandWorked(secondaryAdminDB.runCommand({isMaster: 1}));
+ const secondaryResult = assert.commandWorked(secondaryAdminDB.runCommand({hello: 1}));
assert(secondaryResult.hasOwnProperty("topologyVersion"), tojson(secondaryResult));
const primaryTopologyVersion = primaryResult.topologyVersion;
assert(primaryTopologyVersion.hasOwnProperty("processId"), tojson(primaryTopologyVersion));
@@ -98,23 +98,23 @@ function runTest(downgradeFCV) {
assert(secondaryTopologyVersion.hasOwnProperty("processId"), tojson(secondaryTopologyVersion));
assert(secondaryTopologyVersion.hasOwnProperty("counter"), tojson(secondaryTopologyVersion));
- // A failpoint signalling that the servers have received the isMaster request and are waiting
+ // A failpoint signalling that the servers have received the hello request and are waiting
// for a topology change.
let primaryFailPoint = configureFailPoint(primary, "waitForIsMasterResponse");
let secondaryFailPoint = configureFailPoint(secondary, "waitForIsMasterResponse");
- // Send an awaitable isMaster request. This will block until maxAwaitTimeMS has elapsed or a
+ // Send an awaitable hello request. This will block until maxAwaitTimeMS has elapsed or a
// topology change happens.
- let awaitIsMasterBeforeDowngradeFCVOnPrimary =
- startParallelShell(funWithArgs(runAwaitableIsMasterBeforeFCVChange,
+ let awaitHelloBeforeDowngradeFCVOnPrimary =
+ startParallelShell(funWithArgs(runAwaitableHelloBeforeFCVChange,
primaryTopologyVersion,
downgradeFCV,
true /* isPrimary */,
initMinWireVersion,
maxWireVersion),
primary.port);
- let awaitIsMasterBeforeDowngradeFCVOnSecondary =
- startParallelShell(funWithArgs(runAwaitableIsMasterBeforeFCVChange,
+ let awaitHelloBeforeDowngradeFCVOnSecondary =
+ startParallelShell(funWithArgs(runAwaitableHelloBeforeFCVChange,
secondaryTopologyVersion,
downgradeFCV,
false /* isPrimary */,
@@ -124,7 +124,7 @@ function runTest(downgradeFCV) {
primaryFailPoint.wait();
secondaryFailPoint.wait();
- // Each node has one isMaster request waiting on a topology change.
+ // Each node has one hello request waiting on a topology change.
let numAwaitingTopologyChangeOnPrimary =
primaryAdminDB.serverStatus().connections.awaitingTopologyChanges;
let numAwaitingTopologyChangeOnSecondary =
@@ -132,12 +132,12 @@ function runTest(downgradeFCV) {
assert.eq(1, numAwaitingTopologyChangeOnPrimary);
assert.eq(1, numAwaitingTopologyChangeOnSecondary);
- // Setting the FCV to the same version will not trigger an isMaster response.
+// Setting the FCV to the same version will not trigger a hello response.
assert.commandWorked(primaryAdminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
checkFCV(primaryAdminDB, latestFCV);
checkFCV(secondaryAdminDB, latestFCV);
- // Each node still has one isMaster request waiting on a topology change.
+ // Each node still has one hello request waiting on a topology change.
numAwaitingTopologyChangeOnPrimary =
primaryAdminDB.serverStatus().connections.awaitingTopologyChanges;
numAwaitingTopologyChangeOnSecondary =
@@ -146,17 +146,17 @@ function runTest(downgradeFCV) {
assert.eq(1, numAwaitingTopologyChangeOnSecondary);
jsTestLog("Downgrade the featureCompatibilityVersion.");
- // Downgrading the FCV will cause the isMaster requests to respond on both primary and
+ // Downgrading the FCV will cause the hello requests to respond on both primary and
// secondary.
assert.commandWorked(primaryAdminDB.runCommand({setFeatureCompatibilityVersion: downgradeFCV}));
- awaitIsMasterBeforeDowngradeFCVOnPrimary();
- awaitIsMasterBeforeDowngradeFCVOnSecondary();
+ awaitHelloBeforeDowngradeFCVOnPrimary();
+ awaitHelloBeforeDowngradeFCVOnSecondary();
// Ensure the featureCompatibilityVersion document update has been replicated.
rst.awaitReplication();
checkFCV(primaryAdminDB, downgradeFCV);
checkFCV(secondaryAdminDB, downgradeFCV);
- // All isMaster requests should have been responded to after the FCV change.
+ // All hello requests should have been responded to after the FCV change.
numAwaitingTopologyChangeOnPrimary =
primaryAdminDB.serverStatus().connections.awaitingTopologyChanges;
numAwaitingTopologyChangeOnSecondary =
@@ -165,14 +165,14 @@ function runTest(downgradeFCV) {
assert.eq(0, numAwaitingTopologyChangeOnSecondary);
// Get the new topologyVersion.
- let primaryResponseAfterDowngrade = isMasterAsInternalClient();
+ const primaryResponseAfterDowngrade = helloAsInternalClient();
assert(primaryResponseAfterDowngrade.hasOwnProperty("topologyVersion"),
tojson(primaryResponseAfterDowngrade));
let primaryTopologyVersionAfterDowngrade = primaryResponseAfterDowngrade.topologyVersion;
let minWireVersionAfterDowngrade = primaryResponseAfterDowngrade.minWireVersion;
- let secondaryResponseAfterDowngrade =
- assert.commandWorked(secondaryAdminDB.runCommand({isMaster: 1}));
+ const secondaryResponseAfterDowngrade =
+ assert.commandWorked(secondaryAdminDB.runCommand({hello: 1}));
assert(secondaryResponseAfterDowngrade.hasOwnProperty("topologyVersion"),
tojson(secondaryResponseAfterDowngrade));
let secondaryTopologyVersionAfterDowngrade = secondaryResponseAfterDowngrade.topologyVersion;
@@ -184,16 +184,16 @@ function runTest(downgradeFCV) {
// Reconfigure the failpoint to refresh the number of times the failpoint has been entered.
primaryFailPoint = configureFailPoint(primary, "waitForIsMasterResponse");
secondaryFailPoint = configureFailPoint(secondary, "waitForIsMasterResponse");
- let awaitIsMasterBeforeUpgradeOnPrimary =
- startParallelShell(funWithArgs(runAwaitableIsMasterBeforeFCVChange,
+ let awaitHelloBeforeUpgradeOnPrimary =
+ startParallelShell(funWithArgs(runAwaitableHelloBeforeFCVChange,
primaryTopologyVersionAfterDowngrade,
lastContinuousFCV,
true /* isPrimary */,
minWireVersionAfterDowngrade,
maxWireVersion),
primary.port);
- let awaitIsMasterBeforeUpgradeOnSecondary =
- startParallelShell(funWithArgs(runAwaitableIsMasterBeforeFCVChange,
+ let awaitHelloBeforeUpgradeOnSecondary =
+ startParallelShell(funWithArgs(runAwaitableHelloBeforeFCVChange,
secondaryTopologyVersionAfterDowngrade,
lastContinuousFCV,
false /* isPrimary */,
@@ -203,7 +203,7 @@ function runTest(downgradeFCV) {
primaryFailPoint.wait();
secondaryFailPoint.wait();
- // Each node has one isMaster request waiting on a topology change.
+ // Each node has one hello request waiting on a topology change.
numAwaitingTopologyChangeOnPrimary =
primaryAdminDB.serverStatus().connections.awaitingTopologyChanges;
numAwaitingTopologyChangeOnSecondary =
@@ -214,15 +214,15 @@ function runTest(downgradeFCV) {
// Upgrade the FCV to last-continuous.
assert.commandWorked(primaryAdminDB.runCommand(
{setFeatureCompatibilityVersion: lastContinuousFCV, fromConfigServer: true}));
- awaitIsMasterBeforeUpgradeOnPrimary();
- awaitIsMasterBeforeUpgradeOnSecondary();
+ awaitHelloBeforeUpgradeOnPrimary();
+ awaitHelloBeforeUpgradeOnSecondary();
// Ensure the featureCompatibilityVersion document update has been replicated.
rst.awaitReplication();
checkFCV(primaryAdminDB, lastContinuousFCV);
checkFCV(secondaryAdminDB, lastContinuousFCV);
- // All isMaster requests should have been responded to after the FCV change.
+ // All hello requests should have been responded to after the FCV change.
numAwaitingTopologyChangeOnPrimary =
primaryAdminDB.serverStatus().connections.awaitingTopologyChanges;
numAwaitingTopologyChangeOnSecondary =
@@ -230,7 +230,7 @@ function runTest(downgradeFCV) {
assert.eq(0, numAwaitingTopologyChangeOnPrimary);
assert.eq(0, numAwaitingTopologyChangeOnSecondary);
- // Reset the FCV back to last-lts and the get the new isMaster parameters.
+ // Reset the FCV back to last-lts and get the new hello parameters.
// We must upgrade to latestFCV first since downgrading from last-continuous to last-stable
// is forbidden.
assert.commandWorked(
@@ -241,14 +241,14 @@ function runTest(downgradeFCV) {
checkFCV(primaryAdminDB, lastLTSFCV);
checkFCV(secondaryAdminDB, lastLTSFCV);
- primaryResponseAfterDowngrade = isMasterAsInternalClient();
+ primaryResponseAfterDowngrade = helloAsInternalClient();
assert(primaryResponseAfterDowngrade.hasOwnProperty("topologyVersion"),
tojson(primaryResponseAfterDowngrade));
primaryTopologyVersionAfterDowngrade = primaryResponseAfterDowngrade.topologyVersion;
minWireVersionAfterDowngrade = primaryResponseAfterDowngrade.minWireVersion;
secondaryResponseAfterDowngrade =
- assert.commandWorked(secondaryAdminDB.runCommand({isMaster: 1}));
+ assert.commandWorked(secondaryAdminDB.runCommand({hello: 1}));
assert(secondaryResponseAfterDowngrade.hasOwnProperty("topologyVersion"),
tojson(secondaryResponseAfterDowngrade));
secondaryTopologyVersionAfterDowngrade = secondaryResponseAfterDowngrade.topologyVersion;
@@ -257,16 +257,16 @@ function runTest(downgradeFCV) {
// Reconfigure the failpoint to refresh the number of times the failpoint has been entered.
primaryFailPoint = configureFailPoint(primary, "waitForIsMasterResponse");
secondaryFailPoint = configureFailPoint(secondary, "waitForIsMasterResponse");
- let awaitIsMasterBeforeUpgradeFCVOnPrimary =
- startParallelShell(funWithArgs(runAwaitableIsMasterBeforeFCVChange,
+ let awaitHelloBeforeUpgradeFCVOnPrimary =
+ startParallelShell(funWithArgs(runAwaitableHelloBeforeFCVChange,
primaryTopologyVersionAfterDowngrade,
latestFCV,
true /* isPrimary */,
minWireVersionAfterDowngrade,
maxWireVersion),
primary.port);
- let awaitIsMasterBeforeUpgradeFCVOnSecondary =
- startParallelShell(funWithArgs(runAwaitableIsMasterBeforeFCVChange,
+ let awaitHelloBeforeUpgradeFCVOnSecondary =
+ startParallelShell(funWithArgs(runAwaitableHelloBeforeFCVChange,
secondaryTopologyVersionAfterDowngrade,
latestFCV,
false /* isPrimary */,
@@ -276,7 +276,7 @@ function runTest(downgradeFCV) {
primaryFailPoint.wait();
secondaryFailPoint.wait();
- // Each node has one isMaster request waiting on a topology change.
+ // Each node has one hello request waiting on a topology change.
numAwaitingTopologyChangeOnPrimary =
primaryAdminDB.serverStatus().connections.awaitingTopologyChanges;
numAwaitingTopologyChangeOnSecondary =
@@ -284,12 +284,12 @@ function runTest(downgradeFCV) {
assert.eq(1, numAwaitingTopologyChangeOnPrimary);
assert.eq(1, numAwaitingTopologyChangeOnSecondary);
- // Setting the FCV to the same version will not trigger an isMaster response.
+ // Setting the FCV to the same version will not trigger a hello response.
assert.commandWorked(primaryAdminDB.runCommand({setFeatureCompatibilityVersion: downgradeFCV}));
checkFCV(primaryAdminDB, downgradeFCV);
checkFCV(secondaryAdminDB, downgradeFCV);
- // Each node still has one isMaster request waiting on a topology change.
+ // Each node still has one hello request waiting on a topology change.
numAwaitingTopologyChangeOnPrimary =
primaryAdminDB.serverStatus().connections.awaitingTopologyChanges;
numAwaitingTopologyChangeOnSecondary =
@@ -298,16 +298,16 @@ function runTest(downgradeFCV) {
assert.eq(1, numAwaitingTopologyChangeOnSecondary);
jsTestLog("Upgrade the featureCompatibilityVersion.");
- // Upgrading the FCV will cause the isMaster requests to respond on both primary and secondary.
+ // Upgrading the FCV will cause the hello requests to respond on both primary and secondary.
assert.commandWorked(primaryAdminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
- awaitIsMasterBeforeUpgradeFCVOnPrimary();
- awaitIsMasterBeforeUpgradeFCVOnSecondary();
+ awaitHelloBeforeUpgradeFCVOnPrimary();
+ awaitHelloBeforeUpgradeFCVOnSecondary();
// Ensure the featureCompatibilityVersion document update has been replicated.
rst.awaitReplication();
checkFCV(primaryAdminDB, latestFCV);
checkFCV(secondaryAdminDB, latestFCV);
- // All isMaster requests should have been responded to after the FCV change.
+ // All hello requests should have been responded to after the FCV change.
numAwaitingTopologyChangeOnPrimary =
primaryAdminDB.serverStatus().connections.awaitingTopologyChanges;
numAwaitingTopologyChangeOnSecondary =
diff --git a/jstests/replsets/awaitable_ismaster_metrics_on_state_change.js b/jstests/replsets/awaitable_ismaster_metrics_on_state_change.js
index 35ff235c673..4f6ad458ed2 100644
--- a/jstests/replsets/awaitable_ismaster_metrics_on_state_change.js
+++ b/jstests/replsets/awaitable_ismaster_metrics_on_state_change.js
@@ -1,65 +1,69 @@
/**
- * Tests that the server status metrics correctly reflect the number of waiting isMaster requests
- * before and after a state change.
+ * Tests that the server status metrics correctly reflect the number of waiting hello/isMaster
+ * requests before and after a state change.
*/
(function() {
"use strict";
load("jstests/libs/parallel_shell_helpers.js");
load("jstests/libs/fail_point_util.js");
-// Test isMaster paramaters on a single node replica set.
-const replTest = new ReplSetTest({name: "awaitable_ismaster_metrics", nodes: 1});
-replTest.startSet();
-replTest.initiate();
-
-const dbName = "awaitable_ismaster_metrics";
-const node = replTest.getPrimary();
-const db = node.getDB(dbName);
-
-const res = assert.commandWorked(db.runCommand({isMaster: 1}));
-assert(res.hasOwnProperty("topologyVersion"), res);
-
-const topologyVersionField = res.topologyVersion;
-assert(topologyVersionField.hasOwnProperty("processId"), topologyVersionField);
-assert(topologyVersionField.hasOwnProperty("counter"), topologyVersionField);
-
-function runAwaitableIsMaster(topologyVersionField) {
+function runAwaitableCmd(cmd, topologyVersionField) {
const res = assert.commandWorked(db.runCommand({
- isMaster: 1,
+ [cmd]: 1,
topologyVersion: topologyVersionField,
maxAwaitTimeMS: 99999999,
}));
assert.eq(topologyVersionField.counter + 1, res.topologyVersion.counter);
}
-// A failpoint signalling that the server has received the IsMaster request and is waiting for a
-// topology change.
-let failPoint = configureFailPoint(node, "waitForIsMasterResponse");
-// Send an awaitable isMaster request. This will block until maxAwaitTimeMS has elapsed or a
-// topology change happens.
-let firstIsMasterBeforeStepDown =
- startParallelShell(funWithArgs(runAwaitableIsMaster, topologyVersionField), node.port);
-failPoint.wait();
-// awaitingTopologyChanges should increment once.
-let numAwaitingTopologyChange = db.serverStatus().connections.awaitingTopologyChanges;
-assert.eq(1, numAwaitingTopologyChange);
+function runTest(cmd) {
+ // Test hello/isMaster parameters on a single node replica set.
+ const replTest = new ReplSetTest({name: "awaitable_cmd_metrics", nodes: 1});
+ replTest.startSet();
+ replTest.initiate();
-// Reconfigure failpoint to refresh the number of times entered.
-failPoint = configureFailPoint(node, "waitForIsMasterResponse");
-let secondIsMasterBeforeStepdown =
- startParallelShell(funWithArgs(runAwaitableIsMaster, topologyVersionField), node.port);
-failPoint.wait();
-numAwaitingTopologyChange = db.serverStatus().connections.awaitingTopologyChanges;
-assert.eq(2, numAwaitingTopologyChange);
+ const dbName = "awaitable_cmd_metrics";
+ const node = replTest.getPrimary();
+ const db = node.getDB(dbName);
-// Call stepdown to increment the server TopologyVersion and respond to the waiting isMaster
-// requests.
-assert.commandWorked(db.adminCommand({replSetStepDown: 60, force: true}));
-firstIsMasterBeforeStepDown();
-secondIsMasterBeforeStepdown();
-// All isMaster requests should have been responded to.
-numAwaitingTopologyChange = db.serverStatus().connections.awaitingTopologyChanges;
-assert.eq(0, numAwaitingTopologyChange);
+ const res = assert.commandWorked(db.runCommand({[cmd]: 1}));
+ assert(res.hasOwnProperty("topologyVersion"), res);
+ const topologyVersionField = res.topologyVersion;
+ assert(topologyVersionField.hasOwnProperty("processId"), topologyVersionField);
+ assert(topologyVersionField.hasOwnProperty("counter"), topologyVersionField);
+
+ // A failpoint signalling that the server has received the hello/isMaster request and is waiting
+ // for a topology change.
+ let failPoint = configureFailPoint(node, "waitForIsMasterResponse");
+ // Send an awaitable hello/isMaster request. This will block until maxAwaitTimeMS has elapsed or
+ // a topology change happens.
+ let firstCmdBeforeStepDown =
+ startParallelShell(funWithArgs(runAwaitableCmd, cmd, topologyVersionField), node.port);
+ failPoint.wait();
+ // awaitingTopologyChanges should increment once.
+ let numAwaitingTopologyChange = db.serverStatus().connections.awaitingTopologyChanges;
+ assert.eq(1, numAwaitingTopologyChange);
+
+ // Reconfigure failpoint to refresh the number of times entered.
+ failPoint = configureFailPoint(node, "waitForIsMasterResponse");
+ let secondCmdBeforeStepdown =
+ startParallelShell(funWithArgs(runAwaitableCmd, cmd, topologyVersionField), node.port);
+ failPoint.wait();
+ numAwaitingTopologyChange = db.serverStatus().connections.awaitingTopologyChanges;
+ assert.eq(2, numAwaitingTopologyChange);
+
+ // Call stepdown to increment the server TopologyVersion and respond to the waiting
+ // hello/isMaster requests.
+ assert.commandWorked(db.adminCommand({replSetStepDown: 60, force: true}));
+ firstCmdBeforeStepDown();
+ secondCmdBeforeStepdown();
+ // All hello/isMaster requests should have been responded to.
+ numAwaitingTopologyChange = db.serverStatus().connections.awaitingTopologyChanges;
+ assert.eq(0, numAwaitingTopologyChange);
+ replTest.stopSet();
+}
-replTest.stopSet();
+runTest("hello");
+runTest("isMaster");
+runTest("ismaster");
})();
diff --git a/jstests/replsets/awaitable_ismaster_on_nodes_with_invalid_configs.js b/jstests/replsets/awaitable_ismaster_on_nodes_with_invalid_configs.js
index ae75d4a948e..80d63346e58 100644
--- a/jstests/replsets/awaitable_ismaster_on_nodes_with_invalid_configs.js
+++ b/jstests/replsets/awaitable_ismaster_on_nodes_with_invalid_configs.js
@@ -1,5 +1,5 @@
/**
- * Tests the streamable isMaster protocol against nodes with invalid replica set configs.
+ * Tests the streamable hello protocol against nodes with invalid replica set configs.
* SERVER-49428: Disable for ephemeralForTest, writeConcernMajorityJournalDefault is not off
* @tags: [incompatible_with_eft]
*/
@@ -15,30 +15,30 @@ const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}
// Start the replica set but don't initiate yet.
replTest.startSet();
-const dbName = "awaitable_ismaster_horizon_change";
+const dbName = "awaitable_hello_horizon_change";
const node0 = replTest.nodes[0];
const node1 = replTest.nodes[1];
const dbNode0 = node0.getDB(dbName);
const dbNode1 = node1.getDB(dbName);
-let responseNode0 = assert.commandWorked(dbNode0.runCommand({isMaster: 1}));
-let responseNode1 = assert.commandWorked(dbNode1.runCommand({isMaster: 1}));
+let responseNode0 = assert.commandWorked(dbNode0.runCommand({hello: 1}));
+let responseNode1 = assert.commandWorked(dbNode1.runCommand({hello: 1}));
let topologyVersionNode0 = responseNode0.topologyVersion;
let topologyVersionNode1 = responseNode1.topologyVersion;
-function runAwaitableIsMaster(topologyVersionField) {
+function runAwaitableHello(topologyVersionField) {
const result = assert.commandWorked(db.runCommand({
- isMaster: 1,
+ hello: 1,
topologyVersion: topologyVersionField,
maxAwaitTimeMS: 99999999,
}));
assert.eq(topologyVersionField.counter + 1, result.topologyVersion.counter, result);
}
-// Waiting isMasters should error when a node rejoins a replica set.
-function runAwaitableIsMasterOnRejoiningSet(topologyVersionField) {
+// Waiting hellos should error when a node rejoins a replica set.
+function runAwaitableHelloOnRejoiningSet(topologyVersionField) {
const result = assert.throws(() => db.runCommand({
- isMaster: 1,
+ hello: 1,
topologyVersion: topologyVersionField,
maxAwaitTimeMS: 99999999,
}));
@@ -48,19 +48,19 @@ function runAwaitableIsMasterOnRejoiningSet(topologyVersionField) {
assert.commandWorked(db.adminCommand({ping: 1}));
}
-// A failpoint signalling that the servers have received the isMaster request and are waiting for a
+// A failpoint signalling that the servers have received the hello request and are waiting for a
// topology change.
let node0FailPoint = configureFailPoint(node0, "waitForIsMasterResponse");
let node1FailPoint = configureFailPoint(node1, "waitForIsMasterResponse");
-// Send an awaitable isMaster request. This will block until there is a topology change.
+// Send an awaitable hello request. This will block until there is a topology change.
const firstAwaitInitiateOnNode0 =
- startParallelShell(funWithArgs(runAwaitableIsMaster, topologyVersionNode0), node0.port);
+ startParallelShell(funWithArgs(runAwaitableHello, topologyVersionNode0), node0.port);
const firstAwaitInitiateOnNode1 =
- startParallelShell(funWithArgs(runAwaitableIsMaster, topologyVersionNode1), node1.port);
+ startParallelShell(funWithArgs(runAwaitableHello, topologyVersionNode1), node1.port);
node0FailPoint.wait();
node1FailPoint.wait();
-// Each node has one isMaster request waiting on a topology change.
+// Each node has one hello request waiting on a topology change.
let numAwaitingTopologyChangeOnNode0 = dbNode0.serverStatus().connections.awaitingTopologyChanges;
let numAwaitingTopologyChangeOnNode1 = dbNode1.serverStatus().connections.awaitingTopologyChanges;
assert.eq(1, numAwaitingTopologyChangeOnNode0);
@@ -70,19 +70,19 @@ assert.eq(1, numAwaitingTopologyChangeOnNode1);
node0FailPoint = configureFailPoint(node0, "waitForIsMasterResponse");
node1FailPoint = configureFailPoint(node1, "waitForIsMasterResponse");
const secondAwaitInitiateOnNode0 =
- startParallelShell(funWithArgs(runAwaitableIsMaster, topologyVersionNode0), node0.port);
+ startParallelShell(funWithArgs(runAwaitableHello, topologyVersionNode0), node0.port);
const secondAwaitInitiateOnNode1 =
- startParallelShell(funWithArgs(runAwaitableIsMaster, topologyVersionNode1), node1.port);
+ startParallelShell(funWithArgs(runAwaitableHello, topologyVersionNode1), node1.port);
node0FailPoint.wait();
node1FailPoint.wait();
-// Each node has two isMaster requests waiting on a topology change.
+// Each node has two hello requests waiting on a topology change.
numAwaitingTopologyChangeOnNode0 = dbNode0.serverStatus().connections.awaitingTopologyChanges;
numAwaitingTopologyChangeOnNode1 = dbNode1.serverStatus().connections.awaitingTopologyChanges;
assert.eq(2, numAwaitingTopologyChangeOnNode0);
assert.eq(2, numAwaitingTopologyChangeOnNode1);
-// Doing a replSetInitiate should respond to all waiting isMasters.
+// Doing a replSetInitiate should respond to all waiting hellos.
replTest.initiate();
firstAwaitInitiateOnNode0();
firstAwaitInitiateOnNode1();
@@ -98,37 +98,37 @@ let primary = replTest.getPrimary();
let secondary = replTest.getSecondary();
let primaryDB = primary.getDB('admin');
let secondaryDB = secondary.getDB('admin');
-const primaryRespAfterInitiate = assert.commandWorked(primaryDB.runCommand({isMaster: 1}));
+const primaryRespAfterInitiate = assert.commandWorked(primaryDB.runCommand({hello: 1}));
let primaryTopologyVersion = primaryRespAfterInitiate.topologyVersion;
// Reconfigure the failpoint to refresh the number of times the failpoint has been entered.
let primaryFailPoint = configureFailPoint(primary, "waitForIsMasterResponse");
-const awaitPrimaryIsMasterBeforeNodeRemoval =
- startParallelShell(funWithArgs(runAwaitableIsMaster, primaryTopologyVersion), primary.port);
+const awaitPrimaryHelloBeforeNodeRemoval =
+ startParallelShell(funWithArgs(runAwaitableHello, primaryTopologyVersion), primary.port);
primaryFailPoint.wait();
-// The primary has one isMaster request waiting on a topology change.
+// The primary has one hello request waiting on a topology change.
let numAwaitingTopologyChangeOnPrimary =
primaryDB.serverStatus().connections.awaitingTopologyChanges;
assert.eq(1, numAwaitingTopologyChangeOnPrimary);
-// Doing a reconfig that removes the secondary should respond to all waiting isMasters.
+// Doing a reconfig that removes the secondary should respond to all waiting hellos.
let config = replTest.getReplSetConfig();
config.members.splice(1, 1);
config.version = replTest.getReplSetConfigFromNode().version + 1;
assert.commandWorked(primaryDB.runCommand({replSetReconfig: config}));
-awaitPrimaryIsMasterBeforeNodeRemoval();
+awaitPrimaryHelloBeforeNodeRemoval();
// Wait for secondary to realize it is removed.
assert.soonNoExcept(
() => assert.commandFailedWithCode(secondaryDB.adminCommand({replSetGetStatus: 1}),
ErrorCodes.InvalidReplicaSetConfig));
-const primaryRespAfterRemoval = assert.commandWorked(primaryDB.runCommand({isMaster: 1}));
-const secondaryRespAfterRemoval = assert.commandWorked(secondaryDB.runCommand({isMaster: 1}));
+const primaryRespAfterRemoval = assert.commandWorked(primaryDB.runCommand({hello: 1}));
+const secondaryRespAfterRemoval = assert.commandWorked(secondaryDB.runCommand({hello: 1}));
primaryTopologyVersion = primaryRespAfterRemoval.topologyVersion;
let secondaryTopologyVersion = secondaryRespAfterRemoval.topologyVersion;
-assert.eq(false, secondaryRespAfterRemoval.ismaster, secondaryRespAfterRemoval);
+assert.eq(false, secondaryRespAfterRemoval.isWritablePrimary, secondaryRespAfterRemoval);
assert.eq(false, secondaryRespAfterRemoval.secondary, secondaryRespAfterRemoval);
assert.eq("Does not have a valid replica set config",
secondaryRespAfterRemoval.info,
@@ -137,10 +137,10 @@ assert.eq("Does not have a valid replica set config",
// Reconfigure the failpoint to refresh the number of times the failpoint has been entered.
primaryFailPoint = configureFailPoint(primary, "waitForIsMasterResponse");
let secondaryFailPoint = configureFailPoint(secondary, "waitForIsMasterResponse");
-const awaitPrimaryIsMasterBeforeReadding =
- startParallelShell(funWithArgs(runAwaitableIsMaster, primaryTopologyVersion), primary.port);
-const firstAwaitSecondaryIsMasterBeforeRejoining = startParallelShell(
- funWithArgs(runAwaitableIsMasterOnRejoiningSet, secondaryTopologyVersion), secondary.port);
+const awaitPrimaryHelloBeforeReading =
+ startParallelShell(funWithArgs(runAwaitableHello, primaryTopologyVersion), primary.port);
+const firstAwaitSecondaryHelloBeforeRejoining = startParallelShell(
+ funWithArgs(runAwaitableHelloOnRejoiningSet, secondaryTopologyVersion), secondary.port);
primaryFailPoint.wait();
secondaryFailPoint.wait();
@@ -150,23 +150,23 @@ let numAwaitingTopologyChangeOnSecondary =
assert.eq(1, numAwaitingTopologyChangeOnPrimary);
assert.eq(1, numAwaitingTopologyChangeOnSecondary);
-// Send a second isMaster to the removed secondary.
+// Send a second hello to the removed secondary.
secondaryFailPoint = configureFailPoint(secondary, "waitForIsMasterResponse");
-const secondAwaitSecondaryIsMasterBeforeRejoining = startParallelShell(
- funWithArgs(runAwaitableIsMasterOnRejoiningSet, secondaryTopologyVersion), secondary.port);
+const secondAwaitSecondaryHelloBeforeRejoining = startParallelShell(
+ funWithArgs(runAwaitableHelloOnRejoiningSet, secondaryTopologyVersion), secondary.port);
secondaryFailPoint.wait();
numAwaitingTopologyChangeOnSecondary =
secondaryDB.serverStatus().connections.awaitingTopologyChanges;
assert.eq(2, numAwaitingTopologyChangeOnSecondary);
-// Have the secondary rejoin the set. This should respond to waiting isMasters on both nodes.
+// Have the secondary rejoin the set. This should respond to waiting hellos on both nodes.
config = replTest.getReplSetConfig();
config.version = replTest.getReplSetConfigFromNode().version + 1;
assert.commandWorked(primaryDB.runCommand({replSetReconfig: config}));
-awaitPrimaryIsMasterBeforeReadding();
-firstAwaitSecondaryIsMasterBeforeRejoining();
-secondAwaitSecondaryIsMasterBeforeRejoining();
+awaitPrimaryHelloBeforeReading();
+firstAwaitSecondaryHelloBeforeRejoining();
+secondAwaitSecondaryHelloBeforeRejoining();
numAwaitingTopologyChangeOnPrimary = primaryDB.serverStatus().connections.awaitingTopologyChanges;
numAwaitingTopologyChangeOnSecondary =
diff --git a/jstests/replsets/awaitable_ismaster_stepdown_stepup.js b/jstests/replsets/awaitable_ismaster_stepdown_stepup.js
index 06384950b71..d07c9a64994 100644
--- a/jstests/replsets/awaitable_ismaster_stepdown_stepup.js
+++ b/jstests/replsets/awaitable_ismaster_stepdown_stepup.js
@@ -1,126 +1,126 @@
/**
- * Tests the fields returned by isMaster responses as a node goes through a step down and step up.
+ * Tests the fields returned by hello responses as a node goes through a step down and step up.
*/
(function() {
"use strict";
load("jstests/libs/parallel_shell_helpers.js");
load("jstests/libs/fail_point_util.js");
-// Test isMaster paramaters on a single node replica set.
-const replSetName = "awaitable_ismaster_stepup";
+// Test hello parameters on a single node replica set.
+const replSetName = "awaitable_hello_stepup";
const replTest = new ReplSetTest({name: replSetName, nodes: 1});
replTest.startSet();
replTest.initiate();
-const dbName = "awaitable_ismaster_test";
+const dbName = "awaitable_hello_test";
const node = replTest.getPrimary();
const db = node.getDB(dbName);
-// Check isMaster response contains a topologyVersion even if maxAwaitTimeMS and topologyVersion are
+// Check hello response contains a topologyVersion even if maxAwaitTimeMS and topologyVersion are
// not included in the request.
-const res = assert.commandWorked(db.runCommand({isMaster: 1}));
+const res = assert.commandWorked(db.runCommand({hello: 1}));
assert(res.hasOwnProperty("topologyVersion"), tojson(res));
const topologyVersionField = res.topologyVersion;
assert(topologyVersionField.hasOwnProperty("processId"), tojson(topologyVersionField));
assert(topologyVersionField.hasOwnProperty("counter"), tojson(topologyVersionField));
-function runAwaitableIsMasterBeforeStepDown(topologyVersionField) {
+function runAwaitableHelloBeforeStepDown(topologyVersionField) {
const resAfterDisablingWrites = assert.commandWorked(db.runCommand({
- isMaster: 1,
+ hello: 1,
topologyVersion: topologyVersionField,
maxAwaitTimeMS: 99999999,
}));
assert.eq(topologyVersionField.counter + 1, resAfterDisablingWrites.topologyVersion.counter);
- // Validate that an isMaster response returns once writes have been disabled on the primary
+ // Validate that a hello response returns once writes have been disabled on the primary
// even though the node has yet to transition to secondary.
- assert.eq(false, resAfterDisablingWrites.ismaster, resAfterDisablingWrites);
+ assert.eq(false, resAfterDisablingWrites.isWritablePrimary, resAfterDisablingWrites);
assert.eq(false, resAfterDisablingWrites.secondary, resAfterDisablingWrites);
assert.hasFields(resAfterDisablingWrites, ["primary"]);
// The TopologyVersion from resAfterDisablingWrites should now be stale since the old primary
- // has completed its transition to secondary. This isMaster request should respond immediately.
+ // has completed its transition to secondary. This hello request should respond immediately.
const resAfterStepdownComplete = assert.commandWorked(db.runCommand({
- isMaster: 1,
+ hello: 1,
topologyVersion: resAfterDisablingWrites.topologyVersion,
maxAwaitTimeMS: 99999999,
}));
assert.eq(resAfterDisablingWrites.topologyVersion.counter + 1,
resAfterStepdownComplete.topologyVersion.counter,
resAfterStepdownComplete);
- assert.eq(false, resAfterStepdownComplete.ismaster, resAfterStepdownComplete);
+ assert.eq(false, resAfterStepdownComplete.isWritablePrimary, resAfterStepdownComplete);
assert.eq(true, resAfterStepdownComplete.secondary, resAfterStepdownComplete);
assert(!resAfterStepdownComplete.hasOwnProperty("primary"), resAfterStepdownComplete);
}
-function runAwaitableIsMasterBeforeStepUp(topologyVersionField) {
+function runAwaitableHelloBeforeStepUp(topologyVersionField) {
const resAfterEnteringDrainMode = assert.commandWorked(db.runCommand({
- isMaster: 1,
+ hello: 1,
topologyVersion: topologyVersionField,
maxAwaitTimeMS: 99999999,
}));
assert.eq(topologyVersionField.counter + 1, resAfterEnteringDrainMode.topologyVersion.counter);
- // Validate that the isMaster response returns once the primary enters drain mode. At this
- // point, we expect the 'primary' field to exist but 'ismaster' will still be false.
- assert.eq(false, resAfterEnteringDrainMode.ismaster, resAfterEnteringDrainMode);
+ // Validate that the hello response returns once the primary enters drain mode. At this
+ // point, we expect the 'primary' field to exist but 'isWritablePrimary' will still be false.
+ assert.eq(false, resAfterEnteringDrainMode.isWritablePrimary, resAfterEnteringDrainMode);
assert.eq(true, resAfterEnteringDrainMode.secondary, resAfterEnteringDrainMode);
assert.hasFields(resAfterEnteringDrainMode, ["primary"]);
// The TopologyVersion from resAfterEnteringDrainMode should now be stale since we expect
// the primary to increase the config term and increment the counter once again.
const resAfterReconfigOnStepUp = assert.commandWorked(db.runCommand({
- isMaster: 1,
+ hello: 1,
topologyVersion: resAfterEnteringDrainMode.topologyVersion,
maxAwaitTimeMS: 99999999,
}));
assert.eq(resAfterEnteringDrainMode.topologyVersion.counter + 1,
resAfterReconfigOnStepUp.topologyVersion.counter,
resAfterReconfigOnStepUp);
- assert.eq(false, resAfterReconfigOnStepUp.ismaster, resAfterReconfigOnStepUp);
+ assert.eq(false, resAfterReconfigOnStepUp.isWritablePrimary, resAfterReconfigOnStepUp);
assert.eq(true, resAfterReconfigOnStepUp.secondary, resAfterReconfigOnStepUp);
assert.hasFields(resAfterReconfigOnStepUp, ["primary"]);
}
-function runAwaitableIsMasterAfterStepUp(topologyVersionField) {
+function runAwaitableHelloAfterStepUp(topologyVersionField) {
// The TopologyVersion from resAfterReconfigOnStepUp should now be stale since we expect
// the primary to exit drain mode and increment the counter once again.
const resAfterExitingDrainMode = assert.commandWorked(db.runCommand({
- isMaster: 1,
+ hello: 1,
topologyVersion: topologyVersionField,
maxAwaitTimeMS: 99999999,
}));
assert.eq(topologyVersionField.counter + 1, resAfterExitingDrainMode.topologyVersion.counter);
- assert.eq(true, resAfterExitingDrainMode.ismaster, resAfterExitingDrainMode);
+ assert.eq(true, resAfterExitingDrainMode.isWritablePrimary, resAfterExitingDrainMode);
assert.eq(false, resAfterExitingDrainMode.secondary, resAfterExitingDrainMode);
assert.hasFields(resAfterExitingDrainMode, ["primary"]);
}
-// A failpoint signalling that the server has received the IsMaster request and is waiting for a
+// A failpoint signalling that the server has received the hello request and is waiting for a
// topology change.
let failPoint = configureFailPoint(node, "waitForIsMasterResponse");
-// Send an awaitable isMaster request. This will block until maxAwaitTimeMS has elapsed or a
+// Send an awaitable hello request. This will block until maxAwaitTimeMS has elapsed or a
// topology change happens.
-let awaitIsMasterBeforeStepDown = startParallelShell(
- funWithArgs(runAwaitableIsMasterBeforeStepDown, topologyVersionField), node.port);
+let awaitHelloBeforeStepDown = startParallelShell(
+ funWithArgs(runAwaitableHelloBeforeStepDown, topologyVersionField), node.port);
failPoint.wait();
-// Call stepdown to increment the server TopologyVersion and respond to the waiting isMaster
+// Call stepdown to increment the server TopologyVersion and respond to the waiting hello
// request. We expect stepDown to increment the TopologyVersion twice - once for when the writes are
// disabled and once again for when the primary completes its transition to secondary.
assert.commandWorked(db.adminCommand({replSetStepDown: 60, force: true}));
-awaitIsMasterBeforeStepDown();
+awaitHelloBeforeStepDown();
-let response = assert.commandWorked(node.getDB(dbName).runCommand({isMaster: 1}));
+let response = assert.commandWorked(node.getDB(dbName).runCommand({hello: 1}));
assert(response.hasOwnProperty("topologyVersion"), tojson(res));
const topologyVersionAfterStepDown = response.topologyVersion;
// Reconfigure the failpoint to refresh the number of times the failpoint has been entered.
failPoint = configureFailPoint(node, "waitForIsMasterResponse");
const hangFailPoint = configureFailPoint(node, "hangAfterReconfigOnDrainComplete");
-// Send an awaitable isMaster request. This will block until maxAwaitTimeMS has elapsed or a
+// Send an awaitable hello request. This will block until maxAwaitTimeMS has elapsed or a
// topology change happens.
-let awaitIsMasterBeforeStepUp = startParallelShell(
- funWithArgs(runAwaitableIsMasterBeforeStepUp, topologyVersionAfterStepDown), node.port);
+let awaitHelloBeforeStepUp = startParallelShell(
+ funWithArgs(runAwaitableHelloBeforeStepUp, topologyVersionAfterStepDown), node.port);
failPoint.wait();
// Unfreezing the old primary will cause the node to step up in a single node replica set.
@@ -128,22 +128,22 @@ assert.commandWorked(node.adminCommand({replSetFreeze: 0}));
// Wait until stepup thread hangs after the reconfig.
hangFailPoint.wait();
-awaitIsMasterBeforeStepUp();
+awaitHelloBeforeStepUp();
-response = assert.commandWorked(node.getDB(dbName).runCommand({isMaster: 1}));
+response = assert.commandWorked(node.getDB(dbName).runCommand({hello: 1}));
assert(response.hasOwnProperty("topologyVersion"), tojson(res));
const topologyVersionAfterStepUp = response.topologyVersion;
// Reconfigure the failpoint to refresh the number of times the failpoint has been entered.
failPoint = configureFailPoint(node, "waitForIsMasterResponse");
-// Send an awaitable isMaster request. This will block until maxAwaitTimeMS has elapsed or a
+// Send an awaitable hello request. This will block until maxAwaitTimeMS has elapsed or a
// topology change happens.
-let awaitIsMasterAfterStepUp = startParallelShell(
- funWithArgs(runAwaitableIsMasterAfterStepUp, topologyVersionAfterStepUp), node.port);
+let awaitHelloAfterStepUp = startParallelShell(
+ funWithArgs(runAwaitableHelloAfterStepUp, topologyVersionAfterStepUp), node.port);
failPoint.wait();
// Let the stepup thread to continue.
hangFailPoint.off();
-awaitIsMasterAfterStepUp();
+awaitHelloAfterStepUp();
replTest.stopSet();
})();
diff --git a/jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js b/jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js
index 4b296ea4db7..25d6bb767ee 100644
--- a/jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js
+++ b/jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js
@@ -67,7 +67,7 @@ assert.commandWorked(nodeA.adminCommand({replSetStepUp: 1}));
restartServerReplication([nodeA, nodeC, nodeD]);
assert.soon(() => {
// We cannot use getPrimary() here because 2 nodes report they are primary.
- return assert.commandWorked(nodeA.adminCommand({ismaster: 1})).ismaster;
+ return assert.commandWorked(nodeA.adminCommand({hello: 1})).isWritablePrimary;
});
assert.commandWorked(
nodeA.getDB(dbName)[collName].insert({term: 3}, {writeConcern: {w: "majority"}}));
diff --git a/jstests/replsets/drain.js b/jstests/replsets/drain.js
index bd944eb492a..e65a0b1baae 100644
--- a/jstests/replsets/drain.js
+++ b/jstests/replsets/drain.js
@@ -63,7 +63,7 @@ replSet.waitForState(secondary, ReplSetTest.State.PRIMARY);
// Ensure new primary is not yet writable
jsTestLog('New primary should not be writable yet');
assert.writeError(secondary.getDB("foo").flag.insert({sentinel: 2}));
-assert(!secondary.getDB("admin").runCommand({"isMaster": 1}).ismaster);
+assert(!secondary.getDB("admin").runCommand({"hello": 1}).isWritablePrimary);
// Ensure new primary is not yet readable without secondaryOk bit.
secondary.setSecondaryOk(false);
@@ -77,7 +77,7 @@ assert.eq(ErrorCodes.NotPrimaryNoSecondaryOk,
secondary.setSecondaryOk();
assert.commandWorked(secondary.getDB("foo").runCommand({find: "foo"}));
-assert(!secondary.adminCommand({"isMaster": 1}).ismaster);
+assert(!secondary.adminCommand({"hello": 1}).isWritablePrimary);
// Allow draining to complete
jsTestLog('Disabling fail point on new primary to allow draining to complete');
diff --git a/jstests/replsets/linearizable_read_concern.js b/jstests/replsets/linearizable_read_concern.js
index 657320f0275..80821a2507e 100644
--- a/jstests/replsets/linearizable_read_concern.js
+++ b/jstests/replsets/linearizable_read_concern.js
@@ -21,10 +21,10 @@ var send_linearizable_read = function() {
// The primary will step down and throw an exception, which is expected.
var coll = db.getSiblingDB("test").foo;
jsTestLog('Sending in linearizable read in secondary thread');
- // 'isMaster' ensures that the following command fails (and returns a response rather than
+ // 'hello' ensures that the following command fails (and returns a response rather than
// an exception) before its connection is cut because of the primary step down. Refer to
// SERVER-24574.
- assert.commandWorked(coll.runCommand({isMaster: 1, hangUpOnStepDown: false}));
+ assert.commandWorked(coll.runCommand({hello: 1, hangUpOnStepDown: false}));
assert.commandFailedWithCode(
coll.runCommand({'find': 'foo', readConcern: {level: "linearizable"}, maxTimeMS: 60000}),
ErrorCodes.InterruptedDueToReplStateChange);
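As the comment above notes, hello doubles here as a connection marker: passing hangUpOnStepDown: false asks the server not to close this connection on stepdown, so the interrupted command comes back as an error response instead of a network error. A small sketch of the pattern, mirroring the hunk above (names are illustrative):

// Mark the connection so stepdown does not hang it up ...
assert.commandWorked(db.adminCommand({hello: 1, hangUpOnStepDown: false}));
// ... then a command interrupted by the stepdown fails with a code we can assert on.
assert.commandFailedWithCode(
    db.getSiblingDB("test").runCommand({find: "foo", maxTimeMS: 60000}),
    ErrorCodes.InterruptedDueToReplStateChange);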
diff --git a/jstests/replsets/localhost1.js b/jstests/replsets/localhost1.js
index a036c265abd..a8ca886f87a 100644
--- a/jstests/replsets/localhost1.js
+++ b/jstests/replsets/localhost1.js
@@ -12,6 +12,6 @@ const resp = assert.commandWorked(db.adminCommand({replSetInitiate: undefined}))
assert(resp.me.startsWith('127.0.0.1'), tojson(resp.me) + " does not start with 127.0.0.1:");
// Wait for the primary to complete its election before shutting down the set.
-assert.soon(() => db.runCommand({ismaster: 1}).ismaster);
+assert.soon(() => db.runCommand({hello: 1}).isWritablePrimary);
rt.stopSet();
})();
diff --git a/jstests/replsets/localhost2.js b/jstests/replsets/localhost2.js
index f89dd7c644f..0e801127ee8 100644
--- a/jstests/replsets/localhost2.js
+++ b/jstests/replsets/localhost2.js
@@ -14,6 +14,6 @@ assert(!resp.me.startsWith('0.0.0.0:'), tojson(resp.me) + " should not start wit
assert(!resp.me.startsWith('localhost:'), tojson(resp.me) + " should not start with localhost:");
// Wait for the primary to complete its election before shutting down the set.
-assert.soon(() => db.runCommand({ismaster: 1}).ismaster);
+assert.soon(() => db.runCommand({hello: 1}).isWritablePrimary);
rt.stopSet();
})();
diff --git a/jstests/replsets/localhost3.js b/jstests/replsets/localhost3.js
index 0cd8dfb39fe..4fb963473f8 100644
--- a/jstests/replsets/localhost3.js
+++ b/jstests/replsets/localhost3.js
@@ -12,6 +12,6 @@ const resp = assert.commandWorked(db.adminCommand({replSetInitiate: undefined}))
assert(resp.me.startsWith('localhost:'), tojson(resp.me) + " should start with localhost:");
// Wait for the primary to complete its election before shutting down the set.
-assert.soon(() => db.runCommand({ismaster: 1}).ismaster);
+assert.soon(() => db.runCommand({hello: 1}).isWritablePrimary);
rt.stopSet();
})();
diff --git a/jstests/replsets/maintenance.js b/jstests/replsets/maintenance.js
index 5ad4bd55620..7becd2e0422 100644
--- a/jstests/replsets/maintenance.js
+++ b/jstests/replsets/maintenance.js
@@ -20,7 +20,7 @@ for (i = 0; i < 20; i++) {
replTest.awaitReplication();
assert.soon(function() {
- return conns[1].getDB("admin").isMaster().secondary;
+ return conns[1].getDB("admin").hello().secondary;
});
join =
@@ -33,10 +33,10 @@ print("check secondary becomes a secondary again");
var secondarySoon = function() {
var x = 0;
assert.soon(function() {
- var im = conns[1].getDB("admin").isMaster();
+ var helloRes = conns[1].getDB("admin").hello();
if (x++ % 5 == 0)
- printjson(im);
- return im.secondary;
+ printjson(helloRes);
+ return helloRes.secondary;
});
};
@@ -69,10 +69,10 @@ assert.eq(result.ok, 1, tojson(result));
print("make sure secondary goes into recovering");
var x = 0;
assert.soon(function() {
- var im = conns[1].getDB("admin").isMaster();
+ var helloRes = conns[1].getDB("admin").hello();
if (x++ % 5 == 0)
- printjson(im);
- return !im.secondary && !im.ismaster;
+ printjson(helloRes);
+ return !helloRes.secondary && !helloRes.isWritablePrimary;
});
var recv = conns[1].getDB("admin").runCommand({find: "foo"});
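The checks above capture how a member in maintenance mode (RECOVERING) answers hello: neither isWritablePrimary nor secondary is true. A compact sketch of that probe, assuming a connection conn to the member:

const res = assert.commandWorked(conn.adminCommand({hello: 1}));
// While in maintenance mode the node is neither a writable primary nor a secondary.
assert(!res.isWritablePrimary && !res.secondary, tojson(res));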
diff --git a/jstests/replsets/maintenance_non-blocking.js b/jstests/replsets/maintenance_non-blocking.js
index 497bae360c2..15fe20ce4b0 100644
--- a/jstests/replsets/maintenance_non-blocking.js
+++ b/jstests/replsets/maintenance_non-blocking.js
@@ -26,18 +26,18 @@ doTest = function() {
print("******* replSetMaintenance called on secondary ************* ");
assert.commandWorked(sDB.adminCommand("replSetMaintenance"));
- var ismaster = assert.commandWorked(sColl.runCommand("ismaster"));
- assert.eq(false, ismaster.ismaster);
- assert.eq(false, ismaster.secondary);
+ var hello = assert.commandWorked(sColl.runCommand("hello"));
+ assert.eq(false, hello.isWritablePrimary);
+ assert.eq(false, hello.secondary);
print("******* writing to primary ************* ");
assert.commandWorked(mColl.save({_id: -1}));
printjson(sDB.currentOp());
assert.neq(null, mColl.findOne());
- var ismaster = assert.commandWorked(sColl.runCommand("ismaster"));
- assert.eq(false, ismaster.ismaster);
- assert.eq(false, ismaster.secondary);
+ var hello = assert.commandWorked(sColl.runCommand("hello"));
+ assert.eq(false, hello.isWritablePrimary);
+ assert.eq(false, hello.secondary);
print("******* fsyncUnlock'n secondary ************* ");
sDB.fsyncUnlock();
diff --git a/jstests/replsets/minimum_visible_with_cluster_time.js b/jstests/replsets/minimum_visible_with_cluster_time.js
index 7bbbff02522..fab209d3819 100644
--- a/jstests/replsets/minimum_visible_with_cluster_time.js
+++ b/jstests/replsets/minimum_visible_with_cluster_time.js
@@ -27,7 +27,7 @@ function bumpClusterTime() {
while (true) {
const higherClusterTime = new Timestamp(clusterTime.getTime() + 20, 1);
const res = assert.commandWorked(db.adminCommand({
- 'isMaster': 1,
+ 'hello': 1,
'$clusterTime': {
'clusterTime': higherClusterTime,
'signature':
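The hunk above advances the node's cluster time by gossiping $clusterTime on a hello request. A hedged sketch of the full document shape; the dummy signature is only accepted because these tests run without auth, and the timestamp value is illustrative:

assert.commandWorked(db.adminCommand({
    hello: 1,
    $clusterTime: {
        clusterTime: Timestamp(100, 1),  // illustrative cluster time to gossip
        signature: {hash: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAA="), keyId: NumberLong(0)},
    },
}));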
diff --git a/jstests/replsets/not_master_unacknowledged_write.js b/jstests/replsets/not_master_unacknowledged_write.js
index 1fc65ddb7ba..d91711ee1d6 100644
--- a/jstests/replsets/not_master_unacknowledged_write.js
+++ b/jstests/replsets/not_master_unacknowledged_write.js
@@ -30,7 +30,7 @@ jsTestLog("Reading from secondary ...");
{name: "count", fn: () => secondaryColl.find().count()},
].map(({name, fn}) => {
assert.doesNotThrow(fn);
- assert.eq(assert.commandWorked(secondary.getDB("admin").isMaster()).ismaster, false);
+ assert.eq(assert.commandWorked(secondary.getDB("admin").hello()).isWritablePrimary, false);
});
const postReadingCounter = getNotPrimaryUnackWritesCounter();
assert.eq(preReadingCounter, postReadingCounter);
@@ -48,12 +48,12 @@ jsTestLog("Primary on port " + primary.port + " hangs up on unacknowledged write
var result = assert.throws(function() {
// Provoke the server to hang up.
fn({writeConcern: {w: 0}});
- // The connection is now broken and isMaster throws a network error.
- secondary.getDB("admin").isMaster();
+ // The connection is now broken and hello() throws a network error.
+ secondary.getDB("admin").hello();
}, [], "network error from " + name);
assert.includes(result.toString(),
- "network error while attempting to run command 'isMaster'",
+ "network error while attempting to run command 'hello'",
"after " + name);
});
@@ -81,9 +81,9 @@ awaitShell({checkExitSuccess: false});
jsTestLog("Unacknowledged insert during stepdown provoked disconnect");
var result = assert.throws(function() {
- primary.getDB("admin").isMaster();
+ primary.getDB("admin").hello();
}, [], "network");
-assert.includes(result.toString(), "network error while attempting to run command 'isMaster'");
+assert.includes(result.toString(), "network error while attempting to run command 'hello'");
// Validate the number of unacknowledged writes that failed due to step down and resulted in
// network disconnection.
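The assertions above rely on the server hanging up a connection that sends an unacknowledged ({w: 0}) write it cannot accept; the next hello on that connection then surfaces as a network error rather than a command failure. A brief sketch of the probe, mirroring the hunk (names are illustrative):

secondaryColl.insert({x: 1}, {writeConcern: {w: 0}});  // provokes the hang-up; no reply is expected
const err = assert.throws(() => secondary.getDB("admin").hello());
assert.includes(err.toString(), "network error while attempting to run command 'hello'");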
diff --git a/jstests/replsets/quiesce_mode.js b/jstests/replsets/quiesce_mode.js
index 29a50dce1d7..6b234146bf7 100644
--- a/jstests/replsets/quiesce_mode.js
+++ b/jstests/replsets/quiesce_mode.js
@@ -1,6 +1,6 @@
/**
* Tests the behavior of quiesce mode: the period during secondary shutdown where existing
- * operations are allowed to continue and new operations are accepted, but isMaster requests return
+ * operations are allowed to continue and new operations are accepted, but hello requests return
* a ShutdownInProgress error, so that clients begin routing operations elsewhere.
* @tags: [
* requires_fcv_47,
@@ -38,9 +38,9 @@ function checkRemainingQuiesceTime(res) {
assert(res.hasOwnProperty("remainingQuiesceTimeMillis"), res);
}
-function runAwaitableIsMaster(topologyVersionField) {
+function runAwaitableHello(topologyVersionField) {
let res = assert.commandFailedWithCode(db.runCommand({
- isMaster: 1,
+ hello: 1,
topologyVersion: topologyVersionField,
maxAwaitTimeMS: 99999999,
}),
@@ -67,13 +67,13 @@ let findCmdFailPoint = configureFailPoint(secondary, "waitInFindBeforeMakingBatc
let findCmd = startParallelShell(runFind, secondary.port);
findCmdFailPoint.wait();
-jsTestLog("Create a hanging isMaster on the secondary.");
-res = assert.commandWorked(secondary.adminCommand({isMaster: 1}));
+jsTestLog("Create a hanging hello on the secondary.");
+res = assert.commandWorked(secondary.adminCommand({hello: 1}));
assert(res.hasOwnProperty("topologyVersion"), res);
let topologyVersionField = res.topologyVersion;
let isMasterFailPoint = configureFailPoint(secondary, "waitForIsMasterResponse");
-let isMaster =
- startParallelShell(funWithArgs(runAwaitableIsMaster, topologyVersionField), secondary.port);
+let hello =
+ startParallelShell(funWithArgs(runAwaitableHello, topologyVersionField), secondary.port);
isMasterFailPoint.wait();
assert.eq(1, secondary.getDB("admin").serverStatus().connections.awaitingTopologyChanges);
@@ -84,20 +84,20 @@ replTest.stop(
secondary, null /*signal*/, {skipValidation: true}, {forRestart: true, waitpid: false});
quiesceModeFailPoint.wait();
-jsTestLog("The waiting isMaster returns a ShutdownInProgress error.");
-isMaster();
+jsTestLog("The waiting hello returns a ShutdownInProgress error.");
+hello();
// We cannot check the metrics because serverStatus returns ShutdownInProgress.
assert.commandFailedWithCode(secondaryDB.adminCommand({serverStatus: 1}),
ErrorCodes.ShutdownInProgress);
-jsTestLog("New isMaster commands return a ShutdownInProgress error.");
-res = assert.commandFailedWithCode(secondary.adminCommand({isMaster: 1}),
- ErrorCodes.ShutdownInProgress);
+jsTestLog("New hello commands return a ShutdownInProgress error.");
+res =
+ assert.commandFailedWithCode(secondary.adminCommand({hello: 1}), ErrorCodes.ShutdownInProgress);
checkTopologyVersion(res, topologyVersionField);
checkRemainingQuiesceTime(res);
res = assert.commandFailedWithCode(secondary.adminCommand({
- isMaster: 1,
+ hello: 1,
topologyVersion: topologyVersionField,
maxAwaitTimeMS: 99999999,
}),
@@ -159,13 +159,12 @@ let postStepdownFailpoint = configureFailPoint(primary, "hangInShutdownAfterStep
replTest.stop(primary, null /*signal*/, {skipValidation: true}, {forRestart: true, waitpid: false});
postStepdownFailpoint.wait();
-jsTestLog("Create a hanging isMaster on the primary.");
-res = assert.commandWorked(primary.adminCommand({isMaster: 1}));
+jsTestLog("Create a hanging hello on the primary.");
+res = assert.commandWorked(primary.adminCommand({hello: 1}));
assert(res.hasOwnProperty("topologyVersion"), res);
topologyVersionField = res.topologyVersion;
isMasterFailPoint = configureFailPoint(primary, "waitForIsMasterResponse");
-isMaster =
- startParallelShell(funWithArgs(runAwaitableIsMaster, topologyVersionField), primary.port);
+hello = startParallelShell(funWithArgs(runAwaitableHello, topologyVersionField), primary.port);
isMasterFailPoint.wait();
assert.eq(1, primary.getDB("admin").serverStatus().connections.awaitingTopologyChanges);
@@ -174,20 +173,19 @@ quiesceModeFailPoint = configureFailPoint(primary, "hangDuringQuiesceMode");
postStepdownFailpoint.off();
quiesceModeFailPoint.wait();
-jsTestLog("The waiting isMaster returns a ShutdownInProgress error.");
-isMaster();
+jsTestLog("The waiting hello returns a ShutdownInProgress error.");
+hello();
// We cannot check the metrics because serverStatus returns ShutdownInProgress.
assert.commandFailedWithCode(primaryDB.adminCommand({serverStatus: 1}),
ErrorCodes.ShutdownInProgress);
-jsTestLog("New isMaster commands return a ShutdownInProgress error.");
-res = assert.commandFailedWithCode(primary.adminCommand({isMaster: 1}),
- ErrorCodes.ShutdownInProgress);
+jsTestLog("New hello commands return a ShutdownInProgress error.");
+res = assert.commandFailedWithCode(primary.adminCommand({hello: 1}), ErrorCodes.ShutdownInProgress);
checkTopologyVersion(res, topologyVersionField);
checkRemainingQuiesceTime(res);
res = assert.commandFailedWithCode(primary.adminCommand({
- isMaster: 1,
+ hello: 1,
topologyVersion: topologyVersionField,
maxAwaitTimeMS: 99999999,
}),
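The pattern exercised above: once quiesce mode begins, hello requests (including awaitable ones already waiting) fail with ShutdownInProgress, and the error body still reports a topologyVersion and remainingQuiesceTimeMillis so clients can back off and reroute. A sketch of inspecting that error, assuming a connection conn to the quiescing node:

const res = assert.commandFailedWithCode(conn.adminCommand({hello: 1}),
                                         ErrorCodes.ShutdownInProgress);
// The failed reply still carries topology information and the remaining quiesce window.
assert(res.hasOwnProperty("topologyVersion"), tojson(res));
assert(res.hasOwnProperty("remainingQuiesceTimeMillis"), tojson(res));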
diff --git a/jstests/replsets/read_committed_after_rollback.js b/jstests/replsets/read_committed_after_rollback.js
index a7e46e15e86..a4c67fb7eee 100644
--- a/jstests/replsets/read_committed_after_rollback.js
+++ b/jstests/replsets/read_committed_after_rollback.js
@@ -78,10 +78,10 @@ assert.eq(doCommittedRead(oldPrimaryColl), 'old');
oldPrimary.setSecondaryOk();
oldPrimary.disconnect(arbiters);
newPrimary.reconnect(arbiters);
-assert.soon(() => newPrimary.adminCommand('isMaster').ismaster, '', 60 * 1000);
+assert.soon(() => newPrimary.adminCommand('hello').isWritablePrimary, '', 60 * 1000);
assert.soon(function() {
try {
- return !oldPrimary.adminCommand('isMaster').ismaster;
+ return !oldPrimary.adminCommand('hello').isWritablePrimary;
} catch (e) {
return false; // ignore disconnect errors.
}
@@ -105,8 +105,7 @@ assert.eq(doCommittedRead(oldPrimaryColl), 'old');
oldPrimary.reconnect(newPrimary);
assert.soon(function() {
try {
- return oldPrimary.adminCommand('isMaster').secondary &&
- doDirtyRead(oldPrimaryColl) == 'new';
+ return oldPrimary.adminCommand('hello').secondary && doDirtyRead(oldPrimaryColl) == 'new';
} catch (e) {
return false; // ignore disconnect errors.
}
diff --git a/jstests/replsets/read_committed_stale_history.js b/jstests/replsets/read_committed_stale_history.js
index 400e8878ac5..afd78695e01 100644
--- a/jstests/replsets/read_committed_stale_history.js
+++ b/jstests/replsets/read_committed_stale_history.js
@@ -36,7 +36,7 @@ rst.initiate();
*/
function waitForPrimary(node) {
assert.soon(function() {
- return node.adminCommand('ismaster').ismaster;
+ return node.adminCommand('hello').isWritablePrimary;
});
}
diff --git a/jstests/replsets/read_concern_uninitated_set.js b/jstests/replsets/read_concern_uninitated_set.js
index 3ad97dff062..7a3abd78899 100644
--- a/jstests/replsets/read_concern_uninitated_set.js
+++ b/jstests/replsets/read_concern_uninitated_set.js
@@ -12,7 +12,7 @@ rst.startSet();
const localDB = rst.nodes[0].getDB('local');
assert.commandWorked(localDB.test.insert({_id: 0}));
assert.commandWorked(localDB.runCommand({
- isMaster: 1,
+ hello: 1,
"$clusterTime": {
"clusterTime": Timestamp(1, 1),
"signature": {"hash": BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAA="), "keyId": NumberLong(0)}
diff --git a/jstests/replsets/reconfig_add_remove_arbiter.js b/jstests/replsets/reconfig_add_remove_arbiter.js
index 0832d853064..0ae14b64b0d 100644
--- a/jstests/replsets/reconfig_add_remove_arbiter.js
+++ b/jstests/replsets/reconfig_add_remove_arbiter.js
@@ -2,7 +2,7 @@
* Test that replSetReconfig can add and remove arbiters.
*/
-// isMaster fails on the arbiter once it's removed, which blocks all checks.
+// hello fails on the arbiter once it's removed, which blocks all checks.
TestData.skipCheckDBHashes = true;
(function() {
diff --git a/jstests/replsets/reconfig_avoids_diverging_configs.js b/jstests/replsets/reconfig_avoids_diverging_configs.js
index 34d7d5a2345..203cb2f0c9b 100644
--- a/jstests/replsets/reconfig_avoids_diverging_configs.js
+++ b/jstests/replsets/reconfig_avoids_diverging_configs.js
@@ -60,7 +60,7 @@ const parallelShell = startParallelShell(
assert.commandWorked(node1.adminCommand({replSetStepUp: 1}));
rst.awaitNodesAgreeOnPrimary(rst.kDefaultTimeoutMS, [node1, node2, node3], node1);
jsTestLog("Current replica set topology: [node0 (Primary)] [node1 (Primary), node2, node3]");
-assert.soon(() => node1.getDB('admin').runCommand({ismaster: 1}).ismaster);
+assert.soon(() => node1.getDB('admin').runCommand({hello: 1}).isWritablePrimary);
assert.soon(() => isConfigCommitted(node1));
// Reconfig to remove a secondary. We need to specify the node to get the original
diff --git a/jstests/replsets/reconfig_tags.js b/jstests/replsets/reconfig_tags.js
index 86a2419f4e9..cc5ab814d24 100644
--- a/jstests/replsets/reconfig_tags.js
+++ b/jstests/replsets/reconfig_tags.js
@@ -1,4 +1,4 @@
-// test that reconfigging only tag changes is properly reflected in isMaster
+// test that a reconfig that changes only tags is properly reflected in hello
var replTest = new ReplSetTest({nodes: 2});
replTest.startSet({oplogSize: 10});
@@ -34,8 +34,8 @@ replTest.awaitSecondaryNodes();
var testDB = primary.getDB('test');
var newConn = new Mongo(primary.host);
-var isMaster = newConn.adminCommand({isMaster: 1});
-assert(isMaster.tags != null, 'isMaster: ' + tojson(isMaster));
+var hello = newConn.adminCommand({hello: 1});
+assert(hello.tags != null, 'hello: ' + tojson(hello));
-print('success: ' + tojson(isMaster));
+print('success: ' + tojson(hello));
replTest.stopSet();
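The assertion above reads the member's tag set straight off the hello reply: a tagged member reports a tags subdocument mapping tag names to values. A tiny sketch, assuming a connection conn (the tag names are made up for illustration):

const hello = assert.commandWorked(conn.adminCommand({hello: 1}));
// e.g. {"dc": "east", "use": "reporting"} for a member configured with those tags.
printjson(hello.tags);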
diff --git a/jstests/replsets/remove1.js b/jstests/replsets/remove1.js
index b54edd6d7dc..7f0296db749 100644
--- a/jstests/replsets/remove1.js
+++ b/jstests/replsets/remove1.js
@@ -25,11 +25,11 @@ print("Start set with two nodes");
var replTest = new ReplSetTest({name: name, nodes: 2});
var nodes = replTest.startSet();
replTest.initiate();
-var master = replTest.getPrimary();
+var primary = replTest.getPrimary();
var secondary = replTest.getSecondary();
print("Initial sync");
-master.getDB("foo").bar.baz.insert({x: 1});
+primary.getDB("foo").bar.baz.insert({x: 1});
replTest.awaitReplication();
@@ -49,7 +49,7 @@ assert.eq(secondary.getDB("admin").runCommand({ping: 1}).ok,
"we should be connected to the secondary");
try {
- master.getDB("admin").runCommand({replSetReconfig: config});
+ primary.getDB("admin").runCommand({replSetReconfig: config});
} catch (e) {
print(e);
}
@@ -68,10 +68,10 @@ assert.soon(function() {
assert.eq(
secondary.getDB("admin").runCommand({ping: 1}).ok, 1, "we aren't connected to the secondary");
-reconnect(master);
+reconnect(primary);
assert.soon(function() {
- var c = master.getDB("local").system.replset.findOne();
+ var c = primary.getDB("local").system.replset.findOne();
return c.version == nextVersion;
});
@@ -89,15 +89,15 @@ assert.soon(function() {
return false;
}
});
-master = replTest.getPrimary();
+primary = replTest.getPrimary();
// Wait and account for 'newlyAdded' automatic reconfig.
nextVersion++;
replTest.waitForAllNewlyAddedRemovals();
secondary = replTest.getSecondary();
-printjson(master.getDB("admin").runCommand({replSetGetStatus: 1}));
-var newConfig = master.getDB("local").system.replset.findOne();
+printjson(primary.getDB("admin").runCommand({replSetGetStatus: 1}));
+var newConfig = primary.getDB("local").system.replset.findOne();
print("newConfig: " + tojson(newConfig));
assert.eq(newConfig.version, nextVersion);
@@ -106,27 +106,27 @@ replTest.stop(secondary);
assert.soon(function() {
try {
- return master.getDB("admin").runCommand({isMaster: 1}).secondary;
+ return primary.getDB("admin").runCommand({hello: 1}).secondary;
} catch (e) {
- print("trying to get master: " + e);
+ print("trying to get primary: " + e);
}
}, "waiting for primary to step down", (60 * 1000), 1000);
nextVersion++;
config.version = nextVersion;
-config.members = config.members.filter(node => node.host == master.host);
+config.members = config.members.filter(node => node.host == primary.host);
try {
- master.getDB("admin").runCommand({replSetReconfig: config, force: true});
+ primary.getDB("admin").runCommand({replSetReconfig: config, force: true});
} catch (e) {
print(e);
}
-reconnect(master);
+reconnect(primary);
assert.soon(function() {
- return master.getDB("admin").runCommand({isMaster: 1}).ismaster;
+ return primary.getDB("admin").runCommand({hello: 1}).isWritablePrimary;
}, "waiting for old primary to accept reconfig and step up", (60 * 1000), 1000);
-config = master.getDB("local").system.replset.findOne();
+config = primary.getDB("local").system.replset.findOne();
printjson(config);
assert.gt(config.version, nextVersion);
diff --git a/jstests/replsets/replset1.js b/jstests/replsets/replset1.js
index 35ceb70121f..3ad42615db9 100644
--- a/jstests/replsets/replset1.js
+++ b/jstests/replsets/replset1.js
@@ -83,9 +83,9 @@ var doTest = function(signal) {
// Now, let's make sure that the old primary comes up as a secondary
assert.soon(function() {
- var res = secondary.getDB("admin").runCommand({ismaster: 1});
+ var res = secondary.getDB("admin").runCommand({hello: 1});
printjson(res);
- return res['ok'] == 1 && res['ismaster'] == false;
+ return res['ok'] == 1 && res['isWritablePrimary'] == false;
});
// And we need to make sure that the replset comes back up
diff --git a/jstests/replsets/replset4.js b/jstests/replsets/replset4.js
index 310b5709ebf..0ac4141a6df 100644
--- a/jstests/replsets/replset4.js
+++ b/jstests/replsets/replset4.js
@@ -18,8 +18,8 @@ doTest = function(signal) {
assert.soon(function() {
try {
- var result = primary.getDB("admin").runCommand({ismaster: 1});
- return (result['ok'] == 1 && result['ismaster'] == false);
+ var result = primary.getDB("admin").runCommand({hello: 1});
+ return (result['ok'] == 1 && result['isWritablePrimary'] == false);
} catch (e) {
print("replset4.js caught " + e);
return false;
diff --git a/jstests/replsets/replsetarb2.js b/jstests/replsets/replsetarb2.js
index cda2c371180..14066bf7762 100644
--- a/jstests/replsets/replsetarb2.js
+++ b/jstests/replsets/replsetarb2.js
@@ -26,7 +26,7 @@ assert.soon(function() {
return res.myState === 7;
}, "Aribiter failed to initialize.");
-var result = conns[1].getDB("admin").runCommand({isMaster: 1});
+var result = conns[1].getDB("admin").runCommand({hello: 1});
assert(result.arbiterOnly);
assert(!result.passive);
diff --git a/jstests/replsets/replsetfreeze.js b/jstests/replsets/replsetfreeze.js
index a9b3fdc85ba..011989e012f 100644
--- a/jstests/replsets/replsetfreeze.js
+++ b/jstests/replsets/replsetfreeze.js
@@ -2,12 +2,12 @@
* 1: initialize set
* 2: step down m1
* 3: freeze set for 30 seconds
- * 4: check no one is master for 30 seconds
- * 5: check for new master
- * 6: step down new master
+ * 4: check no one is primary for 30 seconds
+ * 5: check for new primary
+ * 6: step down new primary
* 7: freeze for 30 seconds
* 8: unfreeze
- * 9: check we get a new master within 30 seconds
+ * 9: check we get a new primary within 30 seconds
*/
var w = 0;
@@ -53,7 +53,7 @@ var r = replTest.initiate(config);
replTest.awaitNodesAgreeOnPrimary();
-var master = replTest.getPrimary();
+var primary = replTest.getPrimary();
var secondary = replTest.getSecondary();
jsTestLog('2: freeze secondary ' + secondary.host +
@@ -62,48 +62,48 @@ jsTestLog('2: freeze secondary ' + secondary.host +
assert.commandWorked(secondary.getDB("admin").runCommand({replSetFreeze: 600}));
assert.commandFailedWithCode(
- master.getDB("admin").runCommand({replSetFreeze: 30}),
+ primary.getDB("admin").runCommand({replSetFreeze: 30}),
ErrorCodes.NotSecondary,
- 'replSetFreeze should return error when run on primary ' + master.host);
+ 'replSetFreeze should return error when run on primary ' + primary.host);
-jsTestLog('3: step down primary ' + master.host);
-assert.commandWorked(master.getDB("admin").runCommand({replSetStepDown: 10, force: 1}));
-printjson(master.getDB("admin").runCommand({replSetGetStatus: 1}));
+jsTestLog('3: step down primary ' + primary.host);
+assert.commandWorked(primary.getDB("admin").runCommand({replSetStepDown: 10, force: 1}));
+printjson(primary.getDB("admin").runCommand({replSetGetStatus: 1}));
-jsTestLog('4: freeze stepped down primary ' + master.host + ' for 30 seconds');
+jsTestLog('4: freeze stepped down primary ' + primary.host + ' for 30 seconds');
var start = (new Date()).getTime();
-assert.commandWorked(master.getDB("admin").runCommand({replSetFreeze: 30}));
+assert.commandWorked(primary.getDB("admin").runCommand({replSetFreeze: 30}));
-jsTestLog('5: check no one is master for 30 seconds');
+jsTestLog('5: check no one is primary for 30 seconds');
while ((new Date()).getTime() - start <
       (28 * 1000)) {  // we need less than 30 since it takes some time to return... hacky
- var result = master.getDB("admin").runCommand({isMaster: 1});
- assert.eq(result.ismaster, false);
+ var result = primary.getDB("admin").runCommand({hello: 1});
+ assert.eq(result.isWritablePrimary, false);
assert.eq(result.primary, undefined);
sleep(1000);
}
jsTestLog('6: check for new primary');
var newPrimary = replTest.getPrimary();
-assert.eq(master.host,
+assert.eq(primary.host,
newPrimary.host,
'new primary should be the same node as primary that previously stepped down');
-jsTestLog('7: step down new master ' + master.host);
-assert.commandWorked(master.getDB("admin").runCommand({replSetStepDown: 10, force: 1}));
+jsTestLog('7: step down new primary ' + primary.host);
+assert.commandWorked(primary.getDB("admin").runCommand({replSetStepDown: 10, force: 1}));
-jsTestLog('8: freeze stepped down primary ' + master.host + ' for 30 seconds');
-master.getDB("admin").runCommand({replSetFreeze: 30});
+jsTestLog('8: freeze stepped down primary ' + primary.host + ' for 30 seconds');
+primary.getDB("admin").runCommand({replSetFreeze: 30});
sleep(1000);
-jsTestLog('9: unfreeze stepped down primary ' + master.host + ' after waiting for 1 second');
-master.getDB("admin").runCommand({replSetFreeze: 0});
+jsTestLog('9: unfreeze stepped down primary ' + primary.host + ' after waiting for 1 second');
+primary.getDB("admin").runCommand({replSetFreeze: 0});
-jsTestLog('10: wait for unfrozen node ' + master.host + ' to become primary again');
+jsTestLog('10: wait for unfrozen node ' + primary.host + ' to become primary again');
newPrimary = replTest.getPrimary();
jsTestLog('Primary after unfreezing node: ' + newPrimary.host);
assert.eq(
- master.host,
+ primary.host,
newPrimary.host,
'new primary after unfreezing should be the same node as primary that previously stepped down');
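For context on the freeze/unfreeze sequence above: replSetFreeze: N keeps a secondary from seeking election for N seconds, replSetFreeze: 0 lifts the freeze immediately, and the command fails with NotSecondary when run against a writable primary, which is exactly what the test asserts. A compact sketch, assuming a secondary connection conn:

assert.commandWorked(conn.adminCommand({replSetFreeze: 30}));  // no election attempts for ~30s
// ... later ...
assert.commandWorked(conn.adminCommand({replSetFreeze: 0}));   // unfreeze right away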
diff --git a/jstests/replsets/rollback_auth.js b/jstests/replsets/rollback_auth.js
index 372f84ce645..6a4bb9ec392 100644
--- a/jstests/replsets/rollback_auth.js
+++ b/jstests/replsets/rollback_auth.js
@@ -42,9 +42,9 @@ replTest.initiate({
]
});
-// Make sure we have a master
+// Make sure we have a primary
replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
-var master = replTest.getPrimary();
+var primary = replTest.getPrimary();
var a_conn = conns[0];
var b_conn = conns[1];
a_conn.setSecondaryOk();
@@ -53,8 +53,8 @@ var A = a_conn.getDB("admin");
var B = b_conn.getDB("admin");
var a = a_conn.getDB("test");
var b = b_conn.getDB("test");
-assert.eq(master, conns[0], "conns[0] assumed to be master");
-assert.eq(a_conn, master);
+assert.eq(primary, conns[0], "conns[0] assumed to be primary");
+assert.eq(a_conn, primary);
// Make sure we have an arbiter
assert.soon(function() {
@@ -115,15 +115,15 @@ assert.commandFailedWithCode(b.runCommand({collStats: 'foobar'}), authzErrorCode
jsTestLog("Doing writes that will eventually be rolled back");
-// down A and wait for B to become master
+// down A and wait for B to become primary
replTest.stop(0);
assert.soon(function() {
try {
- return B.isMaster().ismaster;
+ return B.hello().isWritablePrimary;
} catch (e) {
return false;
}
-}, "B didn't become master");
+}, "B didn't become primary");
printjson(b.adminCommand('replSetGetStatus'));
// Modify the user and role in a way that will be rolled back.
@@ -146,18 +146,18 @@ assert.commandWorked(b.runCommand({collStats: 'bar'}));
assert.commandFailedWithCode(b.runCommand({collStats: 'baz'}), authzErrorCode);
assert.commandFailedWithCode(b.runCommand({collStats: 'foobar'}), authzErrorCode);
-// down B, bring A back up, then wait for A to become master
+// down B, bring A back up, then wait for A to become primary
// insert new data into A so that B will need to rollback when it reconnects to A
replTest.stop(1);
replTest.restart(0);
assert.soon(function() {
try {
- return A.isMaster().ismaster;
+ return A.hello().isWritablePrimary;
} catch (e) {
return false;
}
-}, "A didn't become master");
+}, "A didn't become primary");
// A should not have the new data as it was down
assert.commandWorked(a.runCommand({dbStats: 1}));
diff --git a/jstests/replsets/rollback_crud_op_sequences.js b/jstests/replsets/rollback_crud_op_sequences.js
index cd42c303a96..74a10594bb7 100644
--- a/jstests/replsets/rollback_crud_op_sequences.js
+++ b/jstests/replsets/rollback_crud_op_sequences.js
@@ -41,17 +41,17 @@ replTest.initiate({
]
});
-// Make sure we have a master and that that master is node A
+// Make sure we have a primary and that that primary is node A
replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
-var master = replTest.getPrimary();
+var primary = replTest.getPrimary();
var a_conn = conns[0];
a_conn.setSecondaryOk();
var A = a_conn.getDB("admin");
var b_conn = conns[1];
b_conn.setSecondaryOk();
var B = b_conn.getDB("admin");
-assert.eq(master, conns[0], "conns[0] assumed to be master");
-assert.eq(a_conn, master);
+assert.eq(primary, conns[0], "conns[0] assumed to be primary");
+assert.eq(a_conn, primary);
// Wait for initial replication
var a = a_conn.getDB("foo");
@@ -71,16 +71,16 @@ assert.commandWorked(a.kap.insert({foo: 1}));
a.createCollection("kap2", {capped: true, size: 5501});
replTest.awaitReplication();
-// isolate A and wait for B to become master
+// isolate A and wait for B to become primary
conns[0].disconnect(conns[1]);
conns[0].disconnect(conns[2]);
assert.soon(function() {
try {
- return B.isMaster().ismaster;
+ return B.hello().isWritablePrimary;
} catch (e) {
return false;
}
-}, "node B did not become master as expected", ReplSetTest.kDefaultTimeoutMS);
+}, "node B did not become primary as expected", ReplSetTest.kDefaultTimeoutMS);
// do operations on B and B alone, these will be rolled back
assert.commandWorked(b.bar.insert({q: 4}));
@@ -97,12 +97,12 @@ assert.commandWorked(b.newcoll.insert({a: true}));
// create a new empty collection (need to roll back the whole thing)
b.createCollection("abc");
-// isolate B, bring A back into contact with the arbiter, then wait for A to become master
+// isolate B, bring A back into contact with the arbiter, then wait for A to become primary
// insert new data into A so that B will need to rollback when it reconnects to A
conns[1].disconnect(conns[2]);
assert.soon(function() {
try {
- return !B.isMaster().ismaster;
+ return !B.hello().isWritablePrimary;
} catch (e) {
return false;
}
@@ -111,7 +111,7 @@ assert.soon(function() {
conns[0].reconnect(conns[2]);
assert.soon(function() {
try {
- return A.isMaster().ismaster;
+ return A.hello().isWritablePrimary;
} catch (e) {
return false;
}
diff --git a/jstests/replsets/rollback_ddl_op_sequences.js b/jstests/replsets/rollback_ddl_op_sequences.js
index 62b2fb9cae2..0c5c2f27fad 100644
--- a/jstests/replsets/rollback_ddl_op_sequences.js
+++ b/jstests/replsets/rollback_ddl_op_sequences.js
@@ -50,17 +50,17 @@ replTest.initiate({
]
});
-// Make sure we have a master and that that master is node A
+// Make sure we have a primary and that that primary is node A
replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
-var master = replTest.getPrimary();
+var primary = replTest.getPrimary();
var a_conn = conns[0];
a_conn.setSecondaryOk();
var A = a_conn.getDB("admin");
var b_conn = conns[1];
b_conn.setSecondaryOk();
var B = b_conn.getDB("admin");
-assert.eq(master, conns[0], "conns[0] assumed to be master");
-assert.eq(a_conn, master);
+assert.eq(primary, conns[0], "conns[0] assumed to be primary");
+assert.eq(a_conn, primary);
// Wait for initial replication
var a = a_conn.getDB("foo");
@@ -88,12 +88,12 @@ a.createCollection("kap", {capped: true, size: 5000});
assert.commandWorked(a.kap.insert({foo: 1}));
replTest.awaitReplication();
-// isolate A and wait for B to become master
+// isolate A and wait for B to become primary
conns[0].disconnect(conns[1]);
conns[0].disconnect(conns[2]);
assert.soon(function() {
try {
- return B.isMaster().ismaster;
+ return B.hello().isWritablePrimary;
} catch (e) {
return false;
}
@@ -128,12 +128,12 @@ var abc = b.getSiblingDB("abc");
assert.commandWorked(abc.foo.insert({x: 1}));
assert.commandWorked(abc.bar.insert({y: 999}));
-// isolate B, bring A back into contact with the arbiter, then wait for A to become master
+// isolate B, bring A back into contact with the arbiter, then wait for A to become primary
// insert new data into A so that B will need to rollback when it reconnects to A
conns[1].disconnect(conns[2]);
assert.soon(function() {
try {
- return !B.isMaster().ismaster;
+ return !B.hello().isWritablePrimary;
} catch (e) {
return false;
}
@@ -142,7 +142,7 @@ assert.soon(function() {
conns[0].reconnect(conns[2]);
assert.soon(function() {
try {
- return A.isMaster().ismaster;
+ return A.hello().isWritablePrimary;
} catch (e) {
return false;
}
diff --git a/jstests/replsets/slavedelay1.js b/jstests/replsets/slavedelay1.js
index 61a43152288..4e5e4806758 100644
--- a/jstests/replsets/slavedelay1.js
+++ b/jstests/replsets/slavedelay1.js
@@ -93,7 +93,7 @@ doTest = function(signal) {
// wait for node to become secondary
assert.soon(function() {
- var result = conn.getDB("admin").isMaster();
+ var result = conn.getDB("admin").hello();
printjson(result);
return result.secondary;
});
diff --git a/jstests/replsets/split_horizon_hostname_validation.js b/jstests/replsets/split_horizon_hostname_validation.js
index acf790033ea..723445854cc 100644
--- a/jstests/replsets/split_horizon_hostname_validation.js
+++ b/jstests/replsets/split_horizon_hostname_validation.js
@@ -28,7 +28,7 @@ function testConfig(hostName, horizonName, expectedReject, options = {}) {
assert.commandWorked(mongod.adminCommand(
{replSetInitiate: {_id: "test", members: [{_id: 0, host: "localhost:" + mongod.port}]}}));
assert.soon(() => {
- return assert.commandWorked(mongod.adminCommand({isMaster: 1})).ismaster;
+ return assert.commandWorked(mongod.adminCommand({hello: 1})).isWritablePrimary;
});
// Make sure replSetReconfig fails with correct error
diff --git a/jstests/replsets/step_down_during_draining.js b/jstests/replsets/step_down_during_draining.js
index 13387a12610..9bf41d85daf 100644
--- a/jstests/replsets/step_down_during_draining.js
+++ b/jstests/replsets/step_down_during_draining.js
@@ -82,8 +82,8 @@ reconnect(secondary);
replSet.stepUp(secondary, {awaitReplicationBeforeStepUp: false, awaitWritablePrimary: false});
// Secondary doesn't allow writes yet.
-var res = secondary.getDB("admin").runCommand({"isMaster": 1});
-assert(!res.ismaster);
+var res = secondary.getDB("admin").runCommand({"hello": 1});
+assert(!res.isWritablePrimary);
// Original primary steps up.
reconnect(primary);
diff --git a/jstests/replsets/step_down_during_draining2.js b/jstests/replsets/step_down_during_draining2.js
index 436590ecb80..0599357b1de 100644
--- a/jstests/replsets/step_down_during_draining2.js
+++ b/jstests/replsets/step_down_during_draining2.js
@@ -85,8 +85,8 @@ reconnect(secondary);
replSet.stepUp(secondary, {awaitReplicationBeforeStepUp: false, awaitWritablePrimary: false});
// Secondary doesn't allow writes yet.
-var res = secondary.getDB("admin").runCommand({"isMaster": 1});
-assert(!res.ismaster);
+var res = secondary.getDB("admin").runCommand({"hello": 1});
+assert(!res.isWritablePrimary);
// Prevent the current primary from stepping down
jsTest.log("disallowing heartbeat stepdown " + secondary.host);
@@ -111,16 +111,16 @@ assert.soon(function() {
return secondary.getDB("foo").foo.find().itcount() == numDocuments;
});
-jsTestLog("Checking that node is PRIMARY but not master");
+jsTestLog("Checking that node is PRIMARY but not writable");
assert.eq(ReplSetTest.State.PRIMARY, secondary.adminCommand({replSetGetStatus: 1}).myState);
-assert(!secondary.adminCommand('ismaster').ismaster);
+assert(!secondary.adminCommand('hello').isWritablePrimary);
jsTest.log("allowing heartbeat stepdown " + secondary.host);
blockHeartbeatStepdownFailPoint.off();
jsTestLog("Checking that node successfully stepped down");
replSet.waitForState(secondary, ReplSetTest.State.SECONDARY);
-assert(!secondary.adminCommand('ismaster').ismaster);
+assert(!secondary.adminCommand('hello').isWritablePrimary);
// Now ensure that the node can successfully become primary again.
replSet.restart(0);
@@ -128,7 +128,7 @@ replSet.restart(2);
replSet.stepUp(secondary, {awaitReplicationBeforeStepUp: false, awaitWritablePrimary: false});
assert.soon(function() {
- return secondary.adminCommand('ismaster').ismaster;
+ return secondary.adminCommand('hello').isWritablePrimary;
});
jsTestLog('Ensure new primary is writable.');
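The pair of checks above captures drain mode: a freshly elected node reports myState PRIMARY via replSetGetStatus while hello still answers isWritablePrimary: false until it finishes applying its backlog. A short sketch of the combined probe, assuming a connection node:

assert.eq(ReplSetTest.State.PRIMARY, node.adminCommand({replSetGetStatus: 1}).myState);
assert(!node.adminCommand({hello: 1}).isWritablePrimary);  // elected, but still draining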
diff --git a/jstests/replsets/step_down_during_draining3.js b/jstests/replsets/step_down_during_draining3.js
index 71df8287150..a52736cc5f1 100644
--- a/jstests/replsets/step_down_during_draining3.js
+++ b/jstests/replsets/step_down_during_draining3.js
@@ -74,14 +74,14 @@ reconnect(secondary);
replSet.stepUp(secondary, {awaitReplicationBeforeStepUp: false, awaitWritablePrimary: false});
// Secondary doesn't allow writes yet.
-var res = secondary.getDB("admin").runCommand({"isMaster": 1});
-assert(!res.ismaster);
+var res = secondary.getDB("admin").runCommand({"hello": 1});
+assert(!res.isWritablePrimary);
assert.commandWorked(secondary.adminCommand({replSetStepDown: 60, force: true}));
// Assert stepdown was successful.
assert.eq(ReplSetTest.State.SECONDARY, secondary.adminCommand({replSetGetStatus: 1}).myState);
-assert(!secondary.adminCommand('ismaster').ismaster);
+assert(!secondary.adminCommand('hello').isWritablePrimary);
// Prevent the producer from fetching new ops
assert.commandWorked(
diff --git a/jstests/replsets/stepdown.js b/jstests/replsets/stepdown.js
index 05f3275f11b..005a22c7e10 100644
--- a/jstests/replsets/stepdown.js
+++ b/jstests/replsets/stepdown.js
@@ -153,10 +153,10 @@ try {
"failed",
0);
- jsTestLog('Checking isMaster on ' + primary);
- var r2 = assert.commandWorked(primary.getDB("admin").runCommand({ismaster: 1}));
- jsTestLog('Result from running isMaster on ' + primary + ': ' + tojson(r2));
- assert.eq(r2.ismaster, false);
+ jsTestLog('Checking hello on ' + primary);
+ var r2 = assert.commandWorked(primary.getDB("admin").runCommand({hello: 1}));
+ jsTestLog('Result from running hello on ' + primary + ': ' + tojson(r2));
+ assert.eq(r2.isWritablePrimary, false);
assert.eq(r2.secondary, true);
// Check that the 'electionCandidateMetrics' section of the replSetGetStatus response has been
@@ -214,7 +214,7 @@ replTest.awaitReplication();
// of this assert.soon
assert.soon(function() {
try {
- var result = primary.getDB("admin").runCommand({isMaster: 1});
+ var result = primary.getDB("admin").runCommand({hello: 1});
return new RegExp(":" + replTest.nodes[0].port + "$").test(result.primary);
} catch (x) {
return false;
diff --git a/jstests/replsets/temp_namespace.js b/jstests/replsets/temp_namespace.js
index de276222b95..f8a6f854a68 100644
--- a/jstests/replsets/temp_namespace.js
+++ b/jstests/replsets/temp_namespace.js
@@ -17,28 +17,25 @@ replTest.initiate({
]
});
-var master = replTest.getPrimary();
-var second = replTest.getSecondary();
+var primary = replTest.getPrimary();
+var secondary = replTest.getSecondary();
-var masterId = replTest.getNodeId(master);
-var secondId = replTest.getNodeId(second);
-
-var masterDB = master.getDB('test');
-var secondDB = second.getDB('test');
+var primaryDB = primary.getDB('test');
+var secondaryDB = secondary.getDB('test');
// set up collections
-assert.commandWorked(masterDB.runCommand(
- {applyOps: [{op: "c", ns: masterDB.getName() + ".$cmd", o: {create: "temp1", temp: true}}]}));
-masterDB.temp1.ensureIndex({x: 1});
-assert.commandWorked(masterDB.runCommand(
- {applyOps: [{op: "c", ns: masterDB.getName() + ".$cmd", o: {create: "temp2", temp: 1}}]}));
-masterDB.temp2.ensureIndex({x: 1});
-assert.commandWorked(masterDB.runCommand(
- {applyOps: [{op: "c", ns: masterDB.getName() + ".$cmd", o: {create: "keep1", temp: false}}]}));
-assert.commandWorked(masterDB.runCommand(
- {applyOps: [{op: "c", ns: masterDB.getName() + ".$cmd", o: {create: "keep2", temp: 0}}]}));
-masterDB.runCommand({create: 'keep3'});
-assert.commandWorked(masterDB.keep4.insert({}, {writeConcern: {w: 2}}));
+assert.commandWorked(primaryDB.runCommand(
+ {applyOps: [{op: "c", ns: primaryDB.getName() + ".$cmd", o: {create: "temp1", temp: true}}]}));
+primaryDB.temp1.ensureIndex({x: 1});
+assert.commandWorked(primaryDB.runCommand(
+ {applyOps: [{op: "c", ns: primaryDB.getName() + ".$cmd", o: {create: "temp2", temp: 1}}]}));
+primaryDB.temp2.ensureIndex({x: 1});
+assert.commandWorked(primaryDB.runCommand(
+ {applyOps: [{op: "c", ns: primaryDB.getName() + ".$cmd", o: {create: "keep1", temp: false}}]}));
+assert.commandWorked(primaryDB.runCommand(
+ {applyOps: [{op: "c", ns: primaryDB.getName() + ".$cmd", o: {create: "keep2", temp: 0}}]}));
+primaryDB.runCommand({create: 'keep3'});
+assert.commandWorked(primaryDB.keep4.insert({}, {writeConcern: {w: 2}}));
// make sure they exist on primary and secondary
function countCollection(mydb, nameFilter) {
@@ -59,21 +56,21 @@ function countIndexesFor(mydb, nameFilter) {
return total;
}
-assert.eq(countCollection(masterDB, /temp\d$/), 2); // collections
-assert.eq(countIndexesFor(masterDB, /temp\d$/), 4); // indexes (2 _id + 2 x)
-assert.eq(countCollection(masterDB, /keep\d$/), 4);
+assert.eq(countCollection(primaryDB, /temp\d$/), 2); // collections
+assert.eq(countIndexesFor(primaryDB, /temp\d$/), 4); // indexes (2 _id + 2 x)
+assert.eq(countCollection(primaryDB, /keep\d$/), 4);
-assert.eq(countCollection(secondDB, /temp\d$/), 2); // collections
-assert.eq(countIndexesFor(secondDB, /temp\d$/), 4); // indexes (2 _id + 2 x)
-assert.eq(countCollection(secondDB, /keep\d$/), 4);
+assert.eq(countCollection(secondaryDB, /temp\d$/), 2); // collections
+assert.eq(countIndexesFor(secondaryDB, /temp\d$/), 4); // indexes (2 _id + 2 x)
+assert.eq(countCollection(secondaryDB, /keep\d$/), 4);
// restart secondary and reconnect
-replTest.restart(secondId, {}, /*wait=*/true);
+replTest.restart(replTest.getNodeId(secondary), {}, /*wait=*/true);
// wait for the secondary to achieve secondary status
assert.soon(function() {
try {
- res = second.getDB("admin").runCommand({replSetGetStatus: 1});
+ res = secondary.getDB("admin").runCommand({replSetGetStatus: 1});
return res.myState == 2;
} catch (e) {
return false;
@@ -81,25 +78,25 @@ assert.soon(function() {
}, "took more than a minute for the secondary to become secondary again", 60 * 1000);
// make sure restarting secondary didn't drop collections
-assert.eq(countCollection(secondDB, /temp\d$/), 2); // collections
-assert.eq(countIndexesFor(secondDB, /temp\d$/), 4); // indexes (2 _id + 2 x)
-assert.eq(countCollection(secondDB, /keep\d$/), 4);
+assert.eq(countCollection(secondaryDB, /temp\d$/), 2); // collections
+assert.eq(countIndexesFor(secondaryDB, /temp\d$/), 4); // indexes (2 _id + 2 x)
+assert.eq(countCollection(secondaryDB, /keep\d$/), 4);
// step down primary and make sure former secondary (now primary) drops collections
-assert.commandWorked(master.adminCommand({replSetStepDown: 50, force: true}));
+assert.commandWorked(primary.adminCommand({replSetStepDown: 50, force: true}));
assert.soon(function() {
- return secondDB.isMaster().ismaster;
+ return secondaryDB.hello().isWritablePrimary;
}, '', 75 * 1000); // must wait for secondary to be willing to promote self
-assert.eq(countCollection(secondDB, /temp\d$/), 0); // collections
-assert.eq(countIndexesFor(secondDB, /temp\d$/), 0); // indexes (2 _id + 2 x)
-assert.eq(countCollection(secondDB, /keep\d$/), 4);
+assert.eq(countCollection(secondaryDB, /temp\d$/), 0); // collections
+assert.eq(countIndexesFor(secondaryDB, /temp\d$/), 0);  // indexes
+assert.eq(countCollection(secondaryDB, /keep\d$/), 4);
// check that former primary dropped collections
replTest.awaitReplication();
-assert.eq(countCollection(masterDB, /temp\d$/), 0); // collections
-assert.eq(countIndexesFor(masterDB, /temp\d$/), 0); // indexes (2 _id + 2 x)
-assert.eq(countCollection(masterDB, /keep\d$/), 4);
+assert.eq(countCollection(primaryDB, /temp\d$/), 0); // collections
+assert.eq(countIndexesFor(primaryDB, /temp\d$/), 0);  // indexes
+assert.eq(countCollection(primaryDB, /keep\d$/), 4);
replTest.stopSet();
diff --git a/jstests/replsets/unconditional_step_down.js b/jstests/replsets/unconditional_step_down.js
index febe57677ba..0fc1271f6ec 100644
--- a/jstests/replsets/unconditional_step_down.js
+++ b/jstests/replsets/unconditional_step_down.js
@@ -67,7 +67,7 @@ function runStepDownTest({testMsg, stepDownFn, toRemovedState}) {
var startSafeParallelShell = (func, port) => {
TestData.func = func;
var safeFunc = (toRemovedState) ? () => {
- assert.commandWorked(db.adminCommand({isMaster: 1, hangUpOnStepDown: false}));
+ assert.commandWorked(db.adminCommand({hello: 1, hangUpOnStepDown: false}));
TestData.func();
} : func;
return startParallelShell(safeFunc, port);
diff --git a/jstests/replsets/write_concern_after_stepdown.js b/jstests/replsets/write_concern_after_stepdown.js
index 39d9dab22e8..ef08710356a 100644
--- a/jstests/replsets/write_concern_after_stepdown.js
+++ b/jstests/replsets/write_concern_after_stepdown.js
@@ -27,7 +27,7 @@ rst.initiate();
function waitForPrimary(node) {
assert.soon(function() {
- return node.adminCommand('ismaster').ismaster;
+ return node.adminCommand('hello').isWritablePrimary;
});
}
@@ -52,10 +52,10 @@ assert.commandWorked(
jsTestLog("Do w:majority write that will block waiting for replication.");
var doMajorityWrite = function() {
- // Run ismaster command with 'hangUpOnStepDown' set to false to mark this connection as
+ // Run hello command with 'hangUpOnStepDown' set to false to mark this connection as
// one that shouldn't be closed when the node steps down. This makes it easier to detect
// the error returned by the write concern failure.
- assert.commandWorked(db.adminCommand({ismaster: 1, hangUpOnStepDown: false}));
+ assert.commandWorked(db.adminCommand({hello: 1, hangUpOnStepDown: false}));
jsTestLog("Begin waiting for w:majority write");
var res = db.getSiblingDB('wMajorityCheck').stepdownAndBackUp.insert({a: 2}, {
diff --git a/jstests/replsets/write_concern_after_stepdown_and_stepup.js b/jstests/replsets/write_concern_after_stepdown_and_stepup.js
index 1bcaad85244..c4321d8e92b 100644
--- a/jstests/replsets/write_concern_after_stepdown_and_stepup.js
+++ b/jstests/replsets/write_concern_after_stepdown_and_stepup.js
@@ -28,7 +28,7 @@ rst.initiate();
function waitForPrimary(node) {
assert.soon(function() {
- return node.adminCommand('ismaster').ismaster;
+ return node.adminCommand('hello').isWritablePrimary;
});
}
@@ -58,10 +58,10 @@ const hangBeforeWaitingForWriteConcern =
jsTestLog("Do w:majority write that won't enter awaitReplication() until after the primary " +
"has stepped down and back up");
var doMajorityWrite = function() {
- // Run ismaster command with 'hangUpOnStepDown' set to false to mark this connection as
+ // Run hello command with 'hangUpOnStepDown' set to false to mark this connection as
// one that shouldn't be closed when the node steps down. This simulates the scenario where
// the write was coming from a mongos.
- assert.commandWorked(db.adminCommand({ismaster: 1, hangUpOnStepDown: false}));
+ assert.commandWorked(db.adminCommand({hello: 1, hangUpOnStepDown: false}));
var res = db.getSiblingDB('wMajorityCheck').stepdownAndBackUp.insert({a: 2}, {
writeConcern: {w: 'majority'}