summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--jstests/concurrency/fsm_libs/cluster.js6
-rw-r--r--jstests/concurrency/fsm_workloads/create_index_background_unique.js4
-rw-r--r--jstests/core/failcommand_failpoint.js50
-rw-r--r--jstests/hooks/run_reconfig_background.js2
-rw-r--r--jstests/libs/override_methods/continuous_stepdown.js2
-rw-r--r--jstests/libs/override_methods/network_error_and_txn_override.js4
-rw-r--r--jstests/libs/override_methods/validate_collections_on_shutdown.js2
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/3_upgrade_replset.js3
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js4
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster.js4
-rw-r--r--jstests/multiVersion/minor_version_upgrade_replset.js4
-rw-r--r--jstests/noPassthrough/auto_retry_on_network_error.js2
-rw-r--r--jstests/noPassthrough/merge_out_on_secondary_fail_on_stepdown.js6
-rw-r--r--jstests/noPassthrough/replica_set_connection_error_codes.js10
-rw-r--r--jstests/noPassthrough/server_transaction_metrics_secondary.js6
-rw-r--r--jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js4
-rw-r--r--jstests/noPassthrough/txn_override_causal_consistency.js2
-rw-r--r--jstests/replsets/catchup_takeover_two_nodes_ahead.js4
-rw-r--r--jstests/replsets/emptycapped.js2
-rw-r--r--jstests/replsets/libs/rollback_test.js2
-rw-r--r--jstests/replsets/linearizable_read_concern.js2
-rw-r--r--jstests/replsets/notmaster_errors_return_topology_version.js22
-rw-r--r--jstests/replsets/prepared_transaction_commands_fail_on_secondaries.js6
-rw-r--r--jstests/replsets/retryable_commit_transaction_after_failover.js2
-rw-r--r--jstests/replsets/step_down_on_secondary.js2
-rw-r--r--jstests/replsets/stepdown.js2
-rw-r--r--jstests/replsets/stepdown_killop.js10
-rw-r--r--jstests/replsets/stepdown_long_wait_time.js9
-rw-r--r--jstests/replsets/transactions_on_secondaries_not_allowed.js14
-rw-r--r--jstests/replsets/transactions_only_allowed_on_primaries.js13
-rw-r--r--jstests/replsets/txn_override_unittests.js148
-rw-r--r--jstests/replsets/validate_fails_during_rollback.js3
-rw-r--r--jstests/sharding/change_stream_error_label.js2
-rw-r--r--jstests/sharding/linearizable_read_concern.js2
-rw-r--r--jstests/sharding/migration_coordinator_failover_include.js4
-rw-r--r--jstests/sharding/read_write_concern_defaults_commands_api.js6
-rw-r--r--jstests/sharding/retryable_write_error_labels.js2
-rw-r--r--jstests/sharding/transient_txn_error_labels.js21
-rw-r--r--jstests/sharding/txn_two_phase_commit_basic.js2
-rw-r--r--src/mongo/base/error_codes.yml2
-rw-r--r--src/mongo/client/dbclient_connection.cpp2
-rw-r--r--src/mongo/client/dbclient_rs.cpp2
-rw-r--r--src/mongo/client/scanning_replica_set_monitor.cpp4
-rw-r--r--src/mongo/client/streamable_replica_set_monitor_error_handler_test.cpp2
-rw-r--r--src/mongo/db/catalog/capped_utils.cpp4
-rw-r--r--src/mongo/db/catalog/coll_mod.cpp2
-rw-r--r--src/mongo/db/catalog/create_collection.cpp4
-rw-r--r--src/mongo/db/catalog/drop_collection.cpp4
-rw-r--r--src/mongo/db/catalog/drop_database.cpp2
-rw-r--r--src/mongo/db/catalog/drop_database_test.cpp4
-rw-r--r--src/mongo/db/catalog/drop_indexes.cpp2
-rw-r--r--src/mongo/db/catalog/rename_collection.cpp2
-rw-r--r--src/mongo/db/catalog/rename_collection_test.cpp4
-rw-r--r--src/mongo/db/cloner.cpp4
-rw-r--r--src/mongo/db/commands/collection_to_capped.cpp2
-rw-r--r--src/mongo/db/commands/create_indexes.cpp4
-rw-r--r--src/mongo/db/commands/drop_indexes.cpp2
-rw-r--r--src/mongo/db/commands/find_and_modify.cpp2
-rw-r--r--src/mongo/db/commands/oplog_note.cpp2
-rw-r--r--src/mongo/db/commands/shutdown_d.cpp4
-rw-r--r--src/mongo/db/error_labels_test.cpp19
-rw-r--r--src/mongo/db/index_builds_coordinator.cpp6
-rw-r--r--src/mongo/db/index_builds_coordinator_mongod.cpp2
-rw-r--r--src/mongo/db/mongod_main.cpp4
-rw-r--r--src/mongo/db/op_observer_impl_test.cpp3
-rw-r--r--src/mongo/db/read_concern_mongod.cpp4
-rw-r--r--src/mongo/db/repl/apply_ops.cpp2
-rw-r--r--src/mongo/db/repl/oplog.cpp2
-rw-r--r--src/mongo/db/repl/primary_only_service.cpp4
-rw-r--r--src/mongo/db/repl/primary_only_service.h2
-rw-r--r--src/mongo/db/repl/primary_only_service_test.cpp8
-rw-r--r--src/mongo/db/repl/repl_set_commands.cpp4
-rw-r--r--src/mongo/db/repl/replication_coordinator.h2
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.cpp14
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp4
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp8
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_test.cpp8
-rw-r--r--src/mongo/db/repl/tenant_oplog_applier.cpp4
-rw-r--r--src/mongo/db/repl/topology_coordinator.cpp2
-rw-r--r--src/mongo/db/repl/topology_coordinator.h4
-rw-r--r--src/mongo/db/repl/topology_coordinator_v1_test.cpp2
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp2
-rw-r--r--src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp2
-rw-r--r--src/mongo/db/s/migration_destination_manager.cpp2
-rw-r--r--src/mongo/db/s/set_shard_version_command.cpp2
-rw-r--r--src/mongo/db/s/shard_server_catalog_cache_loader.cpp4
-rw-r--r--src/mongo/db/s/transaction_coordinator_futures_util_test.cpp6
-rw-r--r--src/mongo/db/s/transaction_coordinator_service.cpp9
-rw-r--r--src/mongo/db/s/transaction_coordinator_service.h4
-rw-r--r--src/mongo/db/s/transaction_coordinator_service_test.cpp6
-rw-r--r--src/mongo/db/service_entry_point_common.cpp17
-rw-r--r--src/mongo/db/sessions_collection_rs.cpp4
-rw-r--r--src/mongo/db/system_index.cpp2
-rw-r--r--src/mongo/db/transaction_participant.cpp8
-rw-r--r--src/mongo/db/transaction_participant.h2
-rw-r--r--src/mongo/db/transaction_participant_test.cpp20
-rw-r--r--src/mongo/db/write_concern.h2
-rw-r--r--src/mongo/rpc/op_msg_integration_test.cpp14
-rw-r--r--src/mongo/s/async_requests_sender.cpp2
-rw-r--r--src/mongo/s/catalog/dist_lock_catalog_impl.cpp2
-rw-r--r--src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp4
-rw-r--r--src/mongo/s/catalog/replset_dist_lock_manager.cpp6
-rw-r--r--src/mongo/s/catalog/replset_dist_lock_manager_test.cpp4
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client.h2
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_impl.h2
-rw-r--r--src/mongo/s/catalog/sharding_catalog_test.cpp23
-rw-r--r--src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp20
-rw-r--r--src/mongo/s/client/shard_registry.h2
-rw-r--r--src/mongo/s/commands/batch_downconvert.cpp3
-rw-r--r--src/mongo/s/query/async_results_merger.cpp2
-rw-r--r--src/mongo/s/sessions_collection_sharded_test.cpp8
-rw-r--r--src/mongo/s/write_ops/batch_write_exec_test.cpp4
-rw-r--r--src/mongo/shell/utils.js2
-rw-r--r--src/mongo/util/assert_util_test.cpp29
114 files changed, 411 insertions, 373 deletions
diff --git a/jstests/concurrency/fsm_libs/cluster.js b/jstests/concurrency/fsm_libs/cluster.js
index 62ae4849682..02f357d1486 100644
--- a/jstests/concurrency/fsm_libs/cluster.js
+++ b/jstests/concurrency/fsm_libs/cluster.js
@@ -390,7 +390,7 @@ var Cluster = function(options) {
assert(this.isSharded(), 'cluster is not sharded');
// If we are continuously stepping down shards, the config server may have stale view of the
- // cluster, so retry on retryable errors, e.g. NotMaster.
+ // cluster, so retry on retryable errors, e.g. NotWritablePrimary.
if (this.shouldPerformContinuousStepdowns()) {
assert.soon(() => {
try {
@@ -404,7 +404,7 @@ var Cluster = function(options) {
// done.
//
// TODO SERVER-30949: Remove this try catch block once listCollections and
- // listIndexes automatically retry on NotMaster errors.
+ // listIndexes automatically retry on NotWritablePrimary errors.
if (e.code === 18630 || // listCollections failure
e.code === 18631) { // listIndexes failure
print("Caught retryable error from shardCollection, retrying: " +
@@ -515,7 +515,7 @@ var Cluster = function(options) {
// mongos is stale.
//
// TODO SERVER-30949: listCollections through mongos should automatically retry on
- // NotMaster errors. Once that is true, remove this check.
+ // NotWritablePrimary errors. Once that is true, remove this check.
if (isSteppingDownConfigServers && isMongos &&
(dbInfo.name === "admin" || dbInfo.name === "config")) {
return;
diff --git a/jstests/concurrency/fsm_workloads/create_index_background_unique.js b/jstests/concurrency/fsm_workloads/create_index_background_unique.js
index af074da8965..9f43db15b88 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background_unique.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background_unique.js
@@ -51,12 +51,12 @@ var $config = (function() {
ErrorCodes.SnapshotUnavailable,
ErrorCodes.SnapshotTooOld,
ErrorCodes.NoMatchingDocument,
- ErrorCodes.NotMaster,
+ ErrorCodes.NotWritablePrimary,
],
[
ErrorCodes.IndexBuildAborted,
ErrorCodes.NoMatchingDocument,
- ErrorCodes.NotMaster,
+ ErrorCodes.NotWritablePrimary,
]);
}
diff --git a/jstests/core/failcommand_failpoint.js b/jstests/core/failcommand_failpoint.js
index e76a72c2c19..9f246dc177e 100644
--- a/jstests/core/failcommand_failpoint.js
+++ b/jstests/core/failcommand_failpoint.js
@@ -34,23 +34,23 @@ assert.commandWorked(adminDB.runCommand({
configureFailPoint: "failCommand",
mode: "alwaysOn",
data: {
- errorCode: ErrorCodes.NotMaster,
+ errorCode: ErrorCodes.NotWritablePrimary,
failCommands: ["ping"],
threadName: threadName,
}
}));
-assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster);
+assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotWritablePrimary);
// Configure failCommand again and verify that it still works correctly.
assert.commandWorked(adminDB.runCommand({
configureFailPoint: "failCommand",
mode: "alwaysOn",
data: {
- errorCode: ErrorCodes.NotMaster,
+ errorCode: ErrorCodes.NotWritablePrimary,
failCommands: ["ping"],
threadName: threadName,
}
}));
-assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster);
+assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotWritablePrimary);
assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
// Test switching command sets.
@@ -58,17 +58,17 @@ assert.commandWorked(adminDB.runCommand({
configureFailPoint: "failCommand",
mode: "alwaysOn",
data: {
- errorCode: ErrorCodes.NotMaster,
+ errorCode: ErrorCodes.NotWritablePrimary,
failCommands: ["ping"],
threadName: threadName,
}
}));
-assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster);
+assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotWritablePrimary);
assert.commandWorked(adminDB.runCommand({
configureFailPoint: "failCommand",
mode: "alwaysOn",
data: {
- errorCode: ErrorCodes.NotMaster,
+ errorCode: ErrorCodes.NotWritablePrimary,
failCommands: ["isMaster"],
threadName: threadName,
}
@@ -136,12 +136,12 @@ assert.commandWorked(adminDB.runCommand({
configureFailPoint: "failCommand",
mode: "alwaysOn",
data: {
- errorCode: ErrorCodes.NotMaster,
+ errorCode: ErrorCodes.NotWritablePrimary,
failCommands: ["ping"],
threadName: threadName,
}
}));
-assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster);
+assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotWritablePrimary);
assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
// Test that only commands specified in failCommands fail.
@@ -179,14 +179,14 @@ assert.commandWorked(adminDB.runCommand({
configureFailPoint: "failCommand",
mode: {skip: 2},
data: {
- errorCode: ErrorCodes.NotMaster,
+ errorCode: ErrorCodes.NotWritablePrimary,
failCommands: ["ping"],
threadName: threadName,
}
}));
assert.commandWorked(testDB.runCommand({ping: 1}));
assert.commandWorked(testDB.runCommand({ping: 1}));
-assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster);
+assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotWritablePrimary);
assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
// Test times when failing with a particular error code.
@@ -194,13 +194,13 @@ assert.commandWorked(adminDB.runCommand({
configureFailPoint: "failCommand",
mode: {times: 2},
data: {
- errorCode: ErrorCodes.NotMaster,
+ errorCode: ErrorCodes.NotWritablePrimary,
failCommands: ["ping"],
threadName: threadName,
}
}));
-assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster);
-assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster);
+assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotWritablePrimary);
+assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotWritablePrimary);
assert.commandWorked(testDB.runCommand({ping: 1}));
assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
@@ -524,19 +524,19 @@ assert.commandWorked(adminDB.runCommand({
mode: "alwaysOn",
data: {
failCommands: ["ping"],
- errorCode: ErrorCodes.NotMaster,
+ errorCode: ErrorCodes.NotWritablePrimary,
threadName: threadName,
appName: appName,
}
}));
-assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster);
+assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotWritablePrimary);
assert.commandWorked(adminDB.runCommand({
configureFailPoint: "failCommand",
mode: "alwaysOn",
data: {
failCommands: ["ping"],
- errorCode: ErrorCodes.NotMaster,
+ errorCode: ErrorCodes.NotWritablePrimary,
threadName: threadName,
appName: "made up app name",
}
@@ -557,7 +557,7 @@ assert.commandWorked(adminDB.runCommand({
configureFailPoint: "failCommand",
mode: {times: 1},
data: {
- errorCode: ErrorCodes.NotMaster,
+ errorCode: ErrorCodes.NotWritablePrimary,
failCommands: ["insert"],
errorLabels: ["Foo"],
threadName: threadName
@@ -567,7 +567,7 @@ assert.commandWorked(adminDB.runCommand({
res = assert.commandFailedWithCode(
testDB.runCommand(
{insert: "test", documents: [{x: "retryable_write"}], txnNumber: NumberLong(0)}),
- ErrorCodes.NotMaster);
+ ErrorCodes.NotWritablePrimary);
// Test that failCommand overrides the error label to "Foo".
assert.eq(res.errorLabels, ["Foo"], res);
@@ -576,7 +576,7 @@ assert.commandWorked(adminDB.runCommand({
configureFailPoint: "failCommand",
mode: {times: 1},
data: {
- writeConcernError: {code: ErrorCodes.NotMaster, errmsg: "hello"},
+ writeConcernError: {code: ErrorCodes.NotWritablePrimary, errmsg: "hello"},
failCommands: ["insert"],
errorLabels: ["Foo"],
threadName: threadName
@@ -585,7 +585,7 @@ assert.commandWorked(adminDB.runCommand({
// This normally fails with RetryableWriteError label.
res = testDB.runCommand(
{insert: "test", documents: [{x: "retryable_write"}], txnNumber: NumberLong(0)});
-assert.eq(res.writeConcernError, {code: ErrorCodes.NotMaster, errmsg: "hello"});
+assert.eq(res.writeConcernError, {code: ErrorCodes.NotWritablePrimary, errmsg: "hello"});
// Test that failCommand overrides the error label to "Foo".
assert.eq(res.errorLabels, ["Foo"], res);
@@ -594,7 +594,7 @@ assert.commandWorked(adminDB.runCommand({
configureFailPoint: "failCommand",
mode: {times: 1},
data: {
- errorCode: ErrorCodes.NotMaster,
+ errorCode: ErrorCodes.NotWritablePrimary,
failCommands: ["insert"],
errorLabels: [],
threadName: threadName
@@ -604,7 +604,7 @@ assert.commandWorked(adminDB.runCommand({
res = assert.commandFailedWithCode(
testDB.runCommand(
{insert: "test", documents: [{x: "retryable_write"}], txnNumber: NumberLong(0)}),
- ErrorCodes.NotMaster);
+ ErrorCodes.NotWritablePrimary);
// There should be no errorLabels field if no error labels provided in failCommand.
assert(!res.hasOwnProperty("errorLabels"), res);
@@ -613,7 +613,7 @@ assert.commandWorked(adminDB.runCommand({
configureFailPoint: "failCommand",
mode: {times: 1},
data: {
- writeConcernError: {code: ErrorCodes.NotMaster, errmsg: "hello"},
+ writeConcernError: {code: ErrorCodes.NotWritablePrimary, errmsg: "hello"},
failCommands: ["insert"],
errorLabels: [],
threadName: threadName
@@ -622,7 +622,7 @@ assert.commandWorked(adminDB.runCommand({
// This normally fails with RetryableWriteError label.
res = testDB.runCommand(
{insert: "test", documents: [{x: "retryable_write"}], txnNumber: NumberLong(0)});
-assert.eq(res.writeConcernError, {code: ErrorCodes.NotMaster, errmsg: "hello"});
+assert.eq(res.writeConcernError, {code: ErrorCodes.NotWritablePrimary, errmsg: "hello"});
// There should be no errorLabels field if no error labels provided in failCommand.
assert(!res.hasOwnProperty("errorLabels"), res);
}());
diff --git a/jstests/hooks/run_reconfig_background.js b/jstests/hooks/run_reconfig_background.js
index e03d39306e3..e6be7a7401c 100644
--- a/jstests/hooks/run_reconfig_background.js
+++ b/jstests/hooks/run_reconfig_background.js
@@ -16,7 +16,7 @@ load('jstests/libs/parallelTester.js'); // For Thread.
* Returns true if the error code is transient.
*/
function isIgnorableError(codeName) {
- if (codeName == "NotMaster" || codeName == "InterruptedDueToReplStateChange" ||
+ if (codeName == "NotWritablePrimary" || codeName == "InterruptedDueToReplStateChange" ||
codeName == "PrimarySteppedDown" || codeName === "NodeNotFound" ||
codeName === "ShutdownInProgress") {
return true;
diff --git a/jstests/libs/override_methods/continuous_stepdown.js b/jstests/libs/override_methods/continuous_stepdown.js
index 48e4a79f197..72286c15f05 100644
--- a/jstests/libs/override_methods/continuous_stepdown.js
+++ b/jstests/libs/override_methods/continuous_stepdown.js
@@ -82,7 +82,7 @@ const StepdownThread = function() {
assert.commandWorkedOrFailedWithCode(
primary.adminCommand(
{replSetStepDown: options.stepdownDurationSecs, force: true}),
- [ErrorCodes.NotMaster, ErrorCodes.ConflictingOperationInProgress]);
+ [ErrorCodes.NotWritablePrimary, ErrorCodes.ConflictingOperationInProgress]);
// Wait for primary to get elected and allow the test to make some progress
// before attempting another stepdown.
diff --git a/jstests/libs/override_methods/network_error_and_txn_override.js b/jstests/libs/override_methods/network_error_and_txn_override.js
index 8faa01def6c..e852f9dff11 100644
--- a/jstests/libs/override_methods/network_error_and_txn_override.js
+++ b/jstests/libs/override_methods/network_error_and_txn_override.js
@@ -836,8 +836,8 @@ function shouldRetryWithNetworkErrorOverride(
if (!res.ok) {
if (RetryableWritesUtil.isRetryableCode(res.code)) {
// Don't decrement retries, because the command returned before the connection was
- // closed, so a subsequent attempt will receive a network error (or NotMaster error)
- // and need to retry.
+ // closed, so a subsequent attempt will receive a network error (or NotWritablePrimary
+ // error) and need to retry.
logError("Retrying failed response with retryable code");
return kContinue;
}
diff --git a/jstests/libs/override_methods/validate_collections_on_shutdown.js b/jstests/libs/override_methods/validate_collections_on_shutdown.js
index e3803e2e9c6..a1e56fd1ca8 100644
--- a/jstests/libs/override_methods/validate_collections_on_shutdown.js
+++ b/jstests/libs/override_methods/validate_collections_on_shutdown.js
@@ -67,7 +67,7 @@ MongoRunner.validateCollectionsCallback = function(port) {
conn.adminCommand(
{replSetStepDown: kFreezeTimeSecs, force: true}),
[
- ErrorCodes.NotMaster,
+ ErrorCodes.NotWritablePrimary,
ErrorCodes.NotYetInitialized,
ErrorCodes.Unauthorized,
ErrorCodes.ConflictingOperationInProgress
diff --git a/jstests/multiVersion/genericSetFCVUsage/3_upgrade_replset.js b/jstests/multiVersion/genericSetFCVUsage/3_upgrade_replset.js
index ed76bf2a35d..3699cf45218 100644
--- a/jstests/multiVersion/genericSetFCVUsage/3_upgrade_replset.js
+++ b/jstests/multiVersion/genericSetFCVUsage/3_upgrade_replset.js
@@ -81,7 +81,8 @@ for (let oldVersion of ["last-lts", "last-continuous"]) {
// Since the primary from before the upgrade took place was restarted as part of the
// upgrade/downgrade process, we explicitly reconnect to it so that sending it an update
- // operation silently fails with an unchecked NotMaster error rather than a network error.
+ // operation silently fails with an unchecked NotWritablePrimary error rather than a network
+ // error.
reconnect(oldPrimary.getDB("admin"));
joinFindInsert();
diff --git a/jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js b/jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js
index d08ac37d586..e5c79d024bd 100644
--- a/jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js
+++ b/jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js
@@ -63,8 +63,8 @@ function runDowngradeTest(downgradeVersion) {
printjson(rst.status());
// Since the old primary was restarted as part of the downgrade process, we explicitly reconnect
- // to it so that sending it an update operation silently fails with an unchecked NotMaster error
- // rather than a network error.
+ // to it so that sending it an update operation silently fails with an unchecked
+ // NotWritablePrimary error rather than a network error.
reconnect(oldPrimary.getDB("admin"));
joinFindInsert();
rst.stopSet();
diff --git a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster.js b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster.js
index 4b936ca4d99..c1ac19b3d6e 100644
--- a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster.js
+++ b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster.js
@@ -12,8 +12,8 @@ load('./jstests/multiVersion/libs/multi_cluster.js');
// When checking UUID consistency, the shell attempts to run a command on the node it believes is
// primary in each shard. However, this test restarts shards, and the node that is elected primary
// after the restart may be different from the original primary. Since the shell does not retry on
-// NotMaster errors, and whether or not it detects the new primary before issuing the command is
-// nondeterministic, skip the consistency check for this test.
+// NotWritablePrimary errors, and whether or not it detects the new primary before issuing the
+// command is nondeterministic, skip the consistency check for this test.
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
const kMinVersion = 5;
diff --git a/jstests/multiVersion/minor_version_upgrade_replset.js b/jstests/multiVersion/minor_version_upgrade_replset.js
index 21676767ebd..7efb48074e2 100644
--- a/jstests/multiVersion/minor_version_upgrade_replset.js
+++ b/jstests/multiVersion/minor_version_upgrade_replset.js
@@ -72,8 +72,8 @@ printjson(rst.status());
sleep(10 * 1000);
// Since the old primary was restarted as part of the upgrade process, we explicitly reconnect to it
-// so that sending it an update operation silently fails with an unchecked NotMaster error rather
-// than a network error.
+// so that sending it an update operation silently fails with an unchecked NotWritablePrimary error
+// rather than a network error.
reconnect(oldPrimary.getDB("admin"));
joinFindInsert();
diff --git a/jstests/noPassthrough/auto_retry_on_network_error.js b/jstests/noPassthrough/auto_retry_on_network_error.js
index fa20b1d61c1..569db1e4dd1 100644
--- a/jstests/noPassthrough/auto_retry_on_network_error.js
+++ b/jstests/noPassthrough/auto_retry_on_network_error.js
@@ -52,7 +52,7 @@ const dbName = "test";
const collName = "auto_retry";
// The override requires the connection to be run under a session. Use the replica set URL to
-// allow automatic re-targeting of the primary on NotMaster errors.
+// allow automatic re-targeting of the primary on NotWritablePrimary errors.
const db = new Mongo(rst.getURL()).startSession({retryWrites: true}).getDatabase(dbName);
// Commands with no disconnections should work as normal.
diff --git a/jstests/noPassthrough/merge_out_on_secondary_fail_on_stepdown.js b/jstests/noPassthrough/merge_out_on_secondary_fail_on_stepdown.js
index b0716cc748f..1b8134f607d 100644
--- a/jstests/noPassthrough/merge_out_on_secondary_fail_on_stepdown.js
+++ b/jstests/noPassthrough/merge_out_on_secondary_fail_on_stepdown.js
@@ -1,6 +1,6 @@
/**
* Tests that when $out/$merge is run on a secondary and the primary steps down, the command
- * will fail with a `NotMaster` error.
+ * will fail with a `NotWritablePrimary` error.
*
* @tags: [requires_replication]
*/
@@ -32,8 +32,8 @@ replTest.awaitReplication();
/**
* Given an agg 'writeStage' (an $out or $merge), passed as a string, enables and waits for
* 'failpoint' to be reached by the aggregate containing 'writeStage' running on a secondary and
- * verifies that the aggregate fails with a 'NotMaster' error when the primary is forced to step
- * down.
+ * verifies that the aggregate fails with a 'NotWritablePrimary' error when the primary is forced to
+ * step down.
*/
let runTest = function(writeStage, failpoint) {
let outFn = `
diff --git a/jstests/noPassthrough/replica_set_connection_error_codes.js b/jstests/noPassthrough/replica_set_connection_error_codes.js
index 90c78e8521a..44d40696e77 100644
--- a/jstests/noPassthrough/replica_set_connection_error_codes.js
+++ b/jstests/noPassthrough/replica_set_connection_error_codes.js
@@ -1,6 +1,6 @@
/**
- * Tests that DBClientRS performs re-targeting when it sees an ErrorCodes.NotMaster error response
- * from a command even if "not master" doesn't appear in the message.
+ * Tests that DBClientRS performs re-targeting when it sees an ErrorCodes.NotWritablePrimary error
+ * response from a command even if "not master" doesn't appear in the message.
* @tags: [requires_replication]
*/
(function() {
@@ -49,12 +49,12 @@ const awaitShell = stepDownPrimary(rst);
rst.getPrimary();
rst.awaitNodesAgreeOnPrimary();
-// DBClientRS should discover the current primary eventually and get NotMaster errors in the
-// meantime.
+// DBClientRS should discover the current primary eventually and get NotWritablePrimary errors in
+// the meantime.
assert.soon(() => {
const res = rsConn.getDB("test").runCommand({create: "mycoll"});
if (!res.ok) {
- assert(res.code == ErrorCodes.NotMaster);
+ assert(res.code == ErrorCodes.NotWritablePrimary);
}
return res.ok;
});
diff --git a/jstests/noPassthrough/server_transaction_metrics_secondary.js b/jstests/noPassthrough/server_transaction_metrics_secondary.js
index 5302e0b05c4..9282b19bea0 100644
--- a/jstests/noPassthrough/server_transaction_metrics_secondary.js
+++ b/jstests/noPassthrough/server_transaction_metrics_secondary.js
@@ -48,7 +48,8 @@ assert.eq(0, metrics.totalCommitted);
assert.eq(0, metrics.totalStarted);
jsTestLog("Run transaction statement.");
-assert.eq(assert.throws(() => secDb[collName].findOne({_id: 0})).code, ErrorCodes.NotMaster);
+assert.eq(assert.throws(() => secDb[collName].findOne({_id: 0})).code,
+ ErrorCodes.NotWritablePrimary);
// The metrics are not affected.
metrics = assert.commandWorked(secondary.adminCommand({serverStatus: 1, repl: 0, metrics: 0}))
@@ -61,7 +62,8 @@ assert.eq(0, metrics.totalCommitted);
assert.eq(0, metrics.totalStarted);
jsTestLog("Abort the transaction.");
-assert.commandFailedWithCode(secondarySession.abortTransaction_forTesting(), ErrorCodes.NotMaster);
+assert.commandFailedWithCode(secondarySession.abortTransaction_forTesting(),
+ ErrorCodes.NotWritablePrimary);
// The metrics are not affected.
metrics = assert.commandWorked(secondary.adminCommand({serverStatus: 1, repl: 0, metrics: 0}))
diff --git a/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js b/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js
index 65ec2a0f9a0..b1827e8f922 100644
--- a/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js
+++ b/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js
@@ -3,8 +3,8 @@
* TransientTransactionError and the error is reported at the top level, not in a writeErrors array.
*
* Other transient transaction errors are tested elsewhere: WriteConflict is tested in
- * transactions_write_conflicts.js, NotMaster is tested in transient_txn_error_labels.js, and
- * NoSuchTransaction is tested in transient_txn_error_labels_with_write_concern.js.
+ * transactions_write_conflicts.js, NotWritablePrimary is tested in transient_txn_error_labels.js,
+ * and NoSuchTransaction is tested in transient_txn_error_labels_with_write_concern.js.
*
* @tags: [uses_transactions]
*/
diff --git a/jstests/noPassthrough/txn_override_causal_consistency.js b/jstests/noPassthrough/txn_override_causal_consistency.js
index 7b5176ec56b..4f5f312f0b2 100644
--- a/jstests/noPassthrough/txn_override_causal_consistency.js
+++ b/jstests/noPassthrough/txn_override_causal_consistency.js
@@ -194,7 +194,7 @@ function runTest() {
testCommit(conn, isCausal, true /*expectRetry*/);
// Network error on commit attempt.
- mockFirstCommitResponse = {ok: 0, code: ErrorCodes.NotMaster};
+ mockFirstCommitResponse = {ok: 0, code: ErrorCodes.NotWritablePrimary};
testCommit(conn, isCausal, true /*expectRetry*/);
}
}
diff --git a/jstests/replsets/catchup_takeover_two_nodes_ahead.js b/jstests/replsets/catchup_takeover_two_nodes_ahead.js
index f176e8ffdc7..ae39a89c033 100644
--- a/jstests/replsets/catchup_takeover_two_nodes_ahead.js
+++ b/jstests/replsets/catchup_takeover_two_nodes_ahead.js
@@ -52,9 +52,9 @@ assert.eq(ReplSetTest.State.PRIMARY,
jsTestLog('node 2 is now primary, but cannot accept writes');
// Make sure that node 2 cannot write anything. Because it is lagged and replication
-// has been stopped, it shouldn't be able to become master.
+// has been stopped, it shouldn't be able to become primary.
assert.commandFailedWithCode(nodes[2].getDB(name).bar.insert({z: 100}, writeConcern),
- ErrorCodes.NotMaster);
+ ErrorCodes.NotWritablePrimary);
// Confirm that the most up-to-date node becomes primary after the default catchup delay.
replSet.waitForState(0, ReplSetTest.State.PRIMARY, 60 * 1000);
diff --git a/jstests/replsets/emptycapped.js b/jstests/replsets/emptycapped.js
index ca389275513..bed8e0ea7eb 100644
--- a/jstests/replsets/emptycapped.js
+++ b/jstests/replsets/emptycapped.js
@@ -40,7 +40,7 @@ assert.eq(primaryTestDB.capped.find().itcount(),
// Truncate a capped collection on a secondary.
assert.commandFailedWithCode(secondaryTestDB.runCommand({emptycapped: 'capped'}),
- ErrorCodes.NotMaster);
+ ErrorCodes.NotWritablePrimary);
// Truncate the oplog.
assert.commandFailedWithCode(primaryLocalDB.runCommand({emptycapped: "oplog.rs"}),
diff --git a/jstests/replsets/libs/rollback_test.js b/jstests/replsets/libs/rollback_test.js
index b776d58f750..4e3a3c2690a 100644
--- a/jstests/replsets/libs/rollback_test.js
+++ b/jstests/replsets/libs/rollback_test.js
@@ -489,7 +489,7 @@ function RollbackTest(name = "RollbackTest", replSet) {
// the rest of the replica set, so it physically can't become the primary.
assert.soon(() => {
const res = curPrimary.adminCommand({replSetStepDown: 1, force: true});
- return (res.ok || res.code === ErrorCodes.NotMaster);
+ return (res.ok || res.code === ErrorCodes.NotWritablePrimary);
});
} catch (e) {
// Stepdown may fail if the node has already started stepping down.
diff --git a/jstests/replsets/linearizable_read_concern.js b/jstests/replsets/linearizable_read_concern.js
index 58f4f633a6c..657320f0275 100644
--- a/jstests/replsets/linearizable_read_concern.js
+++ b/jstests/replsets/linearizable_read_concern.js
@@ -66,7 +66,7 @@ var badCmd = assert.commandFailed(secondaries[0].getDB("test").runCommand(
{"find": "foo", readConcern: {level: "linearizable"}, "maxTimeMS": 60000}));
assert.eq(badCmd.errmsg, "cannot satisfy linearizable read concern on non-primary node");
-assert.eq(badCmd.code, ErrorCodes.NotMaster);
+assert.eq(badCmd.code, ErrorCodes.NotWritablePrimary);
// This fails because you cannot specify 'afterOpTime' for linearizable read.
var opTimeCmd = assert.commandFailed(primary.getDB("test").runCommand({
diff --git a/jstests/replsets/notmaster_errors_return_topology_version.js b/jstests/replsets/notmaster_errors_return_topology_version.js
index 54943fc3998..45561adf2f5 100644
--- a/jstests/replsets/notmaster_errors_return_topology_version.js
+++ b/jstests/replsets/notmaster_errors_return_topology_version.js
@@ -1,5 +1,5 @@
/**
- * This tests that NotMaster errors include a TopologyVersion field. (Note that shutdown errors
+ * This tests that NotPrimary errors include a TopologyVersion field. (Note that shutdown errors
* include a TopologyVersion field when the server is in quiesce mode. This is tested in
* quiesce_mode.js)
*/
@@ -12,13 +12,13 @@ rst.startSet();
rst.initiate();
const dbName = "test";
-const collName = "notmaster_errors_return_topology_version";
+const collName = "notprimary_errors_return_topology_version";
const primary = rst.getPrimary();
const primaryDB = primary.getDB(dbName);
-const notMasterErrorCodes = [
+const notPrimaryErrorCodes = [
ErrorCodes.InterruptedDueToReplStateChange,
- ErrorCodes.NotMaster,
+ ErrorCodes.NotWritablePrimary,
ErrorCodes.NotMasterNoSlaveOk,
ErrorCodes.NotMasterOrSecondary,
ErrorCodes.PrimarySteppedDown
@@ -53,8 +53,8 @@ function runFailInCommandDispatch(errorCode, isWCError) {
const res = primaryDB.runCommand({insert: collName, documents: [{x: 1}]});
assert.commandFailedWithCode(res, errorCode);
- // Only NotMaster errors should return TopologyVersion in the response.
- if (notMasterErrorCodes.includes(errorCode)) {
+ // Only NotPrimary errors should return TopologyVersion in the response.
+ if (notPrimaryErrorCodes.includes(errorCode)) {
assert(res.hasOwnProperty("topologyVersion"), tojson(res));
} else {
assert(!res.hasOwnProperty("topologyVersion"), tojson(res));
@@ -76,8 +76,8 @@ function runFailInRunCommand(errorCode) {
const res = primaryDB.runCommand({insert: collName, documents: [{x: 1}]});
assert.commandFailedWithCode(res, errorCode);
- // Only NotMaster errors should return TopologyVersion in the response.
- if (notMasterErrorCodes.includes(errorCode)) {
+ // Only NotPrimary errors should return TopologyVersion in the response.
+ if (notPrimaryErrorCodes.includes(errorCode)) {
assert(res.hasOwnProperty("topologyVersion"), tojson(res));
} else {
assert(!res.hasOwnProperty("topologyVersion"), tojson(res));
@@ -88,18 +88,18 @@ function runFailInRunCommand(errorCode) {
primary.adminCommand({configureFailPoint: "failWithErrorCodeInRunCommand", mode: "off"}));
}
-notMasterErrorCodes.forEach(function(code) {
+notPrimaryErrorCodes.forEach(function(code) {
runFailInCommandDispatch(code, true /* isWCError */);
runFailInCommandDispatch(code, false /* isWCError */);
});
-// Test that errors that are not NotMaster errors will not return a TopologyVersion.
+// Test that errors that are not NotPrimary errors will not return a TopologyVersion.
otherErrorCodes.forEach(function(code) {
runFailInCommandDispatch(code, true /* isWCError */);
runFailInCommandDispatch(code, false /* isWCError */);
});
-notMasterErrorCodes.forEach(function(code) {
+notPrimaryErrorCodes.forEach(function(code) {
runFailInRunCommand(code);
});
otherErrorCodes.forEach(function(code) {
diff --git a/jstests/replsets/prepared_transaction_commands_fail_on_secondaries.js b/jstests/replsets/prepared_transaction_commands_fail_on_secondaries.js
index 101c12252a0..a1e4561edd7 100644
--- a/jstests/replsets/prepared_transaction_commands_fail_on_secondaries.js
+++ b/jstests/replsets/prepared_transaction_commands_fail_on_secondaries.js
@@ -46,7 +46,7 @@ jsTestLog("Test that prepare fails on a secondary");
const txnNumber = NumberLong(priSession.getTxnNumber_forTesting());
assert.commandFailedWithCode(secSession.getDatabase('admin').adminCommand(
{prepareTransaction: 1, txnNumber: txnNumber, autocommit: false}),
- ErrorCodes.NotMaster);
+ ErrorCodes.NotWritablePrimary);
const prepareTimestamp = PrepareHelpers.prepareTransaction(priSession);
rst.awaitReplication();
@@ -60,12 +60,12 @@ assert.commandFailedWithCode(secSession.getDatabase('admin').adminCommand({
txnNumber: txnNumber,
autocommit: false
}),
- ErrorCodes.NotMaster);
+ ErrorCodes.NotWritablePrimary);
jsTestLog("Test that prepared abort fails on a secondary");
assert.commandFailedWithCode(secSession.getDatabase('admin').adminCommand(
{abortTransaction: 1, txnNumber: txnNumber, autocommit: false}),
- ErrorCodes.NotMaster);
+ ErrorCodes.NotWritablePrimary);
jsTestLog("Test that we can still commit the transaction");
assert.commandWorked(PrepareHelpers.commitTransaction(priSession, commitTimestamp));
diff --git a/jstests/replsets/retryable_commit_transaction_after_failover.js b/jstests/replsets/retryable_commit_transaction_after_failover.js
index e9c81e6b81c..d5e9efacc60 100644
--- a/jstests/replsets/retryable_commit_transaction_after_failover.js
+++ b/jstests/replsets/retryable_commit_transaction_after_failover.js
@@ -73,7 +73,7 @@ rst.stepUp(oldSecondary);
assert.eq(oldSecondary, rst.getPrimary());
// Reconnect the connection to the new primary.
sessionDb.getMongo()._markNodeAsFailed(
- oldPrimary.host, ErrorCodes.NotMaster, "Notice that primary is not master");
+ oldPrimary.host, ErrorCodes.NotWritablePrimary, "Notice that primary is not master");
reconnect(sessionDb);
jsTest.log("commitTransaction command is retryable after failover");
diff --git a/jstests/replsets/step_down_on_secondary.js b/jstests/replsets/step_down_on_secondary.js
index 9bef6a2de10..64fcf73c3a2 100644
--- a/jstests/replsets/step_down_on_secondary.js
+++ b/jstests/replsets/step_down_on_secondary.js
@@ -47,7 +47,7 @@ const joinStepDownThread = startParallelShell(() => {
const freezeSecs = 24 * 60 * 60; // 24 hours
assert.commandFailedWithCode(db.adminCommand({"replSetStepDown": freezeSecs, "force": true}),
- ErrorCodes.NotMaster);
+ ErrorCodes.NotWritablePrimary);
}, primary.port);
waitForCurOpByFailPointNoNS(primaryDB, "stepdownHangBeforeRSTLEnqueue");
diff --git a/jstests/replsets/stepdown.js b/jstests/replsets/stepdown.js
index 7ce17186af4..318d9b79fb0 100644
--- a/jstests/replsets/stepdown.js
+++ b/jstests/replsets/stepdown.js
@@ -174,7 +174,7 @@ try {
jsTestLog('Do stepdown of primary ' + master + ' that should not work');
assert.commandFailedWithCode(master.getDB("admin").runCommand(
{replSetStepDown: ReplSetTest.kDefaultTimeoutMS, force: true}),
- ErrorCodes.NotMaster);
+ ErrorCodes.NotWritablePrimary);
// Check that the 'total' and 'failed' fields of 'replSetStepDown' and
// 'replSetStepDownWithForce' have been incremented in serverStatus.
diff --git a/jstests/replsets/stepdown_killop.js b/jstests/replsets/stepdown_killop.js
index e3446a5cdb5..a280e13e6b9 100644
--- a/jstests/replsets/stepdown_killop.js
+++ b/jstests/replsets/stepdown_killop.js
@@ -2,7 +2,8 @@
// 1. Start up a 3 node set (1 arbiter).
// 2. Stop replication on the SECONDARY using a fail point.
// 3. Do one write and then spin up a second shell which asks the PRIMARY to StepDown.
-// 4. Once StepDown has begun, attempt to do writes and confirm that they fail with NotMaster.
+// 4. Once StepDown has begun, attempt to do writes and confirm that they fail with
+// NotWritablePrimary.
// 5. Kill the stepDown operation.
// 6. Writes should become allowed again and the primary should stay primary.
@@ -59,13 +60,14 @@ assert.soon(function() {
return false;
}, "No pending stepdown command found");
-jsTestLog("Ensure that writes start failing with NotMaster errors");
+jsTestLog("Ensure that writes start failing with NotWritablePrimary errors");
assert.soonNoExcept(function() {
- assert.commandFailedWithCode(primary.getDB(name).foo.insert({x: 2}), ErrorCodes.NotMaster);
+ assert.commandFailedWithCode(primary.getDB(name).foo.insert({x: 2}),
+ ErrorCodes.NotWritablePrimary);
return true;
});
-jsTestLog("Ensure that even though writes are failing with NotMaster, we still report " +
+jsTestLog("Ensure that even though writes are failing with NotWritablePrimary, we still report " +
"ourselves as PRIMARY");
assert.eq(ReplSetTest.State.PRIMARY, primary.adminCommand('replSetGetStatus').myState);
diff --git a/jstests/replsets/stepdown_long_wait_time.js b/jstests/replsets/stepdown_long_wait_time.js
index 6ece19ba068..b2f8ed89483 100644
--- a/jstests/replsets/stepdown_long_wait_time.js
+++ b/jstests/replsets/stepdown_long_wait_time.js
@@ -2,7 +2,7 @@
// 1. Start up a 3 node set (1 arbiter).
// 2. Stop replication on the SECONDARY using a fail point.
// 3. Do one write and then spin up a second shell which asks the PRIMARY to StepDown.
-// 4. Once StepDown has begun, try to do a write and ensure that it fails with NotMaster
+// 4. Once StepDown has begun, try to do a write and ensure that it fails with NotWritablePrimary
// 5. Restart replication on the SECONDARY.
// 6. Wait for PRIMARY to StepDown.
@@ -52,13 +52,14 @@ assert.soon(function() {
return false;
}, "No pending stepdown command found");
-jsTestLog("Ensure that writes start failing with NotMaster errors");
+jsTestLog("Ensure that writes start failing with NotWritablePrimary errors");
assert.soonNoExcept(function() {
- assert.commandFailedWithCode(primary.getDB(name).foo.insert({x: 2}), ErrorCodes.NotMaster);
+ assert.commandFailedWithCode(primary.getDB(name).foo.insert({x: 2}),
+ ErrorCodes.NotWritablePrimary);
return true;
});
-jsTestLog("Ensure that even though writes are failing with NotMaster, we still report " +
+jsTestLog("Ensure that even though writes are failing with NotWritablePrimary, we still report " +
"ourselves as PRIMARY");
assert.eq(ReplSetTest.State.PRIMARY, primary.adminCommand('replSetGetStatus').myState);
diff --git a/jstests/replsets/transactions_on_secondaries_not_allowed.js b/jstests/replsets/transactions_on_secondaries_not_allowed.js
index 59784afe1f3..8826fff666c 100644
--- a/jstests/replsets/transactions_on_secondaries_not_allowed.js
+++ b/jstests/replsets/transactions_on_secondaries_not_allowed.js
@@ -49,11 +49,11 @@ jsTestLog("Start a read-only transaction on the secondary.");
session.startTransaction({readConcern: {level: "snapshot"}});
// Try to read a document (the first statement in the transaction) and verify that this fails.
-assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.NotMaster);
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.NotWritablePrimary);
-// The check for "NotMaster" supercedes the check for "NoSuchTransaction" in this case.
+// The check for "NotWritablePrimary" supersedes the check for "NoSuchTransaction" in this case.
jsTestLog("Make sure we are not allowed to run the commitTransaction command on the secondary.");
-assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NotMaster);
+assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NotWritablePrimary);
/**
* Test starting a transaction and issuing an abortTransaction command.
@@ -63,18 +63,18 @@ jsTestLog("Start a different read-only transaction on the secondary.");
session.startTransaction({readConcern: {level: "snapshot"}});
// Try to read a document (the first statement in the transaction) and verify that this fails.
-assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.NotMaster);
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.NotWritablePrimary);
-// The check for "NotMaster" supercedes the check for "NoSuchTransaction" in this case.
+// The check for "NotWritablePrimary" supersedes the check for "NoSuchTransaction" in this case.
jsTestLog("Make sure we are not allowed to run the abortTransaction command on the secondary.");
-assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NotMaster);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NotWritablePrimary);
/**
* Test starting a retryable write.
*/
jsTestLog("Start a retryable write");
-assert.commandFailedWithCode(sessionDb.foo.insert({_id: 0}), ErrorCodes.NotMaster);
+assert.commandFailedWithCode(sessionDb.foo.insert({_id: 0}), ErrorCodes.NotWritablePrimary);
/**
* Test starting a read with txnNumber, but without autocommit. This fails in general because
diff --git a/jstests/replsets/transactions_only_allowed_on_primaries.js b/jstests/replsets/transactions_only_allowed_on_primaries.js
index 910c37740ca..7b71cf3eb67 100644
--- a/jstests/replsets/transactions_only_allowed_on_primaries.js
+++ b/jstests/replsets/transactions_only_allowed_on_primaries.js
@@ -61,7 +61,8 @@ function testCommands(session, commands, expectedErrorCode, readPref) {
// Call abort for good measure, even though the transaction should have already been
// aborted on the server.
- assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NotMaster);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NotWritablePrimary);
}
}
@@ -85,17 +86,17 @@ let readCommands = [
jsTestLog("Testing read commands.");
// Make sure read commands can not start transactions with any supported read preference.
-testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, "secondary");
-testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, "secondaryPreferred");
-testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, "primaryPreferred");
-testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, null);
+testCommands(secondarySession, readCommands, ErrorCodes.NotWritablePrimary, "secondary");
+testCommands(secondarySession, readCommands, ErrorCodes.NotWritablePrimary, "secondaryPreferred");
+testCommands(secondarySession, readCommands, ErrorCodes.NotWritablePrimary, "primaryPreferred");
+testCommands(secondarySession, readCommands, ErrorCodes.NotWritablePrimary, null);
// Test one write command. Normal write commands should already be
// disallowed on secondaries so we don't test them exhaustively here.
let writeCommands = [{insert: collName, documents: [{_id: 0}]}];
jsTestLog("Testing write commands.");
-testCommands(secondarySession, writeCommands, ErrorCodes.NotMaster, "secondary");
+testCommands(secondarySession, writeCommands, ErrorCodes.NotWritablePrimary, "secondary");
secondarySession.endSession();
diff --git a/jstests/replsets/txn_override_unittests.js b/jstests/replsets/txn_override_unittests.js
index 8abc243b821..7c51d6a5284 100644
--- a/jstests/replsets/txn_override_unittests.js
+++ b/jstests/replsets/txn_override_unittests.js
@@ -339,28 +339,28 @@ const retryOnNetworkErrorTests = [
}
},
{
- name: "retry on NotMaster",
+ name: "retry on NotWritablePrimary",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotWritablePrimary});
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
}
},
{
- name: "retry on NotMaster ordered",
+ name: "retry on NotWritablePrimary ordered",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotWritablePrimary});
assert.commandFailed(
testDB.runCommand({insert: collName1, documents: [{_id: 2}], ordered: true}));
}
},
{
- name: "retry on NotMaster with object change",
+ name: "retry on NotWritablePrimary with object change",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotWritablePrimary});
let obj1 = {_id: 1, x: 5};
let obj2 = {_id: 2, x: 5};
assert.commandWorked(coll1.insert(obj1));
@@ -375,7 +375,7 @@ const retryOnNetworkErrorTests = [
name: "implicit collection creation with stepdown",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotWritablePrimary});
assert.commandWorked(coll1.insert({_id: 1}));
assert.commandWorked(coll2.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
@@ -386,9 +386,10 @@ const retryOnNetworkErrorTests = [
name: "implicit collection creation with WriteConcernError",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(
- ["insert"],
- {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
+ failCommandWithFailPoint(["insert"], {
+ writeConcernError:
+ {code: ErrorCodes.NotWritablePrimary, codeName: "NotWritablePrimary"}
+ });
assert.commandWorked(coll1.insert({_id: 1}));
assert.commandWorked(coll2.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
@@ -399,8 +400,11 @@ const retryOnNetworkErrorTests = [
name: "implicit collection creation with WriteConcernError and normal stepdown error",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithErrorAndWCENoRun(
- "insert", ErrorCodes.NotMaster, "NotMaster", ErrorCodes.NotMaster, "NotMaster");
+ failCommandWithErrorAndWCENoRun("insert",
+ ErrorCodes.NotWritablePrimary,
+ "NotWritablePrimary",
+ ErrorCodes.NotWritablePrimary,
+ "NotWritablePrimary");
assert.commandWorked(coll1.insert({_id: 1}));
assert.commandWorked(coll2.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
@@ -413,8 +417,8 @@ const retryOnNetworkErrorTests = [
failCommandWithErrorAndWCENoRun("insert",
ErrorCodes.OperationFailed,
"OperationFailed",
- ErrorCodes.NotMaster,
- "NotMaster");
+ ErrorCodes.NotWritablePrimary,
+ "NotWritablePrimary");
assert.commandWorked(coll1.insert({_id: 1}));
assert.commandWorked(coll2.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
@@ -443,7 +447,7 @@ const retryOnNetworkErrorTests = [
name: "implicit collection creation with WriteConcernError no success",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithWCENoRun("insert", ErrorCodes.NotMaster, "NotMaster");
+ failCommandWithWCENoRun("insert", ErrorCodes.NotWritablePrimary, "NotWritablePrimary");
assert.commandWorked(coll1.insert({_id: 1}));
assert.commandWorked(coll2.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
@@ -454,7 +458,7 @@ const retryOnNetworkErrorTests = [
name: "update with stepdown",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotWritablePrimary});
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().toArray(), [{_id: 1}]);
assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
@@ -487,7 +491,7 @@ const retryOnNetworkErrorTests = [
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
failCommandWithFailPoint(["update"],
- {errorCode: ErrorCodes.NotMaster, mode: {times: 2}});
+ {errorCode: ErrorCodes.NotWritablePrimary, mode: {times: 2}});
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().toArray(), [{_id: 1}]);
assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
@@ -500,10 +504,10 @@ const retryOnNetworkErrorTests = [
name: "update with chained stepdown errors",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotWritablePrimary});
// Chain multiple update errors together.
attachPostCmdFunction("update", function() {
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotWritablePrimary});
});
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().toArray(), [{_id: 1}]);
@@ -533,7 +537,7 @@ const retryOnNetworkErrorTests = [
const session = testDB.getSession();
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotWritablePrimary});
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().toArray(), [{_id: 1}]);
@@ -541,7 +545,7 @@ const retryOnNetworkErrorTests = [
session.startTransaction();
assert.commandFailedWithCode(
testDB.runCommand({update: collName1, updates: [{q: {_id: 1}, u: {$inc: {x: 1}}}]}),
- ErrorCodes.NotMaster);
+ ErrorCodes.NotWritablePrimary);
assert.commandFailedWithCode(session.abortTransaction_forTesting(),
ErrorCodes.NoSuchTransaction);
@@ -577,7 +581,8 @@ const retryOnNetworkErrorTests = [
const session = testDB.getSession();
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["commitTransaction"],
+ {errorCode: ErrorCodes.NotWritablePrimary});
session.startTransaction();
assert.commandWorked(coll1.insert({_id: 1}));
@@ -658,7 +663,8 @@ const retryOnNetworkErrorTests = [
const session = testDB.getSession();
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["abortTransaction"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["abortTransaction"],
+ {errorCode: ErrorCodes.NotWritablePrimary});
session.startTransaction();
assert.commandWorked(coll1.insert({_id: 1}));
@@ -743,7 +749,7 @@ const retryOnNetworkErrorTests = [
setCommandMockResponse("createIndexes", {
ok: 0,
raw: {
- shardOne: {code: ErrorCodes.NotMaster, errmsg: "dummy"},
+ shardOne: {code: ErrorCodes.NotWritablePrimary, errmsg: "dummy"},
shardTwo: {code: ErrorCodes.InternalError, errmsg: "dummy"}
}
});
@@ -767,7 +773,7 @@ const retryOnNetworkErrorTests = [
raw: {
// Raw responses only omit a top-level code if more than one error was
// returned from a shard, so a third shard is needed.
- shardOne: {code: ErrorCodes.NotMaster, errmsg: "dummy"},
+ shardOne: {code: ErrorCodes.NotWritablePrimary, errmsg: "dummy"},
shardTwo: {ok: 1},
shardThree: {code: ErrorCodes.InternalError, errmsg: "dummy"},
}
@@ -1067,7 +1073,7 @@ const txnOverrideTests = [
name: "update with stepdown",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotWritablePrimary});
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().toArray(), [{_id: 1}]);
assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
@@ -1116,7 +1122,7 @@ const txnOverrideTests = [
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
failCommandWithFailPoint(["update"],
- {errorCode: ErrorCodes.NotMaster, mode: {times: 2}});
+ {errorCode: ErrorCodes.NotWritablePrimary, mode: {times: 2}});
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().toArray(), [{_id: 1}]);
assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
@@ -1132,10 +1138,10 @@ const txnOverrideTests = [
name: "update with chained stepdown errors",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotWritablePrimary});
// Chain multiple update errors together.
attachPostCmdFunction("update", function() {
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotWritablePrimary});
});
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().toArray(), [{_id: 1}]);
@@ -1172,7 +1178,8 @@ const txnOverrideTests = [
name: "commit transaction with stepdown",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["commitTransaction"],
+ {errorCode: ErrorCodes.NotWritablePrimary});
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
assert.throws(() => endCurrentTransactionIfOpen());
@@ -1182,9 +1189,10 @@ const txnOverrideTests = [
name: "commit transaction with WriteConcernError",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(
- ["commitTransaction"],
- {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
+ failCommandWithFailPoint(["commitTransaction"], {
+ writeConcernError:
+ {code: ErrorCodes.NotWritablePrimary, codeName: "NotWritablePrimary"}
+ });
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
assert.throws(() => endCurrentTransactionIfOpen());
@@ -1195,10 +1203,10 @@ const txnOverrideTests = [
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
failCommandWithErrorAndWCENoRun("commitTransaction",
- ErrorCodes.NotMaster,
- "NotMaster",
- ErrorCodes.NotMaster,
- "NotMaster");
+ ErrorCodes.NotWritablePrimary,
+ "NotWritablePrimary",
+ ErrorCodes.NotWritablePrimary,
+ "NotWritablePrimary");
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
assert.throws(() => endCurrentTransactionIfOpen());
@@ -1211,8 +1219,8 @@ const txnOverrideTests = [
failCommandWithErrorAndWCENoRun("commitTransaction",
ErrorCodes.OperationFailed,
"OperationFailed",
- ErrorCodes.NotMaster,
- "NotMaster");
+ ErrorCodes.NotWritablePrimary,
+ "NotWritablePrimary");
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
assert.throws(() => endCurrentTransactionIfOpen());
@@ -1236,8 +1244,8 @@ const txnOverrideTests = [
failCommandWithErrorAndWCENoRun("commitTransaction",
ErrorCodes.NoSuchTransaction,
"NoSuchTransaction",
- ErrorCodes.NotMaster,
- "NotMaster");
+ ErrorCodes.NotWritablePrimary,
+ "NotWritablePrimary");
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
assert.throws(() => endCurrentTransactionIfOpen());
@@ -1270,7 +1278,8 @@ const txnOverrideTests = [
name: "commit transaction with WriteConcernError no success",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithWCENoRun("commitTransaction", ErrorCodes.NotMaster, "NotMaster");
+ failCommandWithWCENoRun(
+ "commitTransaction", ErrorCodes.NotWritablePrimary, "NotWritablePrimary");
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
assert.throws(() => endCurrentTransactionIfOpen());
@@ -1334,10 +1343,10 @@ const txnOverridePlusRetryOnNetworkErrorTests = [
failCommandWithErrorAndWCENoRun("drop",
ErrorCodes.NamespaceNotFound,
"NamespaceNotFound",
- ErrorCodes.NotMaster,
- "NotMaster");
+ ErrorCodes.NotWritablePrimary,
+ "NotWritablePrimary");
coll1.drop();
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotWritablePrimary});
assert.commandWorked(coll1.insert({a: 2, b: {c: 7, d: "d is good"}}));
const cursor = coll1.find({
@@ -1369,10 +1378,10 @@ const txnOverridePlusRetryOnNetworkErrorTests = [
}
},
{
- name: "retry on NotMaster",
+ name: "retry on NotWritablePrimary",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotWritablePrimary});
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
@@ -1381,10 +1390,10 @@ const txnOverridePlusRetryOnNetworkErrorTests = [
}
},
{
- name: "retry on NotMaster with object change",
+ name: "retry on NotWritablePrimary with object change",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotWritablePrimary});
let obj1 = {_id: 1, x: 5};
let obj2 = {_id: 2, x: 5};
assert.commandWorked(coll1.insert(obj1));
@@ -1402,7 +1411,7 @@ const txnOverridePlusRetryOnNetworkErrorTests = [
name: "update with stepdown",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotWritablePrimary});
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().toArray(), [{_id: 1}]);
assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
@@ -1455,7 +1464,7 @@ const txnOverridePlusRetryOnNetworkErrorTests = [
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
failCommandWithFailPoint(["update"],
- {errorCode: ErrorCodes.NotMaster, mode: {times: 2}});
+ {errorCode: ErrorCodes.NotWritablePrimary, mode: {times: 2}});
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().toArray(), [{_id: 1}]);
assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
@@ -1471,10 +1480,10 @@ const txnOverridePlusRetryOnNetworkErrorTests = [
name: "update with chained stepdown errors",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotWritablePrimary});
// Chain multiple update errors together.
attachPostCmdFunction("update", function() {
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotWritablePrimary});
});
assert.commandWorked(coll1.insert({_id: 1}));
assert.eq(coll1.find().toArray(), [{_id: 1}]);
@@ -1491,7 +1500,8 @@ const txnOverridePlusRetryOnNetworkErrorTests = [
name: "commit transaction with stepdown",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"], {errorCode: ErrorCodes.NotMaster});
+ failCommandWithFailPoint(["commitTransaction"],
+ {errorCode: ErrorCodes.NotWritablePrimary});
assert.commandWorked(coll1.insert({_id: 1}));
assert.commandWorked(coll2.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
@@ -1506,9 +1516,10 @@ const txnOverridePlusRetryOnNetworkErrorTests = [
name: "commit transaction with WriteConcernError",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(
- ["commitTransaction"],
- {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
+ failCommandWithFailPoint(["commitTransaction"], {
+ writeConcernError:
+ {code: ErrorCodes.NotWritablePrimary, codeName: "NotWritablePrimary"}
+ });
assert.commandWorked(coll1.insert({_id: 1}));
assert.commandWorked(coll2.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
@@ -1524,10 +1535,10 @@ const txnOverridePlusRetryOnNetworkErrorTests = [
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
failCommandWithErrorAndWCENoRun("commitTransaction",
- ErrorCodes.NotMaster,
- "NotMaster",
- ErrorCodes.NotMaster,
- "NotMaster");
+ ErrorCodes.NotWritablePrimary,
+ "NotWritablePrimary",
+ ErrorCodes.NotWritablePrimary,
+ "NotWritablePrimary");
assert.commandWorked(coll1.insert({_id: 1}));
assert.commandWorked(coll2.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
@@ -1545,8 +1556,8 @@ const txnOverridePlusRetryOnNetworkErrorTests = [
failCommandWithErrorAndWCENoRun("commitTransaction",
ErrorCodes.OperationFailed,
"OperationFailed",
- ErrorCodes.NotMaster,
- "NotMaster");
+ ErrorCodes.NotWritablePrimary,
+ "NotWritablePrimary");
assert.commandWorked(coll1.insert({_id: 1}));
assert.commandWorked(coll2.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
@@ -1563,8 +1574,8 @@ const txnOverridePlusRetryOnNetworkErrorTests = [
failCommandWithErrorAndWCENoRun("commitTransaction",
ErrorCodes.OperationFailed,
"OperationFailed",
- ErrorCodes.NotMaster,
- "NotMaster");
+ ErrorCodes.NotWritablePrimary,
+ "NotWritablePrimary");
// After commitTransaction fails, fail it again with just the ordinary error.
attachPostCmdFunction("commitTransaction", function() {
failCommandWithFailPoint(["commitTransaction"],
@@ -1597,8 +1608,8 @@ const txnOverridePlusRetryOnNetworkErrorTests = [
failCommandWithErrorAndWCENoRun("commitTransaction",
ErrorCodes.NoSuchTransaction,
"NoSuchTransaction",
- ErrorCodes.NotMaster,
- "NotMaster");
+ ErrorCodes.NotWritablePrimary,
+ "NotWritablePrimary");
assert.commandWorked(coll1.insert({_id: 1}));
assert.commandWorked(coll2.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
@@ -1643,7 +1654,8 @@ const txnOverridePlusRetryOnNetworkErrorTests = [
name: "commit transaction with WriteConcernError no success",
test: function() {
assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithWCENoRun("commitTransaction", ErrorCodes.NotMaster, "NotMaster");
+ failCommandWithWCENoRun(
+ "commitTransaction", ErrorCodes.NotWritablePrimary, "NotWritablePrimary");
assert.commandWorked(coll1.insert({_id: 1}));
assert.commandWorked(coll2.insert({_id: 1}));
assert.eq(coll1.find().itcount(), 1);
diff --git a/jstests/replsets/validate_fails_during_rollback.js b/jstests/replsets/validate_fails_during_rollback.js
index 8c67d33fd1a..5d8b8aff7df 100644
--- a/jstests/replsets/validate_fails_during_rollback.js
+++ b/jstests/replsets/validate_fails_during_rollback.js
@@ -24,7 +24,8 @@ rollbackTest.transitionToSyncSourceOperationsDuringRollback();
// Wait for rollback to hang.
checkLog.contains(rollbackNode, "rollbackHangAfterTransitionToRollback fail point enabled.");
-// Try to run the validate command on the rollback node. This should fail with a NotMaster error.
+// Try to run the validate command on the rollback node. This should fail with a
+// NotMasterOrSecondary error.
assert.commandFailedWithCode(rollbackNode.getDB(dbName).runCommand({"validate": collName}),
ErrorCodes.NotMasterOrSecondary);
diff --git a/jstests/sharding/change_stream_error_label.js b/jstests/sharding/change_stream_error_label.js
index db3dec9a60e..a869f9539be 100644
--- a/jstests/sharding/change_stream_error_label.js
+++ b/jstests/sharding/change_stream_error_label.js
@@ -32,7 +32,7 @@ const expectedStopShardErrors = [
ErrorCodes.SocketException,
ErrorCodes.ShutdownInProgress,
ErrorCodes.PrimarySteppedDown,
- ErrorCodes.NotMaster,
+ ErrorCodes.NotWritablePrimary,
ErrorCodes.InterruptedAtShutdown,
ErrorCodes.InterruptedDueToReplStateChange,
ErrorCodes.NotMasterNoSlaveOk,
diff --git a/jstests/sharding/linearizable_read_concern.js b/jstests/sharding/linearizable_read_concern.js
index d83362a898c..0ac630639a4 100644
--- a/jstests/sharding/linearizable_read_concern.js
+++ b/jstests/sharding/linearizable_read_concern.js
@@ -85,7 +85,7 @@ var res = assert.commandFailed(testDB.runReadCommand({
readConcern: {level: "linearizable"},
maxTimeMS: shard0ReplTest.kDefaultTimeoutMS
}));
-assert.eq(res.code, ErrorCodes.NotMaster);
+assert.eq(res.code, ErrorCodes.NotWritablePrimary);
jsTestLog("Testing linearizable read from primaries.");
diff --git a/jstests/sharding/migration_coordinator_failover_include.js b/jstests/sharding/migration_coordinator_failover_include.js
index b6abbd1ce1e..faeb33b0ad9 100644
--- a/jstests/sharding/migration_coordinator_failover_include.js
+++ b/jstests/sharding/migration_coordinator_failover_include.js
@@ -22,7 +22,7 @@ function runMoveChunkMakeDonorStepDownAfterFailpoint(st,
expectAbortDecisionWithCode + "; ns is " + ns);
// Wait for mongos to see a primary node on the primary shard, because mongos does not retry
- // writes on NotMaster errors, and we are about to insert docs through mongos.
+ // writes on NotPrimary errors, and we are about to insert docs through mongos.
awaitRSClientHosts(st.s, st.rs0.getPrimary(), {ok: true, ismaster: true});
// Insert some docs into the collection so that the migration leaves orphans on either the
@@ -99,4 +99,4 @@ function runMoveChunkMakeDonorStepDownAfterFailpoint(st,
assert.commandWorked(st.configRS.getPrimary().adminCommand(
{configureFailPoint: "migrationCommitVersionError", mode: "off"}));
}
-} \ No newline at end of file
+}
diff --git a/jstests/sharding/read_write_concern_defaults_commands_api.js b/jstests/sharding/read_write_concern_defaults_commands_api.js
index 9575870733e..2e433244f2d 100644
--- a/jstests/sharding/read_write_concern_defaults_commands_api.js
+++ b/jstests/sharding/read_write_concern_defaults_commands_api.js
@@ -324,7 +324,7 @@ jsTestLog("Testing standalone replica set...");
assert.commandFailedWithCode(
rst.getSecondary().adminCommand(
{setDefaultRWConcern: 1, defaultReadConcern: {level: "local"}}),
- ErrorCodes.NotMaster);
+ ErrorCodes.NotWritablePrimary);
rst.stopSet();
}
@@ -347,7 +347,7 @@ jsTestLog("Testing sharded cluster...");
assert.commandFailedWithCode(
st.rs0.getSecondary().adminCommand(
{setDefaultRWConcern: 1, defaultReadConcern: {level: "local"}}),
- ErrorCodes.NotMaster);
+ ErrorCodes.NotWritablePrimary);
// Config server primary succeeds.
verifyDefaultRWCommandsValidInput(st.configRS.getPrimary());
@@ -359,7 +359,7 @@ jsTestLog("Testing sharded cluster...");
assert.commandFailedWithCode(
st.configRS.getSecondary().adminCommand(
{setDefaultRWConcern: 1, defaultReadConcern: {level: "local"}}),
- ErrorCodes.NotMaster);
+ ErrorCodes.NotWritablePrimary);
st.stop();
}
diff --git a/jstests/sharding/retryable_write_error_labels.js b/jstests/sharding/retryable_write_error_labels.js
index 05027284f3d..6795e6906fb 100644
--- a/jstests/sharding/retryable_write_error_labels.js
+++ b/jstests/sharding/retryable_write_error_labels.js
@@ -124,7 +124,7 @@ function runTest(errorCode, isWCError) {
const retryableCodes = [
ErrorCodes.InterruptedAtShutdown,
ErrorCodes.InterruptedDueToReplStateChange,
- ErrorCodes.NotMaster,
+ ErrorCodes.NotWritablePrimary,
ErrorCodes.NotMasterNoSlaveOk,
ErrorCodes.NotMasterOrSecondary,
ErrorCodes.PrimarySteppedDown,
diff --git a/jstests/sharding/transient_txn_error_labels.js b/jstests/sharding/transient_txn_error_labels.js
index 36715f301f7..0962862e891 100644
--- a/jstests/sharding/transient_txn_error_labels.js
+++ b/jstests/sharding/transient_txn_error_labels.js
@@ -46,7 +46,7 @@ let res = secondarySessionDb.runCommand({
startTransaction: true,
autocommit: false
});
-assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
+assert.commandFailedWithCode(res, ErrorCodes.NotWritablePrimary);
assert.eq(res.errorLabels, ["TransientTransactionError"]);
jsTest.log("failCommand with errorLabels but without errorCode or writeConcernError should not " +
@@ -66,7 +66,7 @@ res = secondarySessionDb.runCommand({
startTransaction: true,
autocommit: false
});
-assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
+assert.commandFailedWithCode(res, ErrorCodes.NotWritablePrimary);
// Server should continue to return TransientTransactionError label.
assert.eq(res.errorLabels, ["TransientTransactionError"]);
assert.commandWorked(secondary.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
@@ -77,7 +77,7 @@ txnNumber++;
res = secondarySessionDb.runCommand(
{insert: collName, documents: [{_id: "insert-1"}], txnNumber: NumberLong(txnNumber)});
-assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
+assert.commandFailedWithCode(res, ErrorCodes.NotWritablePrimary);
assert.eq(res.errorLabels, ["RetryableWriteError"], res);
secondarySession.endSession();
@@ -116,24 +116,25 @@ assert.commandFailedWithCode(res, ErrorCodes.WriteConflict);
assert.eq(res.errorLabels, ["TransientTransactionError"]);
assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
-jsTest.log("NotMaster returned by commitTransaction command is not TransientTransactionError but" +
- " RetryableWriteError");
+jsTest.log(
+ "NotWritablePrimary returned by commitTransaction command is not TransientTransactionError but" +
+ " RetryableWriteError");
// commitTransaction will attempt to perform a noop write in response to a NoSuchTransaction
-// error and non-empty writeConcern. This will throw NotMaster.
+// error and non-empty writeConcern. This will throw NotWritablePrimary.
res = secondarySessionDb.adminCommand({
commitTransaction: 1,
txnNumber: NumberLong(secondarySession.getTxnNumber_forTesting() + 1),
autocommit: false,
writeConcern: {w: "majority"}
});
-assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
+assert.commandFailedWithCode(res, ErrorCodes.NotWritablePrimary);
assert.eq(res.errorLabels, ["RetryableWriteError"], res);
jsTest.log(
- "NotMaster returned by coordinateCommitTransaction command is not TransientTransactionError" +
+ "NotWritablePrimary returned by coordinateCommitTransaction command is not TransientTransactionError" +
" but RetryableWriteError");
// coordinateCommitTransaction will attempt to perform a noop write in response to a
-// NoSuchTransaction error and non-empty writeConcern. This will throw NotMaster.
+// NoSuchTransaction error and non-empty writeConcern. This will throw NotWritablePrimary.
res = secondarySessionDb.adminCommand({
coordinateCommitTransaction: 1,
participants: [],
@@ -141,7 +142,7 @@ res = secondarySessionDb.adminCommand({
autocommit: false,
writeConcern: {w: "majority"}
});
-assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
+assert.commandFailedWithCode(res, ErrorCodes.NotWritablePrimary);
assert.eq(res.errorLabels, ["RetryableWriteError"], res);
jsTest.log("ShutdownInProgress returned by write commands is TransientTransactionError");
diff --git a/jstests/sharding/txn_two_phase_commit_basic.js b/jstests/sharding/txn_two_phase_commit_basic.js
index 96217023565..87fa9a394b8 100644
--- a/jstests/sharding/txn_two_phase_commit_basic.js
+++ b/jstests/sharding/txn_two_phase_commit_basic.js
@@ -84,7 +84,7 @@ const startSimulatingNetworkFailures = function(connArray) {
configureFailPoint: "failCommand",
mode: {times: 10},
data: {
- errorCode: ErrorCodes.NotMaster,
+ errorCode: ErrorCodes.NotWritablePrimary,
failCommands: ["prepareTransaction", "abortTransaction", "commitTransaction"]
}
}));
diff --git a/src/mongo/base/error_codes.yml b/src/mongo/base/error_codes.yml
index 676d675dc00..59e2ef6f696 100644
--- a/src/mongo/base/error_codes.yml
+++ b/src/mongo/base/error_codes.yml
@@ -405,7 +405,7 @@ error_codes:
- {code: 9001,name: SocketException,categories: [NetworkError,RetriableError]}
- {code: 9996,name: OBSOLETE_RecvStaleConfig}
- {code: 10003,name: CannotGrowDocumentInCappedNamespace}
- - {code: 10107,name: NotMaster,categories: [NotMasterError,RetriableError]}
+ - {code: 10107,name: NotWritablePrimary,categories: [NotMasterError,RetriableError]}
- {code: 10334,name: BSONObjectTooLarge}
- {code: 11000,name: DuplicateKey,extra: DuplicateKeyErrorInfo}
- {code: 11600,name: InterruptedAtShutdown,categories: [Interruption,ShutdownError,CancelationError,RetriableError]}
diff --git a/src/mongo/client/dbclient_connection.cpp b/src/mongo/client/dbclient_connection.cpp
index 66714d742de..a8b771be019 100644
--- a/src/mongo/client/dbclient_connection.cpp
+++ b/src/mongo/client/dbclient_connection.cpp
@@ -819,7 +819,7 @@ void DBClientConnection::handleNotMasterResponse(const BSONObj& replyBody,
auto monitor = ReplicaSetMonitor::get(_parentReplSetName);
if (monitor) {
monitor->failedHost(_serverAddress,
- {ErrorCodes::NotMaster,
+ {ErrorCodes::NotWritablePrimary,
str::stream() << "got not master from: " << _serverAddress
<< " of repl set: " << _parentReplSetName});
}
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
index b9871482aac..0f3df602910 100644
--- a/src/mongo/client/dbclient_rs.cpp
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -707,7 +707,7 @@ void DBClientReplicaSet::isntMaster() {
// monitor doesn't exist.
_rsm->failedHost(
_masterHost,
- {ErrorCodes::NotMaster, str::stream() << "got not master for: " << _masterHost});
+ {ErrorCodes::NotWritablePrimary, str::stream() << "got not master for: " << _masterHost});
resetMaster();
}
diff --git a/src/mongo/client/scanning_replica_set_monitor.cpp b/src/mongo/client/scanning_replica_set_monitor.cpp
index ee5a1144861..ed6058bcc76 100644
--- a/src/mongo/client/scanning_replica_set_monitor.cpp
+++ b/src/mongo/client/scanning_replica_set_monitor.cpp
@@ -850,7 +850,7 @@ Status Refresher::receivedIsMasterFromMaster(const HostAndPort& from, const IsMa
// since they don't have the same ordering with pv1 electionId.
if (reply.configVersion < _set->configVersion) {
return {
- ErrorCodes::NotMaster,
+ ErrorCodes::NotWritablePrimary,
str::stream() << "Node " << from << " believes it is primary, but its config version "
<< reply.configVersion << " is older than the most recent config version "
<< _set->configVersion};
@@ -863,7 +863,7 @@ Status Refresher::receivedIsMasterFromMaster(const HostAndPort& from, const IsMa
if (reply.configVersion == _set->configVersion && _set->maxElectionId.isSet() &&
_set->maxElectionId.compare(reply.electionId) > 0) {
return {
- ErrorCodes::NotMaster,
+ ErrorCodes::NotWritablePrimary,
str::stream() << "Node " << from << " believes it is primary, but its election id "
<< reply.electionId << " is older than the most recent election id "
<< _set->maxElectionId};
diff --git a/src/mongo/client/streamable_replica_set_monitor_error_handler_test.cpp b/src/mongo/client/streamable_replica_set_monitor_error_handler_test.cpp
index ef0eb80e61a..497d3d04f2a 100644
--- a/src/mongo/client/streamable_replica_set_monitor_error_handler_test.cpp
+++ b/src/mongo/client/streamable_replica_set_monitor_error_handler_test.cpp
@@ -111,7 +111,7 @@ public:
ErrorCodes::NotMasterOrSecondary,
ErrorCodes::PrimarySteppedDown,
ErrorCodes::ShutdownInProgress,
- ErrorCodes::NotMaster,
+ ErrorCodes::NotWritablePrimary,
ErrorCodes::NotMasterNoSlaveOk};
inline static const std::string kSetName = "setName";
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index 5dd3533fc91..81c9661c819 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -63,7 +63,7 @@ Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionNam
!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, collectionName);
if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
+ return Status(ErrorCodes::NotWritablePrimary,
str::stream()
<< "Not primary while truncating collection: " << collectionName);
}
@@ -249,7 +249,7 @@ void convertToCapped(OperationContext* opCtx, const NamespaceString& ns, long lo
bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, ns);
- uassert(ErrorCodes::NotMaster,
+ uassert(ErrorCodes::NotWritablePrimary,
str::stream() << "Not primary while converting " << ns << " to a capped collection",
!userInitiatedWritesAndNotPrimary);
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 8a75e773a89..e46ec8fd298 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -385,7 +385,7 @@ Status _collModInternal(OperationContext* opCtx,
!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss);
if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
+ return Status(ErrorCodes::NotWritablePrimary,
str::stream() << "Not primary while setting collection options on " << nss);
}
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index 43303c64a1e..2fef0686a47 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -70,7 +70,7 @@ Status _createView(OperationContext* opCtx,
if (opCtx->writesAreReplicated() &&
!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)) {
- return Status(ErrorCodes::NotMaster,
+ return Status(ErrorCodes::NotWritablePrimary,
str::stream() << "Not primary while creating collection " << nss);
}
@@ -130,7 +130,7 @@ Status _createCollection(OperationContext* opCtx,
if (opCtx->writesAreReplicated() &&
!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)) {
- return Status(ErrorCodes::NotMaster,
+ return Status(ErrorCodes::NotWritablePrimary,
str::stream() << "Not primary while creating collection " << nss);
}
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index c3470d56941..3ed987e475f 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -60,7 +60,7 @@ Status _checkNssAndReplState(OperationContext* opCtx, const Collection* coll) {
if (opCtx->writesAreReplicated() &&
!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, coll->ns())) {
- return Status(ErrorCodes::NotMaster,
+ return Status(ErrorCodes::NotWritablePrimary,
str::stream() << "Not primary while dropping collection " << coll->ns());
}
@@ -103,7 +103,7 @@ Status _dropView(OperationContext* opCtx,
if (opCtx->writesAreReplicated() &&
!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, collectionName)) {
- return Status(ErrorCodes::NotMaster,
+ return Status(ErrorCodes::NotWritablePrimary,
str::stream() << "Not primary while dropping collection " << collectionName);
}
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index 1d7e149adec..61c5dd953bd 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -71,7 +71,7 @@ Status _checkNssAndReplState(OperationContext* opCtx, Database* db, const std::s
opCtx->writesAreReplicated() && !replCoord->canAcceptWritesForDatabase(opCtx, dbName);
if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
+ return Status(ErrorCodes::NotWritablePrimary,
str::stream() << "Not primary while dropping database " << dbName);
}
diff --git a/src/mongo/db/catalog/drop_database_test.cpp b/src/mongo/db/catalog/drop_database_test.cpp
index da0fc666faa..8ee0a7bf6b2 100644
--- a/src/mongo/db/catalog/drop_database_test.cpp
+++ b/src/mongo/db/catalog/drop_database_test.cpp
@@ -213,12 +213,12 @@ TEST_F(DropDatabaseTest, DropDatabaseReturnsNamespaceNotFoundIfDatabaseDoesNotEx
dropDatabaseForApplyOps(_opCtx.get(), _nss.db().toString()));
}
-TEST_F(DropDatabaseTest, DropDatabaseReturnsNotMasterIfNotPrimary) {
+TEST_F(DropDatabaseTest, DropDatabaseReturnsNotWritablePrimaryIfNotPrimary) {
_createCollection(_opCtx.get(), _nss);
ASSERT_OK(_replCoord->setFollowerMode(repl::MemberState::RS_SECONDARY));
ASSERT_TRUE(_opCtx->writesAreReplicated());
ASSERT_FALSE(_replCoord->canAcceptWritesForDatabase(_opCtx.get(), _nss.db()));
- ASSERT_EQUALS(ErrorCodes::NotMaster,
+ ASSERT_EQUALS(ErrorCodes::NotWritablePrimary,
dropDatabaseForApplyOps(_opCtx.get(), _nss.db().toString()));
}
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index f44a977ae49..08def9db579 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -79,7 +79,7 @@ Status checkReplState(OperationContext* opCtx,
bool writesAreReplicatedAndNotPrimary = opCtx->writesAreReplicated() && !canAcceptWrites;
if (writesAreReplicatedAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
+ return Status(ErrorCodes::NotWritablePrimary,
str::stream() << "Not primary while dropping indexes on database "
<< dbAndUUID.db() << " with collection " << dbAndUUID.uuid());
}
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 111629b3fb4..08383c041bc 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -93,7 +93,7 @@ Status checkSourceAndTargetNamespaces(OperationContext* opCtx,
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
if (opCtx->writesAreReplicated() && !replCoord->canAcceptWritesFor(opCtx, source))
- return Status(ErrorCodes::NotMaster,
+ return Status(ErrorCodes::NotWritablePrimary,
str::stream() << "Not primary while renaming collection " << source << " to "
<< target);
diff --git a/src/mongo/db/catalog/rename_collection_test.cpp b/src/mongo/db/catalog/rename_collection_test.cpp
index 31895d624f8..726265f469f 100644
--- a/src/mongo/db/catalog/rename_collection_test.cpp
+++ b/src/mongo/db/catalog/rename_collection_test.cpp
@@ -529,12 +529,12 @@ TEST_F(RenameCollectionTest,
ASSERT_TRUE(_collectionExists(_opCtx.get(), dropPendingNss));
}
-TEST_F(RenameCollectionTest, RenameCollectionReturnsNotMasterIfNotPrimary) {
+TEST_F(RenameCollectionTest, RenameCollectionReturnsNotWritablePrimaryIfNotPrimary) {
_createCollection(_opCtx.get(), _sourceNss);
ASSERT_OK(_replCoord->setFollowerMode(repl::MemberState::RS_SECONDARY));
ASSERT_TRUE(_opCtx->writesAreReplicated());
ASSERT_FALSE(_replCoord->canAcceptWritesForDatabase(_opCtx.get(), _sourceNss.db()));
- ASSERT_EQUALS(ErrorCodes::NotMaster,
+ ASSERT_EQUALS(ErrorCodes::NotWritablePrimary,
renameCollection(_opCtx.get(), _sourceNss, _targetNss, {}));
}
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 4d445043f6c..b0f8954adc6 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -95,7 +95,7 @@ struct Cloner::Fun {
void operator()(DBClientCursorBatchIterator& i) {
boost::optional<Lock::DBLock> dbLock;
dbLock.emplace(opCtx, _dbName, MODE_X);
- uassert(ErrorCodes::NotMaster,
+ uassert(ErrorCodes::NotWritablePrimary,
str::stream() << "Not primary while cloning collection " << nss,
!opCtx->writesAreReplicated() ||
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss));
@@ -529,7 +529,7 @@ Status Cloner::copyDb(OperationContext* opCtx,
}
uassert(
- ErrorCodes::NotMaster,
+ ErrorCodes::NotWritablePrimary,
str::stream() << "Not primary while cloning database " << dBName
<< " (after getting list of collections to clone)",
!opCtx->writesAreReplicated() ||
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 353a5fe51c3..1ac73e3da17 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -119,7 +119,7 @@ public:
Lock::CollectionLock collLock(opCtx, toNs, MODE_X);
if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, toNs)) {
- uasserted(ErrorCodes::NotMaster,
+ uasserted(ErrorCodes::NotWritablePrimary,
str::stream() << "Not primary while cloning collection " << from << " to "
<< to << " (as capped)");
}
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 016f6d3cb02..e4a66da2d4b 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -513,7 +513,7 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
Lock::DBLock dbLock(opCtx, ns.db(), MODE_IS);
checkDatabaseShardingState(opCtx, ns);
if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, ns)) {
- uasserted(ErrorCodes::NotMaster,
+ uasserted(ErrorCodes::NotWritablePrimary,
str::stream() << "Not primary while creating indexes in " << ns.ns());
}
@@ -662,7 +662,7 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
if (indexBuildsCoord->abortIndexBuildByBuildUUID(
opCtx, buildUUID, IndexBuildAction::kPrimaryAbort, abortReason)) {
LOGV2(20446,
- "Index build: aborted due to NotMaster error",
+ "Index build: aborted due to NotPrimary error",
"buildUUID"_attr = buildUUID);
} else {
// The index build may already be in the midst of tearing down.
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 5252df3c66f..c7b1af95ef2 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -107,7 +107,7 @@ public:
AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
// Even though reIndex is a standalone-only command, this will return that the command is
// allowed on secondaries so that it will fail with a more useful error message to the user
- // rather than with a NotMaster error.
+ // rather than with a NotWritablePrimary error.
return AllowedOnSecondary::kAlways;
}
virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 161e44d6dbd..62b4ca577bb 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -183,7 +183,7 @@ void appendCommandResponse(const PlanExecutor* exec,
}
void assertCanWrite(OperationContext* opCtx, const NamespaceString& nsString) {
- uassert(ErrorCodes::NotMaster,
+ uassert(ErrorCodes::NotWritablePrimary,
str::stream() << "Not primary while running findAndModify command on collection "
<< nsString.ns(),
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nsString));
diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp
index b0375a47ddc..74aeecbbc64 100644
--- a/src/mongo/db/commands/oplog_note.cpp
+++ b/src/mongo/db/commands/oplog_note.cpp
@@ -66,7 +66,7 @@ Status _performNoopWrite(OperationContext* opCtx, BSONObj msgObj, StringData not
// Its a proxy for being a primary passing "local" will cause it to return true on secondary
if (!replCoord->canAcceptWritesForDatabase(opCtx, "admin")) {
- return {ErrorCodes::NotMaster, "Not a primary"};
+ return {ErrorCodes::NotWritablePrimary, "Not a primary"};
}
writeConflictRetry(opCtx, note, NamespaceString::kRsOplogNamespace.ns(), [&opCtx, &msgObj] {
diff --git a/src/mongo/db/commands/shutdown_d.cpp b/src/mongo/db/commands/shutdown_d.cpp
index ac5b5bf6f4a..6dc11a8220a 100644
--- a/src/mongo/db/commands/shutdown_d.cpp
+++ b/src/mongo/db/commands/shutdown_d.cpp
@@ -61,8 +61,8 @@ Status stepDownForShutdown(OperationContext* opCtx,
LOGV2(4695100, "hangInShutdownAfterStepdown failpoint enabled");
hangInShutdownAfterStepdown.pauseWhileSet(opCtx);
}
- } catch (const ExceptionFor<ErrorCodes::NotMaster>&) {
- // Ignore not master errors.
+ } catch (const ExceptionFor<ErrorCodes::NotWritablePrimary>&) {
+ // Ignore NotWritablePrimary errors.
} catch (const DBException& e) {
if (!forceShutdown) {
return e.toStatus();
diff --git a/src/mongo/db/error_labels_test.cpp b/src/mongo/db/error_labels_test.cpp
index 6a176c495ea..a9d4cc27f0c 100644
--- a/src/mongo/db/error_labels_test.cpp
+++ b/src/mongo/db/error_labels_test.cpp
@@ -51,8 +51,9 @@ TEST(IsTransientTransactionErrorTest, NetworkErrorsAreNotTransientOnCommit) {
}
TEST(IsTransientTransactionErrorTest, RetryableWriteErrorsAreNotTransientOnAbort) {
- ASSERT_FALSE(isTransientTransactionError(
- ErrorCodes::NotMaster, false /* hasWriteConcernError */, true /* isCommitOrAbort */));
+ ASSERT_FALSE(isTransientTransactionError(ErrorCodes::NotWritablePrimary,
+ false /* hasWriteConcernError */,
+ true /* isCommitOrAbort */));
}
TEST(IsTransientTransactionErrorTest,
@@ -129,7 +130,7 @@ TEST_F(ErrorLabelBuilderTest, NonTransientTransactionErrorsHaveNoTransientTransa
sessionInfo.setAutocommit(false);
std::string commandName = "commitTransaction";
ErrorLabelBuilder builder(
- opCtx(), sessionInfo, commandName, ErrorCodes::NotMaster, boost::none, false);
+ opCtx(), sessionInfo, commandName, ErrorCodes::NotWritablePrimary, boost::none, false);
ASSERT_FALSE(builder.isTransientTransactionError());
}
@@ -147,7 +148,7 @@ TEST_F(ErrorLabelBuilderTest, NonRetryableWritesHaveNoRetryableWriteErrorLabel)
OperationSessionInfoFromClient sessionInfo;
std::string commandName = "insert";
ErrorLabelBuilder builder(
- opCtx(), sessionInfo, commandName, ErrorCodes::NotMaster, boost::none, false);
+ opCtx(), sessionInfo, commandName, ErrorCodes::NotWritablePrimary, boost::none, false);
// Test regular writes.
ASSERT_FALSE(builder.isRetryableWriteError());
@@ -172,7 +173,7 @@ TEST_F(ErrorLabelBuilderTest, RetryableWriteErrorsHaveRetryableWriteErrorLabel)
sessionInfo.setTxnNumber(1);
std::string commandName = "update";
ErrorLabelBuilder builder(
- opCtx(), sessionInfo, commandName, ErrorCodes::NotMaster, boost::none, false);
+ opCtx(), sessionInfo, commandName, ErrorCodes::NotWritablePrimary, boost::none, false);
ASSERT_TRUE(builder.isRetryableWriteError());
}
@@ -182,7 +183,7 @@ TEST_F(ErrorLabelBuilderTest,
sessionInfo.setTxnNumber(1);
std::string commandName = "update";
ErrorLabelBuilder builder(
- opCtx(), sessionInfo, commandName, ErrorCodes::NotMaster, boost::none, true);
+ opCtx(), sessionInfo, commandName, ErrorCodes::NotWritablePrimary, boost::none, true);
ASSERT_FALSE(builder.isRetryableWriteError());
}
@@ -222,17 +223,17 @@ TEST_F(ErrorLabelBuilderTest, RetryableWriteErrorsOnCommitAbortHaveRetryableWrit
commandName = "commitTransaction";
ErrorLabelBuilder commitBuilder(
- opCtx(), sessionInfo, commandName, ErrorCodes::NotMaster, boost::none, false);
+ opCtx(), sessionInfo, commandName, ErrorCodes::NotWritablePrimary, boost::none, false);
ASSERT_TRUE(commitBuilder.isRetryableWriteError());
commandName = "coordinateCommitTransaction";
ErrorLabelBuilder coordinateCommitBuilder(
- opCtx(), sessionInfo, commandName, ErrorCodes::NotMaster, boost::none, false);
+ opCtx(), sessionInfo, commandName, ErrorCodes::NotWritablePrimary, boost::none, false);
ASSERT_TRUE(coordinateCommitBuilder.isRetryableWriteError());
commandName = "abortTransaction";
ErrorLabelBuilder abortBuilder(
- opCtx(), sessionInfo, commandName, ErrorCodes::NotMaster, boost::none, false);
+ opCtx(), sessionInfo, commandName, ErrorCodes::NotWritablePrimary, boost::none, false);
ASSERT_TRUE(abortBuilder.isRetryableWriteError());
}
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index d62ddd23692..c8caafc318f 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -1245,7 +1245,7 @@ bool IndexBuildsCoordinator::abortIndexBuildByBuildUUID(OperationContext* opCtx,
if (IndexBuildAction::kPrimaryAbort == signalAction &&
!replCoord->canAcceptWritesFor(opCtx, dbAndUUID)) {
- uassertStatusOK({ErrorCodes::NotMaster,
+ uassertStatusOK({ErrorCodes::NotWritablePrimary,
str::stream()
<< "Unable to abort index build because we are not primary: "
<< buildUUID});
@@ -2064,7 +2064,7 @@ IndexBuildsCoordinator::PostSetupAction IndexBuildsCoordinator::_setUpIndexBuild
// so we must fail the index build. During initial sync, there is no commit timestamp set.
if (replSetAndNotPrimary &&
indexBuildOptions.applicationMode != ApplicationMode::kInitialSync) {
- uassert(ErrorCodes::NotMaster,
+ uassert(ErrorCodes::NotWritablePrimary,
str::stream() << "Replication state changed while setting up the index build: "
<< replState->buildUUID,
!startTimestamp.isNull());
@@ -2786,7 +2786,7 @@ IndexBuildsCoordinator::CommitResult IndexBuildsCoordinator::_insertKeysFromSide
// commit, trigger a self-abort.
if (!isMaster && IndexBuildAction::kSinglePhaseCommit == action) {
uassertStatusOK(
- {ErrorCodes::NotMaster,
+ {ErrorCodes::NotWritablePrimary,
str::stream() << "Unable to commit index build because we are no longer primary: "
<< replState->buildUUID});
}
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index 0d08ee0e8de..f503f4f572d 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -174,7 +174,7 @@ IndexBuildsCoordinatorMongod::_startIndexBuild(OperationContext* opCtx,
Lock::GlobalLock globalLk(opCtx, MODE_IX);
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
- uassert(ErrorCodes::NotMaster,
+ uassert(ErrorCodes::NotWritablePrimary,
"Not primary while waiting to start an index build",
replCoord->canAcceptWritesFor(opCtx, nssOrUuid));
}
diff --git a/src/mongo/db/mongod_main.cpp b/src/mongo/db/mongod_main.cpp
index 658ae826cad..c04117cfae3 100644
--- a/src/mongo/db/mongod_main.cpp
+++ b/src/mongo/db/mongod_main.cpp
@@ -506,8 +506,8 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
"error"_attr = redact(status));
if (status == ErrorCodes::AuthSchemaIncompatible) {
exitCleanly(EXIT_NEED_UPGRADE);
- } else if (status == ErrorCodes::NotMaster) {
- // Try creating the indexes if we become master. If we do not become master,
+ } else if (status == ErrorCodes::NotWritablePrimary) {
+ // Try creating the indexes if we become primary. If we do not become primary,
// the master will create the indexes and we will replicate them.
} else {
quickExit(EXIT_FAILURE);
diff --git a/src/mongo/db/op_observer_impl_test.cpp b/src/mongo/db/op_observer_impl_test.cpp
index 856f650f6c3..51614c4b62e 100644
--- a/src/mongo/db/op_observer_impl_test.cpp
+++ b/src/mongo/db/op_observer_impl_test.cpp
@@ -456,7 +456,8 @@ TEST_F(OpObserverTest, MustBePrimaryToWriteOplogEntries) {
WriteUnitOfWork wunit(opCtx.get());
// No-op writes should be prohibited.
- ASSERT_THROWS_CODE(opObserver.onOpMessage(opCtx.get(), {}), DBException, ErrorCodes::NotMaster);
+ ASSERT_THROWS_CODE(
+ opObserver.onOpMessage(opCtx.get(), {}), DBException, ErrorCodes::NotWritablePrimary);
}
/**
diff --git a/src/mongo/db/read_concern_mongod.cpp b/src/mongo/db/read_concern_mongod.cpp
index 3663b9cdcc5..30b76024344 100644
--- a/src/mongo/db/read_concern_mongod.cpp
+++ b/src/mongo/db/read_concern_mongod.cpp
@@ -295,7 +295,7 @@ Status waitForReadConcernImpl(OperationContext* opCtx,
}
if (!replCoord->getMemberState().primary()) {
- return {ErrorCodes::NotMaster,
+ return {ErrorCodes::NotWritablePrimary,
"cannot satisfy linearizable read concern on non-primary node"};
}
}
@@ -442,7 +442,7 @@ Status waitForLinearizableReadConcernImpl(OperationContext* opCtx, const int rea
{
AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite);
if (!replCoord->canAcceptWritesForDatabase(opCtx, "admin")) {
- return {ErrorCodes::NotMaster,
+ return {ErrorCodes::NotWritablePrimary,
"No longer primary when waiting for linearizable read concern"};
}
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index 0caf8565939..d9c8565507f 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -412,7 +412,7 @@ Status applyOps(OperationContext* opCtx,
opCtx->writesAreReplicated() && !replCoord->canAcceptWritesForDatabase(opCtx, dbName);
if (userInitiatedWritesAndNotPrimary)
- return Status(ErrorCodes::NotMaster,
+ return Status(ErrorCodes::NotWritablePrimary,
str::stream() << "Not primary while applying ops to database " << dbName);
if (auto preCondition = info.getPreCondition()) {
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index c0a153252eb..185c9aa04f5 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -226,7 +226,7 @@ void _logOpsInner(OperationContext* opCtx,
ss << "(" << record.id << ", " << redact(record.data.toBson()) << ") ";
}
ss << "]";
- uasserted(ErrorCodes::NotMaster, ss);
+ uasserted(ErrorCodes::NotWritablePrimary, ss);
}
// TODO (SERVER-50598): Not allow tenant migration donor to write "commitIndexBuild" and
diff --git a/src/mongo/db/repl/primary_only_service.cpp b/src/mongo/db/repl/primary_only_service.cpp
index 7bef6832beb..4be2ac36730 100644
--- a/src/mongo/db/repl/primary_only_service.cpp
+++ b/src/mongo/db/repl/primary_only_service.cpp
@@ -110,7 +110,7 @@ public:
// ensure the OpCtx starts off immediately interrupted.
if (!clientState.allowOpCtxWhenServiceNotRunning &&
!clientState.primaryOnlyService->isRunning()) {
- opCtx->markKilled(ErrorCodes::NotMaster);
+ opCtx->markKilled(ErrorCodes::NotWritablePrimary);
}
}
void onDestroyOperationContext(OperationContext* opCtx) override {}
@@ -373,7 +373,7 @@ std::shared_ptr<PrimaryOnlyService::Instance> PrimaryOnlyService::getOrCreateIns
uassertStatusOK(_rebuildStatus);
}
uassert(
- ErrorCodes::NotMaster,
+ ErrorCodes::NotWritablePrimary,
str::stream() << "Not Primary when trying to create a new instance of PrimaryOnlyService "
<< getServiceName(),
_state == State::kRunning);
diff --git a/src/mongo/db/repl/primary_only_service.h b/src/mongo/db/repl/primary_only_service.h
index f1742a2eac9..791426a432f 100644
--- a/src/mongo/db/repl/primary_only_service.h
+++ b/src/mongo/db/repl/primary_only_service.h
@@ -235,7 +235,7 @@ protected:
* new Instance (by calling constructInstance()), registers it in _instances, and returns it.
* It is illegal to call this more than once with 'initialState' documents that have the same
* _id but are otherwise not completely identical.
- * Throws NotMaster if the node is not currently primary.
+ * Throws NotWritablePrimary if the node is not currently primary.
*/
std::shared_ptr<Instance> getOrCreateInstance(BSONObj initialState);
diff --git a/src/mongo/db/repl/primary_only_service_test.cpp b/src/mongo/db/repl/primary_only_service_test.cpp
index 076a5613181..bb75b4f8ba1 100644
--- a/src/mongo/db/repl/primary_only_service_test.cpp
+++ b/src/mongo/db/repl/primary_only_service_test.cpp
@@ -373,7 +373,7 @@ TEST_F(PrimaryOnlyServiceTest, CreateWhenNotPrimary) {
ASSERT_THROWS_CODE(
TestService::Instance::getOrCreate(_service, BSON("_id" << 0 << "state" << 0)),
DBException,
- ErrorCodes::NotMaster);
+ ErrorCodes::NotWritablePrimary);
}
TEST_F(PrimaryOnlyServiceTest, CreateWithoutID) {
@@ -484,7 +484,7 @@ TEST_F(PrimaryOnlyServiceTest, StepDownBeforeRebuildingInstances) {
// Let the previous stepUp attempt continue and realize that the node has since stepped down.
PrimaryOnlyServiceHangBeforeRebuildingInstances.setMode(FailPoint::off);
- ASSERT_THROWS_CODE(getInstanceFuture.get(), DBException, ErrorCodes::NotMaster);
+ ASSERT_THROWS_CODE(getInstanceFuture.get(), DBException, ErrorCodes::NotWritablePrimary);
// Now do another stepUp that is allowed to complete this time.
stateOneFPTimesEntered = TestServiceHangDuringStateOne.setMode(FailPoint::alwaysOn);
@@ -536,7 +536,7 @@ TEST_F(PrimaryOnlyServiceTest, RecreateInstancesFails) {
ASSERT_THROWS_CODE(
TestService::Instance::getOrCreate(_service, BSON("_id" << 0 << "state" << 0)),
DBException,
- ErrorCodes::NotMaster);
+ ErrorCodes::NotWritablePrimary);
// Allow the next stepUp to succeed.
PrimaryOnlyServiceFailRebuildingInstances.setMode(FailPoint::off);
@@ -564,5 +564,5 @@ TEST_F(PrimaryOnlyServiceTest, OpCtxInterruptedByStepdown) {
stepDown();
TestServiceHangBeforeWritingStateDoc.setMode(FailPoint::off);
- ASSERT_EQ(ErrorCodes::NotMaster, instance->getCompletionFuture().getNoThrow());
+ ASSERT_EQ(ErrorCodes::NotWritablePrimary, instance->getCompletionFuture().getNoThrow());
}
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index e35ff6199d5..a02a4d32ccb 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -431,8 +431,8 @@ public:
// Convert the error code to be more specific.
uasserted(ErrorCodes::CurrentConfigNotCommittedYet, status.reason());
} else if (status == ErrorCodes::PrimarySteppedDown) {
- // Return NotMaster since the command has no side effect yet.
- status = {ErrorCodes::NotMaster, status.reason()};
+ // Return NotWritablePrimary since the command has no side effect yet.
+ status = {ErrorCodes::NotWritablePrimary, status.reason()};
}
uassertStatusOK(status);
}
diff --git a/src/mongo/db/repl/replication_coordinator.h b/src/mongo/db/repl/replication_coordinator.h
index de32ec0783e..919f5a5e5a1 100644
--- a/src/mongo/db/repl/replication_coordinator.h
+++ b/src/mongo/db/repl/replication_coordinator.h
@@ -229,7 +229,7 @@ public:
* the data has been sufficiently replicated
* ErrorCodes::ExceededTimeLimit if the opCtx->getMaxTimeMicrosRemaining is reached before
* the data has been sufficiently replicated
- * ErrorCodes::NotMaster if the node is not Primary/Master
+ * ErrorCodes::NotWritablePrimary if the node is not a writable primary
* ErrorCodes::UnknownReplWriteConcern if the writeConcern.wMode contains a write concern
* mode that is not known
* ErrorCodes::ShutdownInProgress if we are mid-shutdown
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index fa0d3f6d812..ffc93f5fdf4 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -2296,7 +2296,7 @@ StatusWith<OpTime> ReplicationCoordinatorImpl::getLatestWriteOpTime(OperationCon
Lock::GlobalLock globalLock(opCtx, MODE_IS);
// Check if the node is primary after acquiring global IS lock.
if (!canAcceptNonLocalWrites()) {
- return {ErrorCodes::NotMaster, "Not primary so can't get latest write optime"};
+ return {ErrorCodes::NotWritablePrimary, "Not primary so can't get latest write optime"};
}
auto oplog = LocalOplogInfo::get(opCtx)->getCollection();
if (!oplog) {
@@ -2532,7 +2532,9 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
// Note this check is inherently racy - it's always possible for the node to stepdown from some
// other path before we acquire the global exclusive lock. This check is just to try to save us
// from acquiring the global X lock unnecessarily.
- uassert(ErrorCodes::NotMaster, "not primary so can't step down", getMemberState().primary());
+ uassert(ErrorCodes::NotWritablePrimary,
+ "not primary so can't step down",
+ getMemberState().primary());
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&stepdownHangBeforeRSTLEnqueue, opCtx, "stepdownHangBeforeRSTLEnqueue");
@@ -2880,7 +2882,7 @@ Status ReplicationCoordinatorImpl::checkCanServeReadsFor_UNSAFE(OperationContext
if (opCtx->inMultiDocumentTransaction()) {
if (!_readWriteAbility->canAcceptNonLocalWrites_UNSAFE()) {
- return Status(ErrorCodes::NotMaster,
+ return Status(ErrorCodes::NotWritablePrimary,
"Multi-document transactions are only allowed on replica set primaries.");
}
}
@@ -3020,7 +3022,7 @@ void ReplicationCoordinatorImpl::processReplSetGetConfig(BSONObjBuilder* result,
}
if (commitmentStatus) {
- uassert(ErrorCodes::NotMaster,
+ uassert(ErrorCodes::NotWritablePrimary,
"commitmentStatus is only supported on primary.",
_readWriteAbility->canAcceptNonLocalWrites(lock));
auto configWriteConcern = _getConfigReplicationWriteConcern();
@@ -3297,7 +3299,7 @@ Status ReplicationCoordinatorImpl::doReplSetReconfig(OperationContext* opCtx,
if (!force && !_readWriteAbility->canAcceptNonLocalWrites(lk)) {
return Status(
- ErrorCodes::NotMaster,
+ ErrorCodes::NotWritablePrimary,
str::stream()
<< "Safe reconfig is only allowed on a writable PRIMARY. Current state is "
<< _getMemberState_inlock().toString());
@@ -3469,7 +3471,7 @@ Status ReplicationCoordinatorImpl::doReplSetReconfig(OperationContext* opCtx,
{
Lock::GlobalLock globalLock(opCtx, LockMode::MODE_IX);
if (!force && !_readWriteAbility->canAcceptNonLocalWrites(opCtx)) {
- return {ErrorCodes::NotMaster, "Stepped down when persisting new config"};
+ return {ErrorCodes::NotWritablePrimary, "Stepped down when persisting new config"};
}
// Don't write no-op for internal and external force reconfig.
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
index d2fccde344a..1a4c843523a 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
@@ -1216,7 +1216,7 @@ TEST_F(ReplCoordHBV1Test, LastCommittedOpTimeOnlyUpdatesFromHeartbeatIfNotInStar
* Tests assert that stepdown via heartbeat completed, and the tests that send the new config via
* heartbeat assert that the new config was stored. Tests that send the new config with the
* replSetReconfig command don't check that it was stored; if the stepdown finished first then the
- * replSetReconfig was rejected with a NotMaster error.
+ * replSetReconfig was rejected with a NotWritablePrimary error.
*/
class HBStepdownAndReconfigTest : public ReplCoordHBV1Test {
protected:
@@ -1360,7 +1360,7 @@ Future<void> HBStepdownAndReconfigTest::startReconfigCommand() {
BSONObjBuilder result;
auto status = Status::OK();
try {
- // OK for processReplSetReconfig to return, throw NotMaster-like error, or succeed.
+ // OK for processReplSetReconfig to return, throw NotPrimary-like error, or succeed.
status = coord->processReplSetReconfig(opCtx.get(), args, &result);
} catch (const DBException&) {
status = exceptionToStatus();
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index 9d23c302774..cd1a089d177 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -90,7 +90,7 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenReconfigReceivedWhileSecondary) {
ReplSetReconfigArgs args;
args.force = false;
const auto opCtx = makeOperationContext();
- ASSERT_EQUALS(ErrorCodes::NotMaster,
+ ASSERT_EQUALS(ErrorCodes::NotWritablePrimary,
getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result));
ASSERT_TRUE(result.obj().isEmpty());
}
@@ -123,7 +123,7 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenRunningSafeReconfigWhileInDrainMod
ReplSetReconfigArgs args;
args.force = false;
auto status = getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result);
- ASSERT_EQUALS(ErrorCodes::NotMaster, status);
+ ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, status);
ASSERT_STRING_CONTAINS(status.reason(), "Safe reconfig is only allowed on a writable PRIMARY.");
ASSERT_TRUE(result.obj().isEmpty());
}
@@ -796,7 +796,7 @@ TEST_F(ReplCoordTest, NodeAcceptsConfigFromAReconfigWithForceTrueWhileNotPrimary
<< BSON("_id" << 2 << "host"
<< "node2:12345")));
const auto opCtx = makeOperationContext();
- ASSERT_EQUALS(ErrorCodes::NotMaster,
+ ASSERT_EQUALS(ErrorCodes::NotWritablePrimary,
getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result));
// forced should succeed
@@ -1469,7 +1469,7 @@ TEST_F(ReplCoordReconfigTest, StepdownShouldInterruptConfigWrite) {
respondToAllHeartbeats();
}
- ASSERT_EQ(status.code(), ErrorCodes::NotMaster);
+ ASSERT_EQ(status.code(), ErrorCodes::NotWritablePrimary);
ASSERT_EQ(status.reason(), "Stepped down when persisting new config");
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index ff034802740..f19ee0d2bd0 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -1212,8 +1212,8 @@ TEST_F(ReplCoordTest,
}
TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenSteppingDownBeforeSatisfyingAWriteConcern) {
- // Test that a thread blocked in awaitReplication will be woken up and return NotMaster
- // if the node steps down while it is waiting.
+ // Test that a thread blocked in awaitReplication will be woken up and return PrimarySteppedDown
+ // (a NotMasterError) if the node steps down while it is waiting.
assertStartSuccess(BSON("_id"
<< "mySet"
<< "version" << 2 << "members"
@@ -1917,7 +1917,7 @@ TEST_F(StepDownTestWithUnelectableNode,
ASSERT_TRUE(repl->getMemberState().secondary());
}
-TEST_F(StepDownTest, NodeReturnsNotMasterWhenAskedToStepDownAsANonPrimaryNode) {
+TEST_F(StepDownTest, NodeReturnsNotWritablePrimaryWhenAskedToStepDownAsANonPrimaryNode) {
const auto opCtx = makeOperationContext();
OpTimeWithTermOne optime1(100, 1);
@@ -1930,7 +1930,7 @@ TEST_F(StepDownTest, NodeReturnsNotMasterWhenAskedToStepDownAsANonPrimaryNode) {
ASSERT_THROWS_CODE(
getReplCoord()->stepDown(opCtx.get(), false, Milliseconds(0), Milliseconds(0)),
AssertionException,
- ErrorCodes::NotMaster);
+ ErrorCodes::NotWritablePrimary);
ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
}
diff --git a/src/mongo/db/repl/tenant_oplog_applier.cpp b/src/mongo/db/repl/tenant_oplog_applier.cpp
index 520c4f5b0f8..f55ce6a539f 100644
--- a/src/mongo/db/repl/tenant_oplog_applier.cpp
+++ b/src/mongo/db/repl/tenant_oplog_applier.cpp
@@ -439,8 +439,8 @@ Status TenantOplogApplier::_applyOplogEntryOrGroupedInserts(
OperationContext* opCtx,
const OplogEntryOrGroupedInserts& entryOrGroupedInserts,
OplogApplication::Mode oplogApplicationMode) {
- // We must ensure the opCtx uses replicated writes, because that will ensure we get a NotMaster
- // error if a stepdown occurs.
+ // We must ensure the opCtx uses replicated writes, because that will ensure we get a
+ // NotWritablePrimary error if a stepdown occurs.
invariant(opCtx->writesAreReplicated());
// Ensure context matches that of _applyOplogBatchPerWorker.
diff --git a/src/mongo/db/repl/topology_coordinator.cpp b/src/mongo/db/repl/topology_coordinator.cpp
index f99dbbda5d5..4cc30799822 100644
--- a/src/mongo/db/repl/topology_coordinator.cpp
+++ b/src/mongo/db/repl/topology_coordinator.cpp
@@ -1665,7 +1665,7 @@ TopologyCoordinator::prepareForStepDownAttempt() {
}
if (_leaderMode == LeaderMode::kNotLeader) {
- return Status{ErrorCodes::NotMaster, "This node is not a primary."};
+ return Status{ErrorCodes::NotWritablePrimary, "This node is not a primary."};
}
invariant(_leaderMode == LeaderMode::kMaster || _leaderMode == LeaderMode::kLeaderElect);
diff --git a/src/mongo/db/repl/topology_coordinator.h b/src/mongo/db/repl/topology_coordinator.h
index 0373dab95de..5f7f9fbe2c1 100644
--- a/src/mongo/db/repl/topology_coordinator.h
+++ b/src/mongo/db/repl/topology_coordinator.h
@@ -630,11 +630,11 @@ public:
* when we receive a stepdown command (which can fail if not enough secondaries are caught up)
* to ensure that we never process more than one stepdown request at a time.
* Returns OK if it is safe to continue with the stepdown attempt, or returns:
- * - NotMaster if this node is not a leader.
+ * - NotWritablePrimary if this node is not a leader.
* - ConflictingOperationInProgess if this node is already processing a stepdown request of any
* kind.
* On an OK return status also returns a function object that can be called to abort the
- * pending stepdown attempt and return this node to normal primary/master state.
+ * pending stepdown attempt and return this node to normal (writable) primary state.
*/
StatusWith<StepDownAttemptAbortFn> prepareForStepDownAttempt();
diff --git a/src/mongo/db/repl/topology_coordinator_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
index c3b36b81bda..d0625259077 100644
--- a/src/mongo/db/repl/topology_coordinator_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
@@ -2152,7 +2152,7 @@ TEST_F(TopoCoordTest, PrepareStepDownAttemptFailsIfNotLeader) {
<< "protocolVersion" << 1),
0);
getTopoCoord().changeMemberState_forTest(MemberState::RS_SECONDARY);
- Status expectedStatus(ErrorCodes::NotMaster, "This node is not a primary. ");
+ Status expectedStatus(ErrorCodes::NotWritablePrimary, "This node is not a primary. ");
ASSERT_EQUALS(expectedStatus, getTopoCoord().prepareForStepDownAttempt().getStatus());
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index 1bb71524d3a..f03ee603838 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -367,7 +367,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
<< connectionString.toString() << " as a shard");
}
if (!isMaster) {
- return {ErrorCodes::NotMaster,
+ return {ErrorCodes::NotWritablePrimary,
str::stream()
<< connectionString.toString()
<< " does not have a master. If this is a replica set, ensure that it has a"
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 0b7b5cbc1d2..40d2853424e 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -781,7 +781,7 @@ StatusWith<BSONObj> MigrationChunkClonerSourceLegacy::_callRecipient(const BSONO
responseStatus = args.response;
});
- // TODO: Update RemoteCommandTargeter on NotMaster errors.
+ // TODO: Update RemoteCommandTargeter on NotWritablePrimary errors.
if (!scheduleStatus.isOK()) {
return scheduleStatus.getStatus();
}
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 3d8ba900e92..e6e4de4ac75 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -718,7 +718,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(
// Checks that the collection's UUID matches the donor's.
auto checkUUIDsMatch = [&](const Collection* collection) {
- uassert(ErrorCodes::NotMaster,
+ uassert(ErrorCodes::NotWritablePrimary,
str::stream() << "Unable to create collection " << nss.ns()
<< " because the node is not primary",
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss));
diff --git a/src/mongo/db/s/set_shard_version_command.cpp b/src/mongo/db/s/set_shard_version_command.cpp
index 8948c5c5725..f8a321aea1a 100644
--- a/src/mongo/db/s/set_shard_version_command.cpp
+++ b/src/mongo/db/s/set_shard_version_command.cpp
@@ -143,7 +143,7 @@ public:
autoDb.emplace(opCtx, nss.db(), MODE_IS);
// Slave nodes cannot support set shard version
- uassert(ErrorCodes::NotMaster,
+ uassert(ErrorCodes::NotWritablePrimary,
str::stream() << "setShardVersion with collection version is only supported "
"against primary nodes, but it was received for namespace "
<< nss.ns(),
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index a4f9a02e009..922e9121690 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -501,7 +501,7 @@ void ShardServerCatalogCacheLoader::waitForCollectionFlush(OperationContext* opC
boost::optional<uint64_t> taskNumToWait;
while (true) {
- uassert(ErrorCodes::NotMaster,
+ uassert(ErrorCodes::NotWritablePrimary,
str::stream() << "Unable to wait for collection metadata flush for " << nss.ns()
<< " because the node's replication role changed.",
_role == ReplicaSetRole::Primary && _term == initialTerm);
@@ -552,7 +552,7 @@ void ShardServerCatalogCacheLoader::waitForDatabaseFlush(OperationContext* opCtx
boost::optional<uint64_t> taskNumToWait;
while (true) {
- uassert(ErrorCodes::NotMaster,
+ uassert(ErrorCodes::NotWritablePrimary,
str::stream() << "Unable to wait for database metadata flush for "
<< dbName.toString()
<< " because the node's replication role changed.",
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp b/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
index e46ab94599f..4278c3367ff 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
@@ -250,7 +250,7 @@ TEST(TransactionCoordinatorFuturesUtilTest,
promises[0].setError(errorStatus1);
ASSERT(!resultFuture.isReady());
- Status errorStatus2{ErrorCodes::NotMaster, "dummy error"};
+ Status errorStatus2{ErrorCodes::NotWritablePrimary, "dummy error"};
promises[1].setError(errorStatus2);
ASSERT(!resultFuture.isReady());
@@ -642,11 +642,11 @@ TEST_F(AsyncWorkSchedulerTest, DestroyingSchedulerCapturedInFutureCallback) {
future.get();
}
-TEST_F(AsyncWorkSchedulerTest, NotifiesRemoteCommandTargeter_CmdResponseNotMasterError) {
+TEST_F(AsyncWorkSchedulerTest, NotifiesRemoteCommandTargeter_CmdResponseNotWritablePrimaryError) {
ASSERT_EQ(0UL, getShardTargeterMock(kShardIds[1])->getAndClearMarkedDownHosts().size());
scheduleAWSRemoteCommandWithResponse(kShardIds[1],
- BSON("ok" << 0 << "code" << ErrorCodes::NotMaster
+ BSON("ok" << 0 << "code" << ErrorCodes::NotWritablePrimary
<< "errmsg"
<< "dummy"));
diff --git a/src/mongo/db/s/transaction_coordinator_service.cpp b/src/mongo/db/s/transaction_coordinator_service.cpp
index a6392223182..98f0944d510 100644
--- a/src/mongo/db/s/transaction_coordinator_service.cpp
+++ b/src/mongo/db/s/transaction_coordinator_service.cpp
@@ -93,8 +93,8 @@ void TransactionCoordinatorService::reportCoordinators(OperationContext* opCtx,
std::shared_ptr<CatalogAndScheduler> cas;
try {
cas = _getCatalogAndScheduler(opCtx);
- } catch (ExceptionFor<ErrorCodes::NotMaster>&) {
- // If we are not master, don't include any output for transaction coordinators in
+ } catch (ExceptionFor<ErrorCodes::NotWritablePrimary>&) {
+ // If we are not primary, don't include any output for transaction coordinators in
// the curOp command.
return;
}
@@ -270,8 +270,9 @@ void TransactionCoordinatorService::onShardingInitialization(OperationContext* o
std::shared_ptr<TransactionCoordinatorService::CatalogAndScheduler>
TransactionCoordinatorService::_getCatalogAndScheduler(OperationContext* opCtx) {
stdx::unique_lock<Latch> ul(_mutex);
- uassert(
- ErrorCodes::NotMaster, "Transaction coordinator is not a primary", _catalogAndScheduler);
+ uassert(ErrorCodes::NotWritablePrimary,
+ "Transaction coordinator is not a primary",
+ _catalogAndScheduler);
return _catalogAndScheduler;
}
diff --git a/src/mongo/db/s/transaction_coordinator_service.h b/src/mongo/db/s/transaction_coordinator_service.h
index a4fe1ce16f9..f50a511a8a3 100644
--- a/src/mongo/db/s/transaction_coordinator_service.h
+++ b/src/mongo/db/s/transaction_coordinator_service.h
@@ -135,8 +135,8 @@ private:
};
/**
- * Returns the current catalog + scheduler if stepUp has started, otherwise throws a NotMaster
- * exception.
+ * Returns the current catalog + scheduler if stepUp has started, otherwise throws a
+ * NotWritablePrimary exception.
*/
std::shared_ptr<CatalogAndScheduler> _getCatalogAndScheduler(OperationContext* opCtx);
diff --git a/src/mongo/db/s/transaction_coordinator_service_test.cpp b/src/mongo/db/s/transaction_coordinator_service_test.cpp
index 215d101ba0a..e45eaa90ba7 100644
--- a/src/mongo/db/s/transaction_coordinator_service_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_service_test.cpp
@@ -198,17 +198,17 @@ TEST_F(TransactionCoordinatorServiceStepUpStepDownTest, OperationsFailBeforeStep
ASSERT_THROWS_CODE(service()->createCoordinator(
operationContext(), makeLogicalSessionIdForTest(), 0, kCommitDeadline),
AssertionException,
- ErrorCodes::NotMaster);
+ ErrorCodes::NotWritablePrimary);
ASSERT_THROWS_CODE(service()->coordinateCommit(
operationContext(), makeLogicalSessionIdForTest(), 0, kTwoShardIdSet),
AssertionException,
- ErrorCodes::NotMaster);
+ ErrorCodes::NotWritablePrimary);
ASSERT_THROWS_CODE(
service()->recoverCommit(operationContext(), makeLogicalSessionIdForTest(), 0),
AssertionException,
- ErrorCodes::NotMaster);
+ ErrorCodes::NotWritablePrimary);
}
TEST_F(TransactionCoordinatorServiceStepUpStepDownTest, OperationsBlockBeforeStepUpCompletes) {
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 7493feef5b0..61d8bd3b985 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -474,7 +474,7 @@ void appendErrorLabelsAndTopologyVersion(OperationContext* opCtx,
(wcCode && ErrorCodes::isA<ErrorCategory::ShutdownError>(*wcCode));
const auto replCoord = repl::ReplicationCoordinator::get(opCtx);
- // NotMaster errors always include a topologyVersion, since we increment topologyVersion on
+ // NotPrimary errors always include a topologyVersion, since we increment topologyVersion on
// stepdown. ShutdownErrors only include a topologyVersion if the server is in quiesce mode,
// since we only increment the topologyVersion at shutdown and alert waiting isMaster commands
// if the server enters quiesce mode.
@@ -1059,9 +1059,9 @@ void execCommandDatabase(OperationContext* opCtx,
}
if (MONGO_unlikely(respondWithNotPrimaryInCommandDispatch.shouldFail())) {
- uassert(ErrorCodes::NotMaster, "not primary", canRunHere);
+ uassert(ErrorCodes::NotWritablePrimary, "not primary", canRunHere);
} else {
- uassert(ErrorCodes::NotMaster, "not master", canRunHere);
+ uassert(ErrorCodes::NotWritablePrimary, "not master", canRunHere);
}
if (!command->maintenanceOk() &&
@@ -1428,7 +1428,7 @@ DbResponse receivedCommands(OperationContext* opCtx,
if (LastError::get(opCtx->getClient()).hadNotMasterError()) {
if (c && c->getReadWriteType() == Command::ReadWriteType::kWrite)
notMasterUnackWrites.increment();
- uasserted(ErrorCodes::NotMaster,
+ uasserted(ErrorCodes::NotWritablePrimary,
str::stream()
<< "Not-master error while processing '" << request.getCommandName()
<< "' operation on '" << request.getDatabase() << "' database via "
@@ -1779,12 +1779,13 @@ Future<DbResponse> ServiceEntryPointCommon::handleRequest(OperationContext* opCt
"error"_attr = redact(ue));
debug.errInfo = ue.toStatus();
}
- // A NotMaster error can be set either within receivedInsert/receivedUpdate/receivedDelete
- // or within the AssertionException handler above. Either way, we want to throw an
- // exception here, which will cause the client to be disconnected.
+ // A NotWritablePrimary error can be set either within
+ // receivedInsert/receivedUpdate/receivedDelete or within the AssertionException handler
+ // above. Either way, we want to throw an exception here, which will cause the client to be
+ // disconnected.
if (LastError::get(opCtx->getClient()).hadNotMasterError()) {
notMasterLegacyUnackWrites.increment();
- uasserted(ErrorCodes::NotMaster,
+ uasserted(ErrorCodes::NotWritablePrimary,
str::stream()
<< "Not-master error while processing '" << networkOpToString(op)
<< "' operation on '" << nsString << "' namespace via legacy "
diff --git a/src/mongo/db/sessions_collection_rs.cpp b/src/mongo/db/sessions_collection_rs.cpp
index 664f34c0c8b..fc7f8977717 100644
--- a/src/mongo/db/sessions_collection_rs.cpp
+++ b/src/mongo/db/sessions_collection_rs.cpp
@@ -102,8 +102,8 @@ auto SessionsCollectionRS::_dispatch(const NamespaceString& ns,
// There is a window here where we may transition from Primary to Secondary after we release
// the locks we take in _isStandaloneOrPrimary(). In this case, the callback we run below
- // may throw a NotMaster error, or a stale read. However, this is preferable to running the
- // callback while we hold locks, since that can lead to a deadlock.
+ // may throw a NotWritablePrimary error, or a stale read. However, this is preferable to running
+ // the callback while we hold locks, since that can lead to a deadlock.
auto conn = _makePrimaryConnection(opCtx);
DBClientBase* client = conn->get();
diff --git a/src/mongo/db/system_index.cpp b/src/mongo/db/system_index.cpp
index 9234ad7769b..8bd0cfbd422 100644
--- a/src/mongo/db/system_index.cpp
+++ b/src/mongo/db/system_index.cpp
@@ -94,7 +94,7 @@ void generateSystemIndexForExistingCollection(OperationContext* opCtx,
const IndexSpec& spec) {
// Do not try to generate any system indexes on a secondary.
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
- uassert(ErrorCodes::NotMaster,
+ uassert(ErrorCodes::NotWritablePrimary,
"Not primary while creating authorization index",
replCoord->getReplicationMode() != repl::ReplicationCoordinator::modeReplSet ||
replCoord->canAcceptWritesForDatabase(opCtx, ns.db()));
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index d3772edb7db..742bfd087b4 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -329,7 +329,7 @@ void TransactionParticipant::performNoopWrite(OperationContext* opCtx, StringDat
{
AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite);
- uassert(ErrorCodes::NotMaster,
+ uassert(ErrorCodes::NotWritablePrimary,
"Not primary when performing noop write for {}"_format(msg),
replCoord->canAcceptWritesForDatabase(opCtx, "admin"));
@@ -495,7 +495,7 @@ void TransactionParticipant::Participant::beginOrContinue(OperationContext* opCt
repl::ReplicationStateTransitionLockGuard rstl(opCtx, MODE_IX);
if (opCtx->writesAreReplicated()) {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
- uassert(ErrorCodes::NotMaster,
+ uassert(ErrorCodes::NotWritablePrimary,
"Not primary so we cannot begin or continue a transaction",
replCoord->canAcceptWritesForDatabase(opCtx, "admin"));
// Disallow multi-statement transactions on shard servers that have
@@ -1355,7 +1355,7 @@ void TransactionParticipant::Participant::commitPreparedTransaction(
const auto replCoord = repl::ReplicationCoordinator::get(opCtx);
if (opCtx->writesAreReplicated()) {
- uassert(ErrorCodes::NotMaster,
+ uassert(ErrorCodes::NotWritablePrimary,
"Not primary so we cannot commit a prepared transaction",
replCoord->canAcceptWritesForDatabase(opCtx, "admin"));
}
@@ -1543,7 +1543,7 @@ void TransactionParticipant::Participant::_abortActivePreparedTransaction(Operat
if (opCtx->writesAreReplicated()) {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
- uassert(ErrorCodes::NotMaster,
+ uassert(ErrorCodes::NotWritablePrimary,
"Not primary so we cannot abort a prepared transaction",
replCoord->canAcceptWritesForDatabase(opCtx, "admin"));
}
diff --git a/src/mongo/db/transaction_participant.h b/src/mongo/db/transaction_participant.h
index 1c3164a51fc..f898b21c112 100644
--- a/src/mongo/db/transaction_participant.h
+++ b/src/mongo/db/transaction_participant.h
@@ -405,7 +405,7 @@ public:
* currently active one or the last one which committed
* - PreparedTransactionInProgress - if the transaction is in the prepared state and a new
* transaction or retryable write is attempted
- * - NotMaster - if the node is not a primary when this method is called.
+ * - NotWritablePrimary - if the node is not a primary when this method is called.
* - IncompleteTransactionHistory - if an attempt is made to begin a retryable write for a
* TransactionParticipant that is not in retryable write mode. This is expected behavior if
* a retryable write has been upgraded to a transaction by the server, which can happen e.g.
diff --git a/src/mongo/db/transaction_participant_test.cpp b/src/mongo/db/transaction_participant_test.cpp
index 665ade204de..c2c18a05c8f 100644
--- a/src/mongo/db/transaction_participant_test.cpp
+++ b/src/mongo/db/transaction_participant_test.cpp
@@ -942,8 +942,9 @@ TEST_F(TxnParticipantTest, StepDownDuringPreparedAbortFails) {
ASSERT_OK(repl::ReplicationCoordinator::get(opCtx())->setFollowerMode(
repl::MemberState::RS_SECONDARY));
- ASSERT_THROWS_CODE(
- txnParticipant.abortTransaction(opCtx()), AssertionException, ErrorCodes::NotMaster);
+ ASSERT_THROWS_CODE(txnParticipant.abortTransaction(opCtx()),
+ AssertionException,
+ ErrorCodes::NotWritablePrimary);
}
TEST_F(TxnParticipantTest, StepDownDuringPreparedCommitFails) {
@@ -958,7 +959,7 @@ TEST_F(TxnParticipantTest, StepDownDuringPreparedCommitFails) {
repl::MemberState::RS_SECONDARY));
ASSERT_THROWS_CODE(txnParticipant.commitPreparedTransaction(opCtx(), commitTS, {}),
AssertionException,
- ErrorCodes::NotMaster);
+ ErrorCodes::NotWritablePrimary);
}
TEST_F(TxnParticipantTest, StepDownDuringPreparedAbortReleasesRSTL) {
@@ -998,8 +999,9 @@ TEST_F(TxnParticipantTest, StepDownDuringPreparedAbortReleasesRSTL) {
MODE_NONE);
ASSERT_OK(repl::ReplicationCoordinator::get(opCtx())->setFollowerMode(
repl::MemberState::RS_SECONDARY));
- ASSERT_THROWS_CODE(
- txnParticipant.abortTransaction(opCtx()), AssertionException, ErrorCodes::NotMaster);
+ ASSERT_THROWS_CODE(txnParticipant.abortTransaction(opCtx()),
+ AssertionException,
+ ErrorCodes::NotWritablePrimary);
ASSERT_EQ(opCtx()->lockState()->getLockMode(resourceIdReplicationStateTransitionLock),
MODE_NONE);
@@ -1053,7 +1055,7 @@ TEST_F(TxnParticipantTest, StepDownDuringPreparedCommitReleasesRSTL) {
ASSERT_THROWS_CODE(
txnParticipant.commitPreparedTransaction(opCtx(), prepareTimestamp, boost::none),
AssertionException,
- ErrorCodes::NotMaster);
+ ErrorCodes::NotWritablePrimary);
ASSERT_EQ(opCtx()->lockState()->getLockMode(resourceIdReplicationStateTransitionLock),
MODE_NONE);
@@ -1111,7 +1113,7 @@ TEST_F(TxnParticipantTest, CannotStartNewTransactionIfNotPrimary) {
ASSERT_THROWS_CODE(
txnParticipant.beginOrContinue(opCtx(), *opCtx()->getTxnNumber(), false, true),
AssertionException,
- ErrorCodes::NotMaster);
+ ErrorCodes::NotWritablePrimary);
}
TEST_F(TxnParticipantTest, CannotStartRetryableWriteIfNotPrimary) {
@@ -1125,7 +1127,7 @@ TEST_F(TxnParticipantTest, CannotStartRetryableWriteIfNotPrimary) {
ASSERT_THROWS_CODE(
txnParticipant.beginOrContinue(opCtx(), *opCtx()->getTxnNumber(), boost::none, true),
AssertionException,
- ErrorCodes::NotMaster);
+ ErrorCodes::NotWritablePrimary);
}
TEST_F(TxnParticipantTest, CannotContinueTransactionIfNotPrimary) {
@@ -1142,7 +1144,7 @@ TEST_F(TxnParticipantTest, CannotContinueTransactionIfNotPrimary) {
ASSERT_THROWS_CODE(
txnParticipant.beginOrContinue(opCtx(), *opCtx()->getTxnNumber(), false, false),
AssertionException,
- ErrorCodes::NotMaster);
+ ErrorCodes::NotWritablePrimary);
}
TEST_F(TxnParticipantTest, OlderTransactionFailsOnSessionWithNewerTransaction) {
diff --git a/src/mongo/db/write_concern.h b/src/mongo/db/write_concern.h
index 680e2e4756d..dbaebabffd2 100644
--- a/src/mongo/db/write_concern.h
+++ b/src/mongo/db/write_concern.h
@@ -100,7 +100,7 @@ struct WriteConcernResult {
* if this opTime.isNull() no replication-related write concern options will be enforced.
*
* Returns result of the write concern if successful.
- * Returns NotMaster if the host steps down while waiting for replication
+ * Returns NotWritablePrimary if the host steps down while waiting for replication
* Returns UnknownReplWriteConcern if the wMode specified was not enforceable
*/
Status waitForWriteConcern(OperationContext* opCtx,
diff --git a/src/mongo/rpc/op_msg_integration_test.cpp b/src/mongo/rpc/op_msg_integration_test.cpp
index d1b7484548b..a2a380c4f9a 100644
--- a/src/mongo/rpc/op_msg_integration_test.cpp
+++ b/src/mongo/rpc/op_msg_integration_test.cpp
@@ -192,7 +192,7 @@ TEST(OpMsg, DocumentSequenceMaxWriteBatchWorks) {
conn->dropCollection("test.collection");
}
-TEST(OpMsg, CloseConnectionOnFireAndForgetNotMasterError) {
+TEST(OpMsg, CloseConnectionOnFireAndForgetNotWritablePrimaryError) {
const auto connStr = unittest::getFixtureConnectionString();
// This test only works against a replica set.
@@ -219,14 +219,14 @@ TEST(OpMsg, CloseConnectionOnFireAndForgetNotMasterError) {
})"))
.serialize();
- // Round-trip command fails with NotMaster error. Note that this failure is in command
- // dispatch which ignores w:0.
+ // Round-trip command fails with NotWritablePrimary error. Note that this failure is in
+ // command dispatch which ignores w:0.
Message reply;
ASSERT(conn.call(request, reply, /*assertOK*/ true, nullptr));
ASSERT_EQ(
getStatusFromCommandResult(
conn.parseCommandReplyMessage(conn.getServerAddress(), reply)->getCommandReply()),
- ErrorCodes::NotMaster);
+ ErrorCodes::NotWritablePrimary);
// Fire-and-forget closes connection when it sees that error. Note that this is using call()
// rather than say() so that we get an error back when the connection is closed. Normally
@@ -893,7 +893,8 @@ TEST(OpMsg, ExhaustIsMasterMetricDecrementsOnNewOpAfterTerminatingExhaustStream)
<< "failCommand"
<< "mode" << BSON("times" << 1) << "data"
<< BSON("threadName" << threadName << "errorCode"
- << ErrorCodes::NotMaster << "failCommands"
+ << ErrorCodes::NotWritablePrimary
+ << "failCommands"
<< BSON_ARRAY("isMaster")));
auto response = conn2->runCommand(OpMsgRequest::fromDBAndBody("admin", failPointObj));
ASSERT_OK(getStatusFromCommandResult(response->getCommandReply()));
@@ -983,7 +984,8 @@ TEST(OpMsg, ExhaustIsMasterMetricOnNewExhaustIsMasterAfterTerminatingExhaustStre
<< "failCommand"
<< "mode" << BSON("times" << 1) << "data"
<< BSON("threadName" << threadName << "errorCode"
- << ErrorCodes::NotMaster << "failCommands"
+ << ErrorCodes::NotWritablePrimary
+ << "failCommands"
<< BSON_ARRAY("isMaster")));
auto response = conn2->runCommand(OpMsgRequest::fromDBAndBody("admin", failPointObj));
ASSERT_OK(getStatusFromCommandResult(response->getCommandReply()));
diff --git a/src/mongo/s/async_requests_sender.cpp b/src/mongo/s/async_requests_sender.cpp
index 7cca30f0b99..04f5c51039d 100644
--- a/src/mongo/s/async_requests_sender.cpp
+++ b/src/mongo/s/async_requests_sender.cpp
@@ -54,7 +54,7 @@ namespace mongo {
namespace {
-// Maximum number of retries for network and replication notMaster errors (per host).
+// Maximum number of retries for network and replication NotPrimary errors (per host).
const int kMaxNumFailedHostRetryAttempts = 3;
MONGO_FAIL_POINT_DEFINE(hangBeforeSchedulingRemoteCommand);
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp b/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
index 8a2622f4790..56b925be417 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
@@ -147,7 +147,7 @@ StatusWith<OID> extractElectionId(const BSONObj& responseObj) {
<< hostContacted};
}
- return {ErrorCodes::NotMaster, "only primary can have electionId"};
+ return {ErrorCodes::NotWritablePrimary, "only primary can have electionId"};
}
return {ErrorCodes::UnsupportedFormat, electionIdStatus.reason()};
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp b/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
index 56f25d713c8..619217a5259 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
@@ -468,7 +468,7 @@ TEST_F(DistLockCatalogTest, GrabLockWriteConcernError) {
auto status = distLockCatalog()
->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
.getStatus();
- ASSERT_EQUALS(ErrorCodes::NotMaster, status.code());
+ ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1271,7 +1271,7 @@ TEST_F(DistLockCatalogTest, GetServerNoGLEStats) {
TEST_F(DistLockCatalogTest, GetServerNoElectionId) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
- ASSERT_EQUALS(ErrorCodes::NotMaster, status.code());
+ ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, status.code());
ASSERT_FALSE(status.reason().empty());
});
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.cpp b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
index 7c0131f987a..d0017bcc630 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
@@ -149,7 +149,7 @@ void ReplSetDistLockManager::doTask() {
auto opCtx = cc().makeOperationContext();
auto pingStatus = _catalog->ping(opCtx.get(), _processID, Date_t::now());
- if (!pingStatus.isOK() && pingStatus != ErrorCodes::NotMaster) {
+ if (!pingStatus.isOK() && pingStatus != ErrorCodes::NotWritablePrimary) {
LOGV2_WARNING(22668,
"Pinging failed for distributed lock pinger caused by {error}",
"Pinging failed for distributed lock pinger",
@@ -191,7 +191,7 @@ void ReplSetDistLockManager::doTask() {
"lockSessionId"_attr = toUnlock.first,
"error"_attr = unlockStatus);
// Queue another attempt, unless the problem was no longer being primary.
- if (unlockStatus != ErrorCodes::NotMaster) {
+ if (unlockStatus != ErrorCodes::NotWritablePrimary) {
queueUnlock(toUnlock.first, toUnlock.second);
}
} else {
@@ -238,7 +238,7 @@ StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* opCtx,
Timer timer(_serviceContext->getTickSource());
auto serverInfoStatus = _catalog->getServerInfo(opCtx);
if (!serverInfoStatus.isOK()) {
- if (serverInfoStatus.getStatus() == ErrorCodes::NotMaster) {
+ if (serverInfoStatus.getStatus() == ErrorCodes::NotWritablePrimary) {
return false;
}
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp b/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
index c71bdd93dd0..64020eeb9f4 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
@@ -1236,7 +1236,7 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfElectionIdChanged) {
/**
* 1. Try to grab lock multiple times.
- * 2. For each attempt, attempting to check the ping document results in NotMaster error.
+ * 2. For each attempt, attempting to check the ping document results in NotWritablePrimary error.
* 3. All of the previous attempt should result in lock busy.
* 4. Try to grab lock again when the ping was not updated and lock expiration has elapsed.
*/
@@ -1285,7 +1285,7 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfNoMaster) {
} else {
getMockCatalog()->expectGetServerInfo(
[&getServerInfoCallCount]() { getServerInfoCallCount++; },
- {ErrorCodes::NotMaster, "not master"});
+ {ErrorCodes::NotWritablePrimary, "not master"});
}
auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index a0f0901952c..3fce1e4a9cf 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -338,7 +338,7 @@ public:
* Updates a single document in the specified namespace on the config server. Must only be used
* for updates to the 'config' database.
*
- * This method retries the operation on NotMaster or network errors, so it should only be used
+ * This method retries the operation on NotPrimary or network errors, so it should only be used
* with modifications which are idempotent.
*
* Returns non-OK status if the command failed to run for some reason. If the command was
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index 7d6f6c4f119..1ae6849018d 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -174,7 +174,7 @@ private:
* useMultiUpdate is true) in the specified namespace on the config server. Must only be used
* for updates to the 'config' database.
*
- * This method retries the operation on NotMaster or network errors, so it should only be used
+ * This method retries the operation on NotPrimary or network errors, so it should only be used
* with modifications which are idempotent.
*
* Returns non-OK status if the command failed to run for some reason. If the command was
diff --git a/src/mongo/s/catalog/sharding_catalog_test.cpp b/src/mongo/s/catalog/sharding_catalog_test.cpp
index 65bc0c746ce..e68477ee32a 100644
--- a/src/mongo/s/catalog/sharding_catalog_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_test.cpp
@@ -252,14 +252,15 @@ TEST_F(ShardingCatalogClientTest, GetDatabaseStaleSecondaryRetryNoPrimary) {
auto future = launchAsync([this] {
auto dbResult = catalogClient()->getDatabase(
operationContext(), "NonExistent", repl::ReadConcernLevel::kMajorityReadConcern);
- ASSERT_EQ(dbResult.getStatus(), ErrorCodes::NotMaster);
+ ASSERT_EQ(dbResult.getStatus(), ErrorCodes::NotWritablePrimary);
});
// Return empty result set as if the database wasn't found
onFindCommand([this, &testHost](const RemoteCommandRequest& request) {
ASSERT_EQUALS(testHost, request.target);
- // Make it so when it attempts to retarget and retry it will get a NotMaster error.
- configTargeter()->setFindHostReturnValue(Status(ErrorCodes::NotMaster, "no config master"));
+ // Make it so when it attempts to retarget and retry it will get a NotWritablePrimary error.
+ configTargeter()->setFindHostReturnValue(
+ Status(ErrorCodes::NotWritablePrimary, "no config master"));
return vector<BSONObj>{};
});
@@ -687,7 +688,7 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandRewriteWriteConce
future.default_timed_get();
}
-TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotMaster) {
+TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotWritablePrimary) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
@@ -699,14 +700,14 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotMaster) {
<< "test"),
&responseBuilder);
ASSERT_NOT_OK(status);
- ASSERT_EQUALS(ErrorCodes::NotMaster, status);
+ ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, status);
});
for (int i = 0; i < 3; ++i) {
onCommand([](const RemoteCommandRequest& request) {
BSONObjBuilder responseBuilder;
- CommandHelpers::appendCommandStatusNoThrow(responseBuilder,
- Status(ErrorCodes::NotMaster, "not master"));
+ CommandHelpers::appendCommandStatusNoThrow(
+ responseBuilder, Status(ErrorCodes::NotWritablePrimary, "not master"));
return responseBuilder.obj();
});
}
@@ -715,7 +716,7 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotMaster) {
future.default_timed_get();
}
-TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotMasterRetrySuccess) {
+TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotWritablePrimaryRetrySuccess) {
HostAndPort host1("TestHost1");
HostAndPort host2("TestHost2");
@@ -736,11 +737,11 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotMasterRetrySuc
ASSERT_EQUALS(host1, request.target);
BSONObjBuilder responseBuilder;
- CommandHelpers::appendCommandStatusNoThrow(responseBuilder,
- Status(ErrorCodes::NotMaster, "not master"));
+ CommandHelpers::appendCommandStatusNoThrow(
+ responseBuilder, Status(ErrorCodes::NotWritablePrimary, "not master"));
// Ensure that when the catalog manager tries to retarget after getting the
- // NotMaster response, it will get back a new target.
+ // NotWritablePrimary response, it will get back a new target.
configTargeter()->setFindHostReturnValue(host2);
return responseBuilder.obj();
});
diff --git a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
index 5fe063a6ce4..3b71e0eb3ad 100644
--- a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
@@ -350,7 +350,7 @@ TEST_F(UpdateRetryTest, Success) {
future.default_timed_get();
}
-TEST_F(UpdateRetryTest, NotMasterErrorReturnedPersistently) {
+TEST_F(UpdateRetryTest, NotWritablePrimaryErrorReturnedPersistently) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
BSONObj objToUpdate = BSON("_id" << 1 << "Value"
@@ -366,13 +366,14 @@ TEST_F(UpdateRetryTest, NotMasterErrorReturnedPersistently) {
updateExpr,
false,
ShardingCatalogClient::kMajorityWriteConcern);
- ASSERT_EQUALS(ErrorCodes::NotMaster, status);
+ ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, status);
});
for (int i = 0; i < 3; ++i) {
onCommand([](const RemoteCommandRequest& request) {
BSONObjBuilder bb;
- CommandHelpers::appendCommandStatusNoThrow(bb, {ErrorCodes::NotMaster, "not master"});
+ CommandHelpers::appendCommandStatusNoThrow(
+ bb, {ErrorCodes::NotWritablePrimary, "not master"});
return bb.obj();
});
}
@@ -380,8 +381,8 @@ TEST_F(UpdateRetryTest, NotMasterErrorReturnedPersistently) {
future.default_timed_get();
}
-TEST_F(UpdateRetryTest, NotMasterReturnedFromTargeter) {
- configTargeter()->setFindHostReturnValue(Status(ErrorCodes::NotMaster, "not master"));
+TEST_F(UpdateRetryTest, NotWritablePrimaryReturnedFromTargeter) {
+ configTargeter()->setFindHostReturnValue(Status(ErrorCodes::NotWritablePrimary, "not master"));
BSONObj objToUpdate = BSON("_id" << 1 << "Value"
<< "TestValue");
@@ -396,13 +397,13 @@ TEST_F(UpdateRetryTest, NotMasterReturnedFromTargeter) {
updateExpr,
false,
ShardingCatalogClient::kMajorityWriteConcern);
- ASSERT_EQUALS(ErrorCodes::NotMaster, status);
+ ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, status);
});
future.default_timed_get();
}
-TEST_F(UpdateRetryTest, NotMasterOnceSuccessAfterRetry) {
+TEST_F(UpdateRetryTest, NotWritablePrimaryOnceSuccessAfterRetry) {
HostAndPort host1("TestHost1");
HostAndPort host2("TestHost2");
configTargeter()->setFindHostReturnValue(host1);
@@ -433,11 +434,12 @@ TEST_F(UpdateRetryTest, NotMasterOnceSuccessAfterRetry) {
ASSERT_EQUALS(host1, request.target);
// Ensure that when the catalog manager tries to retarget after getting the
- // NotMaster response, it will get back a new target.
+ // NotWritablePrimary response, it will get back a new target.
configTargeter()->setFindHostReturnValue(host2);
BSONObjBuilder bb;
- CommandHelpers::appendCommandStatusNoThrow(bb, {ErrorCodes::NotMaster, "not master"});
+ CommandHelpers::appendCommandStatusNoThrow(bb,
+ {ErrorCodes::NotWritablePrimary, "not master"});
return bb.obj();
});
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index 4f68d1a8c4c..486788c57dd 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -164,7 +164,7 @@ private:
/**
* Maintains the set of all shards known to the instance and their connections and exposes
* functionality to run commands against shards. All commands which this registry executes are
- * retried on NotMaster class of errors and in addition all read commands are retried on network
+ * retried on NotPrimary class of errors and in addition all read commands are retried on network
* errors automatically as well.
*/
class ShardRegistry {
diff --git a/src/mongo/s/commands/batch_downconvert.cpp b/src/mongo/s/commands/batch_downconvert.cpp
index 8aa318f484b..4f4328fe4c6 100644
--- a/src/mongo/s/commands/batch_downconvert.cpp
+++ b/src/mongo/s/commands/batch_downconvert.cpp
@@ -80,7 +80,8 @@ Status extractGLEErrors(const BSONObj& gleResponse, GLEErrors* errors) {
|| code == 16805 /* replicatedToNum no longer primary */
|| code == 14830 /* gle wmode changed / invalid */
// 2.6 Error codes
- || code == ErrorCodes::NotMaster || code == ErrorCodes::UnknownReplWriteConcern ||
+ || code == ErrorCodes::NotWritablePrimary ||
+ code == ErrorCodes::UnknownReplWriteConcern ||
code == ErrorCodes::WriteConcernFailed || code == ErrorCodes::PrimarySteppedDown) {
// Write concern errors that get returned as regular errors (result may not be ok: 1.0)
errors->wcError.reset(new WriteConcernErrorDetail());
diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp
index 635730c82ab..2ad05010afb 100644
--- a/src/mongo/s/query/async_results_merger.cpp
+++ b/src/mongo/s/query/async_results_merger.cpp
@@ -51,7 +51,7 @@ const BSONObj AsyncResultsMerger::kWholeSortKeySortPattern = BSON(kSortKeyField
namespace {
-// Maximum number of retries for network and replication notMaster errors (per host).
+// Maximum number of retries for network and replication NotPrimary errors (per host).
const int kMaxNumFailedHostRetryAttempts = 3;
/**
diff --git a/src/mongo/s/sessions_collection_sharded_test.cpp b/src/mongo/s/sessions_collection_sharded_test.cpp
index 0bbfc0ea2c0..89081e468c6 100644
--- a/src/mongo/s/sessions_collection_sharded_test.cpp
+++ b/src/mongo/s/sessions_collection_sharded_test.cpp
@@ -138,13 +138,13 @@ TEST_F(SessionsCollectionShardedTest, RefreshOneSessionWriteErrTest) {
response.addToErrDetails([&] {
WriteErrorDetail* errDetail = new WriteErrorDetail();
errDetail->setIndex(0);
- errDetail->setStatus({ErrorCodes::NotMaster, "not master"});
+ errDetail->setStatus({ErrorCodes::NotWritablePrimary, "not master"});
return errDetail;
}());
return response.toBSON();
});
- ASSERT_THROWS_CODE(future.default_timed_get(), DBException, ErrorCodes::NotMaster);
+ ASSERT_THROWS_CODE(future.default_timed_get(), DBException, ErrorCodes::NotWritablePrimary);
}
TEST_F(SessionsCollectionShardedTest, RemoveOneSessionOKTest) {
@@ -193,13 +193,13 @@ TEST_F(SessionsCollectionShardedTest, RemoveOneSessionWriteErrTest) {
response.addToErrDetails([&] {
WriteErrorDetail* errDetail = new WriteErrorDetail();
errDetail->setIndex(0);
- errDetail->setStatus({ErrorCodes::NotMaster, "not master"});
+ errDetail->setStatus({ErrorCodes::NotWritablePrimary, "not master"});
return errDetail;
}());
return response.toBSON();
});
- ASSERT_THROWS_CODE(future.default_timed_get(), DBException, ErrorCodes::NotMaster);
+ ASSERT_THROWS_CODE(future.default_timed_get(), DBException, ErrorCodes::NotWritablePrimary);
}
} // namespace
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index 5f74ae91cd0..bdadf82b80d 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -1452,7 +1452,7 @@ TEST_F(BatchWriteExecTest, RetryableErrorNoTxnNumber) {
request.setWriteConcern(BSONObj());
BatchedCommandResponse retryableErrResponse;
- retryableErrResponse.setStatus({ErrorCodes::NotMaster, "mock retryable error"});
+ retryableErrResponse.setStatus({ErrorCodes::NotWritablePrimary, "mock retryable error"});
auto future = launchAsync([&] {
BatchedCommandResponse response;
@@ -1494,7 +1494,7 @@ TEST_F(BatchWriteExecTest, RetryableErrorTxnNumber) {
operationContext()->setTxnNumber(5);
BatchedCommandResponse retryableErrResponse;
- retryableErrResponse.setStatus({ErrorCodes::NotMaster, "mock retryable error"});
+ retryableErrResponse.setStatus({ErrorCodes::NotWritablePrimary, "mock retryable error"});
auto future = launchAsync([&] {
BatchedCommandResponse response;
diff --git a/src/mongo/shell/utils.js b/src/mongo/shell/utils.js
index dffcd5dd033..61dc94082e2 100644
--- a/src/mongo/shell/utils.js
+++ b/src/mongo/shell/utils.js
@@ -95,7 +95,7 @@ function isRetryableError(error) {
"HostNotFound",
"NetworkTimeout",
"SocketException",
- "NotMaster",
+ "NotWritablePrimary",
"NotMasterNoSlaveOk",
"NotMasterOrSecondary",
"PrimarySteppedDown",
diff --git a/src/mongo/util/assert_util_test.cpp b/src/mongo/util/assert_util_test.cpp
index 4275d8a907e..022593a7baf 100644
--- a/src/mongo/util/assert_util_test.cpp
+++ b/src/mongo/util/assert_util_test.cpp
@@ -87,26 +87,29 @@ TEST(AssertUtils, UassertNamedCodeWithoutCategories) {
ASSERT_NOT_CATCHES(ErrorCodes::BadValue, ExceptionForCat<ErrorCategory::Interruption>);
}
-// NotMaster - NotMasterError, RetriableError
-MONGO_STATIC_ASSERT(std::is_same<error_details::ErrorCategoriesFor<ErrorCodes::NotMaster>,
+// NotWritablePrimary - NotMasterError, RetriableError
+MONGO_STATIC_ASSERT(std::is_same<error_details::ErrorCategoriesFor<ErrorCodes::NotWritablePrimary>,
error_details::CategoryList<ErrorCategory::NotMasterError,
ErrorCategory::RetriableError>>());
-MONGO_STATIC_ASSERT(std::is_base_of<AssertionException, ExceptionFor<ErrorCodes::NotMaster>>());
+MONGO_STATIC_ASSERT(
+ std::is_base_of<AssertionException, ExceptionFor<ErrorCodes::NotWritablePrimary>>());
MONGO_STATIC_ASSERT(!std::is_base_of<ExceptionForCat<ErrorCategory::NetworkError>,
- ExceptionFor<ErrorCodes::NotMaster>>());
+ ExceptionFor<ErrorCodes::NotWritablePrimary>>());
MONGO_STATIC_ASSERT(std::is_base_of<ExceptionForCat<ErrorCategory::NotMasterError>,
- ExceptionFor<ErrorCodes::NotMaster>>());
+ ExceptionFor<ErrorCodes::NotWritablePrimary>>());
MONGO_STATIC_ASSERT(!std::is_base_of<ExceptionForCat<ErrorCategory::Interruption>,
- ExceptionFor<ErrorCodes::NotMaster>>());
+ ExceptionFor<ErrorCodes::NotWritablePrimary>>());
TEST(AssertUtils, UassertNamedCodeWithOneCategory) {
- ASSERT_CATCHES(ErrorCodes::NotMaster, DBException);
- ASSERT_CATCHES(ErrorCodes::NotMaster, AssertionException);
- ASSERT_CATCHES(ErrorCodes::NotMaster, ExceptionFor<ErrorCodes::NotMaster>);
- ASSERT_NOT_CATCHES(ErrorCodes::NotMaster, ExceptionFor<ErrorCodes::DuplicateKey>);
- ASSERT_NOT_CATCHES(ErrorCodes::NotMaster, ExceptionForCat<ErrorCategory::NetworkError>);
- ASSERT_CATCHES(ErrorCodes::NotMaster, ExceptionForCat<ErrorCategory::NotMasterError>);
- ASSERT_NOT_CATCHES(ErrorCodes::NotMaster, ExceptionForCat<ErrorCategory::Interruption>);
+ ASSERT_CATCHES(ErrorCodes::NotWritablePrimary, DBException);
+ ASSERT_CATCHES(ErrorCodes::NotWritablePrimary, AssertionException);
+ ASSERT_CATCHES(ErrorCodes::NotWritablePrimary, ExceptionFor<ErrorCodes::NotWritablePrimary>);
+ ASSERT_NOT_CATCHES(ErrorCodes::NotWritablePrimary, ExceptionFor<ErrorCodes::DuplicateKey>);
+ ASSERT_NOT_CATCHES(ErrorCodes::NotWritablePrimary,
+ ExceptionForCat<ErrorCategory::NetworkError>);
+ ASSERT_CATCHES(ErrorCodes::NotWritablePrimary, ExceptionForCat<ErrorCategory::NotMasterError>);
+ ASSERT_NOT_CATCHES(ErrorCodes::NotWritablePrimary,
+ ExceptionForCat<ErrorCategory::Interruption>);
}
// InterruptedDueToReplStateChange - NotMasterError, Interruption, RetriableError