From 783e113bbb1bfa83630222de5b74fe95530692f0 Mon Sep 17 00:00:00 2001 From: Pavi Vetriselvan Date: Mon, 24 Aug 2020 12:25:14 -0400 Subject: SERVER-50408 Change NotMaster error name to NotWritablePrimary --- src/mongo/base/error_codes.yml | 2 +- src/mongo/client/dbclient_connection.cpp | 2 +- src/mongo/client/dbclient_rs.cpp | 2 +- src/mongo/client/scanning_replica_set_monitor.cpp | 4 +-- ...able_replica_set_monitor_error_handler_test.cpp | 2 +- src/mongo/db/catalog/capped_utils.cpp | 4 +-- src/mongo/db/catalog/coll_mod.cpp | 2 +- src/mongo/db/catalog/create_collection.cpp | 4 +-- src/mongo/db/catalog/drop_collection.cpp | 4 +-- src/mongo/db/catalog/drop_database.cpp | 2 +- src/mongo/db/catalog/drop_database_test.cpp | 4 +-- src/mongo/db/catalog/drop_indexes.cpp | 2 +- src/mongo/db/catalog/rename_collection.cpp | 2 +- src/mongo/db/catalog/rename_collection_test.cpp | 4 +-- src/mongo/db/cloner.cpp | 4 +-- src/mongo/db/commands/collection_to_capped.cpp | 2 +- src/mongo/db/commands/create_indexes.cpp | 4 +-- src/mongo/db/commands/drop_indexes.cpp | 2 +- src/mongo/db/commands/find_and_modify.cpp | 2 +- src/mongo/db/commands/oplog_note.cpp | 2 +- src/mongo/db/commands/shutdown_d.cpp | 4 +-- src/mongo/db/error_labels_test.cpp | 19 +++++++------- src/mongo/db/index_builds_coordinator.cpp | 6 ++--- src/mongo/db/index_builds_coordinator_mongod.cpp | 2 +- src/mongo/db/mongod_main.cpp | 4 +-- src/mongo/db/op_observer_impl_test.cpp | 3 ++- src/mongo/db/read_concern_mongod.cpp | 4 +-- src/mongo/db/repl/apply_ops.cpp | 2 +- src/mongo/db/repl/oplog.cpp | 2 +- src/mongo/db/repl/primary_only_service.cpp | 4 +-- src/mongo/db/repl/primary_only_service.h | 2 +- src/mongo/db/repl/primary_only_service_test.cpp | 8 +++--- src/mongo/db/repl/repl_set_commands.cpp | 4 +-- src/mongo/db/repl/replication_coordinator.h | 2 +- src/mongo/db/repl/replication_coordinator_impl.cpp | 14 ++++++----- ...lication_coordinator_impl_heartbeat_v1_test.cpp | 4 +-- .../replication_coordinator_impl_reconfig_test.cpp | 8 +++--- .../db/repl/replication_coordinator_impl_test.cpp | 8 +++--- src/mongo/db/repl/tenant_oplog_applier.cpp | 4 +-- src/mongo/db/repl/topology_coordinator.cpp | 2 +- src/mongo/db/repl/topology_coordinator.h | 4 +-- src/mongo/db/repl/topology_coordinator_v1_test.cpp | 2 +- .../sharding_catalog_manager_shard_operations.cpp | 2 +- .../db/s/migration_chunk_cloner_source_legacy.cpp | 2 +- src/mongo/db/s/migration_destination_manager.cpp | 2 +- src/mongo/db/s/set_shard_version_command.cpp | 2 +- .../db/s/shard_server_catalog_cache_loader.cpp | 4 +-- .../transaction_coordinator_futures_util_test.cpp | 6 ++--- src/mongo/db/s/transaction_coordinator_service.cpp | 9 ++++--- src/mongo/db/s/transaction_coordinator_service.h | 4 +-- .../db/s/transaction_coordinator_service_test.cpp | 6 ++--- src/mongo/db/service_entry_point_common.cpp | 17 +++++++------ src/mongo/db/sessions_collection_rs.cpp | 4 +-- src/mongo/db/system_index.cpp | 2 +- src/mongo/db/transaction_participant.cpp | 8 +++--- src/mongo/db/transaction_participant.h | 2 +- src/mongo/db/transaction_participant_test.cpp | 20 ++++++++------- src/mongo/db/write_concern.h | 2 +- src/mongo/rpc/op_msg_integration_test.cpp | 14 ++++++----- src/mongo/s/async_requests_sender.cpp | 2 +- src/mongo/s/catalog/dist_lock_catalog_impl.cpp | 2 +- .../s/catalog/dist_lock_catalog_impl_test.cpp | 4 +-- src/mongo/s/catalog/replset_dist_lock_manager.cpp | 6 ++--- .../s/catalog/replset_dist_lock_manager_test.cpp | 4 +-- src/mongo/s/catalog/sharding_catalog_client.h | 2 +- 
src/mongo/s/catalog/sharding_catalog_client_impl.h | 2 +- src/mongo/s/catalog/sharding_catalog_test.cpp | 23 +++++++++-------- .../catalog/sharding_catalog_write_retry_test.cpp | 20 ++++++++------- src/mongo/s/client/shard_registry.h | 2 +- src/mongo/s/commands/batch_downconvert.cpp | 3 ++- src/mongo/s/query/async_results_merger.cpp | 2 +- src/mongo/s/sessions_collection_sharded_test.cpp | 8 +++--- src/mongo/s/write_ops/batch_write_exec_test.cpp | 4 +-- src/mongo/shell/utils.js | 2 +- src/mongo/util/assert_util_test.cpp | 29 ++++++++++++---------- 75 files changed, 203 insertions(+), 186 deletions(-) (limited to 'src/mongo') diff --git a/src/mongo/base/error_codes.yml b/src/mongo/base/error_codes.yml index 676d675dc00..59e2ef6f696 100644 --- a/src/mongo/base/error_codes.yml +++ b/src/mongo/base/error_codes.yml @@ -405,7 +405,7 @@ error_codes: - {code: 9001,name: SocketException,categories: [NetworkError,RetriableError]} - {code: 9996,name: OBSOLETE_RecvStaleConfig} - {code: 10003,name: CannotGrowDocumentInCappedNamespace} - - {code: 10107,name: NotMaster,categories: [NotMasterError,RetriableError]} + - {code: 10107,name: NotWritablePrimary,categories: [NotMasterError,RetriableError]} - {code: 10334,name: BSONObjectTooLarge} - {code: 11000,name: DuplicateKey,extra: DuplicateKeyErrorInfo} - {code: 11600,name: InterruptedAtShutdown,categories: [Interruption,ShutdownError,CancelationError,RetriableError]} diff --git a/src/mongo/client/dbclient_connection.cpp b/src/mongo/client/dbclient_connection.cpp index 66714d742de..a8b771be019 100644 --- a/src/mongo/client/dbclient_connection.cpp +++ b/src/mongo/client/dbclient_connection.cpp @@ -819,7 +819,7 @@ void DBClientConnection::handleNotMasterResponse(const BSONObj& replyBody, auto monitor = ReplicaSetMonitor::get(_parentReplSetName); if (monitor) { monitor->failedHost(_serverAddress, - {ErrorCodes::NotMaster, + {ErrorCodes::NotWritablePrimary, str::stream() << "got not master from: " << _serverAddress << " of repl set: " << _parentReplSetName}); } diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp index b9871482aac..0f3df602910 100644 --- a/src/mongo/client/dbclient_rs.cpp +++ b/src/mongo/client/dbclient_rs.cpp @@ -707,7 +707,7 @@ void DBClientReplicaSet::isntMaster() { // monitor doesn't exist. _rsm->failedHost( _masterHost, - {ErrorCodes::NotMaster, str::stream() << "got not master for: " << _masterHost}); + {ErrorCodes::NotWritablePrimary, str::stream() << "got not master for: " << _masterHost}); resetMaster(); } diff --git a/src/mongo/client/scanning_replica_set_monitor.cpp b/src/mongo/client/scanning_replica_set_monitor.cpp index ee5a1144861..ed6058bcc76 100644 --- a/src/mongo/client/scanning_replica_set_monitor.cpp +++ b/src/mongo/client/scanning_replica_set_monitor.cpp @@ -850,7 +850,7 @@ Status Refresher::receivedIsMasterFromMaster(const HostAndPort& from, const IsMa // since they don't have the same ordering with pv1 electionId. 
if (reply.configVersion < _set->configVersion) { return { - ErrorCodes::NotMaster, + ErrorCodes::NotWritablePrimary, str::stream() << "Node " << from << " believes it is primary, but its config version " << reply.configVersion << " is older than the most recent config version " << _set->configVersion}; @@ -863,7 +863,7 @@ Status Refresher::receivedIsMasterFromMaster(const HostAndPort& from, const IsMa if (reply.configVersion == _set->configVersion && _set->maxElectionId.isSet() && _set->maxElectionId.compare(reply.electionId) > 0) { return { - ErrorCodes::NotMaster, + ErrorCodes::NotWritablePrimary, str::stream() << "Node " << from << " believes it is primary, but its election id " << reply.electionId << " is older than the most recent election id " << _set->maxElectionId}; diff --git a/src/mongo/client/streamable_replica_set_monitor_error_handler_test.cpp b/src/mongo/client/streamable_replica_set_monitor_error_handler_test.cpp index ef0eb80e61a..497d3d04f2a 100644 --- a/src/mongo/client/streamable_replica_set_monitor_error_handler_test.cpp +++ b/src/mongo/client/streamable_replica_set_monitor_error_handler_test.cpp @@ -111,7 +111,7 @@ public: ErrorCodes::NotMasterOrSecondary, ErrorCodes::PrimarySteppedDown, ErrorCodes::ShutdownInProgress, - ErrorCodes::NotMaster, + ErrorCodes::NotWritablePrimary, ErrorCodes::NotMasterNoSlaveOk}; inline static const std::string kSetName = "setName"; diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp index 5dd3533fc91..81c9661c819 100644 --- a/src/mongo/db/catalog/capped_utils.cpp +++ b/src/mongo/db/catalog/capped_utils.cpp @@ -63,7 +63,7 @@ Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionNam !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, collectionName); if (userInitiatedWritesAndNotPrimary) { - return Status(ErrorCodes::NotMaster, + return Status(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while truncating collection: " << collectionName); } @@ -249,7 +249,7 @@ void convertToCapped(OperationContext* opCtx, const NamespaceString& ns, long lo bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() && !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, ns); - uassert(ErrorCodes::NotMaster, + uassert(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while converting " << ns << " to a capped collection", !userInitiatedWritesAndNotPrimary); diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp index 8a75e773a89..e46ec8fd298 100644 --- a/src/mongo/db/catalog/coll_mod.cpp +++ b/src/mongo/db/catalog/coll_mod.cpp @@ -385,7 +385,7 @@ Status _collModInternal(OperationContext* opCtx, !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss); if (userInitiatedWritesAndNotPrimary) { - return Status(ErrorCodes::NotMaster, + return Status(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while setting collection options on " << nss); } diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp index 43303c64a1e..2fef0686a47 100644 --- a/src/mongo/db/catalog/create_collection.cpp +++ b/src/mongo/db/catalog/create_collection.cpp @@ -70,7 +70,7 @@ Status _createView(OperationContext* opCtx, if (opCtx->writesAreReplicated() && !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)) { - return Status(ErrorCodes::NotMaster, + return Status(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while 
creating collection " << nss); } @@ -130,7 +130,7 @@ Status _createCollection(OperationContext* opCtx, if (opCtx->writesAreReplicated() && !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)) { - return Status(ErrorCodes::NotMaster, + return Status(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while creating collection " << nss); } diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp index c3470d56941..3ed987e475f 100644 --- a/src/mongo/db/catalog/drop_collection.cpp +++ b/src/mongo/db/catalog/drop_collection.cpp @@ -60,7 +60,7 @@ Status _checkNssAndReplState(OperationContext* opCtx, const Collection* coll) { if (opCtx->writesAreReplicated() && !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, coll->ns())) { - return Status(ErrorCodes::NotMaster, + return Status(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while dropping collection " << coll->ns()); } @@ -103,7 +103,7 @@ Status _dropView(OperationContext* opCtx, if (opCtx->writesAreReplicated() && !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, collectionName)) { - return Status(ErrorCodes::NotMaster, + return Status(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while dropping collection " << collectionName); } diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp index 1d7e149adec..61c5dd953bd 100644 --- a/src/mongo/db/catalog/drop_database.cpp +++ b/src/mongo/db/catalog/drop_database.cpp @@ -71,7 +71,7 @@ Status _checkNssAndReplState(OperationContext* opCtx, Database* db, const std::s opCtx->writesAreReplicated() && !replCoord->canAcceptWritesForDatabase(opCtx, dbName); if (userInitiatedWritesAndNotPrimary) { - return Status(ErrorCodes::NotMaster, + return Status(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while dropping database " << dbName); } diff --git a/src/mongo/db/catalog/drop_database_test.cpp b/src/mongo/db/catalog/drop_database_test.cpp index da0fc666faa..8ee0a7bf6b2 100644 --- a/src/mongo/db/catalog/drop_database_test.cpp +++ b/src/mongo/db/catalog/drop_database_test.cpp @@ -213,12 +213,12 @@ TEST_F(DropDatabaseTest, DropDatabaseReturnsNamespaceNotFoundIfDatabaseDoesNotEx dropDatabaseForApplyOps(_opCtx.get(), _nss.db().toString())); } -TEST_F(DropDatabaseTest, DropDatabaseReturnsNotMasterIfNotPrimary) { +TEST_F(DropDatabaseTest, DropDatabaseReturnsNotWritablePrimaryIfNotPrimary) { _createCollection(_opCtx.get(), _nss); ASSERT_OK(_replCoord->setFollowerMode(repl::MemberState::RS_SECONDARY)); ASSERT_TRUE(_opCtx->writesAreReplicated()); ASSERT_FALSE(_replCoord->canAcceptWritesForDatabase(_opCtx.get(), _nss.db())); - ASSERT_EQUALS(ErrorCodes::NotMaster, + ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, dropDatabaseForApplyOps(_opCtx.get(), _nss.db().toString())); } diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp index f44a977ae49..08def9db579 100644 --- a/src/mongo/db/catalog/drop_indexes.cpp +++ b/src/mongo/db/catalog/drop_indexes.cpp @@ -79,7 +79,7 @@ Status checkReplState(OperationContext* opCtx, bool writesAreReplicatedAndNotPrimary = opCtx->writesAreReplicated() && !canAcceptWrites; if (writesAreReplicatedAndNotPrimary) { - return Status(ErrorCodes::NotMaster, + return Status(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while dropping indexes on database " << dbAndUUID.db() << " with collection " << dbAndUUID.uuid()); } diff --git 
a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp index 111629b3fb4..08383c041bc 100644 --- a/src/mongo/db/catalog/rename_collection.cpp +++ b/src/mongo/db/catalog/rename_collection.cpp @@ -93,7 +93,7 @@ Status checkSourceAndTargetNamespaces(OperationContext* opCtx, auto replCoord = repl::ReplicationCoordinator::get(opCtx); if (opCtx->writesAreReplicated() && !replCoord->canAcceptWritesFor(opCtx, source)) - return Status(ErrorCodes::NotMaster, + return Status(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while renaming collection " << source << " to " << target); diff --git a/src/mongo/db/catalog/rename_collection_test.cpp b/src/mongo/db/catalog/rename_collection_test.cpp index 31895d624f8..726265f469f 100644 --- a/src/mongo/db/catalog/rename_collection_test.cpp +++ b/src/mongo/db/catalog/rename_collection_test.cpp @@ -529,12 +529,12 @@ TEST_F(RenameCollectionTest, ASSERT_TRUE(_collectionExists(_opCtx.get(), dropPendingNss)); } -TEST_F(RenameCollectionTest, RenameCollectionReturnsNotMasterIfNotPrimary) { +TEST_F(RenameCollectionTest, RenameCollectionReturnsNotWritablePrimaryIfNotPrimary) { _createCollection(_opCtx.get(), _sourceNss); ASSERT_OK(_replCoord->setFollowerMode(repl::MemberState::RS_SECONDARY)); ASSERT_TRUE(_opCtx->writesAreReplicated()); ASSERT_FALSE(_replCoord->canAcceptWritesForDatabase(_opCtx.get(), _sourceNss.db())); - ASSERT_EQUALS(ErrorCodes::NotMaster, + ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, renameCollection(_opCtx.get(), _sourceNss, _targetNss, {})); } diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp index 4d445043f6c..b0f8954adc6 100644 --- a/src/mongo/db/cloner.cpp +++ b/src/mongo/db/cloner.cpp @@ -95,7 +95,7 @@ struct Cloner::Fun { void operator()(DBClientCursorBatchIterator& i) { boost::optional dbLock; dbLock.emplace(opCtx, _dbName, MODE_X); - uassert(ErrorCodes::NotMaster, + uassert(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while cloning collection " << nss, !opCtx->writesAreReplicated() || repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)); @@ -529,7 +529,7 @@ Status Cloner::copyDb(OperationContext* opCtx, } uassert( - ErrorCodes::NotMaster, + ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while cloning database " << dBName << " (after getting list of collections to clone)", !opCtx->writesAreReplicated() || diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp index 353a5fe51c3..1ac73e3da17 100644 --- a/src/mongo/db/commands/collection_to_capped.cpp +++ b/src/mongo/db/commands/collection_to_capped.cpp @@ -119,7 +119,7 @@ public: Lock::CollectionLock collLock(opCtx, toNs, MODE_X); if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, toNs)) { - uasserted(ErrorCodes::NotMaster, + uasserted(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while cloning collection " << from << " to " << to << " (as capped)"); } diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp index 016f6d3cb02..e4a66da2d4b 100644 --- a/src/mongo/db/commands/create_indexes.cpp +++ b/src/mongo/db/commands/create_indexes.cpp @@ -513,7 +513,7 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx, Lock::DBLock dbLock(opCtx, ns.db(), MODE_IS); checkDatabaseShardingState(opCtx, ns); if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, ns)) { - uasserted(ErrorCodes::NotMaster, + 
uasserted(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while creating indexes in " << ns.ns()); } @@ -662,7 +662,7 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx, if (indexBuildsCoord->abortIndexBuildByBuildUUID( opCtx, buildUUID, IndexBuildAction::kPrimaryAbort, abortReason)) { LOGV2(20446, - "Index build: aborted due to NotMaster error", + "Index build: aborted due to NotPrimary error", "buildUUID"_attr = buildUUID); } else { // The index build may already be in the midst of tearing down. diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp index 5252df3c66f..c7b1af95ef2 100644 --- a/src/mongo/db/commands/drop_indexes.cpp +++ b/src/mongo/db/commands/drop_indexes.cpp @@ -107,7 +107,7 @@ public: AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { // Even though reIndex is a standalone-only command, this will return that the command is // allowed on secondaries so that it will fail with a more useful error message to the user - // rather than with a NotMaster error. + // rather than with a NotWritablePrimary error. return AllowedOnSecondary::kAlways; } virtual bool supportsWriteConcern(const BSONObj& cmd) const override { diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp index 161e44d6dbd..62b4ca577bb 100644 --- a/src/mongo/db/commands/find_and_modify.cpp +++ b/src/mongo/db/commands/find_and_modify.cpp @@ -183,7 +183,7 @@ void appendCommandResponse(const PlanExecutor* exec, } void assertCanWrite(OperationContext* opCtx, const NamespaceString& nsString) { - uassert(ErrorCodes::NotMaster, + uassert(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while running findAndModify command on collection " << nsString.ns(), repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nsString)); diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp index b0375a47ddc..74aeecbbc64 100644 --- a/src/mongo/db/commands/oplog_note.cpp +++ b/src/mongo/db/commands/oplog_note.cpp @@ -66,7 +66,7 @@ Status _performNoopWrite(OperationContext* opCtx, BSONObj msgObj, StringData not // Its a proxy for being a primary passing "local" will cause it to return true on secondary if (!replCoord->canAcceptWritesForDatabase(opCtx, "admin")) { - return {ErrorCodes::NotMaster, "Not a primary"}; + return {ErrorCodes::NotWritablePrimary, "Not a primary"}; } writeConflictRetry(opCtx, note, NamespaceString::kRsOplogNamespace.ns(), [&opCtx, &msgObj] { diff --git a/src/mongo/db/commands/shutdown_d.cpp b/src/mongo/db/commands/shutdown_d.cpp index ac5b5bf6f4a..6dc11a8220a 100644 --- a/src/mongo/db/commands/shutdown_d.cpp +++ b/src/mongo/db/commands/shutdown_d.cpp @@ -61,8 +61,8 @@ Status stepDownForShutdown(OperationContext* opCtx, LOGV2(4695100, "hangInShutdownAfterStepdown failpoint enabled"); hangInShutdownAfterStepdown.pauseWhileSet(opCtx); } - } catch (const ExceptionFor<ErrorCodes::NotMaster>&) { - // Ignore not master errors. + } catch (const ExceptionFor<ErrorCodes::NotWritablePrimary>&) { + // Ignore NotWritablePrimary errors.
} catch (const DBException& e) { if (!forceShutdown) { return e.toStatus(); diff --git a/src/mongo/db/error_labels_test.cpp b/src/mongo/db/error_labels_test.cpp index 6a176c495ea..a9d4cc27f0c 100644 --- a/src/mongo/db/error_labels_test.cpp +++ b/src/mongo/db/error_labels_test.cpp @@ -51,8 +51,9 @@ TEST(IsTransientTransactionErrorTest, NetworkErrorsAreNotTransientOnCommit) { } TEST(IsTransientTransactionErrorTest, RetryableWriteErrorsAreNotTransientOnAbort) { - ASSERT_FALSE(isTransientTransactionError( - ErrorCodes::NotMaster, false /* hasWriteConcernError */, true /* isCommitOrAbort */)); + ASSERT_FALSE(isTransientTransactionError(ErrorCodes::NotWritablePrimary, + false /* hasWriteConcernError */, + true /* isCommitOrAbort */)); } TEST(IsTransientTransactionErrorTest, @@ -129,7 +130,7 @@ TEST_F(ErrorLabelBuilderTest, NonTransientTransactionErrorsHaveNoTransientTransa sessionInfo.setAutocommit(false); std::string commandName = "commitTransaction"; ErrorLabelBuilder builder( - opCtx(), sessionInfo, commandName, ErrorCodes::NotMaster, boost::none, false); + opCtx(), sessionInfo, commandName, ErrorCodes::NotWritablePrimary, boost::none, false); ASSERT_FALSE(builder.isTransientTransactionError()); } @@ -147,7 +148,7 @@ TEST_F(ErrorLabelBuilderTest, NonRetryableWritesHaveNoRetryableWriteErrorLabel) OperationSessionInfoFromClient sessionInfo; std::string commandName = "insert"; ErrorLabelBuilder builder( - opCtx(), sessionInfo, commandName, ErrorCodes::NotMaster, boost::none, false); + opCtx(), sessionInfo, commandName, ErrorCodes::NotWritablePrimary, boost::none, false); // Test regular writes. ASSERT_FALSE(builder.isRetryableWriteError()); @@ -172,7 +173,7 @@ TEST_F(ErrorLabelBuilderTest, RetryableWriteErrorsHaveRetryableWriteErrorLabel) sessionInfo.setTxnNumber(1); std::string commandName = "update"; ErrorLabelBuilder builder( - opCtx(), sessionInfo, commandName, ErrorCodes::NotMaster, boost::none, false); + opCtx(), sessionInfo, commandName, ErrorCodes::NotWritablePrimary, boost::none, false); ASSERT_TRUE(builder.isRetryableWriteError()); } @@ -182,7 +183,7 @@ TEST_F(ErrorLabelBuilderTest, sessionInfo.setTxnNumber(1); std::string commandName = "update"; ErrorLabelBuilder builder( - opCtx(), sessionInfo, commandName, ErrorCodes::NotMaster, boost::none, true); + opCtx(), sessionInfo, commandName, ErrorCodes::NotWritablePrimary, boost::none, true); ASSERT_FALSE(builder.isRetryableWriteError()); } @@ -222,17 +223,17 @@ TEST_F(ErrorLabelBuilderTest, RetryableWriteErrorsOnCommitAbortHaveRetryableWrit commandName = "commitTransaction"; ErrorLabelBuilder commitBuilder( - opCtx(), sessionInfo, commandName, ErrorCodes::NotMaster, boost::none, false); + opCtx(), sessionInfo, commandName, ErrorCodes::NotWritablePrimary, boost::none, false); ASSERT_TRUE(commitBuilder.isRetryableWriteError()); commandName = "coordinateCommitTransaction"; ErrorLabelBuilder coordinateCommitBuilder( - opCtx(), sessionInfo, commandName, ErrorCodes::NotMaster, boost::none, false); + opCtx(), sessionInfo, commandName, ErrorCodes::NotWritablePrimary, boost::none, false); ASSERT_TRUE(coordinateCommitBuilder.isRetryableWriteError()); commandName = "abortTransaction"; ErrorLabelBuilder abortBuilder( - opCtx(), sessionInfo, commandName, ErrorCodes::NotMaster, boost::none, false); + opCtx(), sessionInfo, commandName, ErrorCodes::NotWritablePrimary, boost::none, false); ASSERT_TRUE(abortBuilder.isRetryableWriteError()); } diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp index 
d62ddd23692..c8caafc318f 100644 --- a/src/mongo/db/index_builds_coordinator.cpp +++ b/src/mongo/db/index_builds_coordinator.cpp @@ -1245,7 +1245,7 @@ bool IndexBuildsCoordinator::abortIndexBuildByBuildUUID(OperationContext* opCtx, if (IndexBuildAction::kPrimaryAbort == signalAction && !replCoord->canAcceptWritesFor(opCtx, dbAndUUID)) { - uassertStatusOK({ErrorCodes::NotMaster, + uassertStatusOK({ErrorCodes::NotWritablePrimary, str::stream() << "Unable to abort index build because we are not primary: " << buildUUID}); @@ -2064,7 +2064,7 @@ IndexBuildsCoordinator::PostSetupAction IndexBuildsCoordinator::_setUpIndexBuild // so we must fail the index build. During initial sync, there is no commit timestamp set. if (replSetAndNotPrimary && indexBuildOptions.applicationMode != ApplicationMode::kInitialSync) { - uassert(ErrorCodes::NotMaster, + uassert(ErrorCodes::NotWritablePrimary, str::stream() << "Replication state changed while setting up the index build: " << replState->buildUUID, !startTimestamp.isNull()); @@ -2786,7 +2786,7 @@ IndexBuildsCoordinator::CommitResult IndexBuildsCoordinator::_insertKeysFromSide // commit, trigger a self-abort. if (!isMaster && IndexBuildAction::kSinglePhaseCommit == action) { uassertStatusOK( - {ErrorCodes::NotMaster, + {ErrorCodes::NotWritablePrimary, str::stream() << "Unable to commit index build because we are no longer primary: " << replState->buildUUID}); } diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp index 0d08ee0e8de..f503f4f572d 100644 --- a/src/mongo/db/index_builds_coordinator_mongod.cpp +++ b/src/mongo/db/index_builds_coordinator_mongod.cpp @@ -174,7 +174,7 @@ IndexBuildsCoordinatorMongod::_startIndexBuild(OperationContext* opCtx, Lock::GlobalLock globalLk(opCtx, MODE_IX); auto replCoord = repl::ReplicationCoordinator::get(opCtx); - uassert(ErrorCodes::NotMaster, + uassert(ErrorCodes::NotWritablePrimary, "Not primary while waiting to start an index build", replCoord->canAcceptWritesFor(opCtx, nssOrUuid)); } diff --git a/src/mongo/db/mongod_main.cpp b/src/mongo/db/mongod_main.cpp index 658ae826cad..c04117cfae3 100644 --- a/src/mongo/db/mongod_main.cpp +++ b/src/mongo/db/mongod_main.cpp @@ -506,8 +506,8 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) { "error"_attr = redact(status)); if (status == ErrorCodes::AuthSchemaIncompatible) { exitCleanly(EXIT_NEED_UPGRADE); - } else if (status == ErrorCodes::NotMaster) { - // Try creating the indexes if we become master. If we do not become master, + } else if (status == ErrorCodes::NotWritablePrimary) { + // Try creating the indexes if we become primary. If we do not become primary, // the master will create the indexes and we will replicate them. } else { quickExit(EXIT_FAILURE); diff --git a/src/mongo/db/op_observer_impl_test.cpp b/src/mongo/db/op_observer_impl_test.cpp index 856f650f6c3..51614c4b62e 100644 --- a/src/mongo/db/op_observer_impl_test.cpp +++ b/src/mongo/db/op_observer_impl_test.cpp @@ -456,7 +456,8 @@ TEST_F(OpObserverTest, MustBePrimaryToWriteOplogEntries) { WriteUnitOfWork wunit(opCtx.get()); // No-op writes should be prohibited. 
- ASSERT_THROWS_CODE(opObserver.onOpMessage(opCtx.get(), {}), DBException, ErrorCodes::NotMaster); + ASSERT_THROWS_CODE( + opObserver.onOpMessage(opCtx.get(), {}), DBException, ErrorCodes::NotWritablePrimary); } /** diff --git a/src/mongo/db/read_concern_mongod.cpp b/src/mongo/db/read_concern_mongod.cpp index 3663b9cdcc5..30b76024344 100644 --- a/src/mongo/db/read_concern_mongod.cpp +++ b/src/mongo/db/read_concern_mongod.cpp @@ -295,7 +295,7 @@ Status waitForReadConcernImpl(OperationContext* opCtx, } if (!replCoord->getMemberState().primary()) { - return {ErrorCodes::NotMaster, + return {ErrorCodes::NotWritablePrimary, "cannot satisfy linearizable read concern on non-primary node"}; } } @@ -442,7 +442,7 @@ Status waitForLinearizableReadConcernImpl(OperationContext* opCtx, const int rea { AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); if (!replCoord->canAcceptWritesForDatabase(opCtx, "admin")) { - return {ErrorCodes::NotMaster, + return {ErrorCodes::NotWritablePrimary, "No longer primary when waiting for linearizable read concern"}; } diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp index 0caf8565939..d9c8565507f 100644 --- a/src/mongo/db/repl/apply_ops.cpp +++ b/src/mongo/db/repl/apply_ops.cpp @@ -412,7 +412,7 @@ Status applyOps(OperationContext* opCtx, opCtx->writesAreReplicated() && !replCoord->canAcceptWritesForDatabase(opCtx, dbName); if (userInitiatedWritesAndNotPrimary) - return Status(ErrorCodes::NotMaster, + return Status(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while applying ops to database " << dbName); if (auto preCondition = info.getPreCondition()) { diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp index c0a153252eb..185c9aa04f5 100644 --- a/src/mongo/db/repl/oplog.cpp +++ b/src/mongo/db/repl/oplog.cpp @@ -226,7 +226,7 @@ void _logOpsInner(OperationContext* opCtx, ss << "(" << record.id << ", " << redact(record.data.toBson()) << ") "; } ss << "]"; - uasserted(ErrorCodes::NotMaster, ss); + uasserted(ErrorCodes::NotWritablePrimary, ss); } // TODO (SERVER-50598): Not allow tenant migration donor to write "commitIndexBuild" and diff --git a/src/mongo/db/repl/primary_only_service.cpp b/src/mongo/db/repl/primary_only_service.cpp index 7bef6832beb..4be2ac36730 100644 --- a/src/mongo/db/repl/primary_only_service.cpp +++ b/src/mongo/db/repl/primary_only_service.cpp @@ -110,7 +110,7 @@ public: // ensure the OpCtx starts off immediately interrupted. if (!clientState.allowOpCtxWhenServiceNotRunning && !clientState.primaryOnlyService->isRunning()) { - opCtx->markKilled(ErrorCodes::NotMaster); + opCtx->markKilled(ErrorCodes::NotWritablePrimary); } } void onDestroyOperationContext(OperationContext* opCtx) override {} @@ -373,7 +373,7 @@ std::shared_ptr PrimaryOnlyService::getOrCreateIns uassertStatusOK(_rebuildStatus); } uassert( - ErrorCodes::NotMaster, + ErrorCodes::NotWritablePrimary, str::stream() << "Not Primary when trying to create a new instance of PrimaryOnlyService " << getServiceName(), _state == State::kRunning); diff --git a/src/mongo/db/repl/primary_only_service.h b/src/mongo/db/repl/primary_only_service.h index f1742a2eac9..791426a432f 100644 --- a/src/mongo/db/repl/primary_only_service.h +++ b/src/mongo/db/repl/primary_only_service.h @@ -235,7 +235,7 @@ protected: * new Instance (by calling constructInstance()), registers it in _instances, and returns it. 
* It is illegal to call this more than once with 'initialState' documents that have the same * _id but are otherwise not completely identical. - * Throws NotMaster if the node is not currently primary. + * Throws NotWritablePrimary if the node is not currently primary. */ std::shared_ptr getOrCreateInstance(BSONObj initialState); diff --git a/src/mongo/db/repl/primary_only_service_test.cpp b/src/mongo/db/repl/primary_only_service_test.cpp index 076a5613181..bb75b4f8ba1 100644 --- a/src/mongo/db/repl/primary_only_service_test.cpp +++ b/src/mongo/db/repl/primary_only_service_test.cpp @@ -373,7 +373,7 @@ TEST_F(PrimaryOnlyServiceTest, CreateWhenNotPrimary) { ASSERT_THROWS_CODE( TestService::Instance::getOrCreate(_service, BSON("_id" << 0 << "state" << 0)), DBException, - ErrorCodes::NotMaster); + ErrorCodes::NotWritablePrimary); } TEST_F(PrimaryOnlyServiceTest, CreateWithoutID) { @@ -484,7 +484,7 @@ TEST_F(PrimaryOnlyServiceTest, StepDownBeforeRebuildingInstances) { // Let the previous stepUp attempt continue and realize that the node has since stepped down. PrimaryOnlyServiceHangBeforeRebuildingInstances.setMode(FailPoint::off); - ASSERT_THROWS_CODE(getInstanceFuture.get(), DBException, ErrorCodes::NotMaster); + ASSERT_THROWS_CODE(getInstanceFuture.get(), DBException, ErrorCodes::NotWritablePrimary); // Now do another stepUp that is allowed to complete this time. stateOneFPTimesEntered = TestServiceHangDuringStateOne.setMode(FailPoint::alwaysOn); @@ -536,7 +536,7 @@ TEST_F(PrimaryOnlyServiceTest, RecreateInstancesFails) { ASSERT_THROWS_CODE( TestService::Instance::getOrCreate(_service, BSON("_id" << 0 << "state" << 0)), DBException, - ErrorCodes::NotMaster); + ErrorCodes::NotWritablePrimary); // Allow the next stepUp to succeed. PrimaryOnlyServiceFailRebuildingInstances.setMode(FailPoint::off); @@ -564,5 +564,5 @@ TEST_F(PrimaryOnlyServiceTest, OpCtxInterruptedByStepdown) { stepDown(); TestServiceHangBeforeWritingStateDoc.setMode(FailPoint::off); - ASSERT_EQ(ErrorCodes::NotMaster, instance->getCompletionFuture().getNoThrow()); + ASSERT_EQ(ErrorCodes::NotWritablePrimary, instance->getCompletionFuture().getNoThrow()); } diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp index e35ff6199d5..a02a4d32ccb 100644 --- a/src/mongo/db/repl/repl_set_commands.cpp +++ b/src/mongo/db/repl/repl_set_commands.cpp @@ -431,8 +431,8 @@ public: // Convert the error code to be more specific. uasserted(ErrorCodes::CurrentConfigNotCommittedYet, status.reason()); } else if (status == ErrorCodes::PrimarySteppedDown) { - // Return NotMaster since the command has no side effect yet. - status = {ErrorCodes::NotMaster, status.reason()}; + // Return NotWritablePrimary since the command has no side effect yet. 
+ status = {ErrorCodes::NotWritablePrimary, status.reason()}; } uassertStatusOK(status); } diff --git a/src/mongo/db/repl/replication_coordinator.h b/src/mongo/db/repl/replication_coordinator.h index de32ec0783e..919f5a5e5a1 100644 --- a/src/mongo/db/repl/replication_coordinator.h +++ b/src/mongo/db/repl/replication_coordinator.h @@ -229,7 +229,7 @@ public: * the data has been sufficiently replicated * ErrorCodes::ExceededTimeLimit if the opCtx->getMaxTimeMicrosRemaining is reached before * the data has been sufficiently replicated - * ErrorCodes::NotMaster if the node is not Primary/Master + * ErrorCodes::NotWritablePrimary if the node is not a writable primary * ErrorCodes::UnknownReplWriteConcern if the writeConcern.wMode contains a write concern * mode that is not known * ErrorCodes::ShutdownInProgress if we are mid-shutdown diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp index fa0d3f6d812..ffc93f5fdf4 100644 --- a/src/mongo/db/repl/replication_coordinator_impl.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl.cpp @@ -2296,7 +2296,7 @@ StatusWith ReplicationCoordinatorImpl::getLatestWriteOpTime(OperationCon Lock::GlobalLock globalLock(opCtx, MODE_IS); // Check if the node is primary after acquiring global IS lock. if (!canAcceptNonLocalWrites()) { - return {ErrorCodes::NotMaster, "Not primary so can't get latest write optime"}; + return {ErrorCodes::NotWritablePrimary, "Not primary so can't get latest write optime"}; } auto oplog = LocalOplogInfo::get(opCtx)->getCollection(); if (!oplog) { @@ -2532,7 +2532,9 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx, // Note this check is inherently racy - it's always possible for the node to stepdown from some // other path before we acquire the global exclusive lock. This check is just to try to save us // from acquiring the global X lock unnecessarily. - uassert(ErrorCodes::NotMaster, "not primary so can't step down", getMemberState().primary()); + uassert(ErrorCodes::NotWritablePrimary, + "not primary so can't step down", + getMemberState().primary()); CurOpFailpointHelpers::waitWhileFailPointEnabled( &stepdownHangBeforeRSTLEnqueue, opCtx, "stepdownHangBeforeRSTLEnqueue"); @@ -2880,7 +2882,7 @@ Status ReplicationCoordinatorImpl::checkCanServeReadsFor_UNSAFE(OperationContext if (opCtx->inMultiDocumentTransaction()) { if (!_readWriteAbility->canAcceptNonLocalWrites_UNSAFE()) { - return Status(ErrorCodes::NotMaster, + return Status(ErrorCodes::NotWritablePrimary, "Multi-document transactions are only allowed on replica set primaries."); } } @@ -3020,7 +3022,7 @@ void ReplicationCoordinatorImpl::processReplSetGetConfig(BSONObjBuilder* result, } if (commitmentStatus) { - uassert(ErrorCodes::NotMaster, + uassert(ErrorCodes::NotWritablePrimary, "commitmentStatus is only supported on primary.", _readWriteAbility->canAcceptNonLocalWrites(lock)); auto configWriteConcern = _getConfigReplicationWriteConcern(); @@ -3297,7 +3299,7 @@ Status ReplicationCoordinatorImpl::doReplSetReconfig(OperationContext* opCtx, if (!force && !_readWriteAbility->canAcceptNonLocalWrites(lk)) { return Status( - ErrorCodes::NotMaster, + ErrorCodes::NotWritablePrimary, str::stream() << "Safe reconfig is only allowed on a writable PRIMARY. 
Current state is " << _getMemberState_inlock().toString()); @@ -3469,7 +3471,7 @@ Status ReplicationCoordinatorImpl::doReplSetReconfig(OperationContext* opCtx, { Lock::GlobalLock globalLock(opCtx, LockMode::MODE_IX); if (!force && !_readWriteAbility->canAcceptNonLocalWrites(opCtx)) { - return {ErrorCodes::NotMaster, "Stepped down when persisting new config"}; + return {ErrorCodes::NotWritablePrimary, "Stepped down when persisting new config"}; } // Don't write no-op for internal and external force reconfig. diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp index d2fccde344a..1a4c843523a 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp @@ -1216,7 +1216,7 @@ TEST_F(ReplCoordHBV1Test, LastCommittedOpTimeOnlyUpdatesFromHeartbeatIfNotInStar * Tests assert that stepdown via heartbeat completed, and the tests that send the new config via * heartbeat assert that the new config was stored. Tests that send the new config with the * replSetReconfig command don't check that it was stored; if the stepdown finished first then the - * replSetReconfig was rejected with a NotMaster error. + * replSetReconfig was rejected with a NotWritablePrimary error. */ class HBStepdownAndReconfigTest : public ReplCoordHBV1Test { protected: @@ -1360,7 +1360,7 @@ Future HBStepdownAndReconfigTest::startReconfigCommand() { BSONObjBuilder result; auto status = Status::OK(); try { - // OK for processReplSetReconfig to return, throw NotMaster-like error, or succeed. + // OK for processReplSetReconfig to return, throw NotPrimary-like error, or succeed. status = coord->processReplSetReconfig(opCtx.get(), args, &result); } catch (const DBException&) { status = exceptionToStatus(); diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp index 9d23c302774..cd1a089d177 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp @@ -90,7 +90,7 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenReconfigReceivedWhileSecondary) { ReplSetReconfigArgs args; args.force = false; const auto opCtx = makeOperationContext(); - ASSERT_EQUALS(ErrorCodes::NotMaster, + ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result)); ASSERT_TRUE(result.obj().isEmpty()); } @@ -123,7 +123,7 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenRunningSafeReconfigWhileInDrainMod ReplSetReconfigArgs args; args.force = false; auto status = getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result); - ASSERT_EQUALS(ErrorCodes::NotMaster, status); + ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, status); ASSERT_STRING_CONTAINS(status.reason(), "Safe reconfig is only allowed on a writable PRIMARY."); ASSERT_TRUE(result.obj().isEmpty()); } @@ -796,7 +796,7 @@ TEST_F(ReplCoordTest, NodeAcceptsConfigFromAReconfigWithForceTrueWhileNotPrimary << BSON("_id" << 2 << "host" << "node2:12345"))); const auto opCtx = makeOperationContext(); - ASSERT_EQUALS(ErrorCodes::NotMaster, + ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result)); // forced should succeed @@ -1469,7 +1469,7 @@ TEST_F(ReplCoordReconfigTest, StepdownShouldInterruptConfigWrite) { respondToAllHeartbeats(); } - 
ASSERT_EQ(status.code(), ErrorCodes::NotMaster); + ASSERT_EQ(status.code(), ErrorCodes::NotWritablePrimary); ASSERT_EQ(status.reason(), "Stepped down when persisting new config"); } diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp index ff034802740..f19ee0d2bd0 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp @@ -1212,8 +1212,8 @@ TEST_F(ReplCoordTest, } TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenSteppingDownBeforeSatisfyingAWriteConcern) { - // Test that a thread blocked in awaitReplication will be woken up and return NotMaster - // if the node steps down while it is waiting. + // Test that a thread blocked in awaitReplication will be woken up and return PrimarySteppedDown + // (a NotMasterError) if the node steps down while it is waiting. assertStartSuccess(BSON("_id" << "mySet" << "version" << 2 << "members" @@ -1917,7 +1917,7 @@ TEST_F(StepDownTestWithUnelectableNode, ASSERT_TRUE(repl->getMemberState().secondary()); } -TEST_F(StepDownTest, NodeReturnsNotMasterWhenAskedToStepDownAsANonPrimaryNode) { +TEST_F(StepDownTest, NodeReturnsNotWritablePrimaryWhenAskedToStepDownAsANonPrimaryNode) { const auto opCtx = makeOperationContext(); OpTimeWithTermOne optime1(100, 1); @@ -1930,7 +1930,7 @@ TEST_F(StepDownTest, NodeReturnsNotMasterWhenAskedToStepDownAsANonPrimaryNode) { ASSERT_THROWS_CODE( getReplCoord()->stepDown(opCtx.get(), false, Milliseconds(0), Milliseconds(0)), AssertionException, - ErrorCodes::NotMaster); + ErrorCodes::NotWritablePrimary); ASSERT_TRUE(getReplCoord()->getMemberState().secondary()); } diff --git a/src/mongo/db/repl/tenant_oplog_applier.cpp b/src/mongo/db/repl/tenant_oplog_applier.cpp index 520c4f5b0f8..f55ce6a539f 100644 --- a/src/mongo/db/repl/tenant_oplog_applier.cpp +++ b/src/mongo/db/repl/tenant_oplog_applier.cpp @@ -439,8 +439,8 @@ Status TenantOplogApplier::_applyOplogEntryOrGroupedInserts( OperationContext* opCtx, const OplogEntryOrGroupedInserts& entryOrGroupedInserts, OplogApplication::Mode oplogApplicationMode) { - // We must ensure the opCtx uses replicated writes, because that will ensure we get a NotMaster - // error if a stepdown occurs. + // We must ensure the opCtx uses replicated writes, because that will ensure we get a + // NotWritablePrimary error if a stepdown occurs. invariant(opCtx->writesAreReplicated()); // Ensure context matches that of _applyOplogBatchPerWorker. diff --git a/src/mongo/db/repl/topology_coordinator.cpp b/src/mongo/db/repl/topology_coordinator.cpp index f99dbbda5d5..4cc30799822 100644 --- a/src/mongo/db/repl/topology_coordinator.cpp +++ b/src/mongo/db/repl/topology_coordinator.cpp @@ -1665,7 +1665,7 @@ TopologyCoordinator::prepareForStepDownAttempt() { } if (_leaderMode == LeaderMode::kNotLeader) { - return Status{ErrorCodes::NotMaster, "This node is not a primary."}; + return Status{ErrorCodes::NotWritablePrimary, "This node is not a primary."}; } invariant(_leaderMode == LeaderMode::kMaster || _leaderMode == LeaderMode::kLeaderElect); diff --git a/src/mongo/db/repl/topology_coordinator.h b/src/mongo/db/repl/topology_coordinator.h index 0373dab95de..5f7f9fbe2c1 100644 --- a/src/mongo/db/repl/topology_coordinator.h +++ b/src/mongo/db/repl/topology_coordinator.h @@ -630,11 +630,11 @@ public: * when we receive a stepdown command (which can fail if not enough secondaries are caught up) * to ensure that we never process more than one stepdown request at a time. 
* Returns OK if it is safe to continue with the stepdown attempt, or returns: - * - NotMaster if this node is not a leader. + * - NotWritablePrimary if this node is not a leader. * - ConflictingOperationInProgess if this node is already processing a stepdown request of any * kind. * On an OK return status also returns a function object that can be called to abort the - * pending stepdown attempt and return this node to normal primary/master state. + * pending stepdown attempt and return this node to normal (writable) primary state. */ StatusWith prepareForStepDownAttempt(); diff --git a/src/mongo/db/repl/topology_coordinator_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_v1_test.cpp index c3b36b81bda..d0625259077 100644 --- a/src/mongo/db/repl/topology_coordinator_v1_test.cpp +++ b/src/mongo/db/repl/topology_coordinator_v1_test.cpp @@ -2152,7 +2152,7 @@ TEST_F(TopoCoordTest, PrepareStepDownAttemptFailsIfNotLeader) { << "protocolVersion" << 1), 0); getTopoCoord().changeMemberState_forTest(MemberState::RS_SECONDARY); - Status expectedStatus(ErrorCodes::NotMaster, "This node is not a primary. "); + Status expectedStatus(ErrorCodes::NotWritablePrimary, "This node is not a primary. "); ASSERT_EQUALS(expectedStatus, getTopoCoord().prepareForStepDownAttempt().getStatus()); } diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp index 1bb71524d3a..f03ee603838 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp @@ -367,7 +367,7 @@ StatusWith ShardingCatalogManager::_validateHostAsShard( << connectionString.toString() << " as a shard"); } if (!isMaster) { - return {ErrorCodes::NotMaster, + return {ErrorCodes::NotWritablePrimary, str::stream() << connectionString.toString() << " does not have a master. If this is a replica set, ensure that it has a" diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp index 0b7b5cbc1d2..40d2853424e 100644 --- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp +++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp @@ -781,7 +781,7 @@ StatusWith MigrationChunkClonerSourceLegacy::_callRecipient(const BSONO responseStatus = args.response; }); - // TODO: Update RemoteCommandTargeter on NotMaster errors. + // TODO: Update RemoteCommandTargeter on NotWritablePrimary errors. if (!scheduleStatus.isOK()) { return scheduleStatus.getStatus(); } diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp index 3d8ba900e92..e6e4de4ac75 100644 --- a/src/mongo/db/s/migration_destination_manager.cpp +++ b/src/mongo/db/s/migration_destination_manager.cpp @@ -718,7 +718,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions( // Checks that the collection's UUID matches the donor's. 
auto checkUUIDsMatch = [&](const Collection* collection) { - uassert(ErrorCodes::NotMaster, + uassert(ErrorCodes::NotWritablePrimary, str::stream() << "Unable to create collection " << nss.ns() << " because the node is not primary", repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)); diff --git a/src/mongo/db/s/set_shard_version_command.cpp b/src/mongo/db/s/set_shard_version_command.cpp index 8948c5c5725..f8a321aea1a 100644 --- a/src/mongo/db/s/set_shard_version_command.cpp +++ b/src/mongo/db/s/set_shard_version_command.cpp @@ -143,7 +143,7 @@ public: autoDb.emplace(opCtx, nss.db(), MODE_IS); // Slave nodes cannot support set shard version - uassert(ErrorCodes::NotMaster, + uassert(ErrorCodes::NotWritablePrimary, str::stream() << "setShardVersion with collection version is only supported " "against primary nodes, but it was received for namespace " << nss.ns(), diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp index a4f9a02e009..922e9121690 100644 --- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp +++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp @@ -501,7 +501,7 @@ void ShardServerCatalogCacheLoader::waitForCollectionFlush(OperationContext* opC boost::optional taskNumToWait; while (true) { - uassert(ErrorCodes::NotMaster, + uassert(ErrorCodes::NotWritablePrimary, str::stream() << "Unable to wait for collection metadata flush for " << nss.ns() << " because the node's replication role changed.", _role == ReplicaSetRole::Primary && _term == initialTerm); @@ -552,7 +552,7 @@ void ShardServerCatalogCacheLoader::waitForDatabaseFlush(OperationContext* opCtx boost::optional taskNumToWait; while (true) { - uassert(ErrorCodes::NotMaster, + uassert(ErrorCodes::NotWritablePrimary, str::stream() << "Unable to wait for database metadata flush for " << dbName.toString() << " because the node's replication role changed.", diff --git a/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp b/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp index e46ab94599f..4278c3367ff 100644 --- a/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp +++ b/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp @@ -250,7 +250,7 @@ TEST(TransactionCoordinatorFuturesUtilTest, promises[0].setError(errorStatus1); ASSERT(!resultFuture.isReady()); - Status errorStatus2{ErrorCodes::NotMaster, "dummy error"}; + Status errorStatus2{ErrorCodes::NotWritablePrimary, "dummy error"}; promises[1].setError(errorStatus2); ASSERT(!resultFuture.isReady()); @@ -642,11 +642,11 @@ TEST_F(AsyncWorkSchedulerTest, DestroyingSchedulerCapturedInFutureCallback) { future.get(); } -TEST_F(AsyncWorkSchedulerTest, NotifiesRemoteCommandTargeter_CmdResponseNotMasterError) { +TEST_F(AsyncWorkSchedulerTest, NotifiesRemoteCommandTargeter_CmdResponseNotWritablePrimaryError) { ASSERT_EQ(0UL, getShardTargeterMock(kShardIds[1])->getAndClearMarkedDownHosts().size()); scheduleAWSRemoteCommandWithResponse(kShardIds[1], - BSON("ok" << 0 << "code" << ErrorCodes::NotMaster + BSON("ok" << 0 << "code" << ErrorCodes::NotWritablePrimary << "errmsg" << "dummy")); diff --git a/src/mongo/db/s/transaction_coordinator_service.cpp b/src/mongo/db/s/transaction_coordinator_service.cpp index a6392223182..98f0944d510 100644 --- a/src/mongo/db/s/transaction_coordinator_service.cpp +++ b/src/mongo/db/s/transaction_coordinator_service.cpp @@ -93,8 +93,8 @@ void TransactionCoordinatorService::reportCoordinators(OperationContext* opCtx, 
std::shared_ptr cas; try { cas = _getCatalogAndScheduler(opCtx); - } catch (ExceptionFor<ErrorCodes::NotMaster>&) { - // If we are not master, don't include any output for transaction coordinators in + } catch (ExceptionFor<ErrorCodes::NotWritablePrimary>&) { + // If we are not primary, don't include any output for transaction coordinators in // the curOp command. return; } @@ -270,8 +270,9 @@ void TransactionCoordinatorService::onShardingInitialization(OperationContext* o std::shared_ptr TransactionCoordinatorService::_getCatalogAndScheduler(OperationContext* opCtx) { stdx::unique_lock ul(_mutex); - uassert( - ErrorCodes::NotMaster, "Transaction coordinator is not a primary", _catalogAndScheduler); + uassert(ErrorCodes::NotWritablePrimary, + "Transaction coordinator is not a primary", + _catalogAndScheduler); return _catalogAndScheduler; } diff --git a/src/mongo/db/s/transaction_coordinator_service.h b/src/mongo/db/s/transaction_coordinator_service.h index a4fe1ce16f9..f50a511a8a3 100644 --- a/src/mongo/db/s/transaction_coordinator_service.h +++ b/src/mongo/db/s/transaction_coordinator_service.h @@ -135,8 +135,8 @@ private: }; /** - * Returns the current catalog + scheduler if stepUp has started, otherwise throws a NotMaster - * exception. + * Returns the current catalog + scheduler if stepUp has started, otherwise throws a + * NotWritablePrimary exception. */ std::shared_ptr _getCatalogAndScheduler(OperationContext* opCtx); diff --git a/src/mongo/db/s/transaction_coordinator_service_test.cpp b/src/mongo/db/s/transaction_coordinator_service_test.cpp index 215d101ba0a..e45eaa90ba7 100644 --- a/src/mongo/db/s/transaction_coordinator_service_test.cpp +++ b/src/mongo/db/s/transaction_coordinator_service_test.cpp @@ -198,17 +198,17 @@ TEST_F(TransactionCoordinatorServiceStepUpStepDownTest, OperationsFailBeforeStep ASSERT_THROWS_CODE(service()->createCoordinator( operationContext(), makeLogicalSessionIdForTest(), 0, kCommitDeadline), AssertionException, - ErrorCodes::NotMaster); + ErrorCodes::NotWritablePrimary); ASSERT_THROWS_CODE(service()->coordinateCommit( operationContext(), makeLogicalSessionIdForTest(), 0, kTwoShardIdSet), AssertionException, - ErrorCodes::NotMaster); + ErrorCodes::NotWritablePrimary); ASSERT_THROWS_CODE( service()->recoverCommit(operationContext(), makeLogicalSessionIdForTest(), 0), AssertionException, - ErrorCodes::NotMaster); + ErrorCodes::NotWritablePrimary); } TEST_F(TransactionCoordinatorServiceStepUpStepDownTest, OperationsBlockBeforeStepUpCompletes) { diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp index 7493feef5b0..61d8bd3b985 100644 --- a/src/mongo/db/service_entry_point_common.cpp +++ b/src/mongo/db/service_entry_point_common.cpp @@ -474,7 +474,7 @@ void appendErrorLabelsAndTopologyVersion(OperationContext* opCtx, (wcCode && ErrorCodes::isA(*wcCode)); const auto replCoord = repl::ReplicationCoordinator::get(opCtx); - // NotMaster errors always include a topologyVersion, since we increment topologyVersion on + // NotPrimary errors always include a topologyVersion, since we increment topologyVersion on // stepdown. ShutdownErrors only include a topologyVersion if the server is in quiesce mode, // since we only increment the topologyVersion at shutdown and alert waiting isMaster commands // if the server enters quiesce mode.
@@ -1059,9 +1059,9 @@ void execCommandDatabase(OperationContext* opCtx,
         }
 
         if (MONGO_unlikely(respondWithNotPrimaryInCommandDispatch.shouldFail())) {
-            uassert(ErrorCodes::NotMaster, "not primary", canRunHere);
+            uassert(ErrorCodes::NotWritablePrimary, "not primary", canRunHere);
         } else {
-            uassert(ErrorCodes::NotMaster, "not master", canRunHere);
+            uassert(ErrorCodes::NotWritablePrimary, "not master", canRunHere);
         }
 
         if (!command->maintenanceOk() &&
@@ -1428,7 +1428,7 @@ DbResponse receivedCommands(OperationContext* opCtx,
         if (LastError::get(opCtx->getClient()).hadNotMasterError()) {
             if (c && c->getReadWriteType() == Command::ReadWriteType::kWrite)
                 notMasterUnackWrites.increment();
-            uasserted(ErrorCodes::NotMaster,
+            uasserted(ErrorCodes::NotWritablePrimary,
                       str::stream() << "Not-master error while processing '"
                                     << request.getCommandName() << "' operation on '"
                                     << request.getDatabase() << "' database via "
@@ -1779,12 +1779,13 @@ Future ServiceEntryPointCommon::handleRequest(OperationContext* opCt
                         "error"_attr = redact(ue));
             debug.errInfo = ue.toStatus();
         }
-        // A NotMaster error can be set either within receivedInsert/receivedUpdate/receivedDelete
-        // or within the AssertionException handler above. Either way, we want to throw an
-        // exception here, which will cause the client to be disconnected.
+        // A NotWritablePrimary error can be set either within
+        // receivedInsert/receivedUpdate/receivedDelete or within the AssertionException handler
+        // above. Either way, we want to throw an exception here, which will cause the client to be
+        // disconnected.
         if (LastError::get(opCtx->getClient()).hadNotMasterError()) {
             notMasterLegacyUnackWrites.increment();
-            uasserted(ErrorCodes::NotMaster,
+            uasserted(ErrorCodes::NotWritablePrimary,
                       str::stream() << "Not-master error while processing '"
                                     << networkOpToString(op) << "' operation on '"
                                     << nsString << "' namespace via legacy "
diff --git a/src/mongo/db/sessions_collection_rs.cpp b/src/mongo/db/sessions_collection_rs.cpp
index 664f34c0c8b..fc7f8977717 100644
--- a/src/mongo/db/sessions_collection_rs.cpp
+++ b/src/mongo/db/sessions_collection_rs.cpp
@@ -102,8 +102,8 @@ auto SessionsCollectionRS::_dispatch(const NamespaceString& ns,
 
     // There is a window here where we may transition from Primary to Secondary after we release
     // the locks we take in _isStandaloneOrPrimary(). In this case, the callback we run below
-    // may throw a NotMaster error, or a stale read. However, this is preferable to running the
-    // callback while we hold locks, since that can lead to a deadlock.
+    // may throw a NotWritablePrimary error, or a stale read. However, this is preferable to running
+    // the callback while we hold locks, since that can lead to a deadlock.
     auto conn = _makePrimaryConnection(opCtx);
     DBClientBase* client = conn->get();
 
diff --git a/src/mongo/db/system_index.cpp b/src/mongo/db/system_index.cpp
index 9234ad7769b..8bd0cfbd422 100644
--- a/src/mongo/db/system_index.cpp
+++ b/src/mongo/db/system_index.cpp
@@ -94,7 +94,7 @@ void generateSystemIndexForExistingCollection(OperationContext* opCtx,
                                               const IndexSpec& spec) {
     // Do not try to generate any system indexes on a secondary.
     auto replCoord = repl::ReplicationCoordinator::get(opCtx);
-    uassert(ErrorCodes::NotMaster,
+    uassert(ErrorCodes::NotWritablePrimary,
             "Not primary while creating authorization index",
             replCoord->getReplicationMode() != repl::ReplicationCoordinator::modeReplSet ||
                 replCoord->canAcceptWritesForDatabase(opCtx, ns.db()));
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index d3772edb7db..742bfd087b4 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -329,7 +329,7 @@ void TransactionParticipant::performNoopWrite(OperationContext* opCtx, StringDat
     {
         AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite);
 
-        uassert(ErrorCodes::NotMaster,
+        uassert(ErrorCodes::NotWritablePrimary,
                 "Not primary when performing noop write for {}"_format(msg),
                 replCoord->canAcceptWritesForDatabase(opCtx, "admin"));
@@ -495,7 +495,7 @@ void TransactionParticipant::Participant::beginOrContinue(OperationContext* opCt
     repl::ReplicationStateTransitionLockGuard rstl(opCtx, MODE_IX);
     if (opCtx->writesAreReplicated()) {
         auto replCoord = repl::ReplicationCoordinator::get(opCtx);
-        uassert(ErrorCodes::NotMaster,
+        uassert(ErrorCodes::NotWritablePrimary,
                 "Not primary so we cannot begin or continue a transaction",
                 replCoord->canAcceptWritesForDatabase(opCtx, "admin"));
         // Disallow multi-statement transactions on shard servers that have
@@ -1355,7 +1355,7 @@ void TransactionParticipant::Participant::commitPreparedTransaction(
 
     const auto replCoord = repl::ReplicationCoordinator::get(opCtx);
     if (opCtx->writesAreReplicated()) {
-        uassert(ErrorCodes::NotMaster,
+        uassert(ErrorCodes::NotWritablePrimary,
                 "Not primary so we cannot commit a prepared transaction",
                 replCoord->canAcceptWritesForDatabase(opCtx, "admin"));
     }
@@ -1543,7 +1543,7 @@ void TransactionParticipant::Participant::_abortActivePreparedTransaction(Operat
 
     if (opCtx->writesAreReplicated()) {
         auto replCoord = repl::ReplicationCoordinator::get(opCtx);
-        uassert(ErrorCodes::NotMaster,
+        uassert(ErrorCodes::NotWritablePrimary,
                 "Not primary so we cannot abort a prepared transaction",
                 replCoord->canAcceptWritesForDatabase(opCtx, "admin"));
     }
diff --git a/src/mongo/db/transaction_participant.h b/src/mongo/db/transaction_participant.h
index 1c3164a51fc..f898b21c112 100644
--- a/src/mongo/db/transaction_participant.h
+++ b/src/mongo/db/transaction_participant.h
@@ -405,7 +405,7 @@ public:
      *   currently active one or the last one which committed
      * - PreparedTransactionInProgress - if the transaction is in the prepared state and a new
      *   transaction or retryable write is attempted
-     * - NotMaster - if the node is not a primary when this method is called.
+     * - NotWritablePrimary - if the node is not a primary when this method is called.
      * - IncompleteTransactionHistory - if an attempt is made to begin a retryable write for a
      *   TransactionParticipant that is not in retryable write mode. This is expected behavior if
      *   a retryable write has been upgraded to a transaction by the server, which can happen e.g.
diff --git a/src/mongo/db/transaction_participant_test.cpp b/src/mongo/db/transaction_participant_test.cpp
index 665ade204de..c2c18a05c8f 100644
--- a/src/mongo/db/transaction_participant_test.cpp
+++ b/src/mongo/db/transaction_participant_test.cpp
@@ -942,8 +942,9 @@ TEST_F(TxnParticipantTest, StepDownDuringPreparedAbortFails) {
     ASSERT_OK(repl::ReplicationCoordinator::get(opCtx())->setFollowerMode(
         repl::MemberState::RS_SECONDARY));
 
-    ASSERT_THROWS_CODE(
-        txnParticipant.abortTransaction(opCtx()), AssertionException, ErrorCodes::NotMaster);
+    ASSERT_THROWS_CODE(txnParticipant.abortTransaction(opCtx()),
+                       AssertionException,
+                       ErrorCodes::NotWritablePrimary);
 }
 
 TEST_F(TxnParticipantTest, StepDownDuringPreparedCommitFails) {
@@ -958,7 +959,7 @@ TEST_F(TxnParticipantTest, StepDownDuringPreparedCommitFails) {
         repl::MemberState::RS_SECONDARY));
     ASSERT_THROWS_CODE(txnParticipant.commitPreparedTransaction(opCtx(), commitTS, {}),
                        AssertionException,
-                       ErrorCodes::NotMaster);
+                       ErrorCodes::NotWritablePrimary);
 }
 
 TEST_F(TxnParticipantTest, StepDownDuringPreparedAbortReleasesRSTL) {
@@ -998,8 +999,9 @@ TEST_F(TxnParticipantTest, StepDownDuringPreparedAbortReleasesRSTL) {
               MODE_NONE);
     ASSERT_OK(repl::ReplicationCoordinator::get(opCtx())->setFollowerMode(
         repl::MemberState::RS_SECONDARY));
-    ASSERT_THROWS_CODE(
-        txnParticipant.abortTransaction(opCtx()), AssertionException, ErrorCodes::NotMaster);
+    ASSERT_THROWS_CODE(txnParticipant.abortTransaction(opCtx()),
+                       AssertionException,
+                       ErrorCodes::NotWritablePrimary);
 
     ASSERT_EQ(opCtx()->lockState()->getLockMode(resourceIdReplicationStateTransitionLock),
               MODE_NONE);
@@ -1053,7 +1055,7 @@ TEST_F(TxnParticipantTest, StepDownDuringPreparedCommitReleasesRSTL) {
     ASSERT_THROWS_CODE(
         txnParticipant.commitPreparedTransaction(opCtx(), prepareTimestamp, boost::none),
         AssertionException,
-        ErrorCodes::NotMaster);
+        ErrorCodes::NotWritablePrimary);
 
     ASSERT_EQ(opCtx()->lockState()->getLockMode(resourceIdReplicationStateTransitionLock),
               MODE_NONE);
@@ -1111,7 +1113,7 @@ TEST_F(TxnParticipantTest, CannotStartNewTransactionIfNotPrimary) {
     ASSERT_THROWS_CODE(
         txnParticipant.beginOrContinue(opCtx(), *opCtx()->getTxnNumber(), false, true),
         AssertionException,
-        ErrorCodes::NotMaster);
+        ErrorCodes::NotWritablePrimary);
 }
 
 TEST_F(TxnParticipantTest, CannotStartRetryableWriteIfNotPrimary) {
@@ -1125,7 +1127,7 @@ TEST_F(TxnParticipantTest, CannotStartRetryableWriteIfNotPrimary) {
     ASSERT_THROWS_CODE(
         txnParticipant.beginOrContinue(opCtx(), *opCtx()->getTxnNumber(), boost::none, true),
         AssertionException,
-        ErrorCodes::NotMaster);
+        ErrorCodes::NotWritablePrimary);
 }
 
 TEST_F(TxnParticipantTest, CannotContinueTransactionIfNotPrimary) {
@@ -1142,7 +1144,7 @@ TEST_F(TxnParticipantTest, CannotContinueTransactionIfNotPrimary) {
     ASSERT_THROWS_CODE(
         txnParticipant.beginOrContinue(opCtx(), *opCtx()->getTxnNumber(), false, false),
         AssertionException,
-        ErrorCodes::NotMaster);
+        ErrorCodes::NotWritablePrimary);
 }
 
 TEST_F(TxnParticipantTest, OlderTransactionFailsOnSessionWithNewerTransaction) {
diff --git a/src/mongo/db/write_concern.h b/src/mongo/db/write_concern.h
index 680e2e4756d..dbaebabffd2 100644
--- a/src/mongo/db/write_concern.h
+++ b/src/mongo/db/write_concern.h
@@ -100,7 +100,7 @@ struct WriteConcernResult {
  * if this opTime.isNull() no replication-related write concern options will be enforced.
 *
 * Returns result of the write concern if successful.
- * Returns NotMaster if the host steps down while waiting for replication
+ * Returns NotWritablePrimary if the host steps down while waiting for replication
 * Returns UnknownReplWriteConcern if the wMode specified was not enforceable
 */
 Status waitForWriteConcern(OperationContext* opCtx,
diff --git a/src/mongo/rpc/op_msg_integration_test.cpp b/src/mongo/rpc/op_msg_integration_test.cpp
index d1b7484548b..a2a380c4f9a 100644
--- a/src/mongo/rpc/op_msg_integration_test.cpp
+++ b/src/mongo/rpc/op_msg_integration_test.cpp
@@ -192,7 +192,7 @@ TEST(OpMsg, DocumentSequenceMaxWriteBatchWorks) {
     conn->dropCollection("test.collection");
 }
 
-TEST(OpMsg, CloseConnectionOnFireAndForgetNotMasterError) {
+TEST(OpMsg, CloseConnectionOnFireAndForgetNotWritablePrimaryError) {
     const auto connStr = unittest::getFixtureConnectionString();
 
     // This test only works against a replica set.
@@ -219,14 +219,14 @@ TEST(OpMsg, CloseConnectionOnFireAndForgetNotMasterError) {
                        })"))
                        .serialize();
 
-    // Round-trip command fails with NotMaster error. Note that this failure is in command
-    // dispatch which ignores w:0.
+    // Round-trip command fails with NotWritablePrimary error. Note that this failure is in
+    // command dispatch which ignores w:0.
     Message reply;
     ASSERT(conn.call(request, reply, /*assertOK*/ true, nullptr));
     ASSERT_EQ(
         getStatusFromCommandResult(
             conn.parseCommandReplyMessage(conn.getServerAddress(), reply)->getCommandReply()),
-        ErrorCodes::NotMaster);
+        ErrorCodes::NotWritablePrimary);
 
     // Fire-and-forget closes connection when it sees that error. Note that this is using call()
     // rather than say() so that we get an error back when the connection is closed. Normally
@@ -893,7 +893,8 @@ TEST(OpMsg, ExhaustIsMasterMetricDecrementsOnNewOpAfterTerminatingExhaustStream)
                    << "failCommand"
                    << "mode" << BSON("times" << 1) << "data"
                    << BSON("threadName" << threadName << "errorCode"
-                                        << ErrorCodes::NotMaster << "failCommands"
+                                        << ErrorCodes::NotWritablePrimary
+                                        << "failCommands"
                                         << BSON_ARRAY("isMaster")));
     auto response = conn2->runCommand(OpMsgRequest::fromDBAndBody("admin", failPointObj));
     ASSERT_OK(getStatusFromCommandResult(response->getCommandReply()));
@@ -983,7 +984,8 @@ TEST(OpMsg, ExhaustIsMasterMetricOnNewExhaustIsMasterAfterTerminatingExhaustStre
                    << "failCommand"
                    << "mode" << BSON("times" << 1) << "data"
                    << BSON("threadName" << threadName << "errorCode"
-                                        << ErrorCodes::NotMaster << "failCommands"
+                                        << ErrorCodes::NotWritablePrimary
+                                        << "failCommands"
                                         << BSON_ARRAY("isMaster")));
     auto response = conn2->runCommand(OpMsgRequest::fromDBAndBody("admin", failPointObj));
     ASSERT_OK(getStatusFromCommandResult(response->getCommandReply()));
diff --git a/src/mongo/s/async_requests_sender.cpp b/src/mongo/s/async_requests_sender.cpp
index 7cca30f0b99..04f5c51039d 100644
--- a/src/mongo/s/async_requests_sender.cpp
+++ b/src/mongo/s/async_requests_sender.cpp
@@ -54,7 +54,7 @@ namespace mongo {
 
 namespace {
 
-// Maximum number of retries for network and replication notMaster errors (per host).
+// Maximum number of retries for network and replication NotPrimary errors (per host).
 const int kMaxNumFailedHostRetryAttempts = 3;
 
 MONGO_FAIL_POINT_DEFINE(hangBeforeSchedulingRemoteCommand);
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp b/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
index 8a2622f4790..56b925be417 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
@@ -147,7 +147,7 @@ StatusWith extractElectionId(const BSONObj& responseObj) {
                               << hostContacted};
         }
 
-        return {ErrorCodes::NotMaster, "only primary can have electionId"};
+        return {ErrorCodes::NotWritablePrimary, "only primary can have electionId"};
     }
 
     return {ErrorCodes::UnsupportedFormat, electionIdStatus.reason()};
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp b/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
index 56f25d713c8..619217a5259 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
@@ -468,7 +468,7 @@ TEST_F(DistLockCatalogTest, GrabLockWriteConcernError) {
         auto status = distLockCatalog()
                           ->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
                           .getStatus();
-        ASSERT_EQUALS(ErrorCodes::NotMaster, status.code());
+        ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, status.code());
         ASSERT_FALSE(status.reason().empty());
     });
 
@@ -1271,7 +1271,7 @@ TEST_F(DistLockCatalogTest, GetServerNoGLEStats) {
 TEST_F(DistLockCatalogTest, GetServerNoElectionId) {
     auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
         auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
-        ASSERT_EQUALS(ErrorCodes::NotMaster, status.code());
+        ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, status.code());
         ASSERT_FALSE(status.reason().empty());
     });
 
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.cpp b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
index 7c0131f987a..d0017bcc630 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
@@ -149,7 +149,7 @@ void ReplSetDistLockManager::doTask() {
             auto opCtx = cc().makeOperationContext();
             auto pingStatus = _catalog->ping(opCtx.get(), _processID, Date_t::now());
 
-            if (!pingStatus.isOK() && pingStatus != ErrorCodes::NotMaster) {
+            if (!pingStatus.isOK() && pingStatus != ErrorCodes::NotWritablePrimary) {
                 LOGV2_WARNING(22668,
                               "Pinging failed for distributed lock pinger caused by {error}",
                               "Pinging failed for distributed lock pinger",
@@ -191,7 +191,7 @@ void ReplSetDistLockManager::doTask() {
                                 "lockSessionId"_attr = toUnlock.first,
                                 "error"_attr = unlockStatus);
                     // Queue another attempt, unless the problem was no longer being primary.
-                    if (unlockStatus != ErrorCodes::NotMaster) {
+                    if (unlockStatus != ErrorCodes::NotWritablePrimary) {
                         queueUnlock(toUnlock.first, toUnlock.second);
                     }
                 } else {
@@ -238,7 +238,7 @@ StatusWith ReplSetDistLockManager::isLockExpired(OperationContext* opCtx,
     Timer timer(_serviceContext->getTickSource());
     auto serverInfoStatus = _catalog->getServerInfo(opCtx);
     if (!serverInfoStatus.isOK()) {
-        if (serverInfoStatus.getStatus() == ErrorCodes::NotMaster) {
+        if (serverInfoStatus.getStatus() == ErrorCodes::NotWritablePrimary) {
             return false;
         }
 
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp b/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
index c71bdd93dd0..64020eeb9f4 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
@@ -1236,7 +1236,7 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfElectionIdChanged) {
 
 /**
  * 1. Try to grab lock multiple times.
- * 2. For each attempt, attempting to check the ping document results in NotMaster error.
+ * 2. For each attempt, attempting to check the ping document results in NotWritablePrimary error.
  * 3. All of the previous attempt should result in lock busy.
  * 4. Try to grab lock again when the ping was not updated and lock expiration has elapsed.
  */
@@ -1285,7 +1285,7 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfNoMaster) {
         } else {
             getMockCatalog()->expectGetServerInfo(
                 [&getServerInfoCallCount]() { getServerInfoCallCount++; },
-                {ErrorCodes::NotMaster, "not master"});
+                {ErrorCodes::NotWritablePrimary, "not master"});
         }
 
         auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index a0f0901952c..3fce1e4a9cf 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -338,7 +338,7 @@ public:
      * Updates a single document in the specified namespace on the config server. Must only be used
     * for updates to the 'config' database.
     *
-     * This method retries the operation on NotMaster or network errors, so it should only be used
+     * This method retries the operation on NotPrimary or network errors, so it should only be used
     * with modifications which are idempotent.
     *
     * Returns non-OK status if the command failed to run for some reason. If the command was
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index 7d6f6c4f119..1ae6849018d 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -174,7 +174,7 @@ private:
     * useMultiUpdate is true) in the specified namespace on the config server. Must only be used
     * for updates to the 'config' database.
     *
-     * This method retries the operation on NotMaster or network errors, so it should only be used
+     * This method retries the operation on NotPrimary or network errors, so it should only be used
     * with modifications which are idempotent.
     *
     * Returns non-OK status if the command failed to run for some reason. If the command was
diff --git a/src/mongo/s/catalog/sharding_catalog_test.cpp b/src/mongo/s/catalog/sharding_catalog_test.cpp
index 65bc0c746ce..e68477ee32a 100644
--- a/src/mongo/s/catalog/sharding_catalog_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_test.cpp
@@ -252,14 +252,15 @@ TEST_F(ShardingCatalogClientTest, GetDatabaseStaleSecondaryRetryNoPrimary) {
     auto future = launchAsync([this] {
         auto dbResult = catalogClient()->getDatabase(
             operationContext(), "NonExistent", repl::ReadConcernLevel::kMajorityReadConcern);
-        ASSERT_EQ(dbResult.getStatus(), ErrorCodes::NotMaster);
+        ASSERT_EQ(dbResult.getStatus(), ErrorCodes::NotWritablePrimary);
     });
 
     // Return empty result set as if the database wasn't found
     onFindCommand([this, &testHost](const RemoteCommandRequest& request) {
         ASSERT_EQUALS(testHost, request.target);
-        // Make it so when it attempts to retarget and retry it will get a NotMaster error.
-        configTargeter()->setFindHostReturnValue(Status(ErrorCodes::NotMaster, "no config master"));
+        // Make it so when it attempts to retarget and retry it will get a NotWritablePrimary error.
+        configTargeter()->setFindHostReturnValue(
+            Status(ErrorCodes::NotWritablePrimary, "no config master"));
         return vector{};
     });
 
@@ -687,7 +688,7 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandRewriteWriteConce
     future.default_timed_get();
 }
 
-TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotMaster) {
+TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotWritablePrimary) {
     configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
 
     auto future = launchAsync([this] {
@@ -699,14 +700,14 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotMaster) {
                                                             << "test"),
                                                        &responseBuilder);
         ASSERT_NOT_OK(status);
-        ASSERT_EQUALS(ErrorCodes::NotMaster, status);
+        ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, status);
     });
 
     for (int i = 0; i < 3; ++i) {
         onCommand([](const RemoteCommandRequest& request) {
             BSONObjBuilder responseBuilder;
-            CommandHelpers::appendCommandStatusNoThrow(responseBuilder,
-                                                       Status(ErrorCodes::NotMaster, "not master"));
+            CommandHelpers::appendCommandStatusNoThrow(
+                responseBuilder, Status(ErrorCodes::NotWritablePrimary, "not master"));
             return responseBuilder.obj();
         });
     }
@@ -715,7 +716,7 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotMaster) {
     future.default_timed_get();
 }
 
-TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotMasterRetrySuccess) {
+TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotWritablePrimaryRetrySuccess) {
     HostAndPort host1("TestHost1");
     HostAndPort host2("TestHost2");
 
@@ -736,11 +737,11 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotMasterRetrySuc
             ASSERT_EQUALS(host1, request.target);
 
             BSONObjBuilder responseBuilder;
-            CommandHelpers::appendCommandStatusNoThrow(responseBuilder,
-                                                       Status(ErrorCodes::NotMaster, "not master"));
+            CommandHelpers::appendCommandStatusNoThrow(
+                responseBuilder, Status(ErrorCodes::NotWritablePrimary, "not master"));
 
             // Ensure that when the catalog manager tries to retarget after getting the
-            // NotMaster response, it will get back a new target.
+            // NotWritablePrimary response, it will get back a new target.
             configTargeter()->setFindHostReturnValue(host2);
             return responseBuilder.obj();
         });
diff --git a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
index 5fe063a6ce4..3b71e0eb3ad 100644
--- a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
@@ -350,7 +350,7 @@ TEST_F(UpdateRetryTest, Success) {
     future.default_timed_get();
 }
 
-TEST_F(UpdateRetryTest, NotMasterErrorReturnedPersistently) {
+TEST_F(UpdateRetryTest, NotWritablePrimaryErrorReturnedPersistently) {
     configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
 
     BSONObj objToUpdate = BSON("_id" << 1 << "Value"
@@ -366,13 +366,14 @@ TEST_F(UpdateRetryTest, NotMasterErrorReturnedPersistently) {
                                                     updateExpr,
                                                     false,
                                                     ShardingCatalogClient::kMajorityWriteConcern);
-        ASSERT_EQUALS(ErrorCodes::NotMaster, status);
+        ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, status);
     });
 
     for (int i = 0; i < 3; ++i) {
         onCommand([](const RemoteCommandRequest& request) {
             BSONObjBuilder bb;
-            CommandHelpers::appendCommandStatusNoThrow(bb, {ErrorCodes::NotMaster, "not master"});
+            CommandHelpers::appendCommandStatusNoThrow(
+                bb, {ErrorCodes::NotWritablePrimary, "not master"});
             return bb.obj();
         });
     }
@@ -380,8 +381,8 @@ TEST_F(UpdateRetryTest, NotMasterErrorReturnedPersistently) {
     future.default_timed_get();
 }
 
-TEST_F(UpdateRetryTest, NotMasterReturnedFromTargeter) {
-    configTargeter()->setFindHostReturnValue(Status(ErrorCodes::NotMaster, "not master"));
+TEST_F(UpdateRetryTest, NotWritablePrimaryReturnedFromTargeter) {
+    configTargeter()->setFindHostReturnValue(Status(ErrorCodes::NotWritablePrimary, "not master"));
 
     BSONObj objToUpdate = BSON("_id" << 1 << "Value"
                                      << "TestValue");
@@ -396,13 +397,13 @@ TEST_F(UpdateRetryTest, NotMasterReturnedFromTargeter) {
                                                     updateExpr,
                                                     false,
                                                     ShardingCatalogClient::kMajorityWriteConcern);
-        ASSERT_EQUALS(ErrorCodes::NotMaster, status);
+        ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, status);
     });
 
     future.default_timed_get();
 }
 
-TEST_F(UpdateRetryTest, NotMasterOnceSuccessAfterRetry) {
+TEST_F(UpdateRetryTest, NotWritablePrimaryOnceSuccessAfterRetry) {
     HostAndPort host1("TestHost1");
     HostAndPort host2("TestHost2");
     configTargeter()->setFindHostReturnValue(host1);
@@ -433,11 +434,12 @@ TEST_F(UpdateRetryTest, NotMasterOnceSuccessAfterRetry) {
             ASSERT_EQUALS(host1, request.target);
 
             // Ensure that when the catalog manager tries to retarget after getting the
-            // NotMaster response, it will get back a new target.
+            // NotWritablePrimary response, it will get back a new target.
             configTargeter()->setFindHostReturnValue(host2);
 
             BSONObjBuilder bb;
-            CommandHelpers::appendCommandStatusNoThrow(bb, {ErrorCodes::NotMaster, "not master"});
+            CommandHelpers::appendCommandStatusNoThrow(bb,
+                                                       {ErrorCodes::NotWritablePrimary, "not master"});
             return bb.obj();
         });
 
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index 4f68d1a8c4c..486788c57dd 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -164,7 +164,7 @@ private:
 /**
  * Maintains the set of all shards known to the instance and their connections and exposes
  * functionality to run commands against shards. All commands which this registry executes are
- * retried on NotMaster class of errors and in addition all read commands are retried on network
+ * retried on NotPrimary class of errors and in addition all read commands are retried on network
 * errors automatically as well.
 */
 class ShardRegistry {
diff --git a/src/mongo/s/commands/batch_downconvert.cpp b/src/mongo/s/commands/batch_downconvert.cpp
index 8aa318f484b..4f4328fe4c6 100644
--- a/src/mongo/s/commands/batch_downconvert.cpp
+++ b/src/mongo/s/commands/batch_downconvert.cpp
@@ -80,7 +80,8 @@ Status extractGLEErrors(const BSONObj& gleResponse, GLEErrors* errors) {
             || code == 16805 /* replicatedToNum no longer primary */
             || code == 14830 /* gle wmode changed / invalid */
             // 2.6 Error codes
-            || code == ErrorCodes::NotMaster || code == ErrorCodes::UnknownReplWriteConcern ||
+            || code == ErrorCodes::NotWritablePrimary ||
+            code == ErrorCodes::UnknownReplWriteConcern ||
             code == ErrorCodes::WriteConcernFailed || code == ErrorCodes::PrimarySteppedDown) {
             // Write concern errors that get returned as regular errors (result may not be ok: 1.0)
             errors->wcError.reset(new WriteConcernErrorDetail());
diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp
index 635730c82ab..2ad05010afb 100644
--- a/src/mongo/s/query/async_results_merger.cpp
+++ b/src/mongo/s/query/async_results_merger.cpp
@@ -51,7 +51,7 @@ const BSONObj AsyncResultsMerger::kWholeSortKeySortPattern = BSON(kSortKeyField
 
 namespace {
 
-// Maximum number of retries for network and replication notMaster errors (per host).
+// Maximum number of retries for network and replication NotPrimary errors (per host).
 const int kMaxNumFailedHostRetryAttempts = 3;
 
 /**
diff --git a/src/mongo/s/sessions_collection_sharded_test.cpp b/src/mongo/s/sessions_collection_sharded_test.cpp
index 0bbfc0ea2c0..89081e468c6 100644
--- a/src/mongo/s/sessions_collection_sharded_test.cpp
+++ b/src/mongo/s/sessions_collection_sharded_test.cpp
@@ -138,13 +138,13 @@ TEST_F(SessionsCollectionShardedTest, RefreshOneSessionWriteErrTest) {
         response.addToErrDetails([&] {
             WriteErrorDetail* errDetail = new WriteErrorDetail();
             errDetail->setIndex(0);
-            errDetail->setStatus({ErrorCodes::NotMaster, "not master"});
+            errDetail->setStatus({ErrorCodes::NotWritablePrimary, "not master"});
             return errDetail;
         }());
         return response.toBSON();
     });
 
-    ASSERT_THROWS_CODE(future.default_timed_get(), DBException, ErrorCodes::NotMaster);
+    ASSERT_THROWS_CODE(future.default_timed_get(), DBException, ErrorCodes::NotWritablePrimary);
 }
 
 TEST_F(SessionsCollectionShardedTest, RemoveOneSessionOKTest) {
@@ -193,13 +193,13 @@ TEST_F(SessionsCollectionShardedTest, RemoveOneSessionWriteErrTest) {
         response.addToErrDetails([&] {
             WriteErrorDetail* errDetail = new WriteErrorDetail();
             errDetail->setIndex(0);
-            errDetail->setStatus({ErrorCodes::NotMaster, "not master"});
+            errDetail->setStatus({ErrorCodes::NotWritablePrimary, "not master"});
             return errDetail;
         }());
         return response.toBSON();
     });
 
-    ASSERT_THROWS_CODE(future.default_timed_get(), DBException, ErrorCodes::NotMaster);
+    ASSERT_THROWS_CODE(future.default_timed_get(), DBException, ErrorCodes::NotWritablePrimary);
 }
 
 }  // namespace
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index 5f74ae91cd0..bdadf82b80d 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -1452,7 +1452,7 @@ TEST_F(BatchWriteExecTest, RetryableErrorNoTxnNumber) {
     request.setWriteConcern(BSONObj());
 
     BatchedCommandResponse retryableErrResponse;
-    retryableErrResponse.setStatus({ErrorCodes::NotMaster, "mock retryable error"});
+    retryableErrResponse.setStatus({ErrorCodes::NotWritablePrimary, "mock retryable error"});
 
     auto future =
         launchAsync([&] {
            BatchedCommandResponse response;
@@ -1494,7 +1494,7 @@ TEST_F(BatchWriteExecTest, RetryableErrorTxnNumber) {
     operationContext()->setTxnNumber(5);
 
     BatchedCommandResponse retryableErrResponse;
-    retryableErrResponse.setStatus({ErrorCodes::NotMaster, "mock retryable error"});
+    retryableErrResponse.setStatus({ErrorCodes::NotWritablePrimary, "mock retryable error"});
 
     auto future = launchAsync([&] {
         BatchedCommandResponse response;
diff --git a/src/mongo/shell/utils.js b/src/mongo/shell/utils.js
index dffcd5dd033..61dc94082e2 100644
--- a/src/mongo/shell/utils.js
+++ b/src/mongo/shell/utils.js
@@ -95,7 +95,7 @@ function isRetryableError(error) {
         "HostNotFound",
         "NetworkTimeout",
         "SocketException",
-        "NotMaster",
+        "NotWritablePrimary",
         "NotMasterNoSlaveOk",
         "NotMasterOrSecondary",
         "PrimarySteppedDown",
diff --git a/src/mongo/util/assert_util_test.cpp b/src/mongo/util/assert_util_test.cpp
index 4275d8a907e..022593a7baf 100644
--- a/src/mongo/util/assert_util_test.cpp
+++ b/src/mongo/util/assert_util_test.cpp
@@ -87,26 +87,29 @@ TEST(AssertUtils, UassertNamedCodeWithoutCategories) {
     ASSERT_NOT_CATCHES(ErrorCodes::BadValue, ExceptionForCat);
 }
 
-// NotMaster - NotMasterError, RetriableError
-MONGO_STATIC_ASSERT(std::is_same,
+// NotWritablePrimary - NotMasterError, RetriableError
+MONGO_STATIC_ASSERT(std::is_same,
                     error_details::CategoryList>());
-MONGO_STATIC_ASSERT(std::is_base_of>());
+MONGO_STATIC_ASSERT(
+    std::is_base_of>());
 MONGO_STATIC_ASSERT(!std::is_base_of,
-                    ExceptionFor>());
+                    ExceptionFor>());
 MONGO_STATIC_ASSERT(std::is_base_of,
-                    ExceptionFor>());
+                    ExceptionFor>());
 MONGO_STATIC_ASSERT(!std::is_base_of,
-                    ExceptionFor>());
+                    ExceptionFor>());
 
 TEST(AssertUtils, UassertNamedCodeWithOneCategory) {
-    ASSERT_CATCHES(ErrorCodes::NotMaster, DBException);
-    ASSERT_CATCHES(ErrorCodes::NotMaster, AssertionException);
-    ASSERT_CATCHES(ErrorCodes::NotMaster, ExceptionFor);
-    ASSERT_NOT_CATCHES(ErrorCodes::NotMaster, ExceptionFor);
-    ASSERT_NOT_CATCHES(ErrorCodes::NotMaster, ExceptionForCat);
-    ASSERT_CATCHES(ErrorCodes::NotMaster, ExceptionForCat);
-    ASSERT_NOT_CATCHES(ErrorCodes::NotMaster, ExceptionForCat);
+    ASSERT_CATCHES(ErrorCodes::NotWritablePrimary, DBException);
+    ASSERT_CATCHES(ErrorCodes::NotWritablePrimary, AssertionException);
+    ASSERT_CATCHES(ErrorCodes::NotWritablePrimary, ExceptionFor);
+    ASSERT_NOT_CATCHES(ErrorCodes::NotWritablePrimary, ExceptionFor);
+    ASSERT_NOT_CATCHES(ErrorCodes::NotWritablePrimary,
+                       ExceptionForCat);
+    ASSERT_CATCHES(ErrorCodes::NotWritablePrimary, ExceptionForCat);
+    ASSERT_NOT_CATCHES(ErrorCodes::NotWritablePrimary,
+                       ExceptionForCat);
 }
 
 // InterruptedDueToReplStateChange - NotMasterError, Interruption, RetriableError
-- cgit v1.2.1