author     mathisbessamdb <mathis.bessa@mongodb.com>            2023-05-11 22:37:41 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>     2023-05-12 00:29:01 +0000
commit     91d9967910ed5d2257074dfe3362b0b317a26cff (patch)
tree       20ead0d5cb211d277276aef8bddff123250a142b
parent     f4b6a7cd809dba448f1c474f492556d0027e160d (diff)
download   mongo-91d9967910ed5d2257074dfe3362b0b317a26cff.tar.gz
SERVER-68655 Change ReplicationCoordinator::canAcceptWritesForDatabase to take in DatabaseName obj
-rw-r--r--  src/mongo/db/catalog/drop_database.cpp                              8
-rw-r--r--  src/mongo/db/catalog/drop_database_test.cpp                          4
-rw-r--r--  src/mongo/db/catalog/rename_collection_test.cpp                      2
-rw-r--r--  src/mongo/db/change_collection_expired_documents_remover.cpp         2
-rw-r--r--  src/mongo/db/change_stream_pre_images_collection_manager.cpp         4
-rw-r--r--  src/mongo/db/cloner.cpp                                              2
-rw-r--r--  src/mongo/db/command_can_run_here.cpp                                4
-rw-r--r--  src/mongo/db/command_can_run_here.h                                  2
-rw-r--r--  src/mongo/db/commands/explain_cmd.cpp                                7
-rw-r--r--  src/mongo/db/commands/oplog_note.cpp                                 2
-rw-r--r--  src/mongo/db/exec/write_stage_common.cpp                             2
-rw-r--r--  src/mongo/db/query/get_executor.cpp                                  3
-rw-r--r--  src/mongo/db/read_concern_mongod.cpp                                 2
-rw-r--r--  src/mongo/db/repl/apply_ops.cpp                                      4
-rw-r--r--  src/mongo/db/repl/noop_writer.cpp                                    2
-rw-r--r--  src/mongo/db/repl/replication_coordinator.h                          6
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp                   8
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.h                     5
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp    36
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_test.cpp             10
-rw-r--r--  src/mongo/db/repl/replication_coordinator_mock.cpp                   8
-rw-r--r--  src/mongo/db/repl/replication_coordinator_mock.h                     5
-rw-r--r--  src/mongo/db/repl/replication_coordinator_noop.cpp                   4
-rw-r--r--  src/mongo/db/repl/replication_coordinator_noop.h                     5
-rw-r--r--  src/mongo/db/repl/shard_merge_recipient_service.cpp                  6
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.cpp                         3
-rw-r--r--  src/mongo/db/repl/tenant_migration_access_blocker_util.cpp           2
-rw-r--r--  src/mongo/db/repl/tenant_migration_recipient_service.cpp             4
-rw-r--r--  src/mongo/db/s/active_migrations_registry.cpp                        2
-rw-r--r--  src/mongo/db/s/shard_server_op_observer.cpp                          2
-rw-r--r--  src/mongo/db/service_entry_point_common.cpp                         11
-rw-r--r--  src/mongo/db/session/session_catalog_mongod.cpp                      2
-rw-r--r--  src/mongo/db/session/sessions_collection_rs.cpp                      2
-rw-r--r--  src/mongo/db/stats/resource_consumption_metrics.cpp                  2
-rw-r--r--  src/mongo/db/storage/snapshot_helper.cpp                             3
-rw-r--r--  src/mongo/db/system_index.cpp                                        2
-rw-r--r--  src/mongo/db/transaction/transaction_participant.cpp                 8
-rw-r--r--  src/mongo/embedded/replication_coordinator_embedded.cpp              4
-rw-r--r--  src/mongo/embedded/replication_coordinator_embedded.h                5
-rw-r--r--  src/mongo/s/query_analysis_client.cpp                                4
40 files changed, 109 insertions, 90 deletions
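
Every call site in the hunks below follows the same migration: the string argument (a literal such as "admin", an nss.db() StringData, or a dbName.toStringWithTenantId() conversion) is replaced by the DatabaseName object itself (DatabaseName::kAdmin, nss.dbName(), or DatabaseName::createDatabaseName_forTest in tests). A minimal before/after sketch of one such call site; the helper name checkCanWriteToDb and its surrounding context are invented for illustration and are not part of this commit:

// Before SERVER-68655: the coordinator accepted a StringData database name,
// so callers flattened the DatabaseName (and its tenant id) into a string.
bool checkCanWriteToDb_old(OperationContext* opCtx, const NamespaceString& nss) {
    auto replCoord = repl::ReplicationCoordinator::get(opCtx);
    return replCoord->canAcceptWritesForDatabase(opCtx, nss.db());
}

// After SERVER-68655: callers pass the DatabaseName object directly, so the
// tenant id travels with the name instead of being serialized into it.
bool checkCanWriteToDb_new(OperationContext* opCtx, const NamespaceString& nss) {
    auto replCoord = repl::ReplicationCoordinator::get(opCtx);
    return replCoord->canAcceptWritesForDatabase(opCtx, nss.dbName());
}
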
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index 15dee02d730..05994b0faa7 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -71,8 +71,8 @@ Status _checkNssAndReplState(OperationContext* opCtx, Database* db, const Databa
}
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
- bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
- !replCoord->canAcceptWritesForDatabase(opCtx, dbName.toStringWithTenantId());
+ bool userInitiatedWritesAndNotPrimary =
+ opCtx->writesAreReplicated() && !replCoord->canAcceptWritesForDatabase(opCtx, dbName);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotWritablePrimary,
@@ -439,8 +439,8 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a
<< numCollectionsToDrop << " collection(s).");
}
- bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
- !replCoord->canAcceptWritesForDatabase(opCtx, dbName.toStringWithTenantId());
+ bool userInitiatedWritesAndNotPrimary =
+ opCtx->writesAreReplicated() && !replCoord->canAcceptWritesForDatabase(opCtx, dbName);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::PrimarySteppedDown,
diff --git a/src/mongo/db/catalog/drop_database_test.cpp b/src/mongo/db/catalog/drop_database_test.cpp
index e81d03e002e..0cf234450bc 100644
--- a/src/mongo/db/catalog/drop_database_test.cpp
+++ b/src/mongo/db/catalog/drop_database_test.cpp
@@ -221,7 +221,7 @@ TEST_F(DropDatabaseTest, DropDatabaseReturnsNotWritablePrimaryIfNotPrimary) {
_createCollection(_opCtx.get(), _nss);
ASSERT_OK(_replCoord->setFollowerMode(repl::MemberState::RS_SECONDARY));
ASSERT_TRUE(_opCtx->writesAreReplicated());
- ASSERT_FALSE(_replCoord->canAcceptWritesForDatabase(_opCtx.get(), _nss.db()));
+ ASSERT_FALSE(_replCoord->canAcceptWritesForDatabase(_opCtx.get(), _nss.dbName()));
ASSERT_EQUALS(ErrorCodes::NotWritablePrimary,
dropDatabaseForApplyOps(_opCtx.get(), _nss.dbName()));
}
@@ -408,7 +408,7 @@ TEST_F(DropDatabaseTest,
[this](OperationContext*, const repl::OpTime&) {
ASSERT_OK(_replCoord->setFollowerMode(repl::MemberState::RS_SECONDARY));
ASSERT_TRUE(_opCtx->writesAreReplicated());
- ASSERT_FALSE(_replCoord->canAcceptWritesForDatabase(_opCtx.get(), _nss.db()));
+ ASSERT_FALSE(_replCoord->canAcceptWritesForDatabase(_opCtx.get(), _nss.dbName()));
return repl::ReplicationCoordinator::StatusAndDuration(Status::OK(), Milliseconds(0));
});
diff --git a/src/mongo/db/catalog/rename_collection_test.cpp b/src/mongo/db/catalog/rename_collection_test.cpp
index efb5e81614c..3d56994f277 100644
--- a/src/mongo/db/catalog/rename_collection_test.cpp
+++ b/src/mongo/db/catalog/rename_collection_test.cpp
@@ -568,7 +568,7 @@ TEST_F(RenameCollectionTest, RenameCollectionReturnsNotWritablePrimaryIfNotPrima
_createCollection(_opCtx.get(), _sourceNss);
ASSERT_OK(_replCoord->setFollowerMode(repl::MemberState::RS_SECONDARY));
ASSERT_TRUE(_opCtx->writesAreReplicated());
- ASSERT_FALSE(_replCoord->canAcceptWritesForDatabase(_opCtx.get(), _sourceNss.db()));
+ ASSERT_FALSE(_replCoord->canAcceptWritesForDatabase(_opCtx.get(), _sourceNss.dbName()));
ASSERT_EQUALS(ErrorCodes::NotWritablePrimary,
renameCollection(_opCtx.get(), _sourceNss, _targetNss, {}));
}
diff --git a/src/mongo/db/change_collection_expired_documents_remover.cpp b/src/mongo/db/change_collection_expired_documents_remover.cpp
index 84b9f4817ea..8629e5699f9 100644
--- a/src/mongo/db/change_collection_expired_documents_remover.cpp
+++ b/src/mongo/db/change_collection_expired_documents_remover.cpp
@@ -118,7 +118,7 @@ void removeExpiredDocuments(Client* client) {
// maintenance flag (requires opCtx->lockState()->isRSTLLocked()).
if (!useUnreplicatedTruncates &&
!repl::ReplicationCoordinator::get(opCtx.get())
- ->canAcceptWritesForDatabase(opCtx.get(), DatabaseName::kConfig.toString())) {
+ ->canAcceptWritesForDatabase(opCtx.get(), DatabaseName::kConfig)) {
continue;
}
diff --git a/src/mongo/db/change_stream_pre_images_collection_manager.cpp b/src/mongo/db/change_stream_pre_images_collection_manager.cpp
index b3794877b63..503c44785b0 100644
--- a/src/mongo/db/change_stream_pre_images_collection_manager.cpp
+++ b/src/mongo/db/change_stream_pre_images_collection_manager.cpp
@@ -497,7 +497,7 @@ size_t ChangeStreamPreImagesCollectionManager::_deleteExpiredPreImagesWithCollSc
// Early exit if the collection doesn't exist or running on a secondary.
if (!preImageColl.exists() ||
!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(
- opCtx, DatabaseName::kConfig.toString())) {
+ opCtx, DatabaseName::kConfig)) {
return 0;
}
@@ -548,7 +548,7 @@ size_t ChangeStreamPreImagesCollectionManager::_deleteExpiredPreImagesWithCollSc
// Early exit if the collection doesn't exist or running on a secondary.
if (!preImageColl.exists() ||
!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(
- opCtx, DatabaseName::kConfig.toString())) {
+ opCtx, DatabaseName::kConfig)) {
return 0;
}
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 68c683385e2..4b89594e915 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -560,7 +560,7 @@ Status DefaultClonerImpl::copyDb(OperationContext* opCtx,
<< " (after getting list of collections to clone)",
!opCtx->writesAreReplicated() ||
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(opCtx,
- dBName));
+ dbName));
auto status = _createCollectionsForDb(opCtx, createCollectionParams, dBName);
if (!status.isOK()) {
diff --git a/src/mongo/db/command_can_run_here.cpp b/src/mongo/db/command_can_run_here.cpp
index 37089aefa68..1707e6035d7 100644
--- a/src/mongo/db/command_can_run_here.cpp
+++ b/src/mongo/db/command_can_run_here.cpp
@@ -38,11 +38,11 @@
namespace mongo {
bool commandCanRunHere(OperationContext* opCtx,
- const std::string& dbname,
+ const DatabaseName& dbName,
const Command* command,
bool inMultiDocumentTransaction) {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
- if (replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbname))
+ if (replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbName))
return true; // primary: always ok
if (!opCtx->writesAreReplicated())
return true; // standalone: always ok
diff --git a/src/mongo/db/command_can_run_here.h b/src/mongo/db/command_can_run_here.h
index 204b246ef88..d3f02fdea60 100644
--- a/src/mongo/db/command_can_run_here.h
+++ b/src/mongo/db/command_can_run_here.h
@@ -37,7 +37,7 @@
namespace mongo {
bool commandCanRunHere(OperationContext* opCtx,
- const std::string& dbname,
+ const DatabaseName& dbName,
const Command* command,
bool inMultiDocumentTransaction);
diff --git a/src/mongo/db/commands/explain_cmd.cpp b/src/mongo/db/commands/explain_cmd.cpp
index 7c815373005..039f1c850b6 100644
--- a/src/mongo/db/commands/explain_cmd.cpp
+++ b/src/mongo/db/commands/explain_cmd.cpp
@@ -111,14 +111,11 @@ public:
void run(OperationContext* opCtx, rpc::ReplyBuilderInterface* result) override {
// Explain is never allowed in multi-document transactions.
const bool inMultiDocumentTransaction = false;
- // TODO SERVER-68655 pass _dbName directly to commandCanRunHere
uassert(50746,
"Explain's child command cannot run on this node. "
"Are you explaining a write command on a secondary?",
- commandCanRunHere(opCtx,
- _dbName.toStringWithTenantId(),
- _innerInvocation->definition(),
- inMultiDocumentTransaction));
+ commandCanRunHere(
+ opCtx, _dbName, _innerInvocation->definition(), inMultiDocumentTransaction));
_innerInvocation->explain(opCtx, _verbosity, result);
}
diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp
index 39936586204..9ed7f7f3060 100644
--- a/src/mongo/db/commands/oplog_note.cpp
+++ b/src/mongo/db/commands/oplog_note.cpp
@@ -73,7 +73,7 @@ Status _performNoopWrite(OperationContext* opCtx, BSONObj msgObj, StringData not
}
// Its a proxy for being a primary passing "local" will cause it to return true on secondary
- if (!replCoord->canAcceptWritesForDatabase(opCtx, "admin")) {
+ if (!replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin)) {
return {ErrorCodes::NotWritablePrimary, "Not a primary"};
}
diff --git a/src/mongo/db/exec/write_stage_common.cpp b/src/mongo/db/exec/write_stage_common.cpp
index 11670a420d6..8a6c259e8e2 100644
--- a/src/mongo/db/exec/write_stage_common.cpp
+++ b/src/mongo/db/exec/write_stage_common.cpp
@@ -59,7 +59,7 @@ PreWriteFilter::PreWriteFilter(OperationContext* opCtx, NamespaceString nss)
// Always allow writes on standalone and secondary nodes.
const auto replCoord{repl::ReplicationCoordinator::get(opCtx)};
- return !replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin.toString());
+ return !replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin);
}()) {}
PreWriteFilter::Action PreWriteFilter::computeAction(const Document& doc) {
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index b400bea6c69..887ea96be96 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -535,7 +535,8 @@ bool shouldWaitForOplogVisibility(OperationContext* opCtx,
// to wait for the oplog visibility timestamp to be updated, it would wait for a replication
// batch that would never complete because it couldn't reacquire its own lock, the global lock
// held by the waiting reader.
- return repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(opCtx, "admin");
+ return repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(
+ opCtx, DatabaseName::kAdmin);
}
namespace {
diff --git a/src/mongo/db/read_concern_mongod.cpp b/src/mongo/db/read_concern_mongod.cpp
index 381f6c7963e..27fd824a7d5 100644
--- a/src/mongo/db/read_concern_mongod.cpp
+++ b/src/mongo/db/read_concern_mongod.cpp
@@ -473,7 +473,7 @@ Status waitForLinearizableReadConcernImpl(OperationContext* opCtx,
{
AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite);
- if (!replCoord->canAcceptWritesForDatabase(opCtx, "admin")) {
+ if (!replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin)) {
return {ErrorCodes::NotWritablePrimary,
"No longer primary when waiting for linearizable read concern"};
}
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index 44b41a5525d..775e4dcb0b1 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -257,8 +257,8 @@ Status applyOps(OperationContext* opCtx,
}
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
- bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
- !replCoord->canAcceptWritesForDatabase(opCtx, dbName.toStringWithTenantId());
+ bool userInitiatedWritesAndNotPrimary =
+ opCtx->writesAreReplicated() && !replCoord->canAcceptWritesForDatabase(opCtx, dbName);
if (userInitiatedWritesAndNotPrimary)
return Status(ErrorCodes::NotWritablePrimary,
diff --git a/src/mongo/db/repl/noop_writer.cpp b/src/mongo/db/repl/noop_writer.cpp
index 81be6651282..f2898454371 100644
--- a/src/mongo/db/repl/noop_writer.cpp
+++ b/src/mongo/db/repl/noop_writer.cpp
@@ -169,7 +169,7 @@ void NoopWriter::_writeNoop(OperationContext* opCtx) {
auto replCoord = ReplicationCoordinator::get(opCtx);
// Its a proxy for being a primary
- if (!replCoord->canAcceptWritesForDatabase(opCtx, "admin")) {
+ if (!replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin)) {
LOGV2_DEBUG(21220, 1, "Not a primary, skipping the noop write");
return;
}
diff --git a/src/mongo/db/repl/replication_coordinator.h b/src/mongo/db/repl/replication_coordinator.h
index 078aa9b6284..812b2ef6689 100644
--- a/src/mongo/db/repl/replication_coordinator.h
+++ b/src/mongo/db/repl/replication_coordinator.h
@@ -270,13 +270,15 @@ public:
* NOTE: This function can only be meaningfully called while the caller holds the
* ReplicationStateTransitionLock in some mode other than MODE_NONE.
*/
- virtual bool canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName) = 0;
+ virtual bool canAcceptWritesForDatabase(OperationContext* opCtx,
+ const DatabaseName& dbName) = 0;
/**
* Version which does not check for the RSTL. Do not use in new code. Without the RSTL, the
* return value may be inaccurate by the time the function returns.
*/
- virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName) = 0;
+ virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx,
+ const DatabaseName& dbName) = 0;
/**
* Returns true if it is valid for this node to accept writes on the given namespace.
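
The header comment above notes that canAcceptWritesForDatabase() is only meaningful while the caller holds the ReplicationStateTransitionLock in some mode other than MODE_NONE, and that the _UNSAFE variant skips that check. The test hunks later in this commit take a Lock::GlobalLock in MODE_IX (which also acquires the RSTL) before calling the checked variant. A short sketch of that locking discipline; the function isPrimaryForAdminDb is invented for illustration and is not part of the commit:

// Illustrative only: the locking pattern the tests in this commit rely on.
bool isPrimaryForAdminDb(OperationContext* opCtx) {
    // The global IX lock also acquires the RSTL, so the invariant inside
    // canAcceptWritesForDatabase() holds and the answer stays stable while
    // the lock is held.
    Lock::GlobalLock globalLock(opCtx, MODE_IX);
    auto replCoord = repl::ReplicationCoordinator::get(opCtx);
    return replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin);
}
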
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index fa920a775f3..695e8d55298 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -3010,14 +3010,14 @@ bool ReplicationCoordinatorImpl::isWritablePrimaryForReportingPurposes() {
}
bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase(OperationContext* opCtx,
- StringData dbName) {
+ const DatabaseName& dbName) {
// The answer isn't meaningful unless we hold the ReplicationStateTransitionLock.
invariant(opCtx->lockState()->isRSTLLocked() || opCtx->isLockFreeReadsOp());
return canAcceptWritesForDatabase_UNSAFE(opCtx, dbName);
}
bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx,
- StringData dbName) {
+ const DatabaseName& dbName) {
// _canAcceptNonLocalWrites is always true for standalone nodes, and adjusted based on
// primary+drain state in replica sets.
//
@@ -3026,7 +3026,7 @@ bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase_UNSAFE(OperationCont
if (_readWriteAbility->canAcceptNonLocalWrites_UNSAFE() || alwaysAllowNonLocalWrites(opCtx)) {
return true;
}
- if (dbName == DatabaseName::kLocal.db()) {
+ if (dbName == DatabaseName::kLocal) {
return true;
}
return false;
@@ -3071,7 +3071,7 @@ bool ReplicationCoordinatorImpl::canAcceptWritesFor(OperationContext* opCtx,
bool ReplicationCoordinatorImpl::canAcceptWritesFor_UNSAFE(OperationContext* opCtx,
const NamespaceStringOrUUID& nsOrUUID) {
- bool canWriteToDB = canAcceptWritesForDatabase_UNSAFE(opCtx, nsOrUUID.dbName().db());
+ bool canWriteToDB = canAcceptWritesForDatabase_UNSAFE(opCtx, nsOrUUID.dbName());
if (!canWriteToDB && !isSystemDotProfile(opCtx, nsOrUUID)) {
return false;
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index 3d5614f6598..426c948f6ed 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -149,8 +149,9 @@ public:
virtual bool isWritablePrimaryForReportingPurposes();
- virtual bool canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName);
- virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName);
+ virtual bool canAcceptWritesForDatabase(OperationContext* opCtx, const DatabaseName& dbName);
+ virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx,
+ const DatabaseName& dbName);
bool canAcceptWritesFor(OperationContext* opCtx,
const NamespaceStringOrUUID& nsorUUID) override;
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
index 3c84207e7dc..4d3a42bbced 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
@@ -2717,7 +2717,8 @@ TEST_F(PrimaryCatchUpTest, PrimaryDoesNotNeedToCatchUp) {
auto opCtx = makeOperationContext();
signalDrainComplete(opCtx.get());
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
- ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
+ ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(
+ opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test")));
// Check that the number of elections requiring primary catchup was not incremented.
ASSERT_EQ(0, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting());
@@ -2772,7 +2773,8 @@ TEST_F(PrimaryCatchUpTest, CatchupSucceeds) {
auto opCtx = makeOperationContext();
signalDrainComplete(opCtx.get());
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
- ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
+ ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(
+ opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test")));
// Check that the number of elections requiring primary catchup was incremented.
ASSERT_EQ(1, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting());
@@ -2806,7 +2808,8 @@ TEST_F(PrimaryCatchUpTest, CatchupTimeout) {
auto opCtx = makeOperationContext();
signalDrainComplete(opCtx.get());
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
- ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
+ ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(
+ opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test")));
// Check that the number of elections requiring primary catchup was incremented.
ASSERT_EQ(1, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting());
@@ -2847,7 +2850,8 @@ TEST_F(PrimaryCatchUpTest, CannotSeeAllNodes) {
auto opCtx = makeOperationContext();
signalDrainComplete(opCtx.get());
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
- ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
+ ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(
+ opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test")));
// Check that the number of elections requiring primary catchup was not incremented.
ASSERT_EQ(0, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting());
@@ -2891,7 +2895,8 @@ TEST_F(PrimaryCatchUpTest, HeartbeatTimeout) {
auto opCtx = makeOperationContext();
signalDrainComplete(opCtx.get());
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
- ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
+ ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(
+ opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test")));
// Check that the number of elections requiring primary catchup was not incremented.
ASSERT_EQ(0, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting());
@@ -2929,7 +2934,8 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownBeforeHeartbeatRefreshing) {
ASSERT_EQUALS(0, countTextFormatLogLinesContaining("Catchup timed out"));
auto opCtx = makeOperationContext();
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
- ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
+ ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(
+ opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test")));
// Check that the number of elections requiring primary catchup was not incremented.
ASSERT_EQ(0, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting());
@@ -2977,7 +2983,8 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringCatchUp) {
ASSERT_EQUALS(0, countTextFormatLogLinesContaining("Catchup timed out"));
auto opCtx = makeOperationContext();
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
- ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
+ ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(
+ opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test")));
// Check that the number of elections requiring primary catchup was incremented.
ASSERT_EQ(1, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting());
@@ -3053,12 +3060,14 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringDrainMode) {
ASSERT(replCoord->getApplierState() == ApplierState::Draining);
{
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
- ASSERT_FALSE(replCoord->canAcceptWritesForDatabase(opCtx.get(), "test"));
+ ASSERT_FALSE(replCoord->canAcceptWritesForDatabase(
+ opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test")));
}
signalDrainComplete(opCtx.get());
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
ASSERT(replCoord->getApplierState() == ApplierState::Stopped);
- ASSERT_TRUE(replCoord->canAcceptWritesForDatabase(opCtx.get(), "test"));
+ ASSERT_TRUE(replCoord->canAcceptWritesForDatabase(
+ opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test")));
// Check that the number of elections requiring primary catchup was not incremented again.
ASSERT_EQ(1, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting());
@@ -3140,7 +3149,8 @@ TEST_F(PrimaryCatchUpTest, FreshestNodeBecomesAvailableLater) {
auto opCtx = makeOperationContext();
signalDrainComplete(opCtx.get());
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
- ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
+ ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(
+ opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test")));
// Check that the number of elections requiring primary catchup was incremented.
ASSERT_EQ(1, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting());
@@ -3201,7 +3211,8 @@ TEST_F(PrimaryCatchUpTest, InfiniteTimeoutAndAbort) {
auto opCtx = makeOperationContext();
signalDrainComplete(opCtx.get());
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
- ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
+ ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(
+ opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test")));
// Check that the number of elections requiring primary catchup was incremented.
ASSERT_EQ(1, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting());
@@ -3230,7 +3241,8 @@ TEST_F(PrimaryCatchUpTest, ZeroTimeout) {
auto opCtx = makeOperationContext();
signalDrainComplete(opCtx.get());
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
- ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
+ ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(
+ opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test")));
// Check that the number of elections requiring primary catchup was not incremented.
ASSERT_EQ(0, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting());
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 9a044655c0e..7754fcddb6d 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -2019,7 +2019,7 @@ TEST_F(StepDownTest, StepDownFailureRestoresDrainState) {
{
// We can't take writes yet since we're still in drain mode.
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
- ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "admin"));
+ ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), DatabaseName::kAdmin));
}
// Step down where the secondary actually has to catch up before the stepDown can succeed.
@@ -2044,7 +2044,7 @@ TEST_F(StepDownTest, StepDownFailureRestoresDrainState) {
// in drain mode.
{
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
- ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "admin"));
+ ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), DatabaseName::kAdmin));
}
// Now complete drain mode and ensure that we become capable of taking writes.
@@ -2053,7 +2053,7 @@ TEST_F(StepDownTest, StepDownFailureRestoresDrainState) {
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
- ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "admin"));
+ ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), DatabaseName::kAdmin));
}
class StepDownTestWithUnelectableNode : public StepDownTest {
@@ -2941,7 +2941,7 @@ TEST_F(StepDownTest, InterruptingStepDownCommandRestoresWriteAvailability) {
// This is the important check, that we stepped back up when aborting the stepdown command
// attempt.
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
- ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "admin"));
+ ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), DatabaseName::kAdmin));
}
// Test that if a stepdown command is blocked waiting for secondaries to catch up when an
@@ -3001,7 +3001,7 @@ TEST_F(StepDownTest, InterruptingAfterUnconditionalStepdownDoesNotRestoreWriteAv
// This is the important check, that we didn't accidentally step back up when aborting the
// stepdown command attempt.
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
- ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "admin"));
+ ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), DatabaseName::kAdmin));
}
TEST_F(ReplCoordTest, GetReplicationModeNone) {
diff --git a/src/mongo/db/repl/replication_coordinator_mock.cpp b/src/mongo/db/repl/replication_coordinator_mock.cpp
index 073ed4e767d..43b67594f65 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_mock.cpp
@@ -189,7 +189,7 @@ bool ReplicationCoordinatorMock::isWritablePrimaryForReportingPurposes() {
}
bool ReplicationCoordinatorMock::canAcceptWritesForDatabase(OperationContext* opCtx,
- StringData dbName) {
+ const DatabaseName& dbName) {
stdx::lock_guard<Mutex> lk(_mutex);
// Return true if we allow writes explicitly even when not in primary state, as in sharding
@@ -198,18 +198,18 @@ bool ReplicationCoordinatorMock::canAcceptWritesForDatabase(OperationContext* op
if (_alwaysAllowWrites) {
return true;
}
- return dbName == DatabaseName::kLocal.db() || _memberState.primary();
+ return dbName == DatabaseName::kLocal || _memberState.primary();
}
bool ReplicationCoordinatorMock::canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx,
- StringData dbName) {
+ const DatabaseName& dbName) {
return canAcceptWritesForDatabase(opCtx, dbName);
}
bool ReplicationCoordinatorMock::canAcceptWritesFor(OperationContext* opCtx,
const NamespaceStringOrUUID& nsOrUUID) {
// TODO
- return canAcceptWritesForDatabase(opCtx, nsOrUUID.dbName().db());
+ return canAcceptWritesForDatabase(opCtx, nsOrUUID.dbName());
}
bool ReplicationCoordinatorMock::canAcceptWritesFor_UNSAFE(OperationContext* opCtx,
diff --git a/src/mongo/db/repl/replication_coordinator_mock.h b/src/mongo/db/repl/replication_coordinator_mock.h
index 77a8c715d69..b9d01bd1742 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_mock.h
@@ -114,9 +114,10 @@ public:
virtual bool isWritablePrimaryForReportingPurposes();
- virtual bool canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName);
+ virtual bool canAcceptWritesForDatabase(OperationContext* opCtx, const DatabaseName& dbName);
- virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName);
+ virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx,
+ const DatabaseName& dbName);
bool canAcceptWritesFor(OperationContext* opCtx,
const NamespaceStringOrUUID& nsOrUUID) override;
diff --git a/src/mongo/db/repl/replication_coordinator_noop.cpp b/src/mongo/db/repl/replication_coordinator_noop.cpp
index 839c9f58020..e03471a100e 100644
--- a/src/mongo/db/repl/replication_coordinator_noop.cpp
+++ b/src/mongo/db/repl/replication_coordinator_noop.cpp
@@ -77,12 +77,12 @@ bool ReplicationCoordinatorNoOp::isWritablePrimaryForReportingPurposes() {
}
bool ReplicationCoordinatorNoOp::canAcceptWritesForDatabase(OperationContext* opCtx,
- StringData dbName) {
+ const DatabaseName& dbName) {
MONGO_UNREACHABLE;
}
bool ReplicationCoordinatorNoOp::canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx,
- StringData dbName) {
+ const DatabaseName& dbName) {
MONGO_UNREACHABLE;
}
diff --git a/src/mongo/db/repl/replication_coordinator_noop.h b/src/mongo/db/repl/replication_coordinator_noop.h
index 1a8eca06917..2f3d9e3c931 100644
--- a/src/mongo/db/repl/replication_coordinator_noop.h
+++ b/src/mongo/db/repl/replication_coordinator_noop.h
@@ -71,8 +71,9 @@ public:
bool isInPrimaryOrSecondaryState(OperationContext* opCtx) const final;
bool isInPrimaryOrSecondaryState_UNSAFE() const final;
- bool canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName) final;
- bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName) final;
+ bool canAcceptWritesForDatabase(OperationContext* opCtx, const DatabaseName& dbName) final;
+ bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx,
+ const DatabaseName& dbName) final;
bool canAcceptWritesFor(OperationContext* opCtx, const NamespaceStringOrUUID& nsOrUUID) final;
bool canAcceptWritesFor_UNSAFE(OperationContext* opCtx,
diff --git a/src/mongo/db/repl/shard_merge_recipient_service.cpp b/src/mongo/db/repl/shard_merge_recipient_service.cpp
index c1d3a40bc50..1cb67e16f91 100644
--- a/src/mongo/db/repl/shard_merge_recipient_service.cpp
+++ b/src/mongo/db/repl/shard_merge_recipient_service.cpp
@@ -1469,7 +1469,7 @@ ShardMergeRecipientService::Instance::_fetchRetryableWritesOplogBeforeStartOpTim
// re-create the collection.
auto coordinator = repl::ReplicationCoordinator::get(opCtx.get());
Lock::GlobalLock globalLock(opCtx.get(), MODE_IX);
- if (!coordinator->canAcceptWritesForDatabase(opCtx.get(), oplogBufferNS.db())) {
+ if (!coordinator->canAcceptWritesForDatabase(opCtx.get(), oplogBufferNS.dbName())) {
uassertStatusOK(
Status(ErrorCodes::NotWritablePrimary,
"Recipient node is not primary, cannot clear oplog buffer collection."));
@@ -1592,8 +1592,8 @@ void ShardMergeRecipientService::Instance::_startOplogBuffer(OperationContext* o
repl::ReplicationStateTransitionLockGuard rstl(opCtx, MODE_IX);
auto oplogBufferNS = getOplogBufferNs(getMigrationUUID());
- if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(opCtx,
- oplogBufferNS.db())) {
+ if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(
+ opCtx, oplogBufferNS.dbName())) {
uassertStatusOK(
Status(ErrorCodes::NotWritablePrimary, "Recipient node is no longer a primary."));
}
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 75267be9c55..9b35e5ebbbe 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -1468,7 +1468,8 @@ void StorageInterfaceImpl::waitForAllEarlierOplogWritesToBeVisible(OperationCont
AutoGetOplog oplogRead(opCtx, OplogAccessMode::kRead);
if (primaryOnly &&
- !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(opCtx, "admin"))
+ !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(opCtx,
+ DatabaseName::kAdmin))
return;
const auto& oplog = oplogRead.getCollection();
uassert(ErrorCodes::NotYetInitialized, "The oplog does not exist", oplog);
diff --git a/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp b/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp
index 306e2b5cfe2..0592fb30dd5 100644
--- a/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp
+++ b/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp
@@ -634,7 +634,7 @@ void performNoopWrite(OperationContext* opCtx, StringData msg) {
AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite);
uassert(ErrorCodes::NotWritablePrimary,
"Not primary when performing noop write for {}"_format(msg),
- replCoord->canAcceptWritesForDatabase(opCtx, "admin"));
+ replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin));
writeConflictRetry(
opCtx, "performNoopWrite", NamespaceString::kRsOplogNamespace.ns(), [&opCtx, &msg] {
diff --git a/src/mongo/db/repl/tenant_migration_recipient_service.cpp b/src/mongo/db/repl/tenant_migration_recipient_service.cpp
index 3d2ba358d21..1ac857ed1f4 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_service.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_service.cpp
@@ -1225,7 +1225,7 @@ TenantMigrationRecipientService::Instance::_fetchRetryableWritesOplogBeforeStart
// re-create the collection.
auto coordinator = repl::ReplicationCoordinator::get(opCtx.get());
Lock::GlobalLock globalLock(opCtx.get(), MODE_IX);
- if (!coordinator->canAcceptWritesForDatabase(opCtx.get(), oplogBufferNS.db())) {
+ if (!coordinator->canAcceptWritesForDatabase(opCtx.get(), oplogBufferNS.dbName())) {
uassertStatusOK(
Status(ErrorCodes::NotWritablePrimary,
"Recipient node is not primary, cannot clear oplog buffer collection."));
@@ -2237,7 +2237,7 @@ void TenantMigrationRecipientService::Instance::_setup() {
auto oplogBufferNS = getOplogBufferNs(getMigrationUUID());
if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(
- opCtx, oplogBufferNS.db())) {
+ opCtx, oplogBufferNS.dbName())) {
uassertStatusOK(
Status(ErrorCodes::NotWritablePrimary, "Recipient node is no longer a primary."));
}
diff --git a/src/mongo/db/s/active_migrations_registry.cpp b/src/mongo/db/s/active_migrations_registry.cpp
index 5a512319c67..6e865bc47cd 100644
--- a/src/mongo/db/s/active_migrations_registry.cpp
+++ b/src/mongo/db/s/active_migrations_registry.cpp
@@ -89,7 +89,7 @@ void ActiveMigrationsRegistry::lock(OperationContext* opCtx, StringData reason)
uassert(ErrorCodes::NotWritablePrimary,
"Cannot lock the registry while the node is in draining mode",
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(
- opCtx, DatabaseName::kAdmin.toString()));
+ opCtx, DatabaseName::kAdmin));
}
unblockMigrationsOnError.dismiss();
diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp
index cc4dce227d6..e592e5db739 100644
--- a/src/mongo/db/s/shard_server_op_observer.cpp
+++ b/src/mongo/db/s/shard_server_op_observer.cpp
@@ -64,7 +64,7 @@ const auto documentIdDecoration = OperationContext::declareDecoration<BSONObj>()
bool isStandaloneOrPrimary(OperationContext* opCtx) {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
- return replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin.toString());
+ return replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin);
}
/**
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 801a57e184c..a23da80ed18 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -1573,8 +1573,9 @@ void ExecCommandDatabase::_initiateCommand() {
}
_invocation->checkAuthorization(opCtx, request);
-
- const bool iAmPrimary = replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbname);
+ const auto dbName =
+ DatabaseNameUtil::deserialize(request.getValidatedTenantId(), request.getDatabase());
+ const bool iAmPrimary = replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbName);
if (!opCtx->getClient()->isInDirectClient() &&
!MONGO_unlikely(skipCheckingForNotPrimaryInCommandDispatch.shouldFail())) {
@@ -1592,7 +1593,7 @@ void ExecCommandDatabase::_initiateCommand() {
bool couldHaveOptedIn =
allowed == Command::AllowedOnSecondary::kOptIn && !inMultiDocumentTransaction;
bool optedIn = couldHaveOptedIn && ReadPreferenceSetting::get(opCtx).canRunOnSecondary();
- bool canRunHere = commandCanRunHere(opCtx, dbname, command, inMultiDocumentTransaction);
+ bool canRunHere = commandCanRunHere(opCtx, dbName, command, inMultiDocumentTransaction);
if (!canRunHere && couldHaveOptedIn) {
const auto msg = client->supportsHello() ? "not primary and secondaryOk=false"_sd
: "not master and slaveOk=false"_sd;
@@ -1608,7 +1609,7 @@ void ExecCommandDatabase::_initiateCommand() {
if (!command->maintenanceOk() &&
replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet &&
- !replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbname) &&
+ !replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbName) &&
!replCoord->getMemberState().secondary()) {
uassert(ErrorCodes::NotPrimaryOrSecondary,
@@ -1632,7 +1633,7 @@ void ExecCommandDatabase::_initiateCommand() {
repl::ReplicationStateTransitionLockGuard rstl(opCtx, MODE_IX);
uassert(ErrorCodes::NotWritablePrimary,
"Cannot start a transaction in a non-primary state",
- replCoord->canAcceptWritesForDatabase(opCtx, dbname));
+ replCoord->canAcceptWritesForDatabase(opCtx, dbName));
}
}
diff --git a/src/mongo/db/session/session_catalog_mongod.cpp b/src/mongo/db/session/session_catalog_mongod.cpp
index 7e2eab73e6d..899b3498bcb 100644
--- a/src/mongo/db/session/session_catalog_mongod.cpp
+++ b/src/mongo/db/session/session_catalog_mongod.cpp
@@ -658,7 +658,7 @@ int MongoDSessionCatalog::reapSessionsOlderThan(OperationContext* opCtx,
// around the fact that the logical sessions cache is not registered to listen for replication
// state changes.
const auto replCoord = repl::ReplicationCoordinator::get(opCtx);
- if (!replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, DatabaseName::kConfig.toString()))
+ if (!replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, DatabaseName::kConfig))
return 0;
return removeExpiredTransactionSessionsFromDisk(
diff --git a/src/mongo/db/session/sessions_collection_rs.cpp b/src/mongo/db/session/sessions_collection_rs.cpp
index 92a1afd26cc..76aec1ab9ce 100644
--- a/src/mongo/db/session/sessions_collection_rs.cpp
+++ b/src/mongo/db/session/sessions_collection_rs.cpp
@@ -86,7 +86,7 @@ bool SessionsCollectionRS::_isStandaloneOrPrimary(const NamespaceString& ns,
auto coord = mongo::repl::ReplicationCoordinator::get(opCtx);
- return coord->canAcceptWritesForDatabase(opCtx, ns.db());
+ return coord->canAcceptWritesForDatabase(opCtx, ns.dbName());
}
template <typename LocalCallback, typename RemoteCallback>
diff --git a/src/mongo/db/stats/resource_consumption_metrics.cpp b/src/mongo/db/stats/resource_consumption_metrics.cpp
index b67bb141353..98a804b0694 100644
--- a/src/mongo/db/stats/resource_consumption_metrics.cpp
+++ b/src/mongo/db/stats/resource_consumption_metrics.cpp
@@ -432,7 +432,7 @@ void ResourceConsumption::merge(OperationContext* opCtx,
// inconsistent state is not impactful for the purposes of metrics collection, perform a
// best-effort check so that we can record metrics for this operation.
auto isPrimary = repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase_UNSAFE(
- opCtx, DatabaseName::kAdmin.toString());
+ opCtx, DatabaseName::kAdmin);
AggregatedMetrics newMetrics;
if (isPrimary) {
diff --git a/src/mongo/db/storage/snapshot_helper.cpp b/src/mongo/db/storage/snapshot_helper.cpp
index a610672c78d..8470f74dc92 100644
--- a/src/mongo/db/storage/snapshot_helper.cpp
+++ b/src/mongo/db/storage/snapshot_helper.cpp
@@ -100,7 +100,8 @@ bool shouldReadAtLastApplied(OperationContext* opCtx,
// being applied and we can read from the default snapshot. If we are in a replication state
// (like secondary or primary catch-up) where we are not accepting writes, we should read at
// lastApplied.
- if (repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(opCtx, "admin")) {
+ if (repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(
+ opCtx, DatabaseName::kAdmin)) {
if (reason) {
*reason = "primary";
}
diff --git a/src/mongo/db/system_index.cpp b/src/mongo/db/system_index.cpp
index 259d3b9fbf2..7a6e9b53dad 100644
--- a/src/mongo/db/system_index.cpp
+++ b/src/mongo/db/system_index.cpp
@@ -95,7 +95,7 @@ void generateSystemIndexForExistingCollection(OperationContext* opCtx,
uassert(ErrorCodes::NotWritablePrimary,
"Not primary while creating authorization index",
replCoord->getReplicationMode() != repl::ReplicationCoordinator::modeReplSet ||
- replCoord->canAcceptWritesForDatabase(opCtx, ns.db()));
+ replCoord->canAcceptWritesForDatabase(opCtx, ns.dbName()));
invariant(!opCtx->lockState()->inAWriteUnitOfWork());
diff --git a/src/mongo/db/transaction/transaction_participant.cpp b/src/mongo/db/transaction/transaction_participant.cpp
index a1e1b205fec..c961a63ee6b 100644
--- a/src/mongo/db/transaction/transaction_participant.cpp
+++ b/src/mongo/db/transaction/transaction_participant.cpp
@@ -553,7 +553,7 @@ void TransactionParticipant::performNoopWrite(OperationContext* opCtx, StringDat
AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite);
uassert(ErrorCodes::NotWritablePrimary,
"Not primary when performing noop write for {}"_format(msg),
- replCoord->canAcceptWritesForDatabase(opCtx, "admin"));
+ replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin));
writeConflictRetry(
opCtx, "performNoopWrite", NamespaceString::kRsOplogNamespace.ns(), [&opCtx, &msg] {
@@ -946,7 +946,7 @@ void TransactionParticipant::Participant::beginOrContinue(
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
uassert(ErrorCodes::NotWritablePrimary,
"Not primary so we cannot begin or continue a transaction",
- replCoord->canAcceptWritesForDatabase(opCtx, "admin"));
+ replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin));
// Disallow multi-statement transactions on shard servers that have
// writeConcernMajorityJournalDefault=false unless enableTestCommands=true. But allow
// retryable writes (autocommit == boost::none).
@@ -1865,7 +1865,7 @@ void TransactionParticipant::Participant::commitPreparedTransaction(
if (opCtx->writesAreReplicated()) {
uassert(ErrorCodes::NotWritablePrimary,
"Not primary so we cannot commit a prepared transaction",
- replCoord->canAcceptWritesForDatabase(opCtx, "admin"));
+ replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin));
}
uassert(
@@ -2152,7 +2152,7 @@ void TransactionParticipant::Participant::_abortActivePreparedTransaction(Operat
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
uassert(ErrorCodes::NotWritablePrimary,
"Not primary so we cannot abort a prepared transaction",
- replCoord->canAcceptWritesForDatabase(opCtx, "admin"));
+ replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin));
}
_abortActiveTransaction(opCtx, TransactionState::kPrepared);
diff --git a/src/mongo/embedded/replication_coordinator_embedded.cpp b/src/mongo/embedded/replication_coordinator_embedded.cpp
index 66e56bd22da..e0329a66a99 100644
--- a/src/mongo/embedded/replication_coordinator_embedded.cpp
+++ b/src/mongo/embedded/replication_coordinator_embedded.cpp
@@ -78,12 +78,12 @@ bool ReplicationCoordinatorEmbedded::isWritablePrimaryForReportingPurposes() {
}
bool ReplicationCoordinatorEmbedded::canAcceptWritesForDatabase(OperationContext* opCtx,
- StringData dbName) {
+ const DatabaseName& dbName) {
return true;
}
bool ReplicationCoordinatorEmbedded::canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx,
- StringData dbName) {
+ const DatabaseName& dbName) {
return true;
}
diff --git a/src/mongo/embedded/replication_coordinator_embedded.h b/src/mongo/embedded/replication_coordinator_embedded.h
index d01f1b770f9..76a18aa8a4c 100644
--- a/src/mongo/embedded/replication_coordinator_embedded.h
+++ b/src/mongo/embedded/replication_coordinator_embedded.h
@@ -71,8 +71,9 @@ public:
bool isInPrimaryOrSecondaryState(OperationContext* opCtx) const override;
bool isInPrimaryOrSecondaryState_UNSAFE() const override;
- bool canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName) override;
- bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName) override;
+ bool canAcceptWritesForDatabase(OperationContext* opCtx, const DatabaseName& dbName) override;
+ bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx,
+ const DatabaseName& dbName) override;
bool canAcceptWritesFor(OperationContext* opCtx,
const NamespaceStringOrUUID& nsOrUUID) override;
diff --git a/src/mongo/s/query_analysis_client.cpp b/src/mongo/s/query_analysis_client.cpp
index 9085a4e6e4c..fe5244080b6 100644
--- a/src/mongo/s/query_analysis_client.cpp
+++ b/src/mongo/s/query_analysis_client.cpp
@@ -72,8 +72,8 @@ void QueryAnalysisClient::setTaskExecutor(ServiceContext* service,
bool QueryAnalysisClient::_canAcceptWrites(OperationContext* opCtx, const DatabaseName& dbName) {
repl::ReplicationStateTransitionLockGuard rstl(opCtx, MODE_IX);
- return mongo::repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(
- opCtx, DatabaseNameUtil::serialize(dbName));
+ return mongo::repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(opCtx,
+ dbName);
}
BSONObj QueryAnalysisClient::_executeCommandOnPrimaryLocal(