author     Matt Broadstone <mbroadst@mongodb.com>            2023-04-18 10:52:03 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2023-04-18 12:09:05 +0000
commit     7cc71f7d5f29a5850a4d186b9554df3dcb407117 (patch)
tree       068faa351ea01c29118b09a0b84b82d119c5e141
parent     87824ece4511770a9a3d34b89cf544de7596dd77 (diff)
SERVER-76222 Avoid temporary allocations of DatabaseName
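
The change mechanically replaces temporary DatabaseName{nss.db()} constructions with the
dbName() accessor that NamespaceString already exposes, and narrows a few std::string
namespace parameters to StringData (dbclient_rs.cpp, service_entry_point_common.cpp) so
callers no longer need ns().toString() copies. Below is a minimal sketch of the
before/after pattern, assuming dbName() hands back the DatabaseName already stored inside
the NamespaceString; the demoLockDb* helpers and the header names in the comment are
illustrative only and not part of this commit:

    // Sketch only: assumes MongoDB's internal headers, e.g.
    // "mongo/db/concurrency/d_concurrency.h" for Lock::DBLock and
    // "mongo/db/namespace_string.h" for NamespaceString/DatabaseName.

    // Old pattern: builds a temporary DatabaseName from the db() StringData
    // on every call just to take the database lock.
    void demoLockDbOld(OperationContext* opCtx, const NamespaceString& nss) {
        Lock::DBLock dbLk(opCtx, DatabaseName{nss.db()}, MODE_IX);
    }

    // New pattern: reuses the DatabaseName the NamespaceString already holds,
    // so no temporary is allocated.
    void demoLockDbNew(OperationContext* opCtx, const NamespaceString& nss) {
        Lock::DBLock dbLk(opCtx, nss.dbName(), MODE_IX);
    }

The same substitution is applied wherever a NamespaceString (or a constant namespace such
as NamespaceString::kLogicalSessionsNamespace) is already in hand: database locks,
DatabaseShardingState assertions, NamespaceStringOrUUID construction, and setDbName()
calls on command requests.
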
-rw-r--r--  src/mongo/client/dbclient_rs.cpp | 6
-rw-r--r--  src/mongo/db/catalog/collection_catalog_test.cpp | 25
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp | 2
-rw-r--r--  src/mongo/db/catalog_raii.cpp | 2
-rw-r--r--  src/mongo/db/catalog_raii_test.cpp | 23
-rw-r--r--  src/mongo/db/commands/dbcheck.cpp | 2
-rw-r--r--  src/mongo/db/commands/internal_rename_if_options_and_indexes_match_cmd.cpp | 2
-rw-r--r--  src/mongo/db/curop.h | 4
-rw-r--r--  src/mongo/db/fle_crud.cpp | 2
-rw-r--r--  src/mongo/db/op_observer/op_observer_impl_test.cpp | 8
-rw-r--r--  src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp | 2
-rw-r--r--  src/mongo/db/repl/storage_interface_impl_test.cpp | 72
-rw-r--r--  src/mongo/db/s/analyze_shard_key_cmd_util.cpp | 13
-rw-r--r--  src/mongo/db/s/configure_query_analyzer_cmd.cpp | 2
-rw-r--r--  src/mongo/db/s/move_primary/move_primary_donor_service.cpp | 6
-rw-r--r--  src/mongo/db/s/op_observer_sharding_impl.cpp | 6
-rw-r--r--  src/mongo/db/s/query_analysis_writer.cpp | 2
-rw-r--r--  src/mongo/db/s/query_analysis_writer_test.cpp | 3
-rw-r--r--  src/mongo/db/s/range_deletion_util.cpp | 9
-rw-r--r--  src/mongo/db/s/rename_collection_coordinator.cpp | 4
-rw-r--r--  src/mongo/db/s/reshard_collection_coordinator.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_service.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_manual_cleanup.cpp | 2
-rw-r--r--  src/mongo/db/s/sessions_collection_config_server.cpp | 2
-rw-r--r--  src/mongo/db/s/sharding_ddl_coordinator.cpp | 3
-rw-r--r--  src/mongo/db/s/sharding_ddl_coordinator_service.cpp | 2
-rw-r--r--  src/mongo/db/s/sharding_state_recovery.cpp | 3
-rw-r--r--  src/mongo/db/s/sharding_util.cpp | 2
-rw-r--r--  src/mongo/db/s/shardsvr_check_metadata_consistency_command.cpp | 2
-rw-r--r--  src/mongo/db/s/shardsvr_drop_indexes_command.cpp | 2
-rw-r--r--  src/mongo/db/service_entry_point_common.cpp | 8
-rw-r--r--  src/mongo/db/shard_role.cpp | 3
-rw-r--r--  src/mongo/db/shard_role_test.cpp | 8
-rw-r--r--  src/mongo/db/ttl.cpp | 4
-rw-r--r--  src/mongo/db/views/view_definition_test.cpp | 3
-rw-r--r--  src/mongo/dbtests/dbtests.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_drop_collection_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_rename_collection_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_set_cluster_parameter_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_shard_collection_cmd.cpp | 2
40 files changed, 117 insertions, 136 deletions
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
index 368b1534ccb..b6b28375246 100644
--- a/src/mongo/client/dbclient_rs.cpp
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -211,7 +211,7 @@ bool _isSecondaryCommand(StringData commandName, const BSONObj& commandArgs) {
}
// Internal implementation of isSecondaryQuery, takes previously-parsed read preference
-bool _isSecondaryQuery(const string& ns,
+bool _isSecondaryQuery(StringData ns,
const BSONObj& filter,
const ReadPreferenceSetting& readPref) {
// If the read pref is primary only, this is not a secondary query
@@ -502,8 +502,8 @@ std::unique_ptr<DBClientCursor> DBClientReplicaSet::find(FindCommandRequest find
const ReadPreferenceSetting& readPref,
ExhaustMode exhaustMode) {
invariant(findRequest.getNamespaceOrUUID().nss());
- const std::string nss = findRequest.getNamespaceOrUUID().nss()->ns().toString();
- if (_isSecondaryQuery(nss, findRequest.toBSON(BSONObj{}), readPref)) {
+ const StringData ns = findRequest.getNamespaceOrUUID().nss()->ns();
+ if (_isSecondaryQuery(ns, findRequest.toBSON(BSONObj{}), readPref)) {
LOGV2_DEBUG(5951202,
3,
"dbclient_rs query using secondary or tagged node selection",
diff --git a/src/mongo/db/catalog/collection_catalog_test.cpp b/src/mongo/db/catalog/collection_catalog_test.cpp
index 0f1ab9ab043..81c7ffa6d80 100644
--- a/src/mongo/db/catalog/collection_catalog_test.cpp
+++ b/src/mongo/db/catalog/collection_catalog_test.cpp
@@ -400,8 +400,7 @@ TEST_F(CollectionCatalogTest, OnDropCollection) {
TEST_F(CollectionCatalogTest, RenameCollection) {
auto uuid = UUID::gen();
- NamespaceString oldNss =
- NamespaceString::createNamespaceString_forTest(DatabaseName{nss.db()}, "oldcol");
+ NamespaceString oldNss = NamespaceString::createNamespaceString_forTest(nss.dbName(), "oldcol");
std::shared_ptr<Collection> collShared = std::make_shared<CollectionMock>(uuid, oldNss);
auto collection = collShared.get();
catalog.registerCollection(opCtx.get(), uuid, std::move(collShared), boost::none);
@@ -1125,7 +1124,7 @@ private:
}
void _dropCollection(OperationContext* opCtx, const NamespaceString& nss, Timestamp timestamp) {
- Lock::DBLock dbLk(opCtx, DatabaseName{nss.db()}, MODE_IX);
+ Lock::DBLock dbLk(opCtx, nss.dbName(), MODE_IX);
Lock::CollectionLock collLk(opCtx, nss, MODE_X);
CollectionWriter collection(opCtx, nss);
@@ -1161,7 +1160,7 @@ private:
const NamespaceString& from,
const NamespaceString& to,
Timestamp timestamp) {
- Lock::DBLock dbLk(opCtx, DatabaseName{from.db()}, MODE_IX);
+ Lock::DBLock dbLk(opCtx, from.dbName(), MODE_IX);
Lock::CollectionLock fromLk(opCtx, from, MODE_X);
Lock::CollectionLock toLk(opCtx, to, MODE_X);
@@ -2712,10 +2711,10 @@ TEST_F(CollectionCatalogTimestampTest, CatalogIdMappingInsert) {
->establishConsistentCollection(opCtx.get(), nss, Timestamp(1, 17));
CollectionCatalog::get(opCtx.get())
->establishConsistentCollection(
- opCtx.get(), {DatabaseName{nss.db()}, firstUUID}, Timestamp(1, 17));
+ opCtx.get(), {nss.dbName(), firstUUID}, Timestamp(1, 17));
CollectionCatalog::get(opCtx.get())
->establishConsistentCollection(
- opCtx.get(), {DatabaseName{nss.db()}, secondUUID}, Timestamp(1, 17));
+ opCtx.get(), {nss.dbName(), secondUUID}, Timestamp(1, 17));
// Lookups before the inserted timestamp is still unknown
ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 11)).result,
@@ -2747,10 +2746,10 @@ TEST_F(CollectionCatalogTimestampTest, CatalogIdMappingInsert) {
->establishConsistentCollection(opCtx.get(), nss, Timestamp(1, 12));
CollectionCatalog::get(opCtx.get())
->establishConsistentCollection(
- opCtx.get(), {DatabaseName{nss.db()}, firstUUID}, Timestamp(1, 12));
+ opCtx.get(), {nss.dbName(), firstUUID}, Timestamp(1, 12));
CollectionCatalog::get(opCtx.get())
->establishConsistentCollection(
- opCtx.get(), {DatabaseName{nss.db()}, secondUUID}, Timestamp(1, 12));
+ opCtx.get(), {nss.dbName(), secondUUID}, Timestamp(1, 12));
// We should now have extended the range from Timestamp(1, 17) to Timestamp(1, 12)
ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 12)).result,
@@ -2781,10 +2780,10 @@ TEST_F(CollectionCatalogTimestampTest, CatalogIdMappingInsert) {
->establishConsistentCollection(opCtx.get(), nss, Timestamp(1, 25));
CollectionCatalog::get(opCtx.get())
->establishConsistentCollection(
- opCtx.get(), {DatabaseName{nss.db()}, firstUUID}, Timestamp(1, 25));
+ opCtx.get(), {nss.dbName(), firstUUID}, Timestamp(1, 25));
CollectionCatalog::get(opCtx.get())
->establishConsistentCollection(
- opCtx.get(), {DatabaseName{nss.db()}, secondUUID}, Timestamp(1, 25));
+ opCtx.get(), {nss.dbName(), secondUUID}, Timestamp(1, 25));
// Check the entries, most didn't change
ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 17)).result,
@@ -2819,10 +2818,10 @@ TEST_F(CollectionCatalogTimestampTest, CatalogIdMappingInsert) {
->establishConsistentCollection(opCtx.get(), nss, Timestamp(1, 26));
CollectionCatalog::get(opCtx.get())
->establishConsistentCollection(
- opCtx.get(), {DatabaseName{nss.db()}, firstUUID}, Timestamp(1, 26));
+ opCtx.get(), {nss.dbName(), firstUUID}, Timestamp(1, 26));
CollectionCatalog::get(opCtx.get())
->establishConsistentCollection(
- opCtx.get(), {DatabaseName{nss.db()}, secondUUID}, Timestamp(1, 26));
+ opCtx.get(), {nss.dbName(), secondUUID}, Timestamp(1, 26));
// We should not have re-written the existing entry at Timestamp(1, 26)
ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 17)).result,
@@ -3949,7 +3948,7 @@ TEST_F(CollectionCatalogTimestampTest, ResolveNamespaceStringOrUUIDAtLatest) {
const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b");
const Timestamp createCollectionTs = Timestamp(10, 10);
const UUID uuid = createCollection(opCtx.get(), nss, createCollectionTs);
- const NamespaceStringOrUUID nssOrUUID = NamespaceStringOrUUID(DatabaseName{nss.db()}, uuid);
+ const NamespaceStringOrUUID nssOrUUID = NamespaceStringOrUUID(nss.dbName(), uuid);
NamespaceString resolvedNss =
CollectionCatalog::get(opCtx.get())->resolveNamespaceStringOrUUID(opCtx.get(), nssOrUUID);
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 128a61b4f52..3f54a626654 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -488,7 +488,7 @@ Status renameCollectionAcrossDatabases(OperationContext* opCtx,
targetDBLock.emplace(opCtx, target.dbName(), MODE_X);
}
- DatabaseShardingState::assertMatchingDbVersion(opCtx, DatabaseName{source.db()});
+ DatabaseShardingState::assertMatchingDbVersion(opCtx, source.dbName());
DisableDocumentValidation validationDisabler(opCtx);
diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index 51e2f1dd8b1..cadcdb6bcda 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -487,7 +487,7 @@ AutoGetCollectionLockFree::AutoGetCollectionLockFree(OperationContext* opCtx,
// Check that the sharding database version matches our read.
// Note: this must always be checked, regardless of whether the collection exists, so that the
// dbVersion of this node or the caller gets updated quickly in case either is stale.
- DatabaseShardingState::assertMatchingDbVersion(opCtx, DatabaseName{_resolvedNss.db()});
+ DatabaseShardingState::assertMatchingDbVersion(opCtx, _resolvedNss.dbName());
checkCollectionUUIDMismatch(opCtx, _resolvedNss, _collectionPtr, options._expectedUUID);
diff --git a/src/mongo/db/catalog_raii_test.cpp b/src/mongo/db/catalog_raii_test.cpp
index 3aafca7c0ad..4d1067d5c43 100644
--- a/src/mongo/db/catalog_raii_test.cpp
+++ b/src/mongo/db/catalog_raii_test.cpp
@@ -240,8 +240,8 @@ TEST_F(CatalogRAIITestFixture, AutoGetCollectionSecondaryNamespacesSingleDb) {
ASSERT(opCtx1->lockState()->isRSTLLocked());
ASSERT(opCtx1->lockState()->isReadLocked()); // Global lock check
ASSERT(opCtx1->lockState()->isDbLockedForMode(nss.dbName(), MODE_IS));
- ASSERT(opCtx1->lockState()->isDbLockedForMode(DatabaseName{kSecondaryNss1.db()}, MODE_IS));
- ASSERT(opCtx1->lockState()->isDbLockedForMode(DatabaseName{kSecondaryNss2.db()}, MODE_IS));
+ ASSERT(opCtx1->lockState()->isDbLockedForMode(kSecondaryNss1.dbName(), MODE_IS));
+ ASSERT(opCtx1->lockState()->isDbLockedForMode(kSecondaryNss2.dbName(), MODE_IS));
ASSERT(opCtx1->lockState()->isCollectionLockedForMode(nss, MODE_IS));
ASSERT(opCtx1->lockState()->isCollectionLockedForMode(kSecondaryNss1, MODE_IS));
ASSERT(opCtx1->lockState()->isCollectionLockedForMode(kSecondaryNss2, MODE_IS));
@@ -249,10 +249,8 @@ TEST_F(CatalogRAIITestFixture, AutoGetCollectionSecondaryNamespacesSingleDb) {
ASSERT(!opCtx1->lockState()->isRSTLExclusive());
ASSERT(!opCtx1->lockState()->isGlobalLockedRecursively());
ASSERT(!opCtx1->lockState()->isWriteLocked());
- ASSERT(
- !opCtx1->lockState()->isDbLockedForMode(DatabaseName{kSecondaryNssOtherDb1.db()}, MODE_IS));
- ASSERT(
- !opCtx1->lockState()->isDbLockedForMode(DatabaseName{kSecondaryNssOtherDb2.db()}, MODE_IS));
+ ASSERT(!opCtx1->lockState()->isDbLockedForMode(kSecondaryNssOtherDb1.dbName(), MODE_IS));
+ ASSERT(!opCtx1->lockState()->isDbLockedForMode(kSecondaryNssOtherDb2.dbName(), MODE_IS));
ASSERT(!opCtx1->lockState()->isCollectionLockedForMode(kSecondaryNssOtherDb1, MODE_IS));
ASSERT(!opCtx1->lockState()->isCollectionLockedForMode(kSecondaryNssOtherDb2, MODE_IS));
@@ -278,18 +276,16 @@ TEST_F(CatalogRAIITestFixture, AutoGetCollectionMultiNamespacesMODEIX) {
ASSERT(opCtx1->lockState()->isRSTLLocked());
ASSERT(opCtx1->lockState()->isWriteLocked()); // Global lock check
ASSERT(opCtx1->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX));
- ASSERT(opCtx1->lockState()->isDbLockedForMode(DatabaseName{kSecondaryNss1.db()}, MODE_IX));
- ASSERT(opCtx1->lockState()->isDbLockedForMode(DatabaseName{kSecondaryNss2.db()}, MODE_IX));
+ ASSERT(opCtx1->lockState()->isDbLockedForMode(kSecondaryNss1.dbName(), MODE_IX));
+ ASSERT(opCtx1->lockState()->isDbLockedForMode(kSecondaryNss2.dbName(), MODE_IX));
ASSERT(opCtx1->lockState()->isCollectionLockedForMode(nss, MODE_IX));
ASSERT(opCtx1->lockState()->isCollectionLockedForMode(kSecondaryNss1, MODE_IX));
ASSERT(opCtx1->lockState()->isCollectionLockedForMode(kSecondaryNss2, MODE_IX));
ASSERT(!opCtx1->lockState()->isRSTLExclusive());
ASSERT(!opCtx1->lockState()->isGlobalLockedRecursively());
- ASSERT(
- !opCtx1->lockState()->isDbLockedForMode(DatabaseName{kSecondaryNssOtherDb1.db()}, MODE_IX));
- ASSERT(
- !opCtx1->lockState()->isDbLockedForMode(DatabaseName{kSecondaryNssOtherDb2.db()}, MODE_IX));
+ ASSERT(!opCtx1->lockState()->isDbLockedForMode(kSecondaryNssOtherDb1.dbName(), MODE_IX));
+ ASSERT(!opCtx1->lockState()->isDbLockedForMode(kSecondaryNssOtherDb2.dbName(), MODE_IX));
ASSERT(!opCtx1->lockState()->isCollectionLockedForMode(kSecondaryNssOtherDb1, MODE_IX));
ASSERT(!opCtx1->lockState()->isCollectionLockedForMode(kSecondaryNssOtherDb2, MODE_IX));
@@ -325,8 +321,7 @@ TEST_F(CatalogRAIITestFixture, AutoGetCollectionMultiNssCollLockDeadline) {
// Take a MODE_X collection lock on kSecondaryNss1.
boost::optional<AutoGetCollection> autoGetCollWithXLock;
autoGetCollWithXLock.emplace(client1.second.get(), kSecondaryNss1, MODE_X);
- ASSERT(
- client1.second->lockState()->isDbLockedForMode(DatabaseName{kSecondaryNss1.db()}, MODE_IX));
+ ASSERT(client1.second->lockState()->isDbLockedForMode(kSecondaryNss1.dbName(), MODE_IX));
ASSERT(client1.second->lockState()->isCollectionLockedForMode(kSecondaryNss1, MODE_X));
// Now trying to take a MODE_IS lock on kSecondaryNss1 as a secondary collection should fail.
diff --git a/src/mongo/db/commands/dbcheck.cpp b/src/mongo/db/commands/dbcheck.cpp
index 99674e07f7b..68fe56756d0 100644
--- a/src/mongo/db/commands/dbcheck.cpp
+++ b/src/mongo/db/commands/dbcheck.cpp
@@ -543,7 +543,7 @@ private:
collection = catalog->establishConsistentCollection(
opCtx,
- {DatabaseName{info.nss.db()}, info.uuid},
+ {info.nss.dbName(), info.uuid},
opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx));
} else {
autoColl.emplace(opCtx, info.nss, MODE_IS);
diff --git a/src/mongo/db/commands/internal_rename_if_options_and_indexes_match_cmd.cpp b/src/mongo/db/commands/internal_rename_if_options_and_indexes_match_cmd.cpp
index 577734457a5..622bdbc387d 100644
--- a/src/mongo/db/commands/internal_rename_if_options_and_indexes_match_cmd.cpp
+++ b/src/mongo/db/commands/internal_rename_if_options_and_indexes_match_cmd.cpp
@@ -78,7 +78,7 @@ public:
}
// Check if the receiving shard is still the primary for the database
- DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, DatabaseName{fromNss.db()});
+ DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, fromNss.dbName());
// Acquiring the local part of the distributed locks for involved namespaces allows:
// - Serialize with sharded DDLs, ensuring no concurrent modifications of the
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index c915ffb2fcd..7862a96ea4a 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -577,9 +577,7 @@ public:
if (_dbprofile <= 0)
return false;
- if (CollectionCatalog::get(opCtx())
- ->getDatabaseProfileSettings(DatabaseName{getNSS().db()})
- .filter)
+ if (CollectionCatalog::get(opCtx())->getDatabaseProfileSettings(getNSS().dbName()).filter)
return true;
return elapsedTimeExcludingPauses() >= Milliseconds{serverGlobalParams.slowMS.load()};
diff --git a/src/mongo/db/fle_crud.cpp b/src/mongo/db/fle_crud.cpp
index ed7e1be568c..f8c6a24794b 100644
--- a/src/mongo/db/fle_crud.cpp
+++ b/src/mongo/db/fle_crud.cpp
@@ -1507,7 +1507,7 @@ std::vector<std::vector<FLEEdgeCountInfo>> FLEQueryInterfaceImpl::getTags(
getCountsCmd.setTokens(toTagSets(tokensSets));
getCountsCmd.setQueryType(queryTypeTranslation(type));
- auto response = _txnClient.runCommandSync(DatabaseName{nss.db()}, getCountsCmd.toBSON({}));
+ auto response = _txnClient.runCommandSync(nss.dbName(), getCountsCmd.toBSON({}));
auto status = getStatusFromWriteCommandReply(response);
uassertStatusOK(status);
diff --git a/src/mongo/db/op_observer/op_observer_impl_test.cpp b/src/mongo/db/op_observer/op_observer_impl_test.cpp
index 920c4dc198a..eb0bc007c3e 100644
--- a/src/mongo/db/op_observer/op_observer_impl_test.cpp
+++ b/src/mongo/db/op_observer/op_observer_impl_test.cpp
@@ -641,7 +641,7 @@ TEST_F(OpObserverTest, OnDropCollectionInlcudesTenantId) {
// Write to the oplog.
{
- AutoGetDb autoDb(opCtx.get(), DatabaseName{nss.db()}, MODE_X);
+ AutoGetDb autoDb(opCtx.get(), nss.dbName(), MODE_X);
WriteUnitOfWork wunit(opCtx.get());
opObserver.onDropCollection(
opCtx.get(), nss, uuid, 0U, OpObserver::CollectionDropType::kTwoPhase);
@@ -705,7 +705,7 @@ TEST_F(OpObserverTest, OnRenameCollectionIncludesTenantIdFeatureFlagOff) {
// Write to the oplog.
{
- AutoGetDb autoDb(opCtx.get(), DatabaseName{sourceNss.db()}, MODE_X);
+ AutoGetDb autoDb(opCtx.get(), sourceNss.dbName(), MODE_X);
WriteUnitOfWork wunit(opCtx.get());
opObserver.onRenameCollection(
opCtx.get(), sourceNss, targetNss, uuid, dropTargetUuid, 0U, stayTemp);
@@ -741,7 +741,7 @@ TEST_F(OpObserverTest, OnRenameCollectionIncludesTenantIdFeatureFlagOn) {
// Write to the oplog.
{
- AutoGetDb autoDb(opCtx.get(), DatabaseName{sourceNss.db()}, MODE_X);
+ AutoGetDb autoDb(opCtx.get(), sourceNss.dbName(), MODE_X);
WriteUnitOfWork wunit(opCtx.get());
opObserver.onRenameCollection(
opCtx.get(), sourceNss, targetNss, uuid, dropTargetUuid, 0U, stayTemp);
@@ -866,7 +866,7 @@ TEST_F(OpObserverTest, ImportCollectionOplogEntryIncludesTenantId) {
// Write to the oplog.
{
- AutoGetDb autoDb(opCtx.get(), DatabaseName{nss.db()}, MODE_X);
+ AutoGetDb autoDb(opCtx.get(), nss.dbName(), MODE_X);
WriteUnitOfWork wunit(opCtx.get());
opObserver.onImportCollection(opCtx.get(),
importUUID,
diff --git a/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp b/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp
index 4e76cf69f20..45207e93883 100644
--- a/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp
@@ -435,7 +435,7 @@ ShardServerProcessInterface::expectUnshardedCollectionInScope(
void ShardServerProcessInterface::checkOnPrimaryShardForDb(OperationContext* opCtx,
const NamespaceString& nss) {
- DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, DatabaseName{nss.db()});
+ DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, nss.dbName());
}
} // namespace mongo
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index f3e0fb62953..ac3b69e80f5 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -403,7 +403,7 @@ TEST_F(StorageInterfaceImplTest, InsertDocumentsReturnsOKWhenNoOperationsAreGive
StorageInterfaceImpl storage;
ASSERT_OK(storage.insertDocuments(opCtx, nss, {}));
- ASSERT_OK(storage.insertDocuments(opCtx, {DatabaseName{nss.db()}, *options.uuid}, {}));
+ ASSERT_OK(storage.insertDocuments(opCtx, {nss.dbName(), *options.uuid}, {}));
}
TEST_F(StorageInterfaceImplTest,
@@ -423,9 +423,9 @@ TEST_F(StorageInterfaceImplTest,
ASSERT_STRING_CONTAINS(status.reason(), "Collection::insertDocument got document without _id");
// Again, but specify the collection with its UUID.
- ASSERT_EQ(ErrorCodes::InternalError,
- storage.insertDocuments(
- opCtx, {DatabaseName{nss.db()}, *options.uuid}, transformInserts({op})));
+ ASSERT_EQ(
+ ErrorCodes::InternalError,
+ storage.insertDocuments(opCtx, {nss.dbName(), *options.uuid}, transformInserts({op})));
}
TEST_F(StorageInterfaceImplTest,
@@ -487,7 +487,7 @@ TEST_F(StorageInterfaceImplTest, InsertDocumentsSavesOperationsWhenCollSpecified
auto op1 = makeOplogEntry({Timestamp(Seconds(1), 0), 1LL});
auto op2 = makeOplogEntry({Timestamp(Seconds(1), 0), 1LL});
ASSERT_OK(storage.insertDocuments(
- opCtx, {DatabaseName{nss.db()}, *options.uuid}, transformInserts({op1, op2})));
+ opCtx, {nss.dbName(), *options.uuid}, transformInserts({op1, op2})));
// Check contents of oplog.
_assertDocumentsInCollectionEquals(opCtx, nss, {op1.obj, op2.obj});
@@ -538,7 +538,7 @@ TEST_F(StorageInterfaceImplTest, InsertDocWorksWithExistingCappedCollectionSpeci
StorageInterfaceImpl storage;
ASSERT_OK(storage.insertDocument(opCtx,
- {DatabaseName{nss.db()}, *options.uuid},
+ {nss.dbName(), *options.uuid},
{BSON("_id" << 1), Timestamp(1)},
OpTime::kUninitializedTerm));
AutoGetCollectionForReadCommand autoColl(opCtx, nss);
@@ -746,11 +746,11 @@ TEST_F(StorageInterfaceImplTest, DropCollectionWorksWithMissingCollection) {
auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
- ASSERT_FALSE(AutoGetDb(opCtx, DatabaseName{nss.db()}, MODE_IS).getDb());
+ ASSERT_FALSE(AutoGetDb(opCtx, nss.dbName(), MODE_IS).getDb());
ASSERT_OK(storage.dropCollection(opCtx, nss));
ASSERT_FALSE(AutoGetCollectionForReadCommand(opCtx, nss).getCollection());
// Database should not be created after running dropCollection.
- ASSERT_FALSE(AutoGetDb(opCtx, DatabaseName{nss.db()}, MODE_IS).getDb());
+ ASSERT_FALSE(AutoGetDb(opCtx, nss.dbName(), MODE_IS).getDb());
}
TEST_F(StorageInterfaceImplTest, DropCollectionWorksWithSystemCollection) {
@@ -2551,9 +2551,8 @@ TEST_F(StorageInterfaceImplTest, FindByIdReturnsNoSuchKeyWhenDocumentIsNotFound)
{doc3, Timestamp(0), OpTime::kUninitializedTerm}}));
ASSERT_EQUALS(ErrorCodes::NoSuchKey, storage.findById(opCtx, nss, doc2["_id"]).getStatus());
- ASSERT_EQUALS(
- ErrorCodes::NoSuchKey,
- storage.findById(opCtx, {DatabaseName{nss.db()}, *options.uuid}, doc2["_id"]).getStatus());
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey,
+ storage.findById(opCtx, {nss.dbName(), *options.uuid}, doc2["_id"]).getStatus());
}
TEST_F(StorageInterfaceImplTest, FindByIdReturnsDocumentWhenDocumentExists) {
@@ -2573,9 +2572,9 @@ TEST_F(StorageInterfaceImplTest, FindByIdReturnsDocumentWhenDocumentExists) {
{doc3, Timestamp(0), OpTime::kUninitializedTerm}}));
ASSERT_BSONOBJ_EQ(doc2, unittest::assertGet(storage.findById(opCtx, nss, doc2["_id"])));
- ASSERT_BSONOBJ_EQ(doc2,
- unittest::assertGet(storage.findById(
- opCtx, {DatabaseName{nss.db()}, *options.uuid}, doc2["_id"])));
+ ASSERT_BSONOBJ_EQ(
+ doc2,
+ unittest::assertGet(storage.findById(opCtx, {nss.dbName(), *options.uuid}, doc2["_id"])));
}
TEST_F(StorageInterfaceImplTest, FindByIdReturnsBadStatusIfPlanExecutorFails) {
@@ -2643,9 +2642,9 @@ TEST_F(StorageInterfaceImplTest, DeleteByIdReturnsNoSuchKeyWhenDocumentIsNotFoun
{{doc1, Timestamp(0), OpTime::kUninitializedTerm},
{doc3, Timestamp(0), OpTime::kUninitializedTerm}}));
ASSERT_EQUALS(ErrorCodes::NoSuchKey, storage.deleteById(opCtx, nss, doc2["_id"]).getStatus());
- ASSERT_EQUALS(ErrorCodes::NoSuchKey,
- storage.deleteById(opCtx, {DatabaseName{nss.db()}, *options.uuid}, doc2["_id"])
- .getStatus());
+ ASSERT_EQUALS(
+ ErrorCodes::NoSuchKey,
+ storage.deleteById(opCtx, {nss.dbName(), *options.uuid}, doc2["_id"]).getStatus());
_assertDocumentsInCollectionEquals(opCtx, nss, {doc1, doc3});
}
@@ -2699,7 +2698,7 @@ TEST_F(StorageInterfaceImplTest,
StorageInterfaceImpl storage;
NamespaceString nss = NamespaceString::createNamespaceString_forTest("mydb.coll");
NamespaceString wrongColl =
- NamespaceString::createNamespaceString_forTest(DatabaseName{nss.db()}, "wrongColl"_sd);
+ NamespaceString::createNamespaceString_forTest(nss.dbName(), "wrongColl"_sd);
ASSERT_OK(storage.createCollection(opCtx, nss, generateOptionsWithUuid()));
auto doc = BSON("_id" << 0 << "x" << 1);
auto status = storage.upsertById(opCtx, wrongColl, doc["_id"], doc);
@@ -2733,7 +2732,7 @@ TEST_F(StorageInterfaceImplTest, UpsertSingleDocumentReplacesExistingDocumentInC
// Again, but specify the collection's UUID.
ASSERT_OK(storage.upsertById(
- opCtx, {DatabaseName{nss.db()}, *options.uuid}, originalDoc["_id"], BSON("x" << 200)));
+ opCtx, {nss.dbName(), *options.uuid}, originalDoc["_id"], BSON("x" << 200)));
_assertDocumentsInCollectionEquals(opCtx,
nss,
{BSON("_id" << 0 << "x" << 0),
@@ -2765,10 +2764,8 @@ TEST_F(StorageInterfaceImplTest, UpsertSingleDocumentInsertsNewDocumentInCollect
BSON("_id" << 2 << "x" << 2),
BSON("_id" << 1 << "x" << 100)});
- ASSERT_OK(storage.upsertById(opCtx,
- {DatabaseName{nss.db()}, *options.uuid},
- BSON("" << 3).firstElement(),
- BSON("x" << 300)));
+ ASSERT_OK(storage.upsertById(
+ opCtx, {nss.dbName(), *options.uuid}, BSON("" << 3).firstElement(), BSON("x" << 300)));
_assertDocumentsInCollectionEquals(opCtx,
nss,
{BSON("_id" << 0 << "x" << 0),
@@ -2806,7 +2803,7 @@ TEST_F(StorageInterfaceImplTest,
BSON("_id" << 2 << "x" << 2)});
ASSERT_OK(storage.upsertById(
- opCtx, {DatabaseName{nss.db()}, *options.uuid}, originalDoc["_id"], BSON("x" << 200)));
+ opCtx, {nss.dbName(), *options.uuid}, originalDoc["_id"], BSON("x" << 200)));
_assertDocumentsInCollectionEquals(opCtx,
nss,
{BSON("_id" << 0 << "x" << 0),
@@ -2829,7 +2826,7 @@ TEST_F(StorageInterfaceImplTest, UpsertSingleDocumentReturnsFailedToParseOnNonSi
"Unable to update document with a non-simple _id query:");
ASSERT_EQ(storage.upsertById(opCtx,
- {DatabaseName{nss.db()}, *options.uuid},
+ {nss.dbName(), *options.uuid},
BSON("" << BSON("$gt" << 3)).firstElement(),
BSON("x" << 100)),
ErrorCodes::InvalidIdField);
@@ -2851,7 +2848,7 @@ TEST_F(StorageInterfaceImplTest,
ASSERT_STRING_CONTAINS(status.reason(),
"Unable to update document in a collection without an _id index.");
- ASSERT_EQ(storage.upsertById(opCtx, {DatabaseName{nss.db()}, *options.uuid}, doc["_id"], doc),
+ ASSERT_EQ(storage.upsertById(opCtx, {nss.dbName(), *options.uuid}, doc["_id"], doc),
ErrorCodes::IndexNotFound);
}
@@ -2872,12 +2869,11 @@ TEST_F(StorageInterfaceImplTest,
"Unknown modifier: $unknownUpdateOp. Expected a valid update modifier or pipeline-style "
"update specified as an array");
- ASSERT_THROWS_CODE(storage.upsertById(opCtx,
- {DatabaseName{nss.db()}, *options.uuid},
- BSON("" << 1).firstElement(),
- unknownUpdateOp),
- DBException,
- ErrorCodes::FailedToParse);
+ ASSERT_THROWS_CODE(
+ storage.upsertById(
+ opCtx, {nss.dbName(), *options.uuid}, BSON("" << 1).firstElement(), unknownUpdateOp),
+ DBException,
+ ErrorCodes::FailedToParse);
}
TEST_F(StorageInterfaceImplTest, DeleteByFilterReturnsNamespaceNotFoundWhenDatabaseDoesNotExist) {
@@ -2977,7 +2973,7 @@ TEST_F(StorageInterfaceImplTest, DeleteByFilterReturnsNamespaceNotFoundWhenColle
StorageInterfaceImpl storage;
NamespaceString nss = NamespaceString::createNamespaceString_forTest("mydb.coll");
NamespaceString wrongColl =
- NamespaceString::createNamespaceString_forTest(DatabaseName{nss.db()}, "wrongColl"_sd);
+ NamespaceString::createNamespaceString_forTest(nss.dbName(), "wrongColl"_sd);
ASSERT_OK(storage.createCollection(opCtx, nss, generateOptionsWithUuid()));
auto filter = BSON("x" << 1);
auto status = storage.deleteByFilter(opCtx, wrongColl, filter);
@@ -3148,7 +3144,7 @@ TEST_F(StorageInterfaceImplTest,
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
NamespaceString wrongColl =
- NamespaceString::createNamespaceString_forTest(DatabaseName{nss.db()}, "wrongColl"_sd);
+ NamespaceString::createNamespaceString_forTest(nss.dbName(), "wrongColl"_sd);
ASSERT_OK(storage.createCollection(opCtx, nss, generateOptionsWithUuid()));
ASSERT_EQUALS(ErrorCodes::NamespaceNotFound,
storage.getCollectionCount(opCtx, wrongColl).getStatus());
@@ -3192,7 +3188,7 @@ TEST_F(StorageInterfaceImplTest,
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
NamespaceString wrongColl =
- NamespaceString::createNamespaceString_forTest(DatabaseName{nss.db()}, "wrongColl"_sd);
+ NamespaceString::createNamespaceString_forTest(nss.dbName(), "wrongColl"_sd);
ASSERT_OK(storage.createCollection(opCtx, nss, generateOptionsWithUuid()));
ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, storage.setCollectionCount(opCtx, wrongColl, 3));
}
@@ -3211,7 +3207,7 @@ TEST_F(StorageInterfaceImplTest,
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
NamespaceString wrongColl =
- NamespaceString::createNamespaceString_forTest(DatabaseName{nss.db()}, "wrongColl"_sd);
+ NamespaceString::createNamespaceString_forTest(nss.dbName(), "wrongColl"_sd);
ASSERT_OK(storage.createCollection(opCtx, nss, generateOptionsWithUuid()));
ASSERT_EQUALS(ErrorCodes::NamespaceNotFound,
storage.getCollectionSize(opCtx, wrongColl).getStatus());
@@ -3255,7 +3251,7 @@ TEST_F(StorageInterfaceImplTest, SetIndexIsMultikeyReturnsNamespaceNotFoundForMi
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
NamespaceString wrongColl =
- NamespaceString::createNamespaceString_forTest(DatabaseName{nss.db()}, "wrongColl"_sd);
+ NamespaceString::createNamespaceString_forTest(nss.dbName(), "wrongColl"_sd);
ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
ASSERT_EQUALS(
ErrorCodes::NamespaceNotFound,
@@ -3269,7 +3265,7 @@ TEST_F(StorageInterfaceImplTest, SetIndexIsMultikeyLooksUpCollectionByUUID) {
auto options = generateOptionsWithUuid();
ASSERT_OK(storage.createCollection(opCtx, nss, options));
NamespaceString wrongColl =
- NamespaceString::createNamespaceString_forTest(DatabaseName{nss.db()}, "wrongColl"_sd);
+ NamespaceString::createNamespaceString_forTest(nss.dbName(), "wrongColl"_sd);
ASSERT_EQUALS(ErrorCodes::IndexNotFound,
storage.setIndexIsMultikey(
opCtx, wrongColl, *options.uuid, "foo", {}, {}, Timestamp(3, 3)));
diff --git a/src/mongo/db/s/analyze_shard_key_cmd_util.cpp b/src/mongo/db/s/analyze_shard_key_cmd_util.cpp
index 4bfbe6c84c5..36aa9283847 100644
--- a/src/mongo/db/s/analyze_shard_key_cmd_util.cpp
+++ b/src/mongo/db/s/analyze_shard_key_cmd_util.cpp
@@ -705,8 +705,7 @@ std::pair<BSONObj, Timestamp> generateSplitPoints(OperationContext* opCtx,
str::stream() << "Cannot analyze a shard key for a non-existing collection",
origCollUuid);
// Perform best-effort validation that the collection has not been dropped and recreated.
- uassert(CollectionUUIDMismatchInfo(
- DatabaseName{nss.db()}, collUuid, nss.coll().toString(), boost::none),
+ uassert(CollectionUUIDMismatchInfo(nss.dbName(), collUuid, nss.coll().toString(), boost::none),
str::stream() << "Found that the collection UUID has changed from " << collUuid
<< " to " << origCollUuid << " since the command started",
origCollUuid == collUuid);
@@ -824,11 +823,11 @@ KeyCharacteristicsMetrics calculateKeyCharacteristicsMetrics(OperationContext* o
str::stream() << "Cannot analyze a shard key for a non-existing collection",
collection);
// Perform best-effort validation that the collection has not been dropped and recreated.
- uassert(CollectionUUIDMismatchInfo(
- DatabaseName{nss.db()}, collUuid, nss.coll().toString(), boost::none),
- str::stream() << "Found that the collection UUID has changed from " << collUuid
- << " to " << collection->uuid() << " since the command started",
- collection->uuid() == collUuid);
+ uassert(
+ CollectionUUIDMismatchInfo(nss.dbName(), collUuid, nss.coll().toString(), boost::none),
+ str::stream() << "Found that the collection UUID has changed from " << collUuid
+ << " to " << collection->uuid() << " since the command started",
+ collection->uuid() == collUuid);
// Performs best-effort validation that the shard key does not contain an array field by
// extracting the shard key value from a random document in the collection and asserting
diff --git a/src/mongo/db/s/configure_query_analyzer_cmd.cpp b/src/mongo/db/s/configure_query_analyzer_cmd.cpp
index b8816e7a04d..7f6fe0b8c2d 100644
--- a/src/mongo/db/s/configure_query_analyzer_cmd.cpp
+++ b/src/mongo/db/s/configure_query_analyzer_cmd.cpp
@@ -62,7 +62,7 @@ constexpr int kMaxSampleRate = 1'000'000;
StatusWith<UUID> validateCollectionOptionsOnPrimaryShard(OperationContext* opCtx,
const NamespaceString& nss) {
ListCollections listCollections;
- listCollections.setDbName(DatabaseName{nss.db()});
+ listCollections.setDbName(nss.dbName());
listCollections.setFilter(BSON("name" << nss.coll()));
auto listCollectionsCmdObj =
CommandHelpers::filterCommandRequestForPassthrough(listCollections.toBSON({}));
diff --git a/src/mongo/db/s/move_primary/move_primary_donor_service.cpp b/src/mongo/db/s/move_primary/move_primary_donor_service.cpp
index b5f73fb3f77..7901d52ad85 100644
--- a/src/mongo/db/s/move_primary/move_primary_donor_service.cpp
+++ b/src/mongo/db/s/move_primary/move_primary_donor_service.cpp
@@ -278,7 +278,7 @@ void MovePrimaryDonorExternalState::syncDataOnRecipient(OperationContext* opCtx,
boost::optional<Timestamp> timestamp) {
MovePrimaryRecipientSyncData request;
request.setMovePrimaryCommonMetadata(getMetadata());
- request.setDbName(DatabaseName{getMetadata().getDatabaseName().db()});
+ request.setDbName(getMetadata().getDatabaseName().dbName());
if (timestamp) {
request.setReturnAfterReachingDonorTimestamp(*timestamp);
}
@@ -288,14 +288,14 @@ void MovePrimaryDonorExternalState::syncDataOnRecipient(OperationContext* opCtx,
void MovePrimaryDonorExternalState::abortMigrationOnRecipient(OperationContext* opCtx) {
MovePrimaryRecipientAbortMigration request;
request.setMovePrimaryCommonMetadata(getMetadata());
- request.setDbName(DatabaseName{getMetadata().getDatabaseName().db()});
+ request.setDbName(getMetadata().getDatabaseName().dbName());
_runCommandOnRecipient(opCtx, request.toBSON({}));
}
void MovePrimaryDonorExternalState::forgetMigrationOnRecipient(OperationContext* opCtx) {
MovePrimaryRecipientForgetMigration request;
request.setMovePrimaryCommonMetadata(getMetadata());
- request.setDbName(DatabaseName{getMetadata().getDatabaseName().db()});
+ request.setDbName(getMetadata().getDatabaseName().dbName());
_runCommandOnRecipient(opCtx, request.toBSON({}));
}
diff --git a/src/mongo/db/s/op_observer_sharding_impl.cpp b/src/mongo/db/s/op_observer_sharding_impl.cpp
index 613050cf0ea..42d8dbc28f3 100644
--- a/src/mongo/db/s/op_observer_sharding_impl.cpp
+++ b/src/mongo/db/s/op_observer_sharding_impl.cpp
@@ -121,7 +121,7 @@ void OpObserverShardingImpl::shardObserveInsertsOp(
auto* const css = shardingWriteRouter.getCss();
css->checkShardVersionOrThrow(opCtx);
- DatabaseShardingState::assertMatchingDbVersion(opCtx, DatabaseName{nss.db()});
+ DatabaseShardingState::assertMatchingDbVersion(opCtx, nss.dbName());
auto* const csr = checked_cast<CollectionShardingRuntime*>(css);
auto metadata = csr->getCurrentMetadataIfKnown();
@@ -163,7 +163,7 @@ void OpObserverShardingImpl::shardObserveUpdateOp(OperationContext* opCtx,
const bool inMultiDocumentTransaction) {
auto* const css = shardingWriteRouter.getCss();
css->checkShardVersionOrThrow(opCtx);
- DatabaseShardingState::assertMatchingDbVersion(opCtx, DatabaseName{nss.db()});
+ DatabaseShardingState::assertMatchingDbVersion(opCtx, nss.dbName());
auto* const csr = checked_cast<CollectionShardingRuntime*>(css);
auto metadata = csr->getCurrentMetadataIfKnown();
@@ -199,7 +199,7 @@ void OpObserverShardingImpl::shardObserveDeleteOp(OperationContext* opCtx,
const bool inMultiDocumentTransaction) {
auto* const css = shardingWriteRouter.getCss();
css->checkShardVersionOrThrow(opCtx);
- DatabaseShardingState::assertMatchingDbVersion(opCtx, DatabaseName{nss.db()});
+ DatabaseShardingState::assertMatchingDbVersion(opCtx, nss.dbName());
auto* const csr = checked_cast<CollectionShardingRuntime*>(css);
auto metadata = csr->getCurrentMetadataIfKnown();
diff --git a/src/mongo/db/s/query_analysis_writer.cpp b/src/mongo/db/s/query_analysis_writer.cpp
index 59b3be0002f..802040dd87e 100644
--- a/src/mongo/db/s/query_analysis_writer.cpp
+++ b/src/mongo/db/s/query_analysis_writer.cpp
@@ -81,7 +81,7 @@ BSONObj createIndex(OperationContext* opCtx, const NamespaceString& nss, const B
DBDirectClient client(opCtx);
client.runCommand(
- DatabaseName{nss.db()},
+ nss.dbName(),
BSON("createIndexes" << nss.coll().toString() << "indexes" << BSON_ARRAY(indexSpec)),
resObj);
diff --git a/src/mongo/db/s/query_analysis_writer_test.cpp b/src/mongo/db/s/query_analysis_writer_test.cpp
index a83a9d52c03..de6f687079e 100644
--- a/src/mongo/db/s/query_analysis_writer_test.cpp
+++ b/src/mongo/db/s/query_analysis_writer_test.cpp
@@ -210,8 +210,7 @@ protected:
void assertTTLIndexExists(const NamespaceString& nss, const std::string& name) const {
DBDirectClient client(operationContext());
BSONObj result;
- client.runCommand(
- DatabaseName{nss.db()}, BSON("listIndexes" << nss.coll().toString()), result);
+ client.runCommand(nss.dbName(), BSON("listIndexes" << nss.coll().toString()), result);
auto indexes = result.getObjectField("cursor").getField("firstBatch").Array();
auto iter = indexes.begin();
diff --git a/src/mongo/db/s/range_deletion_util.cpp b/src/mongo/db/s/range_deletion_util.cpp
index c46baf6b810..65513cf225f 100644
--- a/src/mongo/db/s/range_deletion_util.cpp
+++ b/src/mongo/db/s/range_deletion_util.cpp
@@ -261,10 +261,9 @@ ExecutorFuture<void> deleteRangeInBatchesWithExecutor(
return ExecutorFuture<void>(executor).then([=] {
return withTemporaryOperationContext(
[=](OperationContext* opCtx) {
- return deleteRangeInBatches(
- opCtx, DatabaseName{nss.db()}, collectionUuid, keyPattern, range);
+ return deleteRangeInBatches(opCtx, nss.dbName(), collectionUuid, keyPattern, range);
},
- DatabaseName{nss.db()},
+ nss.dbName(),
collectionUuid);
});
}
@@ -292,7 +291,7 @@ ExecutorFuture<void> waitForDeletionsToMajorityReplicate(
.waitUntilMajority(clientOpTime, CancellationToken::uncancelable())
.thenRunOn(executor);
},
- DatabaseName{nss.db()},
+ nss.dbName(),
collectionUuid);
}
@@ -546,7 +545,7 @@ SharedSemiFuture<void> removeDocumentsInRange(
[&](OperationContext* opCtx) {
removePersistentRangeDeletionTask(opCtx, collectionUuid, range);
},
- DatabaseName{nss.db()},
+ nss.dbName(),
collectionUuid);
} catch (const DBException& e) {
LOGV2_ERROR(23770,
diff --git a/src/mongo/db/s/rename_collection_coordinator.cpp b/src/mongo/db/s/rename_collection_coordinator.cpp
index 85a0764271b..482d24dc253 100644
--- a/src/mongo/db/s/rename_collection_coordinator.cpp
+++ b/src/mongo/db/s/rename_collection_coordinator.cpp
@@ -364,7 +364,7 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
// - Locally rename source to target
ShardsvrRenameCollectionParticipant renameCollParticipantRequest(
fromNss, _doc.getSourceUUID().value());
- renameCollParticipantRequest.setDbName(DatabaseName{fromNss.db()});
+ renameCollParticipantRequest.setDbName(fromNss.dbName());
renameCollParticipantRequest.setTargetUUID(_doc.getTargetUUID());
renameCollParticipantRequest.setRenameCollectionRequest(_request);
const auto cmdObj = CommandHelpers::appendMajorityWriteConcern(
@@ -455,7 +455,7 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
// - Unblock CRUD on participants for both source and destination collections
ShardsvrRenameCollectionUnblockParticipant unblockParticipantRequest(
fromNss, _doc.getSourceUUID().value());
- unblockParticipantRequest.setDbName(DatabaseName{fromNss.db()});
+ unblockParticipantRequest.setDbName(fromNss.dbName());
unblockParticipantRequest.setRenameCollectionRequest(_request);
auto const cmdObj = CommandHelpers::appendMajorityWriteConcern(
unblockParticipantRequest.toBSON({}));
diff --git a/src/mongo/db/s/reshard_collection_coordinator.cpp b/src/mongo/db/s/reshard_collection_coordinator.cpp
index 20e5f5854ca..03d13536ccc 100644
--- a/src/mongo/db/s/reshard_collection_coordinator.cpp
+++ b/src/mongo/db/s/reshard_collection_coordinator.cpp
@@ -157,7 +157,7 @@ ExecutorFuture<void> ReshardCollectionCoordinator::_runImpl(
_updateStateDocument(opCtx, std::move(newDoc));
ConfigsvrReshardCollection configsvrReshardCollection(nss(), _doc.getKey());
- configsvrReshardCollection.setDbName(DatabaseName{nss().db()});
+ configsvrReshardCollection.setDbName(nss().dbName());
configsvrReshardCollection.setUnique(_doc.getUnique());
configsvrReshardCollection.setCollation(_doc.getCollation());
configsvrReshardCollection.set_presetReshardedChunks(_doc.get_presetReshardedChunks());
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
index d19beb96dca..26b7cde064e 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
@@ -697,7 +697,7 @@ void executeMetadataChangesInTxn(
BSONObj makeFlushRoutingTableCacheUpdatesCmd(const NamespaceString& nss) {
auto cmd = FlushRoutingTableCacheUpdatesWithWriteConcern(nss);
cmd.setSyncFromConfig(true);
- cmd.setDbName(DatabaseName{nss.db()});
+ cmd.setDbName(nss.dbName());
return cmd.toBSON(
BSON(WriteConcernOptions::kWriteConcernField << kMajorityWriteConcern.toBSON()));
}
diff --git a/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp b/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp
index a1249ddcbb9..79e3a076bd9 100644
--- a/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp
+++ b/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp
@@ -239,7 +239,7 @@ bool ReshardingCoordinatorCleaner::_checkExistsTempReshardingCollection(
void ReshardingCoordinatorCleaner::_dropTemporaryReshardingCollection(
OperationContext* opCtx, const NamespaceString& tempReshardingNss) {
ShardsvrDropCollection dropCollectionCommand(tempReshardingNss);
- dropCollectionCommand.setDbName(DatabaseName{tempReshardingNss.db()});
+ dropCollectionCommand.setDbName(tempReshardingNss.dbName());
const auto dbInfo = uassertStatusOK(
Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, tempReshardingNss.db()));
diff --git a/src/mongo/db/s/sessions_collection_config_server.cpp b/src/mongo/db/s/sessions_collection_config_server.cpp
index bc096dc414f..b953c145f5c 100644
--- a/src/mongo/db/s/sessions_collection_config_server.cpp
+++ b/src/mongo/db/s/sessions_collection_config_server.cpp
@@ -65,7 +65,7 @@ void SessionsCollectionConfigServer::_shardCollectionIfNeeded(OperationContext*
CreateCollectionRequest requestParamsObj;
requestParamsObj.setShardKey(BSON("_id" << 1));
shardsvrCollRequest.setCreateCollectionRequest(std::move(requestParamsObj));
- shardsvrCollRequest.setDbName(DatabaseName{NamespaceString::kLogicalSessionsNamespace.db()});
+ shardsvrCollRequest.setDbName(NamespaceString::kLogicalSessionsNamespace.dbName());
cluster::createCollection(opCtx, shardsvrCollRequest);
}
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.cpp b/src/mongo/db/s/sharding_ddl_coordinator.cpp
index 315e6aa61a9..2231362157e 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.cpp
+++ b/src/mongo/db/s/sharding_ddl_coordinator.cpp
@@ -289,8 +289,7 @@ SemiFuture<void> ShardingDDLCoordinator::run(std::shared_ptr<executor::ScopedTas
metadata().getDatabaseVersion() /* databaseVersion */);
// Check under the dbLock if this is still the primary shard for the database
- DatabaseShardingState::assertIsPrimaryShardForDb(opCtx,
- DatabaseName{originalNss().db()});
+ DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, originalNss().dbName());
};
})
.then([this, executor, token, anchor = shared_from_this()] {
diff --git a/src/mongo/db/s/sharding_ddl_coordinator_service.cpp b/src/mongo/db/s/sharding_ddl_coordinator_service.cpp
index 3cd9fc18681..9e7d38a7570 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator_service.cpp
+++ b/src/mongo/db/s/sharding_ddl_coordinator_service.cpp
@@ -278,7 +278,7 @@ ShardingDDLCoordinatorService::getOrCreateInstance(OperationContext* opCtx, BSON
uassert(ErrorCodes::IllegalOperation,
"Request sent without attaching database version",
clientDbVersion);
- DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, DatabaseName{nss.db()});
+ DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, nss.dbName());
coorMetadata.setDatabaseVersion(clientDbVersion);
}
diff --git a/src/mongo/db/s/sharding_state_recovery.cpp b/src/mongo/db/s/sharding_state_recovery.cpp
index 8ec7f8b12fb..93d1b3cfd3d 100644
--- a/src/mongo/db/s/sharding_state_recovery.cpp
+++ b/src/mongo/db/s/sharding_state_recovery.cpp
@@ -147,8 +147,7 @@ Status modifyRecoveryDocument(OperationContext* opCtx,
try {
// Use boost::optional so we can release the locks early
boost::optional<AutoGetDb> autoGetDb;
- autoGetDb.emplace(
- opCtx, DatabaseName{NamespaceString::kServerConfigurationNamespace.db()}, MODE_X);
+ autoGetDb.emplace(opCtx, NamespaceString::kServerConfigurationNamespace.dbName(), MODE_X);
const auto configOpTime = [&]() {
const auto vcTime = VectorClock::get(opCtx)->getTime();
diff --git a/src/mongo/db/s/sharding_util.cpp b/src/mongo/db/s/sharding_util.cpp
index b329b4b193a..4f7b74ea7ff 100644
--- a/src/mongo/db/s/sharding_util.cpp
+++ b/src/mongo/db/s/sharding_util.cpp
@@ -58,7 +58,7 @@ void tellShardsToRefreshCollection(OperationContext* opCtx,
const std::shared_ptr<executor::TaskExecutor>& executor) {
auto cmd = FlushRoutingTableCacheUpdatesWithWriteConcern(nss);
cmd.setSyncFromConfig(true);
- cmd.setDbName(DatabaseName{nss.db()});
+ cmd.setDbName(nss.dbName());
auto cmdObj = CommandHelpers::appendMajorityWriteConcern(cmd.toBSON({}));
sendCommandToShards(opCtx, DatabaseName::kAdmin.db(), cmdObj, shardIds, executor);
}
diff --git a/src/mongo/db/s/shardsvr_check_metadata_consistency_command.cpp b/src/mongo/db/s/shardsvr_check_metadata_consistency_command.cpp
index 673a7368bc7..cb96382be0f 100644
--- a/src/mongo/db/s/shardsvr_check_metadata_consistency_command.cpp
+++ b/src/mongo/db/s/shardsvr_check_metadata_consistency_command.cpp
@@ -202,7 +202,7 @@ public:
const auto dbDDLLock = ddlLockManager->lock(
opCtx, nss.db(), kLockReason, DDLLockManager::kDefaultLockTimeout);
- DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, DatabaseName{nss.db()});
+ DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, nss.dbName());
return establishCursors(opCtx,
Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(),
diff --git a/src/mongo/db/s/shardsvr_drop_indexes_command.cpp b/src/mongo/db/s/shardsvr_drop_indexes_command.cpp
index 237ab96fa0f..922e2f6659c 100644
--- a/src/mongo/db/s/shardsvr_drop_indexes_command.cpp
+++ b/src/mongo/db/s/shardsvr_drop_indexes_command.cpp
@@ -160,7 +160,7 @@ ShardsvrDropIndexesCommand::Invocation::Response ShardsvrDropIndexesCommand::Inv
auto dbDDLLock = ddlLockManager->lock(opCtx, ns().db(), lockReason, lockTimeout);
// Check under the dbLock if this is still the primary shard for the database
- DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, DatabaseName{ns().db()});
+ DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, ns().dbName());
auto resolvedNs = ns();
auto dropIdxBSON = dropIdxCmd.toBSON({});
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 6cb860f6f73..7105a9a2217 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -539,7 +539,7 @@ void appendErrorLabelsAndTopologyVersion(OperationContext* opCtx,
void appendAdditionalParticipants(OperationContext* opCtx,
BSONObjBuilder* commandBodyFieldsBob,
const std::string& commandName,
- const std::string& ns) {
+ StringData ns) {
// (Ignore FCV check): This feature doesn't have any upgrade/downgrade concerns.
if (gFeatureFlagAdditionalParticipants.isEnabledAndIgnoreFCVUnsafe()) {
std::vector<BSONElement> shardIdsFromFpData;
@@ -549,7 +549,7 @@ void appendAdditionalParticipants(OperationContext* opCtx,
data.hasField("shardId")) {
shardIdsFromFpData = data.getField("shardId").Array();
return ((data.getStringField("cmdName") == commandName) &&
- (data.getStringField("ns").toString() == ns));
+ (data.getStringField("ns") == ns));
}
return false;
}))) {
@@ -1271,7 +1271,7 @@ void RunCommandImpl::_epilogue() {
_ecd->getLastOpBeforeRun(),
_ecd->getLastOpAfterRun());
appendAdditionalParticipants(
- opCtx, &body, command->getName(), _ecd->getInvocation()->ns().ns().toString());
+ opCtx, &body, command->getName(), _ecd->getInvocation()->ns().ns());
}
auto commandBodyBob = replyBuilder->getBodyBuilder();
@@ -1946,7 +1946,7 @@ void ExecCommandDatabase::_handleFailure(Status status) {
getLastOpBeforeRun(),
getLastOpAfterRun());
appendAdditionalParticipants(
- opCtx, &_extraFieldsBuilder, command->getName(), _execContext->nsString().ns().toString());
+ opCtx, &_extraFieldsBuilder, command->getName(), _execContext->nsString().ns());
BSONObjBuilder metadataBob;
behaviors.appendReplyMetadata(opCtx, request, &metadataBob);
diff --git a/src/mongo/db/shard_role.cpp b/src/mongo/db/shard_role.cpp
index c785c27556d..a696699746d 100644
--- a/src/mongo/db/shard_role.cpp
+++ b/src/mongo/db/shard_role.cpp
@@ -205,8 +205,7 @@ void checkPlacementVersion(OperationContext* opCtx,
const PlacementConcern& placementConcern) {
const auto& receivedDbVersion = placementConcern.dbVersion;
if (receivedDbVersion) {
- DatabaseShardingState::assertMatchingDbVersion(
- opCtx, DatabaseName{nss.db()}, *receivedDbVersion);
+ DatabaseShardingState::assertMatchingDbVersion(opCtx, nss.dbName(), *receivedDbVersion);
}
const auto& receivedShardVersion = placementConcern.shardVersion;
diff --git a/src/mongo/db/shard_role_test.cpp b/src/mongo/db/shard_role_test.cpp
index a53eafa60f3..4cb2e2775c7 100644
--- a/src/mongo/db/shard_role_test.cpp
+++ b/src/mongo/db/shard_role_test.cpp
@@ -838,18 +838,18 @@ TEST_F(ShardRoleTest, YieldAndRestoreAcquisitionWithLocks) {
},
MODE_IX);
- ASSERT_TRUE(opCtx()->lockState()->isDbLockedForMode(DatabaseName{nss.db()}, MODE_IX));
+ ASSERT_TRUE(opCtx()->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX));
ASSERT_TRUE(opCtx()->lockState()->isCollectionLockedForMode(nss, MODE_IX));
// Yield the resources
auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx());
ASSERT(yieldedTransactionResources);
- ASSERT_FALSE(opCtx()->lockState()->isDbLockedForMode(DatabaseName{nss.db()}, MODE_IX));
+ ASSERT_FALSE(opCtx()->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX));
ASSERT_FALSE(opCtx()->lockState()->isCollectionLockedForMode(nss, MODE_IX));
// Restore the resources
restoreTransactionResourcesToOperationContext(opCtx(), std::move(*yieldedTransactionResources));
- ASSERT_TRUE(opCtx()->lockState()->isDbLockedForMode(DatabaseName{nss.db()}, MODE_IX));
+ ASSERT_TRUE(opCtx()->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX));
ASSERT_TRUE(opCtx()->lockState()->isCollectionLockedForMode(nss, MODE_IX));
}
@@ -898,7 +898,7 @@ TEST_F(ShardRoleTest, RestoreForWriteFailsIfPlacementConcernNoLongerMet) {
ASSERT_FALSE(exInfo->getCriticalSectionSignal().is_initialized());
});
- ASSERT_FALSE(opCtx()->lockState()->isDbLockedForMode(DatabaseName{nss.db()}, MODE_IX));
+ ASSERT_FALSE(opCtx()->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX));
ASSERT_FALSE(opCtx()->lockState()->isCollectionLockedForMode(nss, MODE_IX));
}
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index c14bd33b50b..242c87e3071 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -898,8 +898,8 @@ void TTLMonitor::onStepUp(OperationContext* opCtx) {
// processCollModCommand() will acquire MODE_X access to the collection.
BSONObjBuilder builder;
- uassertStatusOK(processCollModCommand(
- opCtx, {DatabaseName{nss->db()}, uuid}, collModCmd, &builder));
+ uassertStatusOK(
+ processCollModCommand(opCtx, {nss->dbName(), uuid}, collModCmd, &builder));
auto result = builder.obj();
LOGV2(
6847701,
diff --git a/src/mongo/db/views/view_definition_test.cpp b/src/mongo/db/views/view_definition_test.cpp
index 19010030237..cdb319e7314 100644
--- a/src/mongo/db/views/view_definition_test.cpp
+++ b/src/mongo/db/views/view_definition_test.cpp
@@ -118,8 +118,7 @@ TEST(ViewDefinitionTest, SetViewOnSucceedsIfNewViewOnIsInSameDatabaseAsView) {
}
TEST(ViewDefinitionTest, SetPipelineSucceedsOnValidArrayBSONElement) {
- ViewDefinition viewDef(
- DatabaseName{viewNss.db()}, viewNss.coll(), backingNss.coll(), BSONObj(), nullptr);
+ ViewDefinition viewDef(viewNss.dbName(), viewNss.coll(), backingNss.coll(), BSONObj(), nullptr);
ASSERT(viewDef.pipeline().empty());
BSONObj matchStage = BSON("match" << BSON("x" << 9));
diff --git a/src/mongo/dbtests/dbtests.cpp b/src/mongo/dbtests/dbtests.cpp
index 4a45aabafc4..09be9e4f4d4 100644
--- a/src/mongo/dbtests/dbtests.cpp
+++ b/src/mongo/dbtests/dbtests.cpp
@@ -181,7 +181,7 @@ Status createIndexFromSpec(OperationContext* opCtx, StringData ns, const BSONObj
WriteContextForTests::WriteContextForTests(OperationContext* opCtx, StringData ns)
: _opCtx(opCtx), _nss(ns) {
// Lock the database and collection
- _autoDb.emplace(opCtx, DatabaseName{_nss.db()}, MODE_IX);
+ _autoDb.emplace(opCtx, _nss.dbName(), MODE_IX);
_collLock.emplace(opCtx, _nss, MODE_IX);
const bool doShardVersionCheck = false;
diff --git a/src/mongo/s/commands/cluster_drop_collection_cmd.cpp b/src/mongo/s/commands/cluster_drop_collection_cmd.cpp
index f20e8cd974c..19139bc73dc 100644
--- a/src/mongo/s/commands/cluster_drop_collection_cmd.cpp
+++ b/src/mongo/s/commands/cluster_drop_collection_cmd.cpp
@@ -101,7 +101,7 @@ public:
// Send it to the primary shard
ShardsvrDropCollection dropCollectionCommand(nss);
- dropCollectionCommand.setDbName(DatabaseName{nss.db()});
+ dropCollectionCommand.setDbName(nss.dbName());
dropCollectionCommand.setCollectionUUID(request().getCollectionUUID());
auto cmdResponse = executeCommandAgainstDatabasePrimary(
diff --git a/src/mongo/s/commands/cluster_rename_collection_cmd.cpp b/src/mongo/s/commands/cluster_rename_collection_cmd.cpp
index 9f2f32ec4dd..b0b6b8d8264 100644
--- a/src/mongo/s/commands/cluster_rename_collection_cmd.cpp
+++ b/src/mongo/s/commands/cluster_rename_collection_cmd.cpp
@@ -91,7 +91,7 @@ public:
request().getDropTarget());
ShardsvrRenameCollection renameCollRequest(fromNss);
- renameCollRequest.setDbName(DatabaseName{fromNss.db()});
+ renameCollRequest.setDbName(fromNss.dbName());
renameCollRequest.setRenameCollectionRequest(renameCollReq);
renameCollRequest.setAllowEncryptedCollectionRename(
AuthorizationSession::get(opCtx->getClient())
diff --git a/src/mongo/s/commands/cluster_set_cluster_parameter_cmd.cpp b/src/mongo/s/commands/cluster_set_cluster_parameter_cmd.cpp
index 9cb643ff895..43f31d87181 100644
--- a/src/mongo/s/commands/cluster_set_cluster_parameter_cmd.cpp
+++ b/src/mongo/s/commands/cluster_set_cluster_parameter_cmd.cpp
@@ -66,7 +66,7 @@ public:
void typedRun(OperationContext* opCtx) {
ConfigsvrSetClusterParameter configsvrSetClusterParameter(
request().getCommandParameter());
- configsvrSetClusterParameter.setDbName(DatabaseName{ns().db()});
+ configsvrSetClusterParameter.setDbName(ns().dbName());
const auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
diff --git a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
index 077a1c56a54..32a218c8401 100644
--- a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
+++ b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
@@ -112,7 +112,7 @@ public:
requestParamsObj.setImplicitlyCreateIndex(shardCollRequest.getImplicitlyCreateIndex());
requestParamsObj.setEnforceUniquenessCheck(shardCollRequest.getEnforceUniquenessCheck());
shardsvrCollRequest.setCreateCollectionRequest(std::move(requestParamsObj));
- shardsvrCollRequest.setDbName(DatabaseName{nss.db()});
+ shardsvrCollRequest.setDbName(nss.dbName());
cluster::createCollection(opCtx, shardsvrCollRequest);