-rw-r--r--  src/mongo/db/catalog/coll_mod.cpp                                       2
-rw-r--r--  src/mongo/db/catalog/collection_compact.cpp                             2
-rw-r--r--  src/mongo/db/catalog/database_impl.cpp                                 12
-rw-r--r--  src/mongo/db/catalog/drop_indexes.cpp                                   6
-rw-r--r--  src/mongo/db/commands/drop_indexes.cpp                                  2
-rw-r--r--  src/mongo/db/commands/test_commands.cpp                                 2
-rw-r--r--  src/mongo/db/db_raii.cpp                                               13
-rw-r--r--  src/mongo/db/db_raii.h                                                  2
-rw-r--r--  src/mongo/db/dbhelpers.cpp                                             35
-rw-r--r--  src/mongo/db/dbhelpers.h                                               12
-rw-r--r--  src/mongo/db/index_build_entry_helpers.cpp                              6
-rw-r--r--  src/mongo/db/index_builds_coordinator.cpp                               3
-rw-r--r--  src/mongo/db/keys_collection_cache_test.cpp                             2
-rw-r--r--  src/mongo/db/mongod_main.cpp                                            2
-rw-r--r--  src/mongo/db/op_observer_impl.cpp                                       4
-rw-r--r--  src/mongo/db/repl/apply_ops.cpp                                         4
-rw-r--r--  src/mongo/db/repl/bgsync.cpp                                            3
-rw-r--r--  src/mongo/db/repl/oplog.cpp                                             2
-rw-r--r--  src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp                   2
-rw-r--r--  src/mongo/db/repl/oplog_applier_utils.cpp                               2
-rw-r--r--  src/mongo/db/repl/oplog_interface_local.cpp                             2
-rw-r--r--  src/mongo/db/repl/repl_set_commands.cpp                                 2
-rw-r--r--  src/mongo/db/repl/replication_consistency_markers_impl_test.cpp         2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp      30
-rw-r--r--  src/mongo/db/repl/replication_info.cpp                                  3
-rw-r--r--  src/mongo/db/repl/rollback_impl.cpp                                     3
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp                                       4
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.cpp                            6
-rw-r--r--  src/mongo/db/repl/storage_interface_impl_test.cpp                       2
-rw-r--r--  src/mongo/db/repl/storage_timestamp_test.cpp                           10
-rw-r--r--  src/mongo/db/repl/tenant_migration_donor_service.cpp                    4
-rw-r--r--  src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp          4
-rw-r--r--  src/mongo/db/repl/tenant_migration_recipient_service_test.cpp           2
-rw-r--r--  src/mongo/db/repl/tenant_migration_util.cpp                             2
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp                        2
-rw-r--r--  src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp        2
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_service.cpp                  2
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp             2
-rw-r--r--  src/mongo/db/serverless/shard_split_donor_service.cpp                   2
-rw-r--r--  src/mongo/db/serverless/shard_split_utils.cpp                           6
-rw-r--r--  src/mongo/db/startup_recovery.cpp                                       2
-rw-r--r--  src/mongo/db/storage/storage_repair_observer.cpp                        4
-rw-r--r--  src/mongo/db/storage/storage_repair_observer_test.cpp                   9
-rw-r--r--  src/mongo/dbtests/counttests.cpp                                        2
-rw-r--r--  src/mongo/dbtests/dbhelper_tests.cpp                                    4
-rw-r--r--  src/mongo/dbtests/dbtests.cpp                                           2
-rw-r--r--  src/mongo/dbtests/indexcatalogtests.cpp                                10
-rw-r--r--  src/mongo/dbtests/pdfiletests.cpp                                       2
-rw-r--r--  src/mongo/dbtests/query_stage_count.cpp                                 2
-rw-r--r--  src/mongo/dbtests/query_stage_fetch.cpp                                 2
-rw-r--r--  src/mongo/dbtests/query_stage_ixscan.cpp                                2
-rw-r--r--  src/mongo/dbtests/querytests.cpp                                       24
-rw-r--r--  src/mongo/dbtests/repltests.cpp                                         8
-rw-r--r--  src/mongo/dbtests/rollbacktests.cpp                                    16
54 files changed, 143 insertions(+), 156 deletions(-)
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index f3aae154159..4ce332bd825 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -770,7 +770,7 @@ Status _collModInternal(OperationContext* opCtx,
// This is necessary to set up CurOp, update the Top stats, and check shard version if the
// operation is not on a view.
- OldClientContext ctx(opCtx, nss.ns(), !view);
+ OldClientContext ctx(opCtx, nss, !view);
bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss);
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index f37417cb3ac..14fcad95fad 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -86,7 +86,7 @@ StatusWith<int64_t> compactCollection(OperationContext* opCtx,
auto recordStore = collection->getRecordStore();
- OldClientContext ctx(opCtx, collectionNss.ns());
+ OldClientContext ctx(opCtx, collectionNss);
if (!recordStore->compactSupported())
return Status(ErrorCodes::CommandNotSupported,
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index e8c9bf6c571..41fa27c8f4a 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -213,7 +213,7 @@ Status DatabaseImpl::init(OperationContext* const opCtx) {
try {
Lock::CollectionLock systemViewsLock(
opCtx,
- NamespaceString(_name.db(), NamespaceString::kSystemDotViewsCollectionName),
+ NamespaceString(_name, NamespaceString::kSystemDotViewsCollectionName),
MODE_IS);
ViewsForDatabase viewsForDb{std::make_unique<DurableViewCatalogImpl>(this)};
Status reloadStatus = viewsForDb.reload(opCtx);
@@ -466,7 +466,7 @@ Status DatabaseImpl::dropCollection(OperationContext* opCtx,
return Status::OK();
}
- invariant(nss.db() == _name.db());
+ invariant(nss.dbName() == _name);
// Returns true if the supplied namespace 'nss' is a system collection that can be dropped,
// false otherwise.
@@ -653,7 +653,7 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
void DatabaseImpl::_dropCollectionIndexes(OperationContext* opCtx,
const NamespaceString& nss,
Collection* collection) const {
- invariant(_name.db() == nss.db());
+ invariant(_name == nss.dbName());
LOGV2_DEBUG(
20316, 1, "dropCollection: {namespace} - dropAllIndexes start", "namespace"_attr = nss);
collection->getIndexCatalog()->dropAllIndexes(opCtx, collection, true, {});
@@ -703,8 +703,8 @@ Status DatabaseImpl::renameCollection(OperationContext* opCtx,
invariant(opCtx->lockState()->isCollectionLockedForMode(fromNss, MODE_X));
invariant(opCtx->lockState()->isCollectionLockedForMode(toNss, MODE_X));
- invariant(fromNss.db() == _name.db());
- invariant(toNss.db() == _name.db());
+ invariant(fromNss.dbName() == _name);
+ invariant(toNss.dbName() == _name);
if (CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, toNss)) {
return Status(ErrorCodes::NamespaceExists,
str::stream() << "Cannot rename '" << fromNss << "' to '" << toNss
@@ -990,7 +990,7 @@ StatusWith<NamespaceString> DatabaseImpl::makeUniqueCollectionNamespace(
collectionName.begin(),
replacePercentSign);
- NamespaceString nss(_name.db(), collectionName);
+ NamespaceString nss(_name, collectionName);
if (!CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss)) {
return nss;
}
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index 0e383f9add5..a0371a80e0a 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -510,7 +510,7 @@ DropIndexesReply dropIndexes(OperationContext* opCtx,
WriteUnitOfWork wuow(opCtx);
// This is necessary to check shard version.
- OldClientContext ctx(opCtx, (*collection)->ns().ns());
+ OldClientContext ctx(opCtx, (*collection)->ns());
// Iterate through all the aborted indexes and drop any indexes that are ready in
// the index catalog. This would indicate that while we yielded our locks during the
@@ -574,7 +574,7 @@ DropIndexesReply dropIndexes(OperationContext* opCtx,
WriteUnitOfWork wunit(opCtx);
// This is necessary to check shard version.
- OldClientContext ctx(opCtx, (*collection)->ns().ns());
+ OldClientContext ctx(opCtx, (*collection)->ns());
dropReadyIndexes(
opCtx, collection->getWritableCollection(opCtx), indexNames, &reply, false);
wunit.commit();
@@ -623,7 +623,7 @@ Status dropIndexesForApplyOps(OperationContext* opCtx,
WriteUnitOfWork wunit(opCtx);
// This is necessary to check shard version.
- OldClientContext ctx(opCtx, nss.ns());
+ OldClientContext ctx(opCtx, nss);
DropIndexesReply ignoredReply;
dropReadyIndexes(opCtx,
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index e95da5c1378..9313915c9d3 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -177,7 +177,7 @@ public:
collection->uuid());
// This is necessary to set up CurOp and update the Top stats.
- OldClientContext ctx(opCtx, toReIndexNss.ns());
+ OldClientContext ctx(opCtx, toReIndexNss);
const auto defaultIndexVersion = IndexDescriptor::getDefaultIndexVersion();
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index f335d0623a1..351b7c99dc7 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -99,7 +99,7 @@ public:
// TODO SERVER-66561 Use DatabaseName obj passed in
DatabaseName dbName(boost::none, dbname);
Lock::DBLock lk(opCtx, dbName, MODE_X);
- OldClientContext ctx(opCtx, nss.ns());
+ OldClientContext ctx(opCtx, nss);
Database* db = ctx.db();
WriteUnitOfWork wunit(opCtx);
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index 86490bc9e36..9fd6cd273cb 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -885,11 +885,11 @@ AutoGetCollectionForReadCommandLockFree::AutoGetCollectionForReadCommandLockFree
}
}
-OldClientContext::OldClientContext(OperationContext* opCtx, const std::string& ns, bool doVersion)
+OldClientContext::OldClientContext(OperationContext* opCtx,
+ const NamespaceString& nss,
+ bool doVersion)
: _opCtx(opCtx) {
- // TODO SERVER-65488 Grab the DatabaseName from the NamespaceString passed in
- const auto db = nsToDatabaseSubstring(ns);
- const DatabaseName dbName(boost::none, db);
+ const auto dbName = nss.dbName();
_db = DatabaseHolder::get(opCtx)->getDb(opCtx, dbName);
if (!_db) {
@@ -906,14 +906,13 @@ OldClientContext::OldClientContext(OperationContext* opCtx, const std::string& n
case dbDelete: // path, so no need to check them here as well
break;
default:
- CollectionShardingState::get(_opCtx, NamespaceString(ns))
- ->checkShardVersionOrThrow(_opCtx);
+ CollectionShardingState::get(_opCtx, nss)->checkShardVersionOrThrow(_opCtx);
break;
}
}
stdx::lock_guard<Client> lk(*_opCtx->getClient());
- currentOp->enter_inlock(ns.c_str(),
+ currentOp->enter_inlock(nss.toString().c_str(),
CollectionCatalog::get(opCtx)->getDatabaseProfileLevel(_db->name()));
}
diff --git a/src/mongo/db/db_raii.h b/src/mongo/db/db_raii.h
index 63bdf8c621d..f2b2b8e0c0c 100644
--- a/src/mongo/db/db_raii.h
+++ b/src/mongo/db/db_raii.h
@@ -541,7 +541,7 @@ class OldClientContext {
OldClientContext& operator=(const OldClientContext&) = delete;
public:
- OldClientContext(OperationContext* opCtx, const std::string& ns, bool doVersion = true);
+ OldClientContext(OperationContext* opCtx, const NamespaceString& nss, bool doVersion = true);
~OldClientContext();
Database* db() const {
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 4afc53b4840..bcd4e652387 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -223,10 +223,10 @@ const CollectionPtr& getCollectionForRead(
}
}
-bool Helpers::getSingleton(OperationContext* opCtx, const char* ns, BSONObj& result) {
+bool Helpers::getSingleton(OperationContext* opCtx, const NamespaceString& nss, BSONObj& result) {
boost::optional<AutoGetCollectionForReadCommand> autoColl;
boost::optional<AutoGetOplog> autoOplog;
- const auto& collection = getCollectionForRead(opCtx, NamespaceString(ns), autoColl, autoOplog);
+ const auto& collection = getCollectionForRead(opCtx, nss, autoColl, autoOplog);
if (!collection) {
return false;
}
@@ -248,10 +248,10 @@ bool Helpers::getSingleton(OperationContext* opCtx, const char* ns, BSONObj& res
return false;
}
-bool Helpers::getLast(OperationContext* opCtx, const char* ns, BSONObj& result) {
+bool Helpers::getLast(OperationContext* opCtx, const NamespaceString& nss, BSONObj& result) {
boost::optional<AutoGetCollectionForReadCommand> autoColl;
boost::optional<AutoGetOplog> autoOplog;
- const auto& collection = getCollectionForRead(opCtx, NamespaceString(ns), autoColl, autoOplog);
+ const auto& collection = getCollectionForRead(opCtx, nss, autoColl, autoOplog);
if (!collection) {
return false;
}
@@ -272,25 +272,24 @@ bool Helpers::getLast(OperationContext* opCtx, const char* ns, BSONObj& result)
}
UpdateResult Helpers::upsert(OperationContext* opCtx,
- const string& ns,
+ const NamespaceString& nss,
const BSONObj& o,
bool fromMigrate) {
BSONElement e = o["_id"];
verify(e.type());
BSONObj id = e.wrap();
- return upsert(opCtx, ns, id, o, fromMigrate);
+ return upsert(opCtx, nss, id, o, fromMigrate);
}
UpdateResult Helpers::upsert(OperationContext* opCtx,
- const string& ns,
+ const NamespaceString& nss,
const BSONObj& filter,
const BSONObj& updateMod,
bool fromMigrate) {
- OldClientContext context(opCtx, ns);
+ OldClientContext context(opCtx, nss);
- const NamespaceString requestNs(ns);
auto request = UpdateRequest();
- request.setNamespaceString(requestNs);
+ request.setNamespaceString(nss);
request.setQuery(filter);
request.setUpdateModification(write_ops::UpdateModification::parseFromClassicUpdate(updateMod));
@@ -304,15 +303,14 @@ UpdateResult Helpers::upsert(OperationContext* opCtx,
}
void Helpers::update(OperationContext* opCtx,
- const string& ns,
+ const NamespaceString& nss,
const BSONObj& filter,
const BSONObj& updateMod,
bool fromMigrate) {
- OldClientContext context(opCtx, ns);
+ OldClientContext context(opCtx, nss);
- const NamespaceString requestNs(ns);
auto request = UpdateRequest();
- request.setNamespaceString(requestNs);
+ request.setNamespaceString(nss);
request.setQuery(filter);
request.setUpdateModification(write_ops::UpdateModification::parseFromClassicUpdate(updateMod));
@@ -324,12 +322,11 @@ void Helpers::update(OperationContext* opCtx,
::mongo::update(opCtx, context.db(), request);
}
-void Helpers::putSingleton(OperationContext* opCtx, const char* ns, BSONObj obj) {
- OldClientContext context(opCtx, ns);
+void Helpers::putSingleton(OperationContext* opCtx, const NamespaceString& nss, BSONObj obj) {
+ OldClientContext context(opCtx, nss);
- const NamespaceString requestNs(ns);
auto request = UpdateRequest();
- request.setNamespaceString(requestNs);
+ request.setNamespaceString(nss);
request.setUpdateModification(write_ops::UpdateModification::parseFromClassicUpdate(obj));
request.setUpsert();
@@ -356,7 +353,7 @@ BSONObj Helpers::inferKeyPattern(const BSONObj& o) {
}
void Helpers::emptyCollection(OperationContext* opCtx, const NamespaceString& nss) {
- OldClientContext context(opCtx, nss.ns());
+ OldClientContext context(opCtx, nss);
repl::UnreplicatedWritesBlock uwb(opCtx);
CollectionPtr collection = context.db()
? CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss)
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index ecb7081f29e..047aa9f8534 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -110,18 +110,18 @@ struct Helpers {
*
* Returns false if there is no such object.
*/
- static bool getSingleton(OperationContext* opCtx, const char* ns, BSONObj& result);
+ static bool getSingleton(OperationContext* opCtx, const NamespaceString& nss, BSONObj& result);
/**
* Same as getSingleton, but with a reverse natural-order scan on "ns".
*/
- static bool getLast(OperationContext* opCtx, const char* ns, BSONObj& result);
+ static bool getLast(OperationContext* opCtx, const NamespaceString& nss, BSONObj& result);
/**
* Performs an upsert of "obj" into the collection "ns", with an empty update predicate.
* Callers must have "ns" locked.
*/
- static void putSingleton(OperationContext* opCtx, const char* ns, BSONObj obj);
+ static void putSingleton(OperationContext* opCtx, const NamespaceString& nss, BSONObj obj);
/**
* Callers are expected to hold the collection lock.
@@ -129,7 +129,7 @@ struct Helpers {
* o has to have an _id field or will assert
*/
static UpdateResult upsert(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& o,
bool fromMigrate = false);
@@ -140,7 +140,7 @@ struct Helpers {
* on the same storage snapshot.
*/
static UpdateResult upsert(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& filter,
const BSONObj& updateMod,
bool fromMigrate = false);
@@ -152,7 +152,7 @@ struct Helpers {
* on the same storage snapshot.
*/
static void update(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& filter,
const BSONObj& updateMod,
bool fromMigrate = false);
diff --git a/src/mongo/db/index_build_entry_helpers.cpp b/src/mongo/db/index_build_entry_helpers.cpp
index b51ed281b3a..471b2dc2fc3 100644
--- a/src/mongo/db/index_build_entry_helpers.cpp
+++ b/src/mongo/db/index_build_entry_helpers.cpp
@@ -73,7 +73,7 @@ Status upsert(OperationContext* opCtx, const IndexBuildEntry& indexBuildEntry) {
WriteUnitOfWork wuow(opCtx);
Helpers::upsert(opCtx,
- NamespaceString::kIndexBuildEntryNamespace.ns(),
+ NamespaceString::kIndexBuildEntryNamespace,
indexBuildEntry.toBSON(),
/*fromMigrate=*/false);
wuow.commit();
@@ -128,7 +128,7 @@ Status upsert(OperationContext* opCtx, const BSONObj& filter, const BSONObj& upd
WriteUnitOfWork wuow(opCtx);
Helpers::upsert(opCtx,
- NamespaceString::kIndexBuildEntryNamespace.ns(),
+ NamespaceString::kIndexBuildEntryNamespace,
filter,
updateMod,
/*fromMigrate=*/false);
@@ -153,7 +153,7 @@ Status update(OperationContext* opCtx, const BSONObj& filter, const BSONObj& upd
WriteUnitOfWork wuow(opCtx);
Helpers::update(opCtx,
- NamespaceString::kIndexBuildEntryNamespace.ns(),
+ NamespaceString::kIndexBuildEntryNamespace,
filter,
updateMod,
/*fromMigrate=*/false);
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index 70f70006fe6..5000a651b36 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -361,8 +361,7 @@ repl::OpTime getLatestOplogOpTime(OperationContext* opCtx) {
// exceptions and we must protect it from unanticipated write conflicts from reads.
writeConflictRetry(
opCtx, "getLatestOplogOpTime", NamespaceString::kRsOplogNamespace.ns(), [&]() {
- invariant(Helpers::getLast(
- opCtx, NamespaceString::kRsOplogNamespace.ns().c_str(), oplogEntryBSON));
+ invariant(Helpers::getLast(opCtx, NamespaceString::kRsOplogNamespace, oplogEntryBSON));
});
auto optime = repl::OpTime::parseFromOplogEntry(oplogEntryBSON);
diff --git a/src/mongo/db/keys_collection_cache_test.cpp b/src/mongo/db/keys_collection_cache_test.cpp
index 625a138e313..5805bb97c5a 100644
--- a/src/mongo/db/keys_collection_cache_test.cpp
+++ b/src/mongo/db/keys_collection_cache_test.cpp
@@ -72,7 +72,7 @@ protected:
void insertDocument(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) {
AutoGetCollection coll(opCtx, nss, MODE_IX);
- auto updateResult = Helpers::upsert(opCtx, nss.toString(), doc);
+ auto updateResult = Helpers::upsert(opCtx, nss, doc);
ASSERT_EQ(0, updateResult.numDocsModified);
}
diff --git a/src/mongo/db/mongod_main.cpp b/src/mongo/db/mongod_main.cpp
index 4c32e81f948..9e53e29420f 100644
--- a/src/mongo/db/mongod_main.cpp
+++ b/src/mongo/db/mongod_main.cpp
@@ -741,7 +741,7 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
if (replSettings.usingReplSets()) {
Lock::GlobalWrite lk(startupOpCtx.get());
- OldClientContext ctx(startupOpCtx.get(), NamespaceString::kRsOplogNamespace.ns());
+ OldClientContext ctx(startupOpCtx.get(), NamespaceString::kRsOplogNamespace);
tenant_migration_util::createOplogViewForTenantMigrations(startupOpCtx.get(), ctx.db());
}
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index f390ef71980..29b894985b7 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -325,8 +325,8 @@ void writeToImageCollection(OperationContext* opCtx,
opCtx, NamespaceString::kConfigImagesNamespace, LockMode::MODE_IX);
auto curOp = CurOp::get(opCtx);
const std::string existingNs = curOp->getNS();
- UpdateResult res = Helpers::upsert(
- opCtx, NamespaceString::kConfigImagesNamespace.toString(), imageEntry.toBSON());
+ UpdateResult res =
+ Helpers::upsert(opCtx, NamespaceString::kConfigImagesNamespace, imageEntry.toBSON());
{
stdx::lock_guard<Client> clientLock(*opCtx->getClient());
curOp->setNS_inlock(existingNs);
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index 96ff0518616..546f7ad01a1 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -152,7 +152,7 @@ Status _applyOps(OperationContext* opCtx,
// Malformed operations should have already been caught and retried in non-atomic mode.
invariant(entry.isOK());
- OldClientContext ctx(opCtx, nss.ns());
+ OldClientContext ctx(opCtx, nss);
const auto& op = entry.getValue();
const bool isDataConsistent = true;
@@ -226,7 +226,7 @@ Status _applyOps(OperationContext* opCtx,
<< nss.ns() << ": " << mongo::redact(opObj));
}
- OldClientContext ctx(opCtx, nss.ns());
+ OldClientContext ctx(opCtx, nss);
// We return the status rather than merely aborting so failure of CRUD
// ops doesn't stop the applyOps from trying to process the rest of the
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index 69d9868e1b0..0e5d3fa573b 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -951,8 +951,7 @@ OpTime BackgroundSync::_readLastAppliedOpTime(OperationContext* opCtx) {
try {
bool success = writeConflictRetry(
opCtx, "readLastAppliedOpTime", NamespaceString::kRsOplogNamespace.ns(), [&] {
- return Helpers::getLast(
- opCtx, NamespaceString::kRsOplogNamespace.ns().c_str(), oplogEntry);
+ return Helpers::getLast(opCtx, NamespaceString::kRsOplogNamespace, oplogEntry);
});
if (!success) {
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 2919ebe31b5..3fad6ccec9b 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -723,7 +723,7 @@ void createOplog(OperationContext* opCtx,
const ReplSettings& replSettings = ReplicationCoordinator::get(opCtx)->getSettings();
- OldClientContext ctx(opCtx, oplogCollectionName.ns());
+ OldClientContext ctx(opCtx, oplogCollectionName);
CollectionPtr collection =
CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, oplogCollectionName);
diff --git a/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp b/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp
index d4b8ad32e2f..53b640225c2 100644
--- a/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp
@@ -462,7 +462,7 @@ void createCollection(OperationContext* opCtx,
Lock::DBLock dbLk(opCtx, nss.dbName(), MODE_IX);
Lock::CollectionLock collLk(opCtx, nss, MODE_X);
- OldClientContext ctx(opCtx, nss.ns());
+ OldClientContext ctx(opCtx, nss);
auto db = ctx.db();
ASSERT_TRUE(db);
diff --git a/src/mongo/db/repl/oplog_applier_utils.cpp b/src/mongo/db/repl/oplog_applier_utils.cpp
index 7c5c78a63dc..0d2e5504b89 100644
--- a/src/mongo/db/repl/oplog_applier_utils.cpp
+++ b/src/mongo/db/repl/oplog_applier_utils.cpp
@@ -221,7 +221,7 @@ Status OplogApplierUtils::applyOplogEntryOrGroupedInsertsCommon(
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "missing database (" << nss.db() << ")",
db);
- OldClientContext ctx(opCtx, autoColl.getNss().ns(), db);
+ OldClientContext ctx(opCtx, autoColl.getNss(), db);
// We convert updates to upserts in secondary mode when the
// oplogApplicationEnforcesSteadyStateConstraints parameter is false, to avoid
diff --git a/src/mongo/db/repl/oplog_interface_local.cpp b/src/mongo/db/repl/oplog_interface_local.cpp
index 8f5e589aa6c..564a5600c80 100644
--- a/src/mongo/db/repl/oplog_interface_local.cpp
+++ b/src/mongo/db/repl/oplog_interface_local.cpp
@@ -57,7 +57,7 @@ private:
OplogIteratorLocal::OplogIteratorLocal(OperationContext* opCtx)
: _oplogRead(opCtx, OplogAccessMode::kRead),
- _ctx(opCtx, NamespaceString::kRsOplogNamespace.ns()),
+ _ctx(opCtx, NamespaceString::kRsOplogNamespace),
_exec(_oplogRead.getCollection()
? InternalPlanner::collectionScan(opCtx,
&_oplogRead.getCollection(),
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index 2411b496c7c..826653c9717 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -728,7 +728,7 @@ bool replHasDatabases(OperationContext* opCtx) {
// we have a local database. return true if oplog isn't empty
BSONObj o;
- if (Helpers::getSingleton(opCtx, NamespaceString::kRsOplogNamespace.ns().c_str(), o)) {
+ if (Helpers::getSingleton(opCtx, NamespaceString::kRsOplogNamespace, o)) {
return true;
}
}
diff --git a/src/mongo/db/repl/replication_consistency_markers_impl_test.cpp b/src/mongo/db/repl/replication_consistency_markers_impl_test.cpp
index b797fbd36ed..78c23a18f65 100644
--- a/src/mongo/db/repl/replication_consistency_markers_impl_test.cpp
+++ b/src/mongo/db/repl/replication_consistency_markers_impl_test.cpp
@@ -67,7 +67,7 @@ BSONObj getMinValidDocument(OperationContext* opCtx, const NamespaceString& minV
Lock::DBLock dblk(opCtx, minValidNss.dbName(), MODE_IS);
Lock::CollectionLock lk(opCtx, minValidNss, MODE_IS);
BSONObj mv;
- if (Helpers::getSingleton(opCtx, minValidNss.ns().c_str(), mv)) {
+ if (Helpers::getSingleton(opCtx, minValidNss, mv)) {
return mv;
}
return mv;
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 45480cd9020..17f32375ee8 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -431,8 +431,7 @@ Status ReplicationCoordinatorExternalStateImpl::initializeReplSetStorage(Operati
{
// Writes to 'local.system.replset' must be untimestamped.
WriteUnitOfWork wuow(opCtx);
- Helpers::putSingleton(
- opCtx, configCollectionNS.ns().c_str(), config);
+ Helpers::putSingleton(opCtx, configCollectionNS, config);
wuow.commit();
}
{
@@ -576,7 +575,7 @@ StatusWith<BSONObj> ReplicationCoordinatorExternalStateImpl::loadLocalConfigDocu
return writeConflictRetry(
opCtx, "load replica set config", configCollectionNS.ns(), [opCtx] {
BSONObj config;
- if (!Helpers::getSingleton(opCtx, configCollectionNS.ns().c_str(), config)) {
+ if (!Helpers::getSingleton(opCtx, configCollectionNS, config)) {
return StatusWith<BSONObj>(
ErrorCodes::NoMatchingDocument,
"Did not find replica set configuration document in {}"_format(
@@ -598,7 +597,7 @@ Status ReplicationCoordinatorExternalStateImpl::storeLocalConfigDocument(Operati
// Writes to 'local.system.replset' must be untimestamped.
WriteUnitOfWork wuow(opCtx);
Lock::DBLock dbWriteLock(opCtx, configDatabaseName, MODE_X);
- Helpers::putSingleton(opCtx, configCollectionNS.ns().c_str(), config);
+ Helpers::putSingleton(opCtx, configCollectionNS, config);
wuow.commit();
}
@@ -628,7 +627,7 @@ Status ReplicationCoordinatorExternalStateImpl::replaceLocalConfigDocument(
WriteUnitOfWork wuow(opCtx);
Lock::DBLock dbWriteLock(opCtx, configDatabaseName, MODE_X);
Helpers::emptyCollection(opCtx, configCollectionNS);
- Helpers::putSingleton(opCtx, configCollectionNS.ns().c_str(), config);
+ Helpers::putSingleton(opCtx, configCollectionNS, config);
wuow.commit();
});
return Status::OK();
@@ -656,12 +655,12 @@ Status ReplicationCoordinatorExternalStateImpl::createLocalLastVoteCollection(
[opCtx] {
AutoGetCollection coll(opCtx, NamespaceString::kLastVoteNamespace, MODE_X);
BSONObj result;
- bool exists = Helpers::getSingleton(
- opCtx, NamespaceString::kLastVoteNamespace.ns().c_str(), result);
+ bool exists =
+ Helpers::getSingleton(opCtx, NamespaceString::kLastVoteNamespace, result);
if (!exists) {
LastVote lastVote{OpTime::kInitialTerm, -1};
Helpers::putSingleton(
- opCtx, NamespaceString::kLastVoteNamespace.ns().c_str(), lastVote.toBSON());
+ opCtx, NamespaceString::kLastVoteNamespace, lastVote.toBSON());
}
});
} catch (const DBException& ex) {
@@ -680,9 +679,8 @@ StatusWith<LastVote> ReplicationCoordinatorExternalStateImpl::loadLocalLastVoteD
NamespaceString::kLastVoteNamespace.toString(),
[opCtx] {
BSONObj lastVoteObj;
- if (!Helpers::getSingleton(opCtx,
- NamespaceString::kLastVoteNamespace.toString().c_str(),
- lastVoteObj)) {
+ if (!Helpers::getSingleton(
+ opCtx, NamespaceString::kLastVoteNamespace, lastVoteObj)) {
return StatusWith<LastVote>(
ErrorCodes::NoMatchingDocument,
str::stream() << "Did not find replica set lastVote document in "
@@ -728,16 +726,15 @@ Status ReplicationCoordinatorExternalStateImpl::storeLocalLastVoteDocument(
// operations. We have already ensured at startup time that there is an old
// document.
BSONObj result;
- bool exists = Helpers::getSingleton(
- opCtx, NamespaceString::kLastVoteNamespace.ns().c_str(), result);
+ bool exists =
+ Helpers::getSingleton(opCtx, NamespaceString::kLastVoteNamespace, result);
fassert(51241, exists);
StatusWith<LastVote> oldLastVoteDoc = LastVote::readFromLastVote(result);
if (!oldLastVoteDoc.isOK()) {
return oldLastVoteDoc.getStatus();
}
if (lastVote.getTerm() > oldLastVoteDoc.getValue().getTerm()) {
- Helpers::putSingleton(
- opCtx, NamespaceString::kLastVoteNamespace.ns().c_str(), lastVoteObj);
+ Helpers::putSingleton(opCtx, NamespaceString::kLastVoteNamespace, lastVoteObj);
}
wunit.commit();
return Status::OK();
@@ -782,8 +779,7 @@ StatusWith<OpTimeAndWallTime> ReplicationCoordinatorExternalStateImpl::loadLastO
if (!writeConflictRetry(
opCtx, "Load last opTime", NamespaceString::kRsOplogNamespace.ns().c_str(), [&] {
- return Helpers::getLast(
- opCtx, NamespaceString::kRsOplogNamespace.ns().c_str(), oplogEntry);
+ return Helpers::getLast(opCtx, NamespaceString::kRsOplogNamespace, oplogEntry);
})) {
return StatusWith<OpTimeAndWallTime>(ErrorCodes::NoMatchingDocument,
str::stream()
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index e0bd311ba98..d38c7265e69 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -279,8 +279,7 @@ public:
// Note that getSingleton will take a global IS lock, but this won't block because
// we are already holding the global IS lock.
BSONObj o;
- if (Helpers::getSingleton(
- opCtx, NamespaceString::kRsOplogNamespace.ns().c_str(), o)) {
+ if (Helpers::getSingleton(opCtx, NamespaceString::kRsOplogNamespace, o)) {
return o["ts"].timestamp();
}
}
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index 8e32fba099b..91893402542 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -530,8 +530,7 @@ void RollbackImpl::_restoreTxnsTableEntryFromRetryableWrites(OperationContext* o
// Perform an untimestamped write so that it will not be rolled back on recovering
// to the 'stableTimestamp' if we were to crash. This is safe because this update is
// meant to be consistent with the 'stableTimestamp' and not the common point.
- Helpers::upsert(
- opCtx, nss.ns(), filter, sessionTxnRecord.toBSON(), /*fromMigrate=*/false);
+ Helpers::upsert(opCtx, nss, filter, sessionTxnRecord.toBSON(), /*fromMigrate=*/false);
});
}
// Take a stable checkpoint so that writes to the 'config.transactions' table are
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index d28b881cc20..577a07bf9eb 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -1737,7 +1737,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
// TODO: Lots of overhead in context. This can be faster.
const NamespaceString docNss(doc.ns);
Lock::DBLock docDbLock(opCtx, docNss.dbName(), MODE_X);
- OldClientContext ctx(opCtx, doc.ns.toString());
+ OldClientContext ctx(opCtx, docNss);
CollectionWriter collection(opCtx, uuid);
// Adds the doc to our rollback file if the collection was not dropped while
@@ -1957,7 +1957,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
const NamespaceString oplogNss(NamespaceString::kRsOplogNamespace);
Lock::DBLock oplogDbLock(opCtx, oplogNss.dbName(), MODE_IX);
Lock::CollectionLock oplogCollectionLoc(opCtx, oplogNss, MODE_X);
- OldClientContext ctx(opCtx, oplogNss.ns());
+ OldClientContext ctx(opCtx, oplogNss);
auto oplogCollection =
CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, oplogNss);
if (!oplogCollection) {
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 22d7c7648e4..b2a0560d145 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -1279,8 +1279,7 @@ Timestamp StorageInterfaceImpl::getEarliestOplogTimestamp(OperationContext* opCt
BSONObj oplogEntryBSON;
tassert(5869100,
"Failed reading the earliest oplog entry",
- Helpers::getSingleton(
- opCtx, NamespaceString::kRsOplogNamespace.ns().c_str(), oplogEntryBSON));
+ Helpers::getSingleton(opCtx, NamespaceString::kRsOplogNamespace, oplogEntryBSON));
auto optime = OpTime::parseFromOplogEntry(oplogEntryBSON);
tassert(5869101,
@@ -1312,8 +1311,7 @@ Timestamp StorageInterfaceImpl::getLatestOplogTimestamp(OperationContext* opCtx)
// Helpers::getLast will bypass the oplog visibility rules by doing a backwards collection
// scan.
BSONObj oplogEntryBSON;
- invariant(Helpers::getLast(
- opCtx, NamespaceString::kRsOplogNamespace.ns().c_str(), oplogEntryBSON));
+ invariant(Helpers::getLast(opCtx, NamespaceString::kRsOplogNamespace, oplogEntryBSON));
auto optime = OpTime::parseFromOplogEntry(oplogEntryBSON);
invariant(optime.isOK(),
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index cd85959ef3b..55d9142d8ce 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -123,7 +123,7 @@ void createCollection(OperationContext* opCtx,
const CollectionOptions& options = generateOptionsWithUuid()) {
writeConflictRetry(opCtx, "createCollection", nss.ns(), [&] {
Lock::DBLock dblk(opCtx, nss.dbName(), MODE_X);
- OldClientContext ctx(opCtx, nss.ns());
+ OldClientContext ctx(opCtx, nss);
auto db = ctx.db();
ASSERT_TRUE(db);
mongo::WriteUnitOfWork wuow(opCtx);
diff --git a/src/mongo/db/repl/storage_timestamp_test.cpp b/src/mongo/db/repl/storage_timestamp_test.cpp
index 2ec52d2ffec..2732b2cd5e7 100644
--- a/src/mongo/db/repl/storage_timestamp_test.cpp
+++ b/src/mongo/db/repl/storage_timestamp_test.cpp
@@ -485,7 +485,7 @@ public:
Timestamp getTopOfOplog() {
OneOffRead oor(_opCtx, Timestamp::min());
BSONObj ret;
- ASSERT_TRUE(Helpers::getLast(_opCtx, NamespaceString::kRsOplogNamespace.ns().c_str(), ret));
+ ASSERT_TRUE(Helpers::getLast(_opCtx, NamespaceString::kRsOplogNamespace, ret));
return ret["ts"].timestamp();
}
@@ -856,7 +856,7 @@ TEST_F(StorageTimestampTest, SecondaryInsertTimes) {
OneOffRead oor(_opCtx, firstInsertTime.addTicks(idx).asTimestamp());
BSONObj result;
- ASSERT(Helpers::getLast(_opCtx, nss.ns().c_str(), result)) << " idx is " << idx;
+ ASSERT(Helpers::getLast(_opCtx, nss, result)) << " idx is " << idx;
ASSERT_EQ(0, SimpleBSONObjComparator::kInstance.compare(result, BSON("_id" << idx)))
<< "Doc: " << result.toString() << " Expected: " << BSON("_id" << idx);
}
@@ -914,7 +914,7 @@ TEST_F(StorageTimestampTest, SecondaryArrayInsertTimes) {
OneOffRead oor(_opCtx, firstInsertTime.addTicks(idx).asTimestamp());
BSONObj result;
- ASSERT(Helpers::getLast(_opCtx, nss.ns().c_str(), result)) << " idx is " << idx;
+ ASSERT(Helpers::getLast(_opCtx, nss, result)) << " idx is " << idx;
ASSERT_EQ(0, SimpleBSONObjComparator::kInstance.compare(result, BSON("_id" << idx)))
<< "Doc: " << result.toString() << " Expected: " << BSON("_id" << idx);
}
@@ -1357,7 +1357,7 @@ TEST_F(StorageTimestampTest, PrimaryCreateCollectionInApplyOps) {
{ ASSERT(AutoGetCollectionForReadCommand(_opCtx, nss).getCollection()); }
BSONObj result;
- ASSERT(Helpers::getLast(_opCtx, NamespaceString::kRsOplogNamespace.toString().c_str(), result));
+ ASSERT(Helpers::getLast(_opCtx, NamespaceString::kRsOplogNamespace, result));
repl::OplogEntry op(result);
ASSERT(op.getOpType() == repl::OpTypeEnum::kCommand) << op.toBSONForLogging();
// The next logOp() call will get 'futureTs', which will be the timestamp at which we do
@@ -2779,7 +2779,7 @@ TEST_F(StorageTimestampTest, IndexBuildsResolveErrorsDuringStateChangeToPrimary)
// Update one documents to be valid, and delete the other. These modifications are written
// to the side writes table and must be drained.
- Helpers::upsert(_opCtx, collection->ns().ns(), BSON("_id" << 0 << "a" << 1 << "b" << 1));
+ Helpers::upsert(_opCtx, collection->ns(), BSON("_id" << 0 << "a" << 1 << "b" << 1));
{
RecordId badRecord = Helpers::findOne(_opCtx, collection.get(), BSON("_id" << 1));
WriteUnitOfWork wuow(_opCtx);
diff --git a/src/mongo/db/repl/tenant_migration_donor_service.cpp b/src/mongo/db/repl/tenant_migration_donor_service.cpp
index 45f758dfef3..6ef4f4a7894 100644
--- a/src/mongo/db/repl/tenant_migration_donor_service.cpp
+++ b/src/mongo/db/repl/tenant_migration_donor_service.cpp
@@ -505,7 +505,7 @@ ExecutorFuture<repl::OpTime> TenantMigrationDonorService::Instance::_insertState
return BSON("$setOnInsert" << _stateDoc.toBSON());
}();
auto updateResult = Helpers::upsert(
- opCtx, _stateDocumentsNS.ns(), filter, updateMod, /*fromMigrate=*/false);
+ opCtx, _stateDocumentsNS, filter, updateMod, /*fromMigrate=*/false);
// '$setOnInsert' update operator can never modify an existing on-disk state
// doc.
@@ -660,7 +660,7 @@ TenantMigrationDonorService::Instance::_markStateDocAsGarbageCollectable(
return _stateDoc.toBSON();
}();
auto updateResult = Helpers::upsert(
- opCtx, _stateDocumentsNS.ns(), filter, updateMod, /*fromMigrate=*/false);
+ opCtx, _stateDocumentsNS, filter, updateMod, /*fromMigrate=*/false);
invariant(updateResult.numDocsModified == 1);
});
diff --git a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
index b77606577b2..e62a6121b36 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
@@ -75,7 +75,7 @@ Status insertStateDoc(OperationContext* opCtx, const TenantMigrationRecipientDoc
<< BSON("$exists" << false));
const auto updateMod = BSON("$setOnInsert" << stateDoc.toBSON());
auto updateResult =
- Helpers::upsert(opCtx, nss.ns(), filter, updateMod, /*fromMigrate=*/false);
+ Helpers::upsert(opCtx, nss, filter, updateMod, /*fromMigrate=*/false);
// '$setOnInsert' update operator can no way modify the existing on-disk state doc.
invariant(!updateResult.numDocsModified);
@@ -102,7 +102,7 @@ Status updateStateDoc(OperationContext* opCtx, const TenantMigrationRecipientDoc
return writeConflictRetry(
opCtx, "updateTenantMigrationRecipientStateDoc", nss.ns(), [&]() -> Status {
auto updateResult =
- Helpers::upsert(opCtx, nss.ns(), stateDoc.toBSON(), /*fromMigrate=*/false);
+ Helpers::upsert(opCtx, nss, stateDoc.toBSON(), /*fromMigrate=*/false);
if (updateResult.numMatched == 0) {
return {ErrorCodes::NoSuchKey,
str::stream()
diff --git a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
index 78e6a4b89cc..ea4ea2da1bf 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
@@ -197,7 +197,7 @@ public:
repl::createOplog(opCtx.get());
{
Lock::GlobalWrite lk(opCtx.get());
- OldClientContext ctx(opCtx.get(), NamespaceString::kRsOplogNamespace.ns());
+ OldClientContext ctx(opCtx.get(), NamespaceString::kRsOplogNamespace);
tenant_migration_util::createOplogViewForTenantMigrations(opCtx.get(), ctx.db());
}
diff --git a/src/mongo/db/repl/tenant_migration_util.cpp b/src/mongo/db/repl/tenant_migration_util.cpp
index 205bf70dd14..39b1bf2fdff 100644
--- a/src/mongo/db/repl/tenant_migration_util.cpp
+++ b/src/mongo/db/repl/tenant_migration_util.cpp
@@ -96,7 +96,7 @@ repl::OpTime storeExternalClusterTimeKeyDocs(std::vector<ExternalKeysCollectionD
const auto updateMod = keyDoc.toBSON();
Helpers::upsert(opCtx,
- nss.ns(),
+ nss,
filter,
updateMod,
/*fromMigrate=*/false);
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index fa303126008..fd823932380 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -1839,7 +1839,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const
// We are in write lock here, so sure we aren't killing
writeConflictRetry(opCtx, "transferModsUpdates", _nss.ns(), [&] {
- auto res = Helpers::upsert(opCtx, _nss.ns(), updatedDoc, true);
+ auto res = Helpers::upsert(opCtx, _nss, updatedDoc, true);
if (!res.upsertedId.isEmpty()) {
changeInOrphans++;
}
diff --git a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
index 632b387a817..859450a3b6d 100644
--- a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
@@ -260,7 +260,7 @@ protected:
const BSONObj& update,
const ReshardingEnv& env) {
AutoGetCollection coll(opCtx, nss, MODE_IX);
- Helpers::update(opCtx, nss.toString(), filter, update);
+ Helpers::update(opCtx, nss, filter, update);
}
void deleteDoc(OperationContext* opCtx,
diff --git a/src/mongo/db/s/resharding/resharding_donor_service.cpp b/src/mongo/db/s/resharding/resharding_donor_service.cpp
index 40b1f17f179..dd1c510accf 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_service.cpp
@@ -991,7 +991,7 @@ void ReshardingDonorService::DonorStateMachine::_updateDonorDocument(
WriteUnitOfWork wuow(opCtx.get());
Helpers::update(opCtx.get(),
- nss.toString(),
+ nss,
BSON(ReshardingDonorDocument::kReshardingUUIDFieldName
<< _metadata.getReshardingUUID()),
BSON("$set" << BSON(ReshardingDonorDocument::kMutableStateFieldName
diff --git a/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp
index 68523519f41..60d66a06c16 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp
@@ -95,7 +95,7 @@ public:
{
Lock::GlobalWrite lk(_opCtx);
- OldClientContext ctx(_opCtx, NamespaceString::kRsOplogNamespace.ns());
+ OldClientContext ctx(_opCtx, NamespaceString::kRsOplogNamespace);
}
_metrics = ReshardingMetrics::makeInstance(_reshardingUUID,
diff --git a/src/mongo/db/serverless/shard_split_donor_service.cpp b/src/mongo/db/serverless/shard_split_donor_service.cpp
index 03642e92069..104787a3a23 100644
--- a/src/mongo/db/serverless/shard_split_donor_service.cpp
+++ b/src/mongo/db/serverless/shard_split_donor_service.cpp
@@ -993,7 +993,7 @@ ExecutorFuture<repl::OpTime> ShardSplitDonorService::DonorStateMachine::_updateS
return BSON("$set" << _stateDoc.toBSON());
}();
auto updateResult = Helpers::upsert(opCtx,
- _stateDocumentsNS.ns(),
+ _stateDocumentsNS,
filter,
updatedStateDocBson,
/*fromMigrate=*/false);
diff --git a/src/mongo/db/serverless/shard_split_utils.cpp b/src/mongo/db/serverless/shard_split_utils.cpp
index 041c133b02b..63b19c861c5 100644
--- a/src/mongo/db/serverless/shard_split_utils.cpp
+++ b/src/mongo/db/serverless/shard_split_utils.cpp
@@ -162,8 +162,7 @@ Status insertStateDoc(OperationContext* opCtx, const ShardSplitDonorDocument& st
<< stateDoc.getId() << ShardSplitDonorDocument::kExpireAtFieldName
<< BSON("$exists" << false));
const auto updateMod = BSON("$setOnInsert" << stateDoc.toBSON());
- auto updateResult =
- Helpers::upsert(opCtx, nss.ns(), filter, updateMod, /*fromMigrate=*/false);
+ auto updateResult = Helpers::upsert(opCtx, nss, filter, updateMod, /*fromMigrate=*/false);
invariant(!updateResult.numDocsModified);
if (updateResult.upsertedId.isEmpty()) {
@@ -185,8 +184,7 @@ Status updateStateDoc(OperationContext* opCtx, const ShardSplitDonorDocument& st
}
return writeConflictRetry(opCtx, "updateShardSplitStateDoc", nss.ns(), [&]() -> Status {
- auto updateResult =
- Helpers::upsert(opCtx, nss.ns(), stateDoc.toBSON(), /*fromMigrate=*/false);
+ auto updateResult = Helpers::upsert(opCtx, nss, stateDoc.toBSON(), /*fromMigrate=*/false);
if (updateResult.numMatched == 0) {
return {ErrorCodes::NoSuchKey,
str::stream() << "Existing shard split state document not found for id: "
diff --git a/src/mongo/db/startup_recovery.cpp b/src/mongo/db/startup_recovery.cpp
index 1f1a3496371..b4d9b4e8fe7 100644
--- a/src/mongo/db/startup_recovery.cpp
+++ b/src/mongo/db/startup_recovery.cpp
@@ -282,7 +282,7 @@ bool hasReplSetConfigDoc(OperationContext* opCtx) {
databaseHolder->openDb(opCtx, nss.dbName());
BSONObj config;
- return Helpers::getSingleton(opCtx, nss.ns().c_str(), config);
+ return Helpers::getSingleton(opCtx, nss, config);
}
/**
diff --git a/src/mongo/db/storage/storage_repair_observer.cpp b/src/mongo/db/storage/storage_repair_observer.cpp
index f88f61616d5..1921b0ee42e 100644
--- a/src/mongo/db/storage/storage_repair_observer.cpp
+++ b/src/mongo/db/storage/storage_repair_observer.cpp
@@ -158,7 +158,7 @@ void StorageRepairObserver::_invalidateReplConfigIfNeeded(OperationContext* opCt
// If this node is a standalone, this would lead to a confusing error message if it were
// added to a replica set later on.
BSONObj config;
- if (!Helpers::getSingleton(opCtx, kConfigNss.ns().c_str(), config)) {
+ if (!Helpers::getSingleton(opCtx, kConfigNss, config)) {
return;
}
if (config.hasField(repl::ReplSetConfig::kRepairedFieldName)) {
@@ -166,7 +166,7 @@ void StorageRepairObserver::_invalidateReplConfigIfNeeded(OperationContext* opCt
}
BSONObjBuilder configBuilder(config);
configBuilder.append(repl::ReplSetConfig::kRepairedFieldName, true);
- Helpers::putSingleton(opCtx, kConfigNss.ns().c_str(), configBuilder.obj());
+ Helpers::putSingleton(opCtx, kConfigNss, configBuilder.obj());
JournalFlusher::get(opCtx)->waitForJournalFlush();
}
diff --git a/src/mongo/db/storage/storage_repair_observer_test.cpp b/src/mongo/db/storage/storage_repair_observer_test.cpp
index bed95255be1..4cd072cc7ba 100644
--- a/src/mongo/db/storage/storage_repair_observer_test.cpp
+++ b/src/mongo/db/storage/storage_repair_observer_test.cpp
@@ -68,12 +68,14 @@ public:
void createMockReplConfig(OperationContext* opCtx) {
BSONObj replConfig;
Lock::DBLock dbLock(opCtx, DatabaseName(boost::none, "local"), MODE_X);
- Helpers::putSingleton(opCtx, "local.system.replset", replConfig);
+ Helpers::putSingleton(
+ opCtx, NamespaceString(boost::none, "local.system.replset"), replConfig);
}
void assertReplConfigValid(OperationContext* opCtx, bool valid) {
BSONObj replConfig;
- ASSERT(Helpers::getSingleton(opCtx, "local.system.replset", replConfig));
+ ASSERT(Helpers::getSingleton(
+ opCtx, NamespaceString(boost::none, "local.system.replset"), replConfig));
if (valid) {
ASSERT(!replConfig.hasField("repaired"));
} else {
@@ -84,7 +86,8 @@ public:
bool hasReplConfig(OperationContext* opCtx) {
BSONObj replConfig;
Lock::DBLock dbLock(opCtx, DatabaseName(boost::none, "local"), MODE_IS);
- return Helpers::getSingleton(opCtx, "local.system.replset", replConfig);
+ return Helpers::getSingleton(
+ opCtx, NamespaceString(boost::none, "local.system.replset"), replConfig);
}
path repairFilePath() {
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 3ce5d0ece06..1b807d28395 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -42,7 +42,7 @@ namespace CountTests {
class Base {
public:
- Base() : _lk(&_opCtx, nss().dbName(), MODE_X), _context(&_opCtx, ns()), _client(&_opCtx) {
+ Base() : _lk(&_opCtx, nss().dbName(), MODE_X), _context(&_opCtx, nss()), _client(&_opCtx) {
_database = _context.db();
{
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 5b4dfc552c7..bdfd223e8d3 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -169,7 +169,7 @@ public:
// Assert that findByIdAndNoopUpdate did not generate an oplog entry.
BSONObj oplogEntry;
- Helpers::getLast(opCtx1.get(), NamespaceString::kRsOplogNamespace.ns().c_str(), oplogEntry);
+ Helpers::getLast(opCtx1.get(), NamespaceString::kRsOplogNamespace, oplogEntry);
ASSERT_BSONOBJ_NE(oplogEntry, BSONObj());
ASSERT_TRUE(oplogEntry.getStringField("op") == "i"_sd);
@@ -223,7 +223,7 @@ private:
// Assert that findByIdAndNoopUpdate did not generate an oplog entry.
BSONObj oplogEntry;
- Helpers::getLast(opCtx2, NamespaceString::kRsOplogNamespace.ns().c_str(), oplogEntry);
+ Helpers::getLast(opCtx2, NamespaceString::kRsOplogNamespace, oplogEntry);
ASSERT_BSONOBJ_NE(oplogEntry, BSONObj());
ASSERT_TRUE(oplogEntry.getStringField("op") == "i"_sd);
}
diff --git a/src/mongo/dbtests/dbtests.cpp b/src/mongo/dbtests/dbtests.cpp
index f84874775f7..a0c99d5d5e5 100644
--- a/src/mongo/dbtests/dbtests.cpp
+++ b/src/mongo/dbtests/dbtests.cpp
@@ -182,7 +182,7 @@ WriteContextForTests::WriteContextForTests(OperationContext* opCtx, StringData n
const bool doShardVersionCheck = false;
- _clientContext.emplace(opCtx, _nss.ns(), doShardVersionCheck);
+ _clientContext.emplace(opCtx, _nss, doShardVersionCheck);
auto db = _autoDb->ensureDbExists(opCtx);
invariant(db, _nss.ns());
invariant(db == _clientContext->db());
diff --git a/src/mongo/dbtests/indexcatalogtests.cpp b/src/mongo/dbtests/indexcatalogtests.cpp
index b4876849c3a..f8bf760bd82 100644
--- a/src/mongo/dbtests/indexcatalogtests.cpp
+++ b/src/mongo/dbtests/indexcatalogtests.cpp
@@ -60,7 +60,7 @@ public:
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
Lock::DBLock lk(&opCtx, _nss.dbName(), MODE_X);
- OldClientContext ctx(&opCtx, _nss.ns());
+ OldClientContext ctx(&opCtx, _nss);
WriteUnitOfWork wuow(&opCtx);
ctx.db()->createCollection(&opCtx, _nss);
@@ -71,7 +71,7 @@ public:
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
Lock::DBLock lk(&opCtx, _nss.dbName(), MODE_X);
- OldClientContext ctx(&opCtx, _nss.ns());
+ OldClientContext ctx(&opCtx, _nss);
WriteUnitOfWork wuow(&opCtx);
ctx.db()->dropCollection(&opCtx, _nss).transitional_ignore();
@@ -118,7 +118,7 @@ public:
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
Lock::DBLock lk(&opCtx, _nss.dbName(), MODE_X);
- OldClientContext ctx(&opCtx, _nss.ns());
+ OldClientContext ctx(&opCtx, _nss);
WriteUnitOfWork wuow(&opCtx);
ctx.db()->createCollection(&opCtx, _nss);
@@ -164,7 +164,7 @@ public:
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
Lock::DBLock lk(&opCtx, _nss.dbName(), MODE_X);
- OldClientContext ctx(&opCtx, _nss.ns());
+ OldClientContext ctx(&opCtx, _nss);
WriteUnitOfWork wuow(&opCtx);
ctx.db()->createCollection(&opCtx, _nss);
@@ -175,7 +175,7 @@ public:
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
Lock::DBLock lk(&opCtx, _nss.dbName(), MODE_X);
- OldClientContext ctx(&opCtx, _nss.ns());
+ OldClientContext ctx(&opCtx, _nss);
WriteUnitOfWork wuow(&opCtx);
ctx.db()->dropCollection(&opCtx, _nss).transitional_ignore();
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index 8535e81942c..b22fee7e066 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -45,7 +45,7 @@ namespace PdfileTests {
namespace Insert {
class Base {
public:
- Base() : _lk(&_opCtx), _context(&_opCtx, nss().ns()) {}
+ Base() : _lk(&_opCtx), _context(&_opCtx, nss()) {}
virtual ~Base() {
if (!collection())
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index ddaf1dcca5c..712ad9b7298 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -59,7 +59,7 @@ class CountStageTest {
public:
CountStageTest()
: _dbLock(&_opCtx, nss().dbName(), MODE_X),
- _ctx(&_opCtx, ns()),
+ _ctx(&_opCtx, nss()),
_expCtx(make_intrusive<ExpressionContext>(&_opCtx, nullptr, kTestNss)),
_coll(nullptr) {}
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index 85fffb7f917..1ee92963d10 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -167,7 +167,7 @@ class FetchStageFilter : public QueryStageFetchBase {
public:
void run() {
Lock::DBLock lk(&_opCtx, nss().dbName(), MODE_X);
- OldClientContext ctx(&_opCtx, ns());
+ OldClientContext ctx(&_opCtx, nss());
Database* db = ctx.db();
CollectionPtr coll =
CollectionCatalog::get(&_opCtx)->lookupCollectionByNamespace(&_opCtx, nss());
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index 197dc45a7f3..3a51a307bcc 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -48,7 +48,7 @@ class IndexScanTest {
public:
IndexScanTest()
: _dbLock(&_opCtx, nss().dbName(), MODE_X),
- _ctx(&_opCtx, ns()),
+ _ctx(&_opCtx, nss()),
_coll(nullptr),
_expCtx(make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss())) {}
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 13437626b2a..a10c20504aa 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -76,7 +76,7 @@ using std::vector;
class Base {
public:
- Base() : _lk(&_opCtx), _context(&_opCtx, ns()) {
+ Base() : _lk(&_opCtx), _context(&_opCtx, nss()) {
{
WriteUnitOfWork wunit(&_opCtx);
_database = _context.db();
@@ -203,7 +203,7 @@ public:
// We don't normally allow empty objects in the database, but test that we can find
// an empty object (one might be allowed inside a reserved namespace at some point).
Lock::GlobalWrite lk(&_opCtx);
- OldClientContext ctx(&_opCtx, "unittests.querytests");
+ OldClientContext ctx(&_opCtx, NamespaceString(boost::none, "unittests.querytests"));
{
WriteUnitOfWork wunit(&_opCtx);
@@ -1150,7 +1150,7 @@ class DirectLocking : public ClientBase {
public:
void run() {
Lock::GlobalWrite lk(&_opCtx);
- OldClientContext ctx(&_opCtx, "unittests.DirectLocking");
+ OldClientContext ctx(&_opCtx, NamespaceString(boost::none, "unittests.DirectLocking"));
_client.remove("a.b", BSONObj());
ASSERT_EQUALS("unittests", ctx.db()->name().db());
}
@@ -1685,7 +1685,7 @@ public:
coll_opts.uuid = UUID::gen();
{
Lock::GlobalWrite lk(&_opCtx);
- OldClientContext context(&_opCtx, ns());
+ OldClientContext context(&_opCtx, nss());
WriteUnitOfWork wunit(&_opCtx);
context.db()->createCollection(&_opCtx, nss(), coll_opts, false);
wunit.commit();
@@ -1714,7 +1714,7 @@ public:
coll_opts.uuid = UUID::gen();
{
Lock::GlobalWrite lk(&_opCtx);
- OldClientContext context(&_opCtx, ns());
+ OldClientContext context(&_opCtx, nss());
WriteUnitOfWork wunit(&_opCtx);
context.db()->createCollection(&_opCtx, nss(), coll_opts, false);
wunit.commit();
@@ -1741,7 +1741,7 @@ public:
coll_opts.uuid = UUID::gen();
{
Lock::GlobalWrite lk(&_opCtx);
- OldClientContext context(&_opCtx, ns());
+ OldClientContext context(&_opCtx, nss());
WriteUnitOfWork wunit(&_opCtx);
context.db()->createCollection(&_opCtx, nss(), coll_opts, true);
wunit.commit();
@@ -1777,7 +1777,7 @@ public:
const char* ns1 = "unittestsdb1.querytests.coll1";
{
Lock::GlobalWrite lk(&_opCtx);
- OldClientContext context(&_opCtx, ns1);
+ OldClientContext context(&_opCtx, NamespaceString(boost::none, ns1));
WriteUnitOfWork wunit(&_opCtx);
context.db()->createCollection(&_opCtx, NamespaceString(ns1));
wunit.commit();
@@ -1790,9 +1790,9 @@ public:
const char* ns2 = "unittestsdb2.querytests.coll2";
{
Lock::GlobalWrite lk(&_opCtx);
- OldClientContext context(&_opCtx, ns2);
+ OldClientContext context(&_opCtx, NamespaceString(boost::none, ns2));
WriteUnitOfWork wunit(&_opCtx);
- context.db()->createCollection(&_opCtx, NamespaceString(ns2));
+ context.db()->createCollection(&_opCtx, NamespaceString(boost::none, ns2));
wunit.commit();
}
insert(ns2, BSON("b" << 2));
@@ -1803,9 +1803,9 @@ public:
const char* ns3 = "unittestsdb3.querytests.coll3";
{
Lock::GlobalWrite lk(&_opCtx);
- OldClientContext context(&_opCtx, ns3);
+ OldClientContext context(&_opCtx, NamespaceString(boost::none, ns3));
WriteUnitOfWork wunit(&_opCtx);
- context.db()->createCollection(&_opCtx, NamespaceString(ns3));
+ context.db()->createCollection(&_opCtx, NamespaceString(boost::none, ns3));
wunit.commit();
}
insert(ns3, BSON("c" << 3));
@@ -1829,7 +1829,7 @@ public:
CollectionInternalBase(const char* nsLeaf)
: CollectionBase(nsLeaf),
_lk(&_opCtx, DatabaseName(boost::none, "unittests"), MODE_X),
- _ctx(&_opCtx, ns()) {}
+ _ctx(&_opCtx, nss()) {}
private:
Lock::DBLock _lk;
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index a929a347030..216f93ee72f 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -198,7 +198,7 @@ protected:
}
int count() const {
Lock::GlobalWrite lk(&_opCtx);
- OldClientContext ctx(&_opCtx, ns());
+ OldClientContext ctx(&_opCtx, nss());
Database* db = ctx.db();
CollectionPtr coll =
CollectionCatalog::get(&_opCtx)->lookupCollectionByNamespace(&_opCtx, nss());
@@ -240,7 +240,7 @@ protected:
}
}
- OldClientContext ctx(&_opCtx, ns());
+ OldClientContext ctx(&_opCtx, nss());
for (vector<BSONObj>::iterator i = ops.begin(); i != ops.end(); ++i) {
if (0) {
LOGV2(22501, "op: {i}", "i"_attr = *i);
@@ -270,7 +270,7 @@ protected:
::mongo::writeConflictRetry(&_opCtx, "deleteAll", ns, [&] {
NamespaceString nss(ns);
Lock::GlobalWrite lk(&_opCtx);
- OldClientContext ctx(&_opCtx, ns);
+ OldClientContext ctx(&_opCtx, nss);
WriteUnitOfWork wunit(&_opCtx);
Database* db = ctx.db();
Collection* coll =
@@ -293,7 +293,7 @@ protected:
}
void insert(const BSONObj& o) const {
Lock::GlobalWrite lk(&_opCtx);
- OldClientContext ctx(&_opCtx, ns());
+ OldClientContext ctx(&_opCtx, nss());
WriteUnitOfWork wunit(&_opCtx);
Database* db = ctx.db();
CollectionPtr coll =
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index f639b05a722..1c2b2a1dc1c 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -68,7 +68,7 @@ bool collectionExists(OperationContext* opCtx, OldClientContext* ctx, const stri
void createCollection(OperationContext* opCtx, const NamespaceString& nss) {
Lock::DBLock dbXLock(opCtx, nss.dbName(), MODE_X);
- OldClientContext ctx(opCtx, nss.ns());
+ OldClientContext ctx(opCtx, nss);
{
WriteUnitOfWork uow(opCtx);
ASSERT(!collectionExists(opCtx, &ctx, nss.ns()));
@@ -170,7 +170,7 @@ public:
dropDatabase(&opCtx, nss);
Lock::DBLock dbXLock(&opCtx, nss.dbName(), MODE_X);
- OldClientContext ctx(&opCtx, ns);
+ OldClientContext ctx(&opCtx, nss);
{
WriteUnitOfWork uow(&opCtx);
ASSERT(!collectionExists(&opCtx, &ctx, ns));
@@ -207,7 +207,7 @@ public:
dropDatabase(&opCtx, nss);
Lock::DBLock dbXLock(&opCtx, nss.dbName(), MODE_X);
- OldClientContext ctx(&opCtx, ns);
+ OldClientContext ctx(&opCtx, nss);
{
WriteUnitOfWork uow(&opCtx);
ASSERT(!collectionExists(&opCtx, &ctx, ns));
@@ -256,7 +256,7 @@ public:
dropDatabase(&opCtx, target);
Lock::GlobalWrite globalWriteLock(&opCtx);
- OldClientContext ctx(&opCtx, source.ns());
+ OldClientContext ctx(&opCtx, source);
{
WriteUnitOfWork uow(&opCtx);
@@ -310,7 +310,7 @@ public:
dropDatabase(&opCtx, target);
Lock::GlobalWrite globalWriteLock(&opCtx);
- OldClientContext ctx(&opCtx, source.ns());
+ OldClientContext ctx(&opCtx, source);
BSONObj sourceDoc = BSON("_id"
<< "source");
@@ -378,7 +378,7 @@ public:
dropDatabase(&opCtx, nss);
Lock::DBLock dbXLock(&opCtx, nss.dbName(), MODE_X);
- OldClientContext ctx(&opCtx, nss.ns());
+ OldClientContext ctx(&opCtx, nss);
BSONObj oldDoc = BSON("_id"
<< "old");
@@ -436,7 +436,7 @@ public:
dropDatabase(&opCtx, nss);
Lock::DBLock dbXLock(&opCtx, nss.dbName(), MODE_X);
- OldClientContext ctx(&opCtx, nss.ns());
+ OldClientContext ctx(&opCtx, nss);
BSONObj doc = BSON("_id"
<< "foo");
@@ -628,7 +628,7 @@ public:
dropDatabase(&opCtx, nss);
Lock::DBLock dbXLock(&opCtx, nss.dbName(), MODE_X);
- OldClientContext ctx(&opCtx, nss.ns());
+ OldClientContext ctx(&opCtx, nss);
string idxNameA = "indexA";
string idxNameB = "indexB";