author     Geert Bosch <geert@mongodb.com>  2019-04-17 16:18:22 -0400
committer  Geert Bosch <geert@mongodb.com>  2019-04-26 16:18:38 -0400
commit     8cbbba49935f632e876037f9f2d9eecc779eb96a (patch)
tree       f1b4416f63dd87ed97223751c2c0d4eff4063628 /src
parent     93dd23880759430872510ef0b539e746192e44e2 (diff)
download   mongo-8cbbba49935f632e876037f9f2d9eecc779eb96a.tar.gz
SERVER-40724 Change namespace arguments to use NamespaceString
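In broad strokes, the commit replaces raw string namespace parameters (StringData / std::string) with the structured NamespaceString type across the catalog, storage, and replication layers, so callers stop round-tripping through .ns() and callees stop re-parsing namespace strings. A minimal, self-contained sketch of the pattern (simplified stand-in types, not the server's actual classes):

    #include <iostream>
    #include <string>
    #include <string_view>

    // Simplified stand-in for the server's NamespaceString: parse "<db>.<coll>"
    // once at the boundary and pass the structured value around afterwards.
    class NamespaceString {
    public:
        explicit NamespaceString(std::string ns) : _ns(std::move(ns)), _dot(_ns.find('.')) {}
        std::string_view db() const {
            return std::string_view(_ns).substr(0, _dot);
        }
        std::string_view coll() const {
            return _dot == std::string::npos ? std::string_view()
                                             : std::string_view(_ns).substr(_dot + 1);
        }
        const std::string& ns() const {
            return _ns;
        }

    private:
        std::string _ns;
        std::size_t _dot;
    };

    // Before this commit (conceptually): void dropCollection(StringData fullns);
    // After: the callee receives the already-parsed namespace.
    void dropCollection(const NamespaceString& nss) {
        std::cout << "drop " << nss.coll() << " in database " << nss.db() << "\n";
    }

    int main() {
        dropCollection(NamespaceString("test.system.profile"));
        return 0;
    }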
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/catalog/catalog_control.cpp  2
-rw-r--r--  src/mongo/db/catalog/catalog_control_test.cpp  2
-rw-r--r--  src/mongo/db/catalog/create_collection.cpp  6
-rw-r--r--  src/mongo/db/catalog/database.h  25
-rw-r--r--  src/mongo/db/catalog/database_holder_impl.cpp  2
-rw-r--r--  src/mongo/db/catalog/database_impl.cpp  168
-rw-r--r--  src/mongo/db/catalog/database_impl.h  33
-rw-r--r--  src/mongo/db/catalog/database_test.cpp  24
-rw-r--r--  src/mongo/db/catalog/drop_collection.cpp  2
-rw-r--r--  src/mongo/db/catalog/drop_database_test.cpp  2
-rw-r--r--  src/mongo/db/catalog/index_catalog_impl.cpp  2
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp  14
-rw-r--r--  src/mongo/db/catalog/rename_collection_test.cpp  6
-rw-r--r--  src/mongo/db/catalog/uuid_catalog.cpp  2
-rw-r--r--  src/mongo/db/cloner.cpp  2
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp  4
-rw-r--r--  src/mongo/db/commands/dbhash.cpp  18
-rw-r--r--  src/mongo/db/commands/mr.cpp  12
-rw-r--r--  src/mongo/db/commands/test_commands.cpp  2
-rw-r--r--  src/mongo/db/dbhelpers.cpp  2
-rw-r--r--  src/mongo/db/index_build_entry_helpers.cpp  2
-rw-r--r--  src/mongo/db/introspect.cpp  2
-rw-r--r--  src/mongo/db/op_observer_impl.cpp  2
-rw-r--r--  src/mongo/db/ops/update.cpp  4
-rw-r--r--  src/mongo/db/pipeline/process_interface_standalone.cpp  2
-rw-r--r--  src/mongo/db/repair_database.cpp  2
-rw-r--r--  src/mongo/db/repl/oplog.cpp  15
-rw-r--r--  src/mongo/db/repl/oplog.h  6
-rw-r--r--  src/mongo/db/repl/rollback_test_fixture.cpp  4
-rw-r--r--  src/mongo/db/repl/rs_rollback_test.cpp  9
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.cpp  9
-rw-r--r--  src/mongo/db/repl/storage_interface_impl_test.cpp  2
-rw-r--r--  src/mongo/db/repl/sync_tail.cpp  11
-rw-r--r--  src/mongo/db/repl/sync_tail_test.cpp  2
-rw-r--r--  src/mongo/db/stats/top.cpp  14
-rw-r--r--  src/mongo/db/stats/top.h  6
-rw-r--r--  src/mongo/db/stats/top_test.cpp  2
-rw-r--r--  src/mongo/db/storage/kv/kv_catalog.cpp  117
-rw-r--r--  src/mongo/db/storage/kv/kv_catalog.h  39
-rw-r--r--  src/mongo/db/storage/kv/kv_catalog_test_fixture.h  5
-rw-r--r--  src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp  30
-rw-r--r--  src/mongo/db/storage/kv/kv_engine.h  4
-rw-r--r--  src/mongo/db/storage/kv/kv_engine_test_harness.cpp  73
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine.cpp  31
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine.h  2
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine_test.cpp  6
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine_test_fixture.h  13
-rw-r--r--  src/mongo/db/storage/storage_engine.h  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp  10
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp  22
-rw-r--r--  src/mongo/db/transaction_participant_test.cpp  8
-rw-r--r--  src/mongo/dbtests/commandtests.cpp  49
-rw-r--r--  src/mongo/dbtests/counttests.cpp  12
-rw-r--r--  src/mongo/dbtests/extensions_callback_real_test.cpp  4
-rw-r--r--  src/mongo/dbtests/indexcatalogtests.cpp  38
-rw-r--r--  src/mongo/dbtests/indexupdatetests.cpp  17
-rw-r--r--  src/mongo/dbtests/multikey_paths_test.cpp  4
-rw-r--r--  src/mongo/dbtests/pdfiletests.cpp  13
-rw-r--r--  src/mongo/dbtests/query_stage_and.cpp  41
-rw-r--r--  src/mongo/dbtests/query_stage_cached_plan.cpp  2
-rw-r--r--  src/mongo/dbtests/query_stage_count.cpp  8
-rw-r--r--  src/mongo/dbtests/query_stage_count_scan.cpp  2
-rw-r--r--  src/mongo/dbtests/query_stage_fetch.cpp  13
-rw-r--r--  src/mongo/dbtests/query_stage_ixscan.cpp  7
-rw-r--r--  src/mongo/dbtests/query_stage_merge_sort.cpp  44
-rw-r--r--  src/mongo/dbtests/query_stage_sort.cpp  27
-rw-r--r--  src/mongo/dbtests/querytests.cpp  22
-rw-r--r--  src/mongo/dbtests/repltests.cpp  25
-rw-r--r--  src/mongo/dbtests/rollbacktests.cpp  2
-rw-r--r--  src/mongo/dbtests/storage_timestamp_tests.cpp  10
-rw-r--r--  src/mongo/dbtests/validate_tests.cpp  52
72 files changed, 603 insertions, 577 deletions
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index a13ca2d2650..4e91be65a5b 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -179,7 +179,7 @@ void openCatalog(OperationContext* opCtx, const MinVisibleTimestampMap& minVisib
invariant(db, str::stream() << "failed to reopen database " << dbName);
for (auto&& collNss : UUIDCatalog::get(opCtx).getAllCollectionNamesFromDb(opCtx, dbName)) {
// Note that the collection name already includes the database component.
- auto collection = db->getCollection(opCtx, collNss.ns());
+ auto collection = db->getCollection(opCtx, collNss);
invariant(collection,
str::stream() << "failed to get valid collection pointer for namespace "
<< collNss);
diff --git a/src/mongo/db/catalog/catalog_control_test.cpp b/src/mongo/db/catalog/catalog_control_test.cpp
index 4f3f25da4c1..3a9565917fa 100644
--- a/src/mongo/db/catalog/catalog_control_test.cpp
+++ b/src/mongo/db/catalog/catalog_control_test.cpp
@@ -69,7 +69,7 @@ public:
int flushAllFiles(OperationContext* opCtx, bool sync) final {
return 0;
}
- Status repairRecordStore(OperationContext* opCtx, const std::string& ns) final {
+ Status repairRecordStore(OperationContext* opCtx, const NamespaceString& ns) final {
return Status::OK();
}
std::unique_ptr<TemporaryRecordStore> makeTemporaryRecordStore(OperationContext* opCtx) final {
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index 92ba6b13141..75c2dc37ecd 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -265,8 +265,7 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
log() << "CMD: create " << newCollName
<< " - renaming existing collection with conflicting UUID " << uuid
<< " to temporary collection " << tmpName;
- Status status =
- db->renameCollection(opCtx, newCollName.ns(), tmpName.ns(), stayTemp);
+ Status status = db->renameCollection(opCtx, newCollName, tmpName, stayTemp);
if (!status.isOK())
return Result(status);
opObserver->onRenameCollection(opCtx,
@@ -284,8 +283,7 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
uassert(40655,
str::stream() << "Invalid name " << newCollName << " for UUID " << uuid,
currentName.db() == newCollName.db());
- Status status =
- db->renameCollection(opCtx, currentName.ns(), newCollName.ns(), stayTemp);
+ Status status = db->renameCollection(opCtx, currentName, newCollName, stayTemp);
if (!status.isOK())
return Result(status);
opObserver->onRenameCollection(opCtx,
diff --git a/src/mongo/db/catalog/database.h b/src/mongo/db/catalog/database.h
index fe6be299938..c13a0920b57 100644
--- a/src/mongo/db/catalog/database.h
+++ b/src/mongo/db/catalog/database.h
@@ -128,19 +128,19 @@ public:
*
* The caller should hold a DB X lock and ensure there are no index builds in progress on the
* collection.
+ * N.B. Namespace argument is passed by value as it may otherwise disappear or change.
*/
virtual Status dropCollection(OperationContext* const opCtx,
- const StringData fullns,
+ NamespaceString nss,
repl::OpTime dropOpTime = {}) const = 0;
virtual Status dropCollectionEvenIfSystem(OperationContext* const opCtx,
- const NamespaceString& fullns,
+ NamespaceString nss,
repl::OpTime dropOpTime = {}) const = 0;
- virtual Status dropView(OperationContext* const opCtx,
- const NamespaceString& viewName) const = 0;
+ virtual Status dropView(OperationContext* const opCtx, NamespaceString viewName) const = 0;
virtual Collection* createCollection(OperationContext* const opCtx,
- StringData ns,
+ const NamespaceString& nss,
const CollectionOptions& options = CollectionOptions(),
const bool createDefaultIndexes = true,
const BSONObj& idIndex = BSONObj()) const = 0;
@@ -149,19 +149,18 @@ public:
const NamespaceString& viewName,
const CollectionOptions& options) const = 0;
- /**
- * @param ns - this is fully qualified, which is maybe not ideal ???
- */
- virtual Collection* getCollection(OperationContext* opCtx, const StringData ns) const = 0;
-
- virtual Collection* getCollection(OperationContext* opCtx, const NamespaceString& ns) const = 0;
+ virtual Collection* getCollection(OperationContext* opCtx,
+ const NamespaceString& nss) const = 0;
virtual Collection* getOrCreateCollection(OperationContext* const opCtx,
const NamespaceString& nss) const = 0;
+ /**
+ * Arguments are passed by value as they otherwise would be changing as result of renaming.
+ */
virtual Status renameCollection(OperationContext* const opCtx,
- const StringData fromNS,
- const StringData toNS,
+ NamespaceString fromNss,
+ NamespaceString toNss,
const bool stayTemp) const = 0;
virtual const NamespaceString& getSystemViewsName() const = 0;
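The new dropCollection/renameCollection signatures above take NamespaceString by value, per the added comments, because the source namespace can change (or the object it refers to can go away) while the operation runs. A small illustration of that hazard using plain std::string and a simplified Collection that owns its name (not MongoDB code, just the shape of the problem):

    #include <iostream>
    #include <string>

    // Simplified model: the collection owns its current namespace and the rename
    // routine mutates it. If 'fromNs' were taken by const reference and the caller
    // passed coll.ns, the parameter would observe the new name as soon as the
    // rename happens. Taking it by value pins a stable copy for the rest of the
    // operation (logging, catalog updates, and so on).
    struct Collection {
        std::string ns;
    };

    void renameCollection(Collection& coll, std::string fromNs, std::string toNs) {
        coll.ns = toNs;  // mutates the state the caller's argument aliased
        std::cout << "renamed " << fromNs << " -> " << toNs << "\n";  // fromNs still holds the old name
    }

    int main() {
        Collection coll{"test.from"};
        renameCollection(coll, coll.ns, "test.to");  // safe: fromNs is copied before the mutation
        return 0;
    }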
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index 9560ec6f468..dfc9b7f7ae3 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -189,7 +189,7 @@ void DatabaseHolderImpl::dropDb(OperationContext* opCtx, Database* db) {
break;
}
- Top::get(serviceContext).collectionDropped(coll->ns().ns(), true);
+ Top::get(serviceContext).collectionDropped(coll->ns(), true);
}
close(opCtx, name);
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index b215bca2dd7..b37b3a596e7 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -206,7 +206,7 @@ void DatabaseImpl::clearTmpCollections(OperationContext* opCtx) const {
continue;
try {
WriteUnitOfWork wunit(opCtx);
- Status status = dropCollection(opCtx, nss.ns(), {});
+ Status status = dropCollection(opCtx, nss, {});
if (!status.isOK()) {
warning() << "could not drop temp collection '" << nss << "': " << redact(status);
@@ -327,40 +327,37 @@ void DatabaseImpl::getStats(OperationContext* opCtx, BSONObjBuilder* output, dou
}
}
-Status DatabaseImpl::dropView(OperationContext* opCtx, const NamespaceString& viewName) const {
+Status DatabaseImpl::dropView(OperationContext* opCtx, NamespaceString viewName) const {
dassert(opCtx->lockState()->isDbLockedForMode(name(), MODE_IX));
dassert(opCtx->lockState()->isCollectionLockedForMode(viewName, MODE_IX));
dassert(opCtx->lockState()->isCollectionLockedForMode(NamespaceString(_viewsName), MODE_X));
auto views = ViewCatalog::get(this);
Status status = views->dropView(opCtx, viewName);
- Top::get(opCtx->getServiceContext()).collectionDropped(viewName.toString());
+ Top::get(opCtx->getServiceContext()).collectionDropped(viewName);
return status;
}
Status DatabaseImpl::dropCollection(OperationContext* opCtx,
- StringData fullns,
+ NamespaceString nss,
repl::OpTime dropOpTime) const {
- if (!getCollection(opCtx, fullns)) {
+ if (!getCollection(opCtx, nss)) {
// Collection doesn't exist so don't bother validating if it can be dropped.
return Status::OK();
}
- NamespaceString nss(fullns);
- {
- verify(nss.db() == _name);
+ invariant(nss.db() == _name);
- if (nss.isSystem()) {
- if (nss.isSystemDotProfile()) {
- if (_profile.load() != 0)
- return Status(ErrorCodes::IllegalOperation,
- "turn off profiling before dropping system.profile collection");
- } else if (!(nss.isSystemDotViews() || nss.isHealthlog() ||
- nss == NamespaceString::kLogicalSessionsNamespace ||
- nss == NamespaceString::kSystemKeysNamespace)) {
+ if (nss.isSystem()) {
+ if (nss.isSystemDotProfile()) {
+ if (_profile.load() != 0)
return Status(ErrorCodes::IllegalOperation,
- str::stream() << "can't drop system collection " << fullns);
- }
+ "turn off profiling before dropping system.profile collection");
+ } else if (!(nss.isSystemDotViews() || nss.isHealthlog() ||
+ nss == NamespaceString::kLogicalSessionsNamespace ||
+ nss == NamespaceString::kSystemKeysNamespace)) {
+ return Status(ErrorCodes::IllegalOperation,
+ str::stream() << "can't drop system collection " << nss);
}
}
@@ -368,11 +365,11 @@ Status DatabaseImpl::dropCollection(OperationContext* opCtx,
}
Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
- const NamespaceString& fullns,
+ NamespaceString nss,
repl::OpTime dropOpTime) const {
- invariant(opCtx->lockState()->isCollectionLockedForMode(fullns, MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X));
- LOG(1) << "dropCollection: " << fullns;
+ LOG(1) << "dropCollection: " << nss;
// A valid 'dropOpTime' is not allowed when writes are replicated.
if (!dropOpTime.isNull() && opCtx->writesAreReplicated()) {
@@ -381,7 +378,7 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
"dropCollection() cannot accept a valid drop optime when writes are replicated.");
}
- Collection* collection = getCollection(opCtx, fullns);
+ Collection* collection = getCollection(opCtx, nss);
if (!collection) {
return Status::OK(); // Post condition already met.
@@ -392,32 +389,32 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
auto uuid = collection->uuid();
auto uuidString = uuid ? uuid.get().toString() : "no UUID";
- uassertNamespaceNotIndex(fullns.toString(), "dropCollection");
+ uassertNamespaceNotIndex(nss.toString(), "dropCollection");
// Make sure no indexes builds are in progress.
// Use massert() to be consistent with IndexCatalog::dropAllIndexes().
auto numIndexesInProgress = collection->getIndexCatalog()->numIndexesInProgress(opCtx);
massert(ErrorCodes::BackgroundOperationInProgressForNamespace,
- str::stream() << "cannot drop collection " << fullns << " (" << uuidString << ") when "
+ str::stream() << "cannot drop collection " << nss << " (" << uuidString << ") when "
<< numIndexesInProgress
<< " index builds in progress.",
numIndexesInProgress == 0);
- audit::logDropCollection(&cc(), fullns.toString());
+ audit::logDropCollection(&cc(), nss.toString());
auto serviceContext = opCtx->getServiceContext();
- Top::get(serviceContext).collectionDropped(fullns.toString());
+ Top::get(serviceContext).collectionDropped(nss);
// Drop unreplicated collections immediately.
// If 'dropOpTime' is provided, we should proceed to rename the collection.
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
auto opObserver = serviceContext->getOpObserver();
- auto isOplogDisabledForNamespace = replCoord->isOplogDisabledFor(opCtx, fullns);
+ auto isOplogDisabledForNamespace = replCoord->isOplogDisabledFor(opCtx, nss);
if (dropOpTime.isNull() && isOplogDisabledForNamespace) {
- _dropCollectionIndexes(opCtx, fullns, collection);
+ _dropCollectionIndexes(opCtx, nss, collection);
opObserver->onDropCollection(
- opCtx, fullns, uuid, numRecords, OpObserver::CollectionDropType::kOnePhase);
- return _finishDropCollection(opCtx, fullns, collection);
+ opCtx, nss, uuid, numRecords, OpObserver::CollectionDropType::kOnePhase);
+ return _finishDropCollection(opCtx, nss, collection);
}
// Replicated collections should be dropped in two phases.
@@ -426,27 +423,27 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
// storage engine and will no longer be visible at the catalog layer with 3.6-style
// <db>.system.drop.* namespaces.
if (serviceContext->getStorageEngine()->supportsPendingDrops()) {
- _dropCollectionIndexes(opCtx, fullns, collection);
+ _dropCollectionIndexes(opCtx, nss, collection);
auto commitTimestamp = opCtx->recoveryUnit()->getCommitTimestamp();
- log() << "dropCollection: " << fullns << " (" << uuidString
+ log() << "dropCollection: " << nss << " (" << uuidString
<< ") - storage engine will take ownership of drop-pending collection with optime "
<< dropOpTime << " and commit timestamp " << commitTimestamp;
if (dropOpTime.isNull()) {
// Log oplog entry for collection drop and remove the UUID.
dropOpTime = opObserver->onDropCollection(
- opCtx, fullns, uuid, numRecords, OpObserver::CollectionDropType::kOnePhase);
+ opCtx, nss, uuid, numRecords, OpObserver::CollectionDropType::kOnePhase);
invariant(!dropOpTime.isNull());
} else {
// If we are provided with a valid 'dropOpTime', it means we are dropping this
// collection in the context of applying an oplog entry on a secondary.
auto opTime = opObserver->onDropCollection(
- opCtx, fullns, uuid, numRecords, OpObserver::CollectionDropType::kOnePhase);
+ opCtx, nss, uuid, numRecords, OpObserver::CollectionDropType::kOnePhase);
// OpObserver::onDropCollection should not be writing to the oplog on the secondary.
invariant(opTime.isNull());
}
- return _finishDropCollection(opCtx, fullns, collection);
+ return _finishDropCollection(opCtx, nss, collection);
}
// Old two-phase drop: Replicated collections will be renamed with a special drop-pending
@@ -455,24 +452,24 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
if (dropOpTime.isNull()) {
// Log oplog entry for collection drop.
dropOpTime = opObserver->onDropCollection(
- opCtx, fullns, uuid, numRecords, OpObserver::CollectionDropType::kTwoPhase);
+ opCtx, nss, uuid, numRecords, OpObserver::CollectionDropType::kTwoPhase);
invariant(!dropOpTime.isNull());
} else {
// If we are provided with a valid 'dropOpTime', it means we are dropping this
// collection in the context of applying an oplog entry on a secondary.
auto opTime = opObserver->onDropCollection(
- opCtx, fullns, uuid, numRecords, OpObserver::CollectionDropType::kTwoPhase);
+ opCtx, nss, uuid, numRecords, OpObserver::CollectionDropType::kTwoPhase);
// OpObserver::onDropCollection should not be writing to the oplog on the secondary.
invariant(opTime.isNull());
}
// Rename collection using drop-pending namespace generated from drop optime.
- auto dpns = fullns.makeDropPendingNamespace(dropOpTime);
+ auto dpns = nss.makeDropPendingNamespace(dropOpTime);
const bool stayTemp = true;
- log() << "dropCollection: " << fullns << " (" << uuidString
+ log() << "dropCollection: " << nss << " (" << uuidString
<< ") - renaming to drop-pending collection: " << dpns << " with drop optime "
<< dropOpTime;
- fassert(40464, renameCollection(opCtx, fullns.ns(), dpns.ns(), stayTemp));
+ fassert(40464, renameCollection(opCtx, nss, dpns, stayTemp));
// Register this drop-pending namespace with DropPendingCollectionReaper to remove when the
// committed optime reaches the drop optime.
@@ -482,34 +479,28 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
}
void DatabaseImpl::_dropCollectionIndexes(OperationContext* opCtx,
- const NamespaceString& fullns,
+ const NamespaceString& nss,
Collection* collection) const {
- invariant(_name == fullns.db());
- LOG(1) << "dropCollection: " << fullns << " - dropAllIndexes start";
+ invariant(_name == nss.db());
+ LOG(1) << "dropCollection: " << nss << " - dropAllIndexes start";
collection->getIndexCatalog()->dropAllIndexes(opCtx, true);
invariant(collection->getCatalogEntry()->getTotalIndexCount(opCtx) == 0);
- LOG(1) << "dropCollection: " << fullns << " - dropAllIndexes done";
+ LOG(1) << "dropCollection: " << nss << " - dropAllIndexes done";
}
Status DatabaseImpl::_finishDropCollection(OperationContext* opCtx,
- const NamespaceString& fullns,
+ const NamespaceString& nss,
Collection* collection) const {
UUID uuid = *collection->uuid();
- log() << "Finishing collection drop for " << fullns << " (" << uuid << ").";
+ log() << "Finishing collection drop for " << nss << " (" << uuid << ").";
UUIDCatalog& catalog = UUIDCatalog::get(opCtx);
catalog.onDropCollection(opCtx, uuid);
auto storageEngine =
checked_cast<KVStorageEngine*>(opCtx->getServiceContext()->getStorageEngine());
- return storageEngine->getCatalog()->dropCollection(opCtx, fullns.toString());
-}
-
-Collection* DatabaseImpl::getCollection(OperationContext* opCtx, StringData ns) const {
- NamespaceString nss(ns);
- invariant(_name == nss.db());
- return getCollection(opCtx, nss);
+ return storageEngine->getCatalog()->dropCollection(opCtx, nss);
}
Collection* DatabaseImpl::getCollection(OperationContext* opCtx, const NamespaceString& nss) const {
@@ -528,57 +519,54 @@ Collection* DatabaseImpl::getCollection(OperationContext* opCtx, const Namespace
}
Status DatabaseImpl::renameCollection(OperationContext* opCtx,
- StringData fromNS,
- StringData toNS,
+ NamespaceString fromNss,
+ NamespaceString toNss,
bool stayTemp) const {
- audit::logRenameCollection(&cc(), fromNS, toNS);
+ audit::logRenameCollection(&cc(), fromNss.ns(), toNss.ns());
// TODO SERVER-39518 : Temporarily comment this out because dropCollection uses
// this function and now it only takes a database IX lock. We can change
// this invariant to IX once renameCollection only MODE_IX as well.
// invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
- const NamespaceString fromNSS(fromNS);
- const NamespaceString toNSS(toNS);
-
- invariant(fromNSS.db() == _name);
- invariant(toNSS.db() == _name);
- if (getCollection(opCtx, toNSS)) {
+ invariant(fromNss.db() == _name);
+ invariant(toNss.db() == _name);
+ if (getCollection(opCtx, toNss)) {
return Status(ErrorCodes::NamespaceExists,
- str::stream() << "Cannot rename '" << fromNS << "' to '" << toNS
+ str::stream() << "Cannot rename '" << fromNss << "' to '" << toNss
<< "' because the destination namespace already exists");
}
- Collection* collToRename = getCollection(opCtx, fromNSS);
+ Collection* collToRename = getCollection(opCtx, fromNss);
if (!collToRename) {
return Status(ErrorCodes::NamespaceNotFound, "collection not found to rename");
}
invariant(!collToRename->getIndexCatalog()->haveAnyIndexesInProgress(),
str::stream() << "cannot perform operation: an index build is currently running for "
"collection "
- << fromNSS);
+ << fromNss);
- Collection* toColl = getCollection(opCtx, toNSS);
+ Collection* toColl = getCollection(opCtx, toNss);
if (toColl) {
invariant(
!toColl->getIndexCatalog()->haveAnyIndexesInProgress(),
str::stream() << "cannot perform operation: an index build is currently running for "
"collection "
- << toNSS);
+ << toNss);
}
log() << "renameCollection: renaming collection " << collToRename->uuid()->toString()
- << " from " << fromNS << " to " << toNS;
+ << " from " << fromNss << " to " << toNss;
- Top::get(opCtx->getServiceContext()).collectionDropped(fromNS.toString());
+ Top::get(opCtx->getServiceContext()).collectionDropped(fromNss);
auto storageEngine =
checked_cast<KVStorageEngine*>(opCtx->getServiceContext()->getStorageEngine());
- Status status = storageEngine->getCatalog()->renameCollection(opCtx, fromNS, toNS, stayTemp);
+ Status status = storageEngine->getCatalog()->renameCollection(opCtx, fromNss, toNss, stayTemp);
// Set the namespace of 'collToRename' from within the UUIDCatalog. This is necessary because
// the UUIDCatalog mutex synchronizes concurrent access to the collection's namespace for
// callers that may not hold a collection lock.
- UUIDCatalog::get(opCtx).setCollectionNamespace(opCtx, collToRename, fromNSS, toNSS);
+ UUIDCatalog::get(opCtx).setCollectionNamespace(opCtx, collToRename, fromNss, toNss);
opCtx->recoveryUnit()->onCommit([collToRename](auto commitTime) {
// Ban reading from this collection on committed reads on snapshots before now.
@@ -595,7 +583,7 @@ Collection* DatabaseImpl::getOrCreateCollection(OperationContext* opCtx,
Collection* c = getCollection(opCtx, nss);
if (!c) {
- c = createCollection(opCtx, nss.ns());
+ c = createCollection(opCtx, nss);
}
return c;
}
@@ -650,12 +638,11 @@ Status DatabaseImpl::createView(OperationContext* opCtx,
}
Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
- StringData ns,
+ const NamespaceString& nss,
const CollectionOptions& options,
bool createIdIndex,
const BSONObj& idIndex) const {
invariant(!options.isView());
- NamespaceString nss(ns);
invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_IX));
@@ -695,9 +682,9 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
}
_checkCanCreateCollection(opCtx, nss, optionsWithUUID);
- audit::logCreateCollection(&cc(), ns);
+ audit::logCreateCollection(&cc(), nss.ns());
- log() << "createCollection: " << ns << " with " << (generatedUUID ? "generated" : "provided")
+ log() << "createCollection: " << nss << " with " << (generatedUUID ? "generated" : "provided")
<< " UUID: " << optionsWithUUID.uuid.get();
// Create CollectionCatalogEntry
@@ -836,7 +823,7 @@ void DatabaseImpl::checkForIdIndexesAndDropPendingCollections(OperationContext*
if (nss.isSystem())
continue;
- Collection* coll = getCollection(opCtx, nss.ns());
+ Collection* coll = getCollection(opCtx, nss);
if (!coll)
continue;
@@ -852,24 +839,24 @@ void DatabaseImpl::checkForIdIndexesAndDropPendingCollections(OperationContext*
}
Status DatabaseImpl::userCreateNS(OperationContext* opCtx,
- const NamespaceString& fullns,
+ const NamespaceString& nss,
CollectionOptions collectionOptions,
bool createDefaultIndexes,
const BSONObj& idIndex) const {
- LOG(1) << "create collection " << fullns << ' ' << collectionOptions.toBSON();
+ LOG(1) << "create collection " << nss << ' ' << collectionOptions.toBSON();
- if (!NamespaceString::validCollectionComponent(fullns.ns()))
- return Status(ErrorCodes::InvalidNamespace, str::stream() << "invalid ns: " << fullns);
+ if (!NamespaceString::validCollectionComponent(nss.ns()))
+ return Status(ErrorCodes::InvalidNamespace, str::stream() << "invalid ns: " << nss);
- Collection* collection = getCollection(opCtx, fullns);
+ Collection* collection = getCollection(opCtx, nss);
if (collection)
return Status(ErrorCodes::NamespaceExists,
- str::stream() << "a collection '" << fullns << "' already exists");
+ str::stream() << "a collection '" << nss << "' already exists");
- if (ViewCatalog::get(this)->lookup(opCtx, fullns.ns()))
+ if (ViewCatalog::get(this)->lookup(opCtx, nss.ns()))
return Status(ErrorCodes::NamespaceExists,
- str::stream() << "a view '" << fullns << "' already exists");
+ str::stream() << "a view '" << nss << "' already exists");
// Validate the collation, if there is one.
std::unique_ptr<CollatorInterface> collator;
@@ -938,13 +925,12 @@ Status DatabaseImpl::userCreateNS(OperationContext* opCtx,
}
if (collectionOptions.isView()) {
- uassertStatusOK(createView(opCtx, fullns, collectionOptions));
+ uassertStatusOK(createView(opCtx, nss, collectionOptions));
} else {
- invariant(
- createCollection(opCtx, fullns.ns(), collectionOptions, createDefaultIndexes, idIndex),
- str::stream() << "Collection creation failed after validating options: " << fullns
- << ". Options: "
- << collectionOptions.toBSON());
+ invariant(createCollection(opCtx, nss, collectionOptions, createDefaultIndexes, idIndex),
+ str::stream() << "Collection creation failed after validating options: " << nss
+ << ". Options: "
+ << collectionOptions.toBSON());
}
return Status::OK();
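The dropCollectionEvenIfSystem path above distinguishes one-phase drops (unreplicated collections, or engines that support pending drops) from the old two-phase drop, which renames the collection to a drop-pending namespace derived from the drop optime and lets DropPendingCollectionReaper remove it once the commit point passes that optime. A rough sketch of how such a drop-pending name can be derived from an optime; the exact server format may differ, this is illustrative only:

    #include <cstdint>
    #include <iostream>
    #include <string>

    // Illustrative only: build a drop-pending name of the form
    // <db>.system.drop.<secs>i<inc>t<term>.<coll> from the drop optime, so the
    // reaper can later match the namespace against the replicated commit point.
    struct OpTime {
        uint64_t secs;
        uint32_t inc;
        int64_t term;
    };

    std::string makeDropPendingName(const std::string& db, const std::string& coll, const OpTime& t) {
        return db + ".system.drop." + std::to_string(t.secs) + "i" + std::to_string(t.inc) + "t" +
            std::to_string(t.term) + "." + coll;
    }

    int main() {
        std::cout << makeDropPendingName("test", "coll", OpTime{1555500000, 1, 3}) << "\n";
        // prints: test.system.drop.1555500000i1t3.coll
        return 0;
    }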
diff --git a/src/mongo/db/catalog/database_impl.h b/src/mongo/db/catalog/database_impl.h
index bb9fd30b2a3..ffaee26c449 100644
--- a/src/mongo/db/catalog/database_impl.h
+++ b/src/mongo/db/catalog/database_impl.h
@@ -83,22 +83,22 @@ public:
* collection.
*/
Status dropCollection(OperationContext* opCtx,
- StringData fullns,
+ NamespaceString nss,
repl::OpTime dropOpTime) const final;
Status dropCollectionEvenIfSystem(OperationContext* opCtx,
- const NamespaceString& fullns,
+ NamespaceString nss,
repl::OpTime dropOpTime) const final;
- Status dropView(OperationContext* opCtx, const NamespaceString& viewName) const final;
+ Status dropView(OperationContext* opCtx, NamespaceString viewName) const final;
Status userCreateNS(OperationContext* opCtx,
- const NamespaceString& fullns,
+ const NamespaceString& nss,
CollectionOptions collectionOptions,
bool createDefaultIndexes,
const BSONObj& idIndex) const final;
Collection* createCollection(OperationContext* opCtx,
- StringData ns,
+ const NamespaceString& nss,
const CollectionOptions& options = CollectionOptions(),
bool createDefaultIndexes = true,
const BSONObj& idIndex = BSONObj()) const final;
@@ -107,27 +107,14 @@ public:
const NamespaceString& viewName,
const CollectionOptions& options) const final;
- /**
- * @param ns - this is fully qualified, which is maybe not ideal ???
- */
- Collection* getCollection(OperationContext* opCtx, StringData ns) const final;
-
- Collection* getCollection(OperationContext* opCtx, const NamespaceString& ns) const;
+ Collection* getCollection(OperationContext* opCtx, const NamespaceString& nss) const;
Collection* getOrCreateCollection(OperationContext* opCtx,
const NamespaceString& nss) const final;
- /**
- * Renames the fully qualified namespace 'fromNS' to the fully qualified namespace 'toNS'.
- * Illegal to call unless both 'fromNS' and 'toNS' are within this database. Returns an error if
- * 'toNS' already exists or 'fromNS' does not exist.
- *
- * The caller should hold a DB X lock and ensure there are no index builds in progress on either
- * the 'fromNS' or the 'toNS'.
- */
Status renameCollection(OperationContext* opCtx,
- StringData fromNS,
- StringData toNS,
+ NamespaceString fromNss,
+ NamespaceString toNss,
bool stayTemp) const final;
static Status validateDBName(StringData dbname);
@@ -168,14 +155,14 @@ private:
* unreplicated collection drops.
*/
Status _finishDropCollection(OperationContext* opCtx,
- const NamespaceString& fullns,
+ const NamespaceString& nss,
Collection* collection) const;
/**
* Removes all indexes for a collection.
*/
void _dropCollectionIndexes(OperationContext* opCtx,
- const NamespaceString& fullns,
+ const NamespaceString& nss,
Collection* collection) const;
const std::string _name; // "dbname"
diff --git a/src/mongo/db/catalog/database_test.cpp b/src/mongo/db/catalog/database_test.cpp
index a1ad7565aff..337a56d0a96 100644
--- a/src/mongo/db/catalog/database_test.cpp
+++ b/src/mongo/db/catalog/database_test.cpp
@@ -158,7 +158,7 @@ TEST_F(DatabaseTest, CreateCollectionThrowsExceptionWhenDatabaseIsInADropPending
ON_BLOCK_EXIT([&wuow] { wuow.commit(); });
ASSERT_THROWS_CODE_AND_WHAT(
- db->createCollection(_opCtx.get(), _nss.ns()),
+ db->createCollection(_opCtx.get(), _nss),
AssertionException,
ErrorCodes::DatabaseDropPending,
(StringBuilder() << "Cannot create collection " << _nss
@@ -179,12 +179,12 @@ void _testDropCollection(OperationContext* opCtx,
WriteUnitOfWork wuow(opCtx);
if (createCollectionBeforeDrop) {
- ASSERT_TRUE(db->createCollection(opCtx, nss.ns(), collOpts));
+ ASSERT_TRUE(db->createCollection(opCtx, nss, collOpts));
} else {
ASSERT_FALSE(db->getCollection(opCtx, nss));
}
- ASSERT_OK(db->dropCollection(opCtx, nss.ns(), dropOpTime));
+ ASSERT_OK(db->dropCollection(opCtx, nss, dropOpTime));
ASSERT_FALSE(db->getCollection(opCtx, nss));
wuow.commit();
@@ -247,10 +247,10 @@ TEST_F(DatabaseTest, DropCollectionRejectsProvidedDropOpTimeIfWritesAreReplicate
ASSERT_TRUE(db);
WriteUnitOfWork wuow(opCtx);
- ASSERT_TRUE(db->createCollection(opCtx, nss.ns()));
+ ASSERT_TRUE(db->createCollection(opCtx, nss));
repl::OpTime dropOpTime(Timestamp(Seconds(100), 0), 1LL);
- ASSERT_EQUALS(ErrorCodes::BadValue, db->dropCollection(opCtx, nss.ns(), dropOpTime));
+ ASSERT_EQUALS(ErrorCodes::BadValue, db->dropCollection(opCtx, nss, dropOpTime));
});
}
@@ -290,7 +290,7 @@ void _testDropCollectionThrowsExceptionIfThereAreIndexesInProgress(OperationCont
Collection* collection = nullptr;
{
WriteUnitOfWork wuow(opCtx);
- ASSERT_TRUE(collection = db->createCollection(opCtx, nss.ns()));
+ ASSERT_TRUE(collection = db->createCollection(opCtx, nss));
wuow.commit();
}
@@ -319,7 +319,7 @@ void _testDropCollectionThrowsExceptionIfThereAreIndexesInProgress(OperationCont
ASSERT_GREATER_THAN(indexCatalog->numIndexesInProgress(opCtx), 0);
WriteUnitOfWork wuow(opCtx);
- ASSERT_THROWS_CODE(db->dropCollection(opCtx, nss.ns()),
+ ASSERT_THROWS_CODE(db->dropCollection(opCtx, nss),
AssertionException,
ErrorCodes::BackgroundOperationInProgressForNamespace);
});
@@ -357,11 +357,11 @@ TEST_F(DatabaseTest, RenameCollectionPreservesUuidOfSourceCollectionAndUpdatesUu
WriteUnitOfWork wuow(opCtx);
CollectionOptions fromCollectionOptions;
fromCollectionOptions.uuid = fromUuid;
- ASSERT_TRUE(db->createCollection(opCtx, fromNss.ns(), fromCollectionOptions));
+ ASSERT_TRUE(db->createCollection(opCtx, fromNss, fromCollectionOptions));
ASSERT_EQUALS(fromNss, uuidCatalog.lookupNSSByUUID(fromUuid));
auto stayTemp = false;
- ASSERT_OK(db->renameCollection(opCtx, fromNss.ns(), toNss.ns(), stayTemp));
+ ASSERT_OK(db->renameCollection(opCtx, fromNss, toNss, stayTemp));
ASSERT_FALSE(db->getCollection(opCtx, fromNss));
auto toCollection = db->getCollection(opCtx, toNss);
@@ -428,7 +428,7 @@ TEST_F(DatabaseTest, MakeUniqueCollectionNamespaceReplacesPercentSignsWithRandom
// collections in the database for collisions while generating the namespace.
{
WriteUnitOfWork wuow(_opCtx.get());
- ASSERT_TRUE(db->createCollection(_opCtx.get(), nss1.ns()));
+ ASSERT_TRUE(db->createCollection(_opCtx.get(), nss1));
wuow.commit();
}
@@ -465,7 +465,7 @@ TEST_F(
for (const auto c : charsToChooseFrom) {
NamespaceString nss(_nss.db(), model.substr(0, model.find('%')) + std::string(1U, c));
WriteUnitOfWork wuow(_opCtx.get());
- ASSERT_TRUE(db->createCollection(_opCtx.get(), nss.ns()));
+ ASSERT_TRUE(db->createCollection(_opCtx.get(), nss));
wuow.commit();
}
@@ -544,7 +544,7 @@ TEST_F(DatabaseTest, CreateCollectionProhibitsReplicatedCollectionsWithoutIdInde
options.setNoIdIndex();
ASSERT_THROWS_CODE_AND_WHAT(
- db->createCollection(_opCtx.get(), _nss.ns(), options),
+ db->createCollection(_opCtx.get(), _nss, options),
AssertionException,
50001,
(StringBuilder() << "autoIndexId:false is not allowed for collection " << _nss
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index b1d76ca11b4..d48064b8ffc 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -132,7 +132,7 @@ Status _dropCollection(OperationContext* opCtx,
IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(coll->uuid().get());
Status status =
systemCollectionMode == DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops
- ? db->dropCollection(opCtx, collectionName.ns(), dropOpTime)
+ ? db->dropCollection(opCtx, collectionName, dropOpTime)
: db->dropCollectionEvenIfSystem(opCtx, collectionName, dropOpTime);
if (!status.isOK()) {
diff --git a/src/mongo/db/catalog/drop_database_test.cpp b/src/mongo/db/catalog/drop_database_test.cpp
index 26ada8ef871..d7f13c67e90 100644
--- a/src/mongo/db/catalog/drop_database_test.cpp
+++ b/src/mongo/db/catalog/drop_database_test.cpp
@@ -186,7 +186,7 @@ void _createCollection(OperationContext* opCtx, const NamespaceString& nss) {
ASSERT_TRUE(db);
WriteUnitOfWork wuow(opCtx);
- ASSERT_TRUE(db->createCollection(opCtx, nss.ns()));
+ ASSERT_TRUE(db->createCollection(opCtx, nss));
wuow.commit();
});
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index f946ebf7c63..3c26997d12e 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -167,7 +167,7 @@ IndexCatalogEntry* IndexCatalogImpl::_setupInMemoryStructures(
KVStorageEngine* engine =
checked_cast<KVStorageEngine*>(opCtx->getServiceContext()->getStorageEngine());
std::string ident =
- engine->getCatalog()->getIndexIdent(opCtx, _collection->ns().ns(), desc->indexName());
+ engine->getCatalog()->getIndexIdent(opCtx, _collection->ns(), desc->indexName());
SortedDataInterface* sdi =
engine->getEngine()->getGroupedSortedDataInterface(opCtx, ident, desc, entry->getPrefix());
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 89e99ac6cd4..e005942cb62 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -103,7 +103,7 @@ Status renameTargetCollectionToTmp(OperationContext* opCtx,
const bool stayTemp = true;
return writeConflictRetry(opCtx, "renameCollection", targetNs.ns(), [&] {
WriteUnitOfWork wunit(opCtx);
- auto status = targetDB->renameCollection(opCtx, targetNs.ns(), tmpName.ns(), stayTemp);
+ auto status = targetDB->renameCollection(opCtx, targetNs, tmpName, stayTemp);
if (!status.isOK())
return status;
@@ -118,8 +118,8 @@ Status renameTargetCollectionToTmp(OperationContext* opCtx,
}
Status renameCollectionCommon(OperationContext* opCtx,
- const NamespaceString& source,
- const NamespaceString& target,
+ NamespaceString source, // Don't use a ref, as it's going to change.
+ NamespaceString target,
OptionalCollectionUUID targetUUID,
repl::OpTime renameOpTimeFromApplyOps,
const RenameCollectionOptions& options) {
@@ -272,7 +272,7 @@ Status renameCollectionCommon(OperationContext* opCtx,
{
// No logOp necessary because the entire renameCollection command is one logOp.
repl::UnreplicatedWritesBlock uwb(opCtx);
- status = targetDB->renameCollection(opCtx, source.ns(), target.ns(), stayTemp);
+ status = targetDB->renameCollection(opCtx, source, target, stayTemp);
if (!status.isOK()) {
return status;
}
@@ -360,12 +360,12 @@ Status renameCollectionCommon(OperationContext* opCtx,
IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(
targetColl->uuid().get());
- status = targetDB->dropCollection(opCtx, targetColl->ns().ns(), renameOpTime);
+ status = targetDB->dropCollection(opCtx, targetColl->ns(), renameOpTime);
if (!status.isOK()) {
return status;
}
- status = targetDB->renameCollection(opCtx, source.ns(), target.ns(), options.stayTemp);
+ status = targetDB->renameCollection(opCtx, source, target, options.stayTemp);
if (!status.isOK()) {
return status;
}
@@ -415,7 +415,7 @@ Status renameCollectionCommon(OperationContext* opCtx,
writeConflictRetry(opCtx, "renameCollection", tmpName.ns(), [&] {
WriteUnitOfWork wunit(opCtx);
- tmpColl = targetDB->createCollection(opCtx, tmpName.ns(), collectionOptions);
+ tmpColl = targetDB->createCollection(opCtx, tmpName, collectionOptions);
wunit.commit();
});
}
diff --git a/src/mongo/db/catalog/rename_collection_test.cpp b/src/mongo/db/catalog/rename_collection_test.cpp
index a3a6a9a46cc..5a0354f94bd 100644
--- a/src/mongo/db/catalog/rename_collection_test.cpp
+++ b/src/mongo/db/catalog/rename_collection_test.cpp
@@ -329,8 +329,8 @@ void _createCollection(OperationContext* opCtx,
<< " does not exist.";
WriteUnitOfWork wuow(opCtx);
- ASSERT_TRUE(db->createCollection(opCtx, nss.ns(), options))
- << "Failed to create collection " << nss << " due to unknown error.";
+ ASSERT_TRUE(db->createCollection(opCtx, nss, options)) << "Failed to create collection "
+ << nss << " due to unknown error.";
wuow.commit();
});
@@ -459,7 +459,7 @@ Collection* _getCollection_inlock(OperationContext* opCtx, const NamespaceString
if (!db) {
return nullptr;
}
- return db->getCollection(opCtx, nss.ns());
+ return db->getCollection(opCtx, nss);
}
TEST_F(RenameCollectionTest, RenameCollectionReturnsNamespaceNotFoundIfDatabaseDoesNotExist) {
diff --git a/src/mongo/db/catalog/uuid_catalog.cpp b/src/mongo/db/catalog/uuid_catalog.cpp
index f45f505ecaa..fb48f34fbc8 100644
--- a/src/mongo/db/catalog/uuid_catalog.cpp
+++ b/src/mongo/db/catalog/uuid_catalog.cpp
@@ -510,7 +510,7 @@ std::unique_ptr<Collection> UUIDCatalog::deregisterCollectionObject(CollectionUU
LOG(0) << "Deregistering collection object " << ns << " with UUID " << uuid;
- // Make sure collection object eixsts.
+ // Make sure collection object exists.
invariant(_collections.find(ns) != _collections.end());
invariant(_orderedCollections.find(dbIdPair) != _orderedCollections.end());
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index b52ef891603..75b1e8d8d60 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -595,7 +595,7 @@ Status Cloner::createCollectionsForDb(
opCtx->checkForInterrupt();
WriteUnitOfWork wunit(opCtx);
- Collection* collection = db->getCollection(opCtx, nss.ns());
+ Collection* collection = db->getCollection(opCtx, nss);
if (collection) {
if (!params.shardedColl) {
// If the collection is unsharded then we want to fail when a collection
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index da34fa509ad..c23ff7f44fc 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -320,7 +320,7 @@ bool runCreateIndexes(OperationContext* opCtx,
writeConflictRetry(opCtx, kCommandName, ns.ns(), [&] {
WriteUnitOfWork wunit(opCtx);
- collection = db->createCollection(opCtx, ns.ns(), CollectionOptions());
+ collection = db->createCollection(opCtx, ns, CollectionOptions());
invariant(collection);
wunit.commit();
});
@@ -561,7 +561,7 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
options.uuid = collectionUUID;
writeConflictRetry(opCtx, kCommandName, ns.ns(), [&] {
WriteUnitOfWork wunit(opCtx);
- collection = db->createCollection(opCtx, ns.ns(), options);
+ collection = db->createCollection(opCtx, ns, options);
invariant(collection,
str::stream() << "Failed to create collection " << ns.ns()
<< " during index creation: "
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index db939ea91b6..20e14103f79 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -262,7 +262,7 @@ public:
}
// Compute the hash for this collection.
- std::string hash = _hashCollection(opCtx, db, collNss.toString());
+ std::string hash = _hashCollection(opCtx, db, collNss);
collectionToHashMap[collNss.coll().toString()] = hash;
@@ -308,13 +308,9 @@ public:
}
private:
- std::string _hashCollection(OperationContext* opCtx,
- Database* db,
- const std::string& fullCollectionName) {
+ std::string _hashCollection(OperationContext* opCtx, Database* db, const NamespaceString& nss) {
- NamespaceString ns(fullCollectionName);
-
- Collection* collection = db->getCollection(opCtx, ns);
+ Collection* collection = db->getCollection(opCtx, nss);
invariant(collection);
boost::optional<Lock::CollectionLock> collLock;
@@ -324,7 +320,7 @@ private:
// intent mode. We need to also acquire the collection lock in intent mode to ensure
// reading from the consistent snapshot doesn't overlap with any catalog operations on
// the collection.
- invariant(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_IS));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IS));
auto minSnapshot = collection->getMinimumVisibleSnapshot();
auto mySnapshot = opCtx->recoveryUnit()->getPointInTimeReadTimestamp();
@@ -357,9 +353,9 @@ private:
InternalPlanner::IXSCAN_FETCH);
} else if (collection->isCapped()) {
exec = InternalPlanner::collectionScan(
- opCtx, fullCollectionName, collection, PlanExecutor::NO_YIELD);
+ opCtx, nss.ns(), collection, PlanExecutor::NO_YIELD);
} else {
- log() << "can't find _id index for: " << fullCollectionName;
+ log() << "can't find _id index for: " << nss;
return "no _id _index";
}
@@ -375,7 +371,7 @@ private:
n++;
}
if (PlanExecutor::IS_EOF != state) {
- warning() << "error while hashing, db dropped? ns=" << fullCollectionName;
+ warning() << "error while hashing, db dropped? ns=" << nss;
uasserted(34371,
"Plan executor error while running dbHash command: " +
WorkingSetCommon::toStatusString(c));
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 1327d5fe560..083a682cbab 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -188,7 +188,7 @@ void dropTempCollections(OperationContext* cleanupOpCtx,
IndexBuildsCoordinator::get(cleanupOpCtx)
->assertNoIndexBuildInProgForCollection(collection->uuid().get());
WriteUnitOfWork wunit(cleanupOpCtx);
- uassertStatusOK(db->dropCollection(cleanupOpCtx, tempNamespace.ns()));
+ uassertStatusOK(db->dropCollection(cleanupOpCtx, tempNamespace));
wunit.commit();
}
}
@@ -207,7 +207,7 @@ void dropTempCollections(OperationContext* cleanupOpCtx,
IndexBuildsCoordinator::get(cleanupOpCtx)
->assertNoIndexBuildInProgForCollection(collection->uuid().get());
WriteUnitOfWork wunit(cleanupOpCtx);
- uassertStatusOK(db->dropCollection(cleanupOpCtx, incLong.ns()));
+ uassertStatusOK(db->dropCollection(cleanupOpCtx, incLong));
wunit.commit();
}
}
@@ -517,7 +517,7 @@ void State::prepTempCollection() {
WriteUnitOfWork wuow(_opCtx);
auto incColl = db->createCollection(
- _opCtx, _config.incLong.ns(), options, false /* force no _id index */);
+ _opCtx, _config.incLong, options, false /* force no _id index */);
auto rawIndexSpec =
BSON("key" << BSON("0" << 1) << "ns" << _config.incLong.ns() << "name"
@@ -597,7 +597,7 @@ void State::prepTempCollection() {
WriteUnitOfWork wuow(_opCtx);
auto const tempColl =
- db->createCollection(_opCtx, _config.tempNamespace.ns(), options, buildIdIndex);
+ db->createCollection(_opCtx, _config.tempNamespace, options, buildIdIndex);
for (const auto& indexToInsert : indexesToInsert) {
try {
@@ -811,7 +811,7 @@ void State::insert(const NamespaceString& nss, const BSONObj& o) {
uassert(
ErrorCodes::PrimarySteppedDown,
str::stream() << "no longer primary while inserting mapReduce result into collection: "
- << nss.ns()
+ << nss
<< ": "
<< redact(o),
repl::ReplicationCoordinator::get(_opCtx)->canAcceptWritesFor(_opCtx, nss));
@@ -1443,7 +1443,7 @@ public:
State state(opCtx, config);
if (!state.sourceExists()) {
uasserted(ErrorCodes::NamespaceNotFound,
- str::stream() << "namespace does not exist: " << config.nss.ns());
+ str::stream() << "namespace does not exist: " << config.nss);
}
state.init();
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 29a3be76ea2..1410bb5a3bc 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -91,7 +91,7 @@ public:
UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
Collection* collection = db->getCollection(opCtx, nss);
if (!collection) {
- collection = db->createCollection(opCtx, nss.ns());
+ collection = db->createCollection(opCtx, nss);
if (!collection) {
errmsg = "could not create collection";
return false;
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index ea7280d114e..5a4abebd032 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -138,7 +138,7 @@ bool Helpers::findById(OperationContext* opCtx,
bool* indexFound) {
invariant(database);
- Collection* collection = database->getCollection(opCtx, ns);
+ Collection* collection = database->getCollection(opCtx, NamespaceString(ns));
if (!collection) {
return false;
}
diff --git a/src/mongo/db/index_build_entry_helpers.cpp b/src/mongo/db/index_build_entry_helpers.cpp
index df3ef2ebfb2..5d5048945a1 100644
--- a/src/mongo/db/index_build_entry_helpers.cpp
+++ b/src/mongo/db/index_build_entry_helpers.cpp
@@ -108,7 +108,7 @@ void ensureIndexBuildEntriesNamespaceExists(OperationContext* opCtx) {
WriteUnitOfWork wuow(opCtx);
CollectionOptions options;
Collection* collection = db->createCollection(
- opCtx, NamespaceString::kIndexBuildEntryNamespace.ns(), options);
+ opCtx, NamespaceString::kIndexBuildEntryNamespace, options);
// Ensure the collection exists.
invariant(collection);
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index a654feee30a..a82e67a0e6c 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -250,7 +250,7 @@ Status createProfileCollection(OperationContext* opCtx, Database* db) {
WriteUnitOfWork wunit(opCtx);
repl::UnreplicatedWritesBlock uwb(opCtx);
- invariant(db->createCollection(opCtx, dbProfilingNS.ns(), collectionOptions));
+ invariant(db->createCollection(opCtx, dbProfilingNS, collectionOptions));
wunit.commit();
return Status::OK();
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index d8426660df1..0b5cdaaac89 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -795,7 +795,7 @@ void OpObserverImpl::onCollMod(OperationContext* opCtx,
if (!db) {
return;
}
- Collection* coll = db->getCollection(opCtx, nss.ns());
+ Collection* coll = db->getCollection(opCtx, nss);
invariant(coll->uuid());
invariant(coll->uuid() == uuid);
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 84f7e31e565..e42c6786f2a 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -81,11 +81,11 @@ UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest&
if (userInitiatedWritesAndNotPrimary) {
uassertStatusOK(Status(ErrorCodes::PrimarySteppedDown,
str::stream() << "Not primary while creating collection "
- << nsString.ns()
+ << nsString
<< " during upsert"));
}
WriteUnitOfWork wuow(opCtx);
- collection = db->createCollection(opCtx, nsString.ns(), CollectionOptions());
+ collection = db->createCollection(opCtx, nsString, CollectionOptions());
invariant(collection);
wuow.commit();
});
diff --git a/src/mongo/db/pipeline/process_interface_standalone.cpp b/src/mongo/db/pipeline/process_interface_standalone.cpp
index 0a60ce26907..a0d787794f4 100644
--- a/src/mongo/db/pipeline/process_interface_standalone.cpp
+++ b/src/mongo/db/pipeline/process_interface_standalone.cpp
@@ -288,7 +288,7 @@ void MongoInterfaceStandalone::appendLatencyStats(OperationContext* opCtx,
const NamespaceString& nss,
bool includeHistograms,
BSONObjBuilder* builder) const {
- Top::get(opCtx->getServiceContext()).appendLatencyStats(nss.ns(), includeHistograms, builder);
+ Top::get(opCtx->getServiceContext()).appendLatencyStats(nss, includeHistograms, builder);
}
Status MongoInterfaceStandalone::appendStorageStats(OperationContext* opCtx,
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index 635d9e7a2bb..86d52397346 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -149,7 +149,7 @@ Status repairCollections(OperationContext* opCtx,
log() << "Repairing collection " << nss;
- Status status = engine->repairRecordStore(opCtx, nss.ns());
+ Status status = engine->repairRecordStore(opCtx, nss);
if (!status.isOK())
return status;
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 7041bddc2bf..1a7e90075e0 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -703,14 +703,16 @@ long long getNewOplogSizeBytes(OperationContext* opCtx, const ReplSettings& repl
} // namespace
-void createOplog(OperationContext* opCtx, const std::string& oplogCollectionName, bool isReplSet) {
+void createOplog(OperationContext* opCtx,
+ const NamespaceString& oplogCollectionName,
+ bool isReplSet) {
Lock::GlobalWrite lk(opCtx);
const auto service = opCtx->getServiceContext();
const ReplSettings& replSettings = ReplicationCoordinator::get(opCtx)->getSettings();
- OldClientContext ctx(opCtx, oplogCollectionName);
+ OldClientContext ctx(opCtx, oplogCollectionName.ns());
Collection* collection = ctx.db()->getCollection(opCtx, oplogCollectionName);
if (collection) {
@@ -745,7 +747,7 @@ void createOplog(OperationContext* opCtx, const std::string& oplogCollectionName
options.cappedSize = sz;
options.autoIndexId = CollectionOptions::NO;
- writeConflictRetry(opCtx, "createCollection", oplogCollectionName, [&] {
+ writeConflictRetry(opCtx, "createCollection", oplogCollectionName.ns(), [&] {
WriteUnitOfWork uow(opCtx);
invariant(ctx.db()->createCollection(opCtx, oplogCollectionName, options));
acquireOplogCollectionForLogging(opCtx);
@@ -765,7 +767,7 @@ void createOplog(OperationContext* opCtx, const std::string& oplogCollectionName
void createOplog(OperationContext* opCtx) {
const auto isReplSet = ReplicationCoordinator::get(opCtx)->getReplicationMode() ==
ReplicationCoordinator::modeReplSet;
- createOplog(opCtx, LocalOplogInfo::get(opCtx)->getOplogCollectionName().ns(), isReplSet);
+ createOplog(opCtx, LocalOplogInfo::get(opCtx)->getOplogCollectionName(), isReplSet);
}
std::vector<OplogSlot> getNextOpTimes(OperationContext* opCtx, std::size_t count) {
@@ -1987,10 +1989,11 @@ void setNewTimestamp(ServiceContext* service, const Timestamp& newTime) {
LocalOplogInfo::get(service)->setNewTimestamp(service, newTime);
}
-void initTimestampFromOplog(OperationContext* opCtx, const std::string& oplogNS) {
+void initTimestampFromOplog(OperationContext* opCtx, const NamespaceString& oplogNss) {
DBDirectClient c(opCtx);
static const BSONObj reverseNaturalObj = BSON("$natural" << -1);
- BSONObj lastOp = c.findOne(oplogNS, Query().sort(reverseNaturalObj), NULL, QueryOption_SlaveOk);
+ BSONObj lastOp =
+ c.findOne(oplogNss.ns(), Query().sort(reverseNaturalObj), NULL, QueryOption_SlaveOk);
if (!lastOp.isEmpty()) {
LOG(1) << "replSet setting last Timestamp";
diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h
index a586510e0b5..926665fb76f 100644
--- a/src/mongo/db/repl/oplog.h
+++ b/src/mongo/db/repl/oplog.h
@@ -84,7 +84,9 @@ struct OplogLink {
* If the collection already exists (and isReplSet is false),
* set the 'last' Timestamp from the last entry of the oplog collection (side effect!)
*/
-void createOplog(OperationContext* opCtx, const std::string& oplogCollectionName, bool isReplSet);
+void createOplog(OperationContext* opCtx,
+ const NamespaceString& oplogCollectionName,
+ bool isReplSet);
/*
* Shortcut for above function using oplogCollectionName = _oplogCollectionName,
@@ -232,7 +234,7 @@ Status applyCommand_inlock(OperationContext* opCtx,
/**
* Initializes the global Timestamp with the value from the timestamp of the last oplog entry.
*/
-void initTimestampFromOplog(OperationContext* opCtx, const std::string& oplogNS);
+void initTimestampFromOplog(OperationContext* opCtx, const NamespaceString& oplogNS);
/**
* Sets the global Timestamp to be 'newTime'.
diff --git a/src/mongo/db/repl/rollback_test_fixture.cpp b/src/mongo/db/repl/rollback_test_fixture.cpp
index e5548d8ad33..a2de3b5b0b6 100644
--- a/src/mongo/db/repl/rollback_test_fixture.cpp
+++ b/src/mongo/db/repl/rollback_test_fixture.cpp
@@ -195,8 +195,8 @@ Collection* RollbackTest::_createCollection(OperationContext* opCtx,
auto databaseHolder = DatabaseHolder::get(opCtx);
auto db = databaseHolder->openDb(opCtx, nss.db());
ASSERT_TRUE(db);
- db->dropCollection(opCtx, nss.ns()).transitional_ignore();
- auto coll = db->createCollection(opCtx, nss.ns(), options);
+ db->dropCollection(opCtx, nss).transitional_ignore();
+ auto coll = db->createCollection(opCtx, nss, options);
ASSERT_TRUE(coll);
wuow.commit();
return coll;
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index abe2ba25a3a..d072c164ca1 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -345,7 +345,7 @@ int _testRollbackDelete(OperationContext* opCtx,
auto databaseHolder = DatabaseHolder::get(opCtx);
auto db = databaseHolder->getDb(opCtx, "test");
ASSERT_TRUE(db);
- auto collection = db->getCollection(opCtx, "test.t");
+ auto collection = db->getCollection(opCtx, NamespaceString("test.t"));
if (!collection) {
return -1;
}
@@ -1572,9 +1572,10 @@ TEST_F(RSRollbackTest, RollbackApplyOpsCommand) {
{
AutoGetOrCreateDb autoDb(_opCtx.get(), "test", MODE_X);
mongo::WriteUnitOfWork wuow(_opCtx.get());
- coll = autoDb.getDb()->getCollection(_opCtx.get(), "test.t");
+ coll = autoDb.getDb()->getCollection(_opCtx.get(), NamespaceString("test.t"));
if (!coll) {
- coll = autoDb.getDb()->createCollection(_opCtx.get(), "test.t", options);
+ coll =
+ autoDb.getDb()->createCollection(_opCtx.get(), NamespaceString("test.t"), options);
}
ASSERT(coll);
OpDebug* const nullOpDebug = nullptr;
@@ -1765,7 +1766,7 @@ TEST_F(RSRollbackTest, RollbackCreateCollectionCommand) {
auto databaseHolder = DatabaseHolder::get(_opCtx.get());
auto db = databaseHolder->getDb(_opCtx.get(), "test");
ASSERT_TRUE(db);
- ASSERT_FALSE(db->getCollection(_opCtx.get(), "test.t"));
+ ASSERT_FALSE(db->getCollection(_opCtx.get(), NamespaceString("test.t")));
}
}
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 34128f17e80..44c2904ba1d 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -230,7 +230,7 @@ StorageInterfaceImpl::createCollectionForBulkLoading(
{
// Create the collection.
WriteUnitOfWork wunit(opCtx.get());
- fassert(40332, db.getDb()->createCollection(opCtx.get(), nss.ns(), options, false));
+ fassert(40332, db.getDb()->createCollection(opCtx.get(), nss, options, false));
wunit.commit();
}
@@ -411,7 +411,7 @@ Status StorageInterfaceImpl::dropReplicatedDatabases(OperationContext* opCtx) {
}
Status StorageInterfaceImpl::createOplog(OperationContext* opCtx, const NamespaceString& nss) {
- mongo::repl::createOplog(opCtx, nss.ns(), true);
+ mongo::repl::createOplog(opCtx, nss, true);
return Status::OK();
}
@@ -444,7 +444,7 @@ Status StorageInterfaceImpl::createCollection(OperationContext* opCtx,
}
WriteUnitOfWork wuow(opCtx);
try {
- auto coll = db->createCollection(opCtx, nss.ns(), options);
+ auto coll = db->createCollection(opCtx, nss, options);
invariant(coll);
} catch (const AssertionException& ex) {
return ex.toStatus();
@@ -516,8 +516,7 @@ Status StorageInterfaceImpl::renameCollection(OperationContext* opCtx,
<< " not found.");
}
WriteUnitOfWork wunit(opCtx);
- const auto status =
- autoDB.getDb()->renameCollection(opCtx, fromNS.ns(), toNS.ns(), stayTemp);
+ const auto status = autoDB.getDb()->renameCollection(opCtx, fromNS, toNS, stayTemp);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index b2c58004a5c..ecfed99d14d 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -122,7 +122,7 @@ void createCollection(OperationContext* opCtx,
auto db = ctx.db();
ASSERT_TRUE(db);
mongo::WriteUnitOfWork wuow(opCtx);
- auto coll = db->createCollection(opCtx, nss.ns(), options);
+ auto coll = db->createCollection(opCtx, nss, options);
ASSERT_TRUE(coll);
wuow.commit();
});
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 485aee900c8..befd6b94771 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -488,23 +488,24 @@ public:
return it->second;
}
- auto collProperties = getCollectionPropertiesImpl(opCtx, ns.key());
+ auto collProperties = getCollectionPropertiesImpl(opCtx, NamespaceString(ns.key()));
_cache[ns] = collProperties;
return collProperties;
}
private:
- CollectionProperties getCollectionPropertiesImpl(OperationContext* opCtx, StringData ns) {
+ CollectionProperties getCollectionPropertiesImpl(OperationContext* opCtx,
+ const NamespaceString& nss) {
CollectionProperties collProperties;
- Lock::DBLock dbLock(opCtx, nsToDatabaseSubstring(ns), MODE_IS);
+ Lock::DBLock dbLock(opCtx, nss.db(), MODE_IS);
auto databaseHolder = DatabaseHolder::get(opCtx);
- auto db = databaseHolder->getDb(opCtx, ns);
+ auto db = databaseHolder->getDb(opCtx, nss.db());
if (!db) {
return collProperties;
}
- auto collection = db->getCollection(opCtx, ns);
+ auto collection = db->getCollection(opCtx, nss);
if (!collection) {
return collProperties;
}
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index cb3fc8011b3..9151c223afa 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -187,7 +187,7 @@ void createCollection(OperationContext* opCtx,
auto db = ctx.db();
ASSERT_TRUE(db);
mongo::WriteUnitOfWork wuow(opCtx);
- auto coll = db->createCollection(opCtx, nss.ns(), options);
+ auto coll = db->createCollection(opCtx, nss, options);
ASSERT_TRUE(coll);
wuow.commit();
});
diff --git a/src/mongo/db/stats/top.cpp b/src/mongo/db/stats/top.cpp
index 1043a3401be..e9fea54b0cc 100644
--- a/src/mongo/db/stats/top.cpp
+++ b/src/mongo/db/stats/top.cpp
@@ -140,14 +140,14 @@ void Top::_record(OperationContext* opCtx,
}
}
-void Top::collectionDropped(StringData ns, bool databaseDropped) {
+void Top::collectionDropped(const NamespaceString& nss, bool databaseDropped) {
stdx::lock_guard<SimpleMutex> lk(_lock);
- _usage.erase(ns);
+ _usage.erase(nss.ns());
if (!databaseDropped) {
// If a collection drop occurred, there will be a subsequent call to record for this
// collection namespace which must be ignored. This does not apply to a database drop.
- _collDropNs.insert(ns.toString());
+ _collDropNs.insert(nss.toString());
}
}
@@ -199,12 +199,14 @@ void Top::_appendStatsEntry(BSONObjBuilder& b, const char* statsName, const Usag
bb.done();
}
-void Top::appendLatencyStats(StringData ns, bool includeHistograms, BSONObjBuilder* builder) {
- auto hashedNs = UsageMap::hasher().hashed_key(ns);
+void Top::appendLatencyStats(const NamespaceString& nss,
+ bool includeHistograms,
+ BSONObjBuilder* builder) {
+ auto hashedNs = UsageMap::hasher().hashed_key(nss.ns());
stdx::lock_guard<SimpleMutex> lk(_lock);
BSONObjBuilder latencyStatsBuilder;
_usage[hashedNs].opLatencyHistogram.append(includeHistograms, &latencyStatsBuilder);
- builder->append("ns", ns);
+ builder->append("ns", nss.ns());
builder->append("latencyStats", latencyStatsBuilder.obj());
}
diff --git a/src/mongo/db/stats/top.h b/src/mongo/db/stats/top.h
index f1586a2b05e..2fcbb620f6c 100644
--- a/src/mongo/db/stats/top.h
+++ b/src/mongo/db/stats/top.h
@@ -108,12 +108,14 @@ public:
void cloneMap(UsageMap& out) const;
- void collectionDropped(StringData ns, bool databaseDropped = false);
+ void collectionDropped(const NamespaceString& nss, bool databaseDropped = false);
/**
* Appends the collection-level latency statistics
*/
- void appendLatencyStats(StringData ns, bool includeHistograms, BSONObjBuilder* builder);
+ void appendLatencyStats(const NamespaceString& nss,
+ bool includeHistograms,
+ BSONObjBuilder* builder);
/**
* Increments the global histogram only if the operation came from a user.
diff --git a/src/mongo/db/stats/top_test.cpp b/src/mongo/db/stats/top_test.cpp
index 2125506625b..4c2a459d44a 100644
--- a/src/mongo/db/stats/top_test.cpp
+++ b/src/mongo/db/stats/top_test.cpp
@@ -37,7 +37,7 @@ namespace {
using namespace mongo;
TEST(TopTest, CollectionDropped) {
- Top().collectionDropped("coll");
+ Top().collectionDropped(NamespaceString("test.coll"));
}
} // namespace
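Editor's note: for reference, a minimal sketch of how a caller adapts to the NamespaceString-based Top interface changed above; the Top::get() lookup and the surrounding helper are illustrative assumptions, not part of this patch.
// Illustration only: callers now pass a parsed NamespaceString; Top keys its
// internal UsageMap by nss.ns(), as the hunks above show.
void appendCollStats(OperationContext* opCtx, BSONObjBuilder* builder) {
    const NamespaceString nss("test.coll");
    Top& top = Top::get(opCtx->getServiceContext());  // assumed accessor
    top.appendLatencyStats(nss, /*includeHistograms=*/false, builder);
    top.collectionDropped(nss);  // databaseDropped defaults to false
}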
diff --git a/src/mongo/db/storage/kv/kv_catalog.cpp b/src/mongo/db/storage/kv/kv_catalog.cpp
index 534a83c9e40..69b39c30192 100644
--- a/src/mongo/db/storage/kv/kv_catalog.cpp
+++ b/src/mongo/db/storage/kv/kv_catalog.cpp
@@ -361,11 +361,11 @@ std::string KVCatalog::getFilesystemPathForDb(const std::string& dbName) const {
}
}
-std::string KVCatalog::_newUniqueIdent(StringData ns, const char* kind) {
+std::string KVCatalog::_newUniqueIdent(const NamespaceString& nss, const char* kind) {
// If this changes to not put _rand at the end, _hasEntryCollidingWithRand will need fixing.
StringBuilder buf;
if (_directoryPerDb) {
- buf << escapeDbName(nsToDatabaseSubstring(ns)) << '/';
+ buf << escapeDbName(nss.db()) << '/';
}
buf << kind;
buf << (_directoryForIndexes ? '/' : '-');
@@ -407,11 +407,13 @@ void KVCatalog::init(OperationContext* opCtx) {
}
}
-void KVCatalog::getAllCollections(std::vector<std::string>* out) const {
+std::vector<NamespaceString> KVCatalog::getAllCollections() const {
stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ std::vector<NamespaceString> result;
for (NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it) {
- out->push_back(it->first);
+ result.push_back(NamespaceString(it->first));
}
+ return result;
}
Status KVCatalog::_addEntry(OperationContext* opCtx,
@@ -420,7 +422,7 @@ Status KVCatalog::_addEntry(OperationContext* opCtx,
KVPrefix prefix) {
invariant(opCtx->lockState()->isDbLockedForMode(nss.db(), MODE_IX));
- const string ident = _newUniqueIdent(nss.ns(), "collection");
+ const string ident = _newUniqueIdent(nss, "collection");
stdx::lock_guard<stdx::mutex> lk(_identsLock);
Entry& old = _idents[nss.toString()];
@@ -451,31 +453,33 @@ Status KVCatalog::_addEntry(OperationContext* opCtx,
return Status::OK();
}
-std::string KVCatalog::getCollectionIdent(StringData ns) const {
+std::string KVCatalog::getCollectionIdent(const NamespaceString& nss) const {
stdx::lock_guard<stdx::mutex> lk(_identsLock);
- NSToIdentMap::const_iterator it = _idents.find(ns.toString());
+ NSToIdentMap::const_iterator it = _idents.find(nss.toString());
invariant(it != _idents.end());
return it->second.ident;
}
std::string KVCatalog::getIndexIdent(OperationContext* opCtx,
- StringData ns,
+ const NamespaceString& nss,
StringData idxName) const {
- BSONObj obj = _findEntry(opCtx, ns);
+ BSONObj obj = _findEntry(opCtx, nss);
BSONObj idxIdent = obj["idxIdent"].Obj();
return idxIdent[idxName].String();
}
-BSONObj KVCatalog::_findEntry(OperationContext* opCtx, StringData ns, RecordId* out) const {
+BSONObj KVCatalog::_findEntry(OperationContext* opCtx,
+ const NamespaceString& nss,
+ RecordId* out) const {
RecordId dl;
{
stdx::lock_guard<stdx::mutex> lk(_identsLock);
- NSToIdentMap::const_iterator it = _idents.find(ns.toString());
- invariant(it != _idents.end(), str::stream() << "Did not find collection. Ns: " << ns);
+ NSToIdentMap::const_iterator it = _idents.find(nss.toString());
+ invariant(it != _idents.end(), str::stream() << "Did not find collection. Ns: " << nss);
dl = it->second.storedLoc;
}
- LOG(3) << "looking up metadata for: " << ns << " @ " << dl;
+ LOG(3) << "looking up metadata for: " << nss << " @ " << dl;
RecordData data;
if (!_rs->findRecord(opCtx, dl, &data)) {
// since the in memory meta data isn't managed with mvcc
@@ -491,8 +495,8 @@ BSONObj KVCatalog::_findEntry(OperationContext* opCtx, StringData ns, RecordId*
}
BSONCollectionCatalogEntry::MetaData KVCatalog::getMetaData(OperationContext* opCtx,
- StringData ns) const {
- BSONObj obj = _findEntry(opCtx, ns);
+ const NamespaceString& nss) const {
+ BSONObj obj = _findEntry(opCtx, nss);
LOG(3) << " fetched CCE metadata: " << obj;
BSONCollectionCatalogEntry::MetaData md;
const BSONElement mdElement = obj["md"];
@@ -504,10 +508,10 @@ BSONCollectionCatalogEntry::MetaData KVCatalog::getMetaData(OperationContext* op
}
void KVCatalog::putMetaData(OperationContext* opCtx,
- StringData ns,
+ const NamespaceString& nss,
BSONCollectionCatalogEntry::MetaData& md) {
RecordId loc;
- BSONObj obj = _findEntry(opCtx, ns, &loc);
+ BSONObj obj = _findEntry(opCtx, nss, &loc);
{
// rebuilt doc
@@ -528,7 +532,7 @@ void KVCatalog::putMetaData(OperationContext* opCtx,
continue;
}
// missing, create new
- newIdentMap.append(name, _newUniqueIdent(ns, "index"));
+ newIdentMap.append(name, _newUniqueIdent(nss, "index"));
}
b.append("idxIdent", newIdentMap.obj());
@@ -543,19 +547,19 @@ void KVCatalog::putMetaData(OperationContext* opCtx,
}
Status KVCatalog::_replaceEntry(OperationContext* opCtx,
- StringData fromNS,
- StringData toNS,
+ const NamespaceString& fromNss,
+ const NamespaceString& toNss,
bool stayTemp) {
RecordId loc;
- BSONObj old = _findEntry(opCtx, fromNS, &loc).getOwned();
+ BSONObj old = _findEntry(opCtx, fromNss, &loc).getOwned();
{
BSONObjBuilder b;
- b.append("ns", toNS);
+ b.append("ns", toNss.ns());
BSONCollectionCatalogEntry::MetaData md;
md.parse(old["md"].Obj());
- md.rename(toNS);
+ md.rename(toNss.ns());
if (!stayTemp)
md.options.temp = false;
b.append("md", md.toBSON());
@@ -568,29 +572,30 @@ Status KVCatalog::_replaceEntry(OperationContext* opCtx,
}
stdx::lock_guard<stdx::mutex> lk(_identsLock);
- const NSToIdentMap::iterator fromIt = _idents.find(fromNS.toString());
+ const NSToIdentMap::iterator fromIt = _idents.find(fromNss.toString());
invariant(fromIt != _idents.end());
- opCtx->recoveryUnit()->registerChange(new RemoveIdentChange(this, fromNS, fromIt->second));
- opCtx->recoveryUnit()->registerChange(new AddIdentChange(this, toNS));
+ opCtx->recoveryUnit()->registerChange(
+ new RemoveIdentChange(this, fromNss.ns(), fromIt->second));
+ opCtx->recoveryUnit()->registerChange(new AddIdentChange(this, toNss.ns()));
_idents.erase(fromIt);
- _idents[toNS.toString()] = Entry(old["ident"].String(), loc);
+ _idents[toNss.toString()] = Entry(old["ident"].String(), loc);
return Status::OK();
}
-Status KVCatalog::_removeEntry(OperationContext* opCtx, StringData ns) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(NamespaceString(ns), MODE_X));
+Status KVCatalog::_removeEntry(OperationContext* opCtx, const NamespaceString& nss) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X));
stdx::lock_guard<stdx::mutex> lk(_identsLock);
- const NSToIdentMap::iterator it = _idents.find(ns.toString());
+ const NSToIdentMap::iterator it = _idents.find(nss.toString());
if (it == _idents.end()) {
return Status(ErrorCodes::NamespaceNotFound, "collection not found");
}
- opCtx->recoveryUnit()->registerChange(new RemoveIdentChange(this, ns, it->second));
+ opCtx->recoveryUnit()->registerChange(new RemoveIdentChange(this, nss.ns(), it->second));
- LOG(1) << "deleting metadata for " << ns << " @ " << it->second.storedLoc;
+ LOG(1) << "deleting metadata for " << nss << " @ " << it->second.storedLoc;
_rs->deleteRecord(opCtx, it->second.storedLoc);
_idents.erase(it);
@@ -702,14 +707,16 @@ StatusWith<std::string> KVCatalog::newOrphanedIdent(OperationContext* opCtx, std
return StatusWith<std::string>(std::move(ns));
}
-void KVCatalog::initCollection(OperationContext* opCtx, const std::string& ns, bool forRepair) {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, ns);
+void KVCatalog::initCollection(OperationContext* opCtx,
+ const NamespaceString& nss,
+ bool forRepair) {
+ BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, nss);
uassert(ErrorCodes::MustDowngrade,
- str::stream() << "Collection does not have UUID in KVCatalog. Collection: " << ns,
+ str::stream() << "Collection does not have UUID in KVCatalog. Collection: " << nss,
md.options.uuid);
auto uuid = md.options.uuid.get();
- auto ident = getCollectionIdent(ns);
+ auto ident = getCollectionIdent(nss);
std::unique_ptr<RecordStore> rs;
if (forRepair) {
@@ -717,21 +724,21 @@ void KVCatalog::initCollection(OperationContext* opCtx, const std::string& ns, b
// repaired. This also ensures that if we try to use it, it will blow up.
rs = nullptr;
} else {
- rs = _engine->getEngine()->getGroupedRecordStore(opCtx, ns, ident, md.options, md.prefix);
+ rs = _engine->getEngine()->getGroupedRecordStore(
+ opCtx, nss.ns(), ident, md.options, md.prefix);
invariant(rs);
}
UUIDCatalog::get(getGlobalServiceContext())
- .registerCatalogEntry(
- uuid,
- std::make_unique<KVCollectionCatalogEntry>(_engine, this, ns, ident, std::move(rs)));
+ .registerCatalogEntry(uuid,
+ std::make_unique<KVCollectionCatalogEntry>(
+ _engine, this, nss.ns(), ident, std::move(rs)));
}
-void KVCatalog::reinitCollectionAfterRepair(OperationContext* opCtx, const std::string& ns) {
- auto nss = NamespaceString(ns);
+void KVCatalog::reinitCollectionAfterRepair(OperationContext* opCtx, const NamespaceString& nss) {
auto& uuidCatalog = UUIDCatalog::get(getGlobalServiceContext());
uuidCatalog.deregisterCatalogEntry(uuidCatalog.lookupUUIDByNSS(nss).get());
- initCollection(opCtx, ns, false);
+ initCollection(opCtx, nss, false);
}
Status KVCatalog::createCollection(OperationContext* opCtx,
@@ -753,7 +760,7 @@ Status KVCatalog::createCollection(OperationContext* opCtx,
if (!status.isOK())
return status;
- std::string ident = getCollectionIdent(nss.ns());
+ std::string ident = getCollectionIdent(nss);
status =
_engine->getEngine()->createGroupedRecordStore(opCtx, nss.ns(), ident, options, prefix);
@@ -788,27 +795,26 @@ Status KVCatalog::createCollection(OperationContext* opCtx,
}
Status KVCatalog::renameCollection(OperationContext* opCtx,
- StringData fromNS,
- StringData toNS,
+ const NamespaceString& fromNss,
+ const NamespaceString& toNss,
bool stayTemp) {
- const NamespaceString fromNss(fromNS);
- const NamespaceString toNss(toNS);
// TODO SERVER-39518 : Temporarily comment this out because dropCollection uses
// this function and now it only takes a database IX lock. We can change
// this invariant to IX once renameCollection only MODE_IX as well.
// invariant(opCtx->lockState()->isDbLockedForMode(fromNss.db(), MODE_X));
- const std::string identFrom = _engine->getCatalog()->getCollectionIdent(fromNS);
+ const std::string identFrom = _engine->getCatalog()->getCollectionIdent(fromNss);
- Status status = _engine->getEngine()->okToRename(opCtx, fromNS, toNS, identFrom, nullptr);
+ Status status =
+ _engine->getEngine()->okToRename(opCtx, fromNss.ns(), toNss.ns(), identFrom, nullptr);
if (!status.isOK())
return status;
- status = _replaceEntry(opCtx, fromNS, toNS, stayTemp);
+ status = _replaceEntry(opCtx, fromNss, toNss, stayTemp);
if (!status.isOK())
return status;
- const std::string identTo = getCollectionIdent(toNS);
+ const std::string identTo = getCollectionIdent(toNss);
invariant(identFrom == identTo);
return Status::OK();
@@ -837,8 +843,7 @@ private:
CollectionUUID _uuid;
};
-Status KVCatalog::dropCollection(OperationContext* opCtx, StringData ns) {
- NamespaceString nss(ns);
+Status KVCatalog::dropCollection(OperationContext* opCtx, const NamespaceString& nss) {
invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X));
CollectionCatalogEntry* const entry =
@@ -862,10 +867,10 @@ Status KVCatalog::dropCollection(OperationContext* opCtx, StringData ns) {
invariant(entry->getTotalIndexCount(opCtx) == 0);
- const std::string ident = getCollectionIdent(ns);
+ const std::string ident = getCollectionIdent(nss);
// Remove metadata from mdb_catalog
- Status status = _removeEntry(opCtx, ns);
+ Status status = _removeEntry(opCtx, nss);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/storage/kv/kv_catalog.h b/src/mongo/db/storage/kv/kv_catalog.h
index 62451d73495..23b5c3cb561 100644
--- a/src/mongo/db/storage/kv/kv_catalog.h
+++ b/src/mongo/db/storage/kv/kv_catalog.h
@@ -63,15 +63,18 @@ public:
void init(OperationContext* opCtx);
- void getAllCollections(std::vector<std::string>* out) const;
+ std::vector<NamespaceString> getAllCollections() const;
- std::string getCollectionIdent(StringData ns) const;
+ std::string getCollectionIdent(const NamespaceString& nss) const;
- std::string getIndexIdent(OperationContext* opCtx, StringData ns, StringData idName) const;
+ std::string getIndexIdent(OperationContext* opCtx,
+ const NamespaceString& nss,
+ StringData idName) const;
- BSONCollectionCatalogEntry::MetaData getMetaData(OperationContext* opCtx, StringData ns) const;
+ BSONCollectionCatalogEntry::MetaData getMetaData(OperationContext* opCtx,
+ const NamespaceString& nss) const;
void putMetaData(OperationContext* opCtx,
- StringData ns,
+ const NamespaceString& nss,
BSONCollectionCatalogEntry::MetaData& md);
std::vector<std::string> getAllIdentsForDB(StringData db) const;
@@ -107,9 +110,9 @@ public:
*/
std::string newInternalIdent();
- void initCollection(OperationContext* opCtx, const std::string& ns, bool forRepair);
+ void initCollection(OperationContext* opCtx, const NamespaceString& nss, bool forRepair);
- void reinitCollectionAfterRepair(OperationContext* opCtx, const std::string& ns);
+ void reinitCollectionAfterRepair(OperationContext* opCtx, const NamespaceString& nss);
Status createCollection(OperationContext* opCtx,
const NamespaceString& nss,
@@ -117,11 +120,11 @@ public:
bool allocateDefaultSpace);
Status renameCollection(OperationContext* opCtx,
- StringData fromNS,
- StringData toNS,
+ const NamespaceString& fromNss,
+ const NamespaceString& toNss,
bool stayTemp);
- Status dropCollection(OperationContext* opCtx, StringData ns);
+ Status dropCollection(OperationContext* opCtx, const NamespaceString& nss);
private:
class AddIdentChange;
@@ -132,23 +135,25 @@ private:
friend class KVCatalogTest;
friend class KVStorageEngineTest;
- BSONObj _findEntry(OperationContext* opCtx, StringData ns, RecordId* out = NULL) const;
+ BSONObj _findEntry(OperationContext* opCtx,
+ const NamespaceString& nss,
+ RecordId* out = nullptr) const;
Status _addEntry(OperationContext* opCtx,
- const NamespaceString& ns,
+ const NamespaceString& nss,
const CollectionOptions& options,
KVPrefix prefix);
Status _replaceEntry(OperationContext* opCtx,
- StringData fromNS,
- StringData toNS,
+ const NamespaceString& fromNss,
+ const NamespaceString& toNss,
bool stayTemp);
- Status _removeEntry(OperationContext* opCtx, StringData ns);
+ Status _removeEntry(OperationContext* opCtx, const NamespaceString& nss);
/**
* Generates a new unique identifier for a new "thing".
- * @param ns - the containing ns
+ * @param nss - the containing namespace
* @param kind - what this "thing" is, likely collection or index
*/
- std::string _newUniqueIdent(StringData ns, const char* kind);
+ std::string _newUniqueIdent(const NamespaceString& nss, const char* kind);
// Helpers only used by constructor and init(). Don't call from elsewhere.
static std::string _newRand();
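Editor's note: a hedged sketch of the rewritten KVCatalog surface from the header above; the helper and its parameters are illustrative and assumed to be supplied by the caller, not committed code.
// Illustration only: metadata reads and writes are now keyed by NamespaceString;
// idents for any new index entries in 'md' are still minted internally.
void clearTempFlag(OperationContext* opCtx, KVCatalog* catalog, const NamespaceString& nss) {
    BSONCollectionCatalogEntry::MetaData md = catalog->getMetaData(opCtx, nss);
    md.options.temp = false;
    catalog->putMetaData(opCtx, nss, md);
    invariant(!catalog->getCollectionIdent(nss).empty());
}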
diff --git a/src/mongo/db/storage/kv/kv_catalog_test_fixture.h b/src/mongo/db/storage/kv/kv_catalog_test_fixture.h
index 10f0251a4e3..eaf9af0ba98 100644
--- a/src/mongo/db/storage/kv/kv_catalog_test_fixture.h
+++ b/src/mongo/db/storage/kv/kv_catalog_test_fixture.h
@@ -47,11 +47,12 @@ protected:
StringData toNS,
bool stayTemp,
KVCatalog* catalog) {
- return catalog->_replaceEntry(opCtx, fromNS, toNS, stayTemp);
+ return catalog->_replaceEntry(
+ opCtx, NamespaceString(fromNS), NamespaceString(toNS), stayTemp);
}
Status dropCollection(OperationContext* opCtx, StringData ns, KVCatalog* catalog) {
- return catalog->_removeEntry(opCtx, ns);
+ return catalog->_removeEntry(opCtx, NamespaceString(ns));
}
};
}
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
index 20bad4a699d..7c63a745530 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
@@ -176,7 +176,7 @@ bool KVCollectionCatalogEntry::setIndexIsMultikey(OperationContext* opCtx,
}
}
- _catalog->putMetaData(opCtx, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns(), md);
return true;
}
@@ -196,7 +196,7 @@ void KVCollectionCatalogEntry::setIndexHead(OperationContext* opCtx,
int offset = md.findIndexOffset(indexName);
invariant(offset >= 0);
md.indexes[offset].head = newHead;
- _catalog->putMetaData(opCtx, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns(), md);
}
Status KVCollectionCatalogEntry::removeIndex(OperationContext* opCtx, StringData indexName) {
@@ -205,10 +205,10 @@ Status KVCollectionCatalogEntry::removeIndex(OperationContext* opCtx, StringData
if (md.findIndexOffset(indexName) < 0)
return Status::OK(); // never had the index so nothing to do.
- const string ident = _catalog->getIndexIdent(opCtx, ns().ns(), indexName);
+ const string ident = _catalog->getIndexIdent(opCtx, ns(), indexName);
md.eraseIndex(indexName);
- _catalog->putMetaData(opCtx, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns(), md);
// Lazily remove to isolate underlying engine from rollback.
opCtx->recoveryUnit()->registerChange(new RemoveIndexChange(
@@ -250,9 +250,9 @@ Status KVCollectionCatalogEntry::prepareForIndexBuild(OperationContext* opCtx,
}
md.indexes.push_back(imd);
- _catalog->putMetaData(opCtx, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns(), md);
- string ident = _catalog->getIndexIdent(opCtx, ns().ns(), spec->indexName());
+ string ident = _catalog->getIndexIdent(opCtx, ns(), spec->indexName());
auto kvEngine = _engine->getEngine();
const Status status = kvEngine->createGroupedSortedDataInterface(opCtx, ident, spec, prefix);
@@ -294,7 +294,7 @@ void KVCollectionCatalogEntry::setIndexBuildScanning(
md.indexes[offset].buildPhase = kIndexBuildScanning.toString();
md.indexes[offset].sideWritesIdent = sideWritesIdent;
md.indexes[offset].constraintViolationsIdent = constraintViolationsIdent;
- _catalog->putMetaData(opCtx, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns(), md);
}
bool KVCollectionCatalogEntry::isIndexBuildScanning(OperationContext* opCtx,
@@ -315,7 +315,7 @@ void KVCollectionCatalogEntry::setIndexBuildDraining(OperationContext* opCtx,
invariant(md.indexes[offset].buildPhase == kIndexBuildScanning.toString());
md.indexes[offset].buildPhase = kIndexBuildDraining.toString();
- _catalog->putMetaData(opCtx, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns(), md);
}
bool KVCollectionCatalogEntry::isIndexBuildDraining(OperationContext* opCtx,
@@ -335,7 +335,7 @@ void KVCollectionCatalogEntry::indexBuildSuccess(OperationContext* opCtx, String
md.indexes[offset].buildPhase = boost::none;
md.indexes[offset].sideWritesIdent = boost::none;
md.indexes[offset].constraintViolationsIdent = boost::none;
- _catalog->putMetaData(opCtx, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns(), md);
}
boost::optional<std::string> KVCollectionCatalogEntry::getSideWritesIdent(
@@ -361,13 +361,13 @@ void KVCollectionCatalogEntry::updateTTLSetting(OperationContext* opCtx,
int offset = md.findIndexOffset(idxName);
invariant(offset >= 0);
md.indexes[offset].updateTTLSetting(newExpireSeconds);
- _catalog->putMetaData(opCtx, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns(), md);
}
void KVCollectionCatalogEntry::updateIndexMetadata(OperationContext* opCtx,
const IndexDescriptor* desc) {
// Update any metadata Ident has for this index
- const string ident = _catalog->getIndexIdent(opCtx, ns().ns(), desc->indexName());
+ const string ident = _catalog->getIndexIdent(opCtx, ns(), desc->indexName());
auto kvEngine = _engine->getEngine();
kvEngine->alterIdentMetadata(opCtx, ident, desc);
}
@@ -386,23 +386,23 @@ void KVCollectionCatalogEntry::updateValidator(OperationContext* opCtx,
md.options.validator = validator;
md.options.validationLevel = validationLevel.toString();
md.options.validationAction = validationAction.toString();
- _catalog->putMetaData(opCtx, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns(), md);
}
void KVCollectionCatalogEntry::setIsTemp(OperationContext* opCtx, bool isTemp) {
MetaData md = _getMetaData(opCtx);
md.options.temp = isTemp;
- _catalog->putMetaData(opCtx, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns(), md);
}
void KVCollectionCatalogEntry::updateCappedSize(OperationContext* opCtx, long long size) {
MetaData md = _getMetaData(opCtx);
md.options.cappedSize = size;
- _catalog->putMetaData(opCtx, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns(), md);
}
BSONCollectionCatalogEntry::MetaData KVCollectionCatalogEntry::_getMetaData(
OperationContext* opCtx) const {
- return _catalog->getMetaData(opCtx, ns().toString());
+ return _catalog->getMetaData(opCtx, ns());
}
}
diff --git a/src/mongo/db/storage/kv/kv_engine.h b/src/mongo/db/storage/kv/kv_engine.h
index 621314e1a3d..ebdf490b78f 100644
--- a/src/mongo/db/storage/kv/kv_engine.h
+++ b/src/mongo/db/storage/kv/kv_engine.h
@@ -186,10 +186,10 @@ public:
* it still exists when recovered.
*/
virtual Status recoverOrphanedIdent(OperationContext* opCtx,
- StringData ns,
+ const NamespaceString& nss,
StringData ident,
const CollectionOptions& options) {
- auto status = createRecordStore(opCtx, ns, ident, options);
+ auto status = createRecordStore(opCtx, nss.ns(), ident, options);
if (status.isOK()) {
return {ErrorCodes::DataModifiedByRepair, "Orphan recovery created a new record store"};
}
diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
index 1bbeb4d3b55..81c8d5f5256 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
@@ -305,11 +305,11 @@ TEST_F(KVCatalogTest, Coll1) {
CollectionOptions(),
KVPrefix::kNotPrefixed,
catalog.get()));
- ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent("a.b"));
+ ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent(NamespaceString("a.b")));
uow.commit();
}
- string ident = catalog->getCollectionIdent("a.b");
+ string ident = catalog->getCollectionIdent(NamespaceString("a.b"));
{
MyOperationContext opCtx(engine);
WriteUnitOfWork uow(&opCtx);
@@ -317,7 +317,7 @@ TEST_F(KVCatalogTest, Coll1) {
catalog->init(&opCtx);
uow.commit();
}
- ASSERT_EQUALS(ident, catalog->getCollectionIdent("a.b"));
+ ASSERT_EQUALS(ident, catalog->getCollectionIdent(NamespaceString("a.b")));
{
MyOperationContext opCtx(engine);
@@ -331,7 +331,7 @@ TEST_F(KVCatalogTest, Coll1) {
.transitional_ignore();
uow.commit();
}
- ASSERT_NOT_EQUALS(ident, catalog->getCollectionIdent("a.b"));
+ ASSERT_NOT_EQUALS(ident, catalog->getCollectionIdent(NamespaceString("a.b")));
}
TEST_F(KVCatalogTest, Idx1) {
@@ -357,8 +357,8 @@ TEST_F(KVCatalogTest, Idx1) {
CollectionOptions(),
KVPrefix::kNotPrefixed,
catalog.get()));
- ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent("a.b"));
- ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
+ ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent(NamespaceString("a.b")));
+ ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent(NamespaceString("a.b"))));
uow.commit();
}
@@ -378,20 +378,21 @@ TEST_F(KVCatalogTest, Idx1) {
imd.prefix = KVPrefix::kNotPrefixed;
imd.isBackgroundSecondaryBuild = false;
md.indexes.push_back(imd);
- catalog->putMetaData(&opCtx, "a.b", md);
+ catalog->putMetaData(&opCtx, NamespaceString("a.b"), md);
uow.commit();
}
string idxIndent;
{
MyOperationContext opCtx(engine);
- idxIndent = catalog->getIndexIdent(&opCtx, "a.b", "foo");
+ idxIndent = catalog->getIndexIdent(&opCtx, NamespaceString("a.b"), "foo");
}
{
MyOperationContext opCtx(engine);
- ASSERT_EQUALS(idxIndent, catalog->getIndexIdent(&opCtx, "a.b", "foo"));
- ASSERT_TRUE(catalog->isUserDataIdent(catalog->getIndexIdent(&opCtx, "a.b", "foo")));
+ ASSERT_EQUALS(idxIndent, catalog->getIndexIdent(&opCtx, NamespaceString("a.b"), "foo"));
+ ASSERT_TRUE(catalog->isUserDataIdent(
+ catalog->getIndexIdent(&opCtx, NamespaceString("a.b"), "foo")));
}
{
@@ -400,7 +401,7 @@ TEST_F(KVCatalogTest, Idx1) {
BSONCollectionCatalogEntry::MetaData md;
md.ns = "a.b";
- catalog->putMetaData(&opCtx, "a.b", md); // remove index
+ catalog->putMetaData(&opCtx, NamespaceString("a.b"), md); // remove index
BSONCollectionCatalogEntry::IndexMetaData imd;
imd.spec = BSON("name"
@@ -411,13 +412,13 @@ TEST_F(KVCatalogTest, Idx1) {
imd.prefix = KVPrefix::kNotPrefixed;
imd.isBackgroundSecondaryBuild = false;
md.indexes.push_back(imd);
- catalog->putMetaData(&opCtx, "a.b", md);
+ catalog->putMetaData(&opCtx, NamespaceString("a.b"), md);
uow.commit();
}
{
MyOperationContext opCtx(engine);
- ASSERT_NOT_EQUALS(idxIndent, catalog->getIndexIdent(&opCtx, "a.b", "foo"));
+ ASSERT_NOT_EQUALS(idxIndent, catalog->getIndexIdent(&opCtx, NamespaceString("a.b"), "foo"));
}
}
@@ -444,8 +445,8 @@ TEST_F(KVCatalogTest, DirectoryPerDb1) {
CollectionOptions(),
KVPrefix::kNotPrefixed,
catalog.get()));
- ASSERT_STRING_CONTAINS(catalog->getCollectionIdent("a.b"), "a/");
- ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
+ ASSERT_STRING_CONTAINS(catalog->getCollectionIdent(NamespaceString("a.b")), "a/");
+ ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent(NamespaceString("a.b"))));
uow.commit();
}
@@ -465,9 +466,10 @@ TEST_F(KVCatalogTest, DirectoryPerDb1) {
imd.prefix = KVPrefix::kNotPrefixed;
imd.isBackgroundSecondaryBuild = false;
md.indexes.push_back(imd);
- catalog->putMetaData(&opCtx, "a.b", md);
- ASSERT_STRING_CONTAINS(catalog->getIndexIdent(&opCtx, "a.b", "foo"), "a/");
- ASSERT_TRUE(catalog->isUserDataIdent(catalog->getIndexIdent(&opCtx, "a.b", "foo")));
+ catalog->putMetaData(&opCtx, NamespaceString("a.b"), md);
+ ASSERT_STRING_CONTAINS(catalog->getIndexIdent(&opCtx, NamespaceString("a.b"), "foo"), "a/");
+ ASSERT_TRUE(catalog->isUserDataIdent(
+ catalog->getIndexIdent(&opCtx, NamespaceString("a.b"), "foo")));
uow.commit();
}
}
@@ -495,8 +497,8 @@ TEST_F(KVCatalogTest, Split1) {
CollectionOptions(),
KVPrefix::kNotPrefixed,
catalog.get()));
- ASSERT_STRING_CONTAINS(catalog->getCollectionIdent("a.b"), "collection/");
- ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
+ ASSERT_STRING_CONTAINS(catalog->getCollectionIdent(NamespaceString("a.b")), "collection/");
+ ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent(NamespaceString("a.b"))));
uow.commit();
}
@@ -516,9 +518,11 @@ TEST_F(KVCatalogTest, Split1) {
imd.prefix = KVPrefix::kNotPrefixed;
imd.isBackgroundSecondaryBuild = false;
md.indexes.push_back(imd);
- catalog->putMetaData(&opCtx, "a.b", md);
- ASSERT_STRING_CONTAINS(catalog->getIndexIdent(&opCtx, "a.b", "foo"), "index/");
- ASSERT_TRUE(catalog->isUserDataIdent(catalog->getIndexIdent(&opCtx, "a.b", "foo")));
+ catalog->putMetaData(&opCtx, NamespaceString("a.b"), md);
+ ASSERT_STRING_CONTAINS(catalog->getIndexIdent(&opCtx, NamespaceString("a.b"), "foo"),
+ "index/");
+ ASSERT_TRUE(catalog->isUserDataIdent(
+ catalog->getIndexIdent(&opCtx, NamespaceString("a.b"), "foo")));
uow.commit();
}
}
@@ -546,8 +550,9 @@ TEST_F(KVCatalogTest, DirectoryPerAndSplit1) {
CollectionOptions(),
KVPrefix::kNotPrefixed,
catalog.get()));
- ASSERT_STRING_CONTAINS(catalog->getCollectionIdent("a.b"), "a/collection/");
- ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
+ ASSERT_STRING_CONTAINS(catalog->getCollectionIdent(NamespaceString("a.b")),
+ "a/collection/");
+ ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent(NamespaceString("a.b"))));
uow.commit();
}
@@ -567,9 +572,11 @@ TEST_F(KVCatalogTest, DirectoryPerAndSplit1) {
imd.prefix = KVPrefix::kNotPrefixed;
imd.isBackgroundSecondaryBuild = false;
md.indexes.push_back(imd);
- catalog->putMetaData(&opCtx, "a.b", md);
- ASSERT_STRING_CONTAINS(catalog->getIndexIdent(&opCtx, "a.b", "foo"), "a/index/");
- ASSERT_TRUE(catalog->isUserDataIdent(catalog->getIndexIdent(&opCtx, "a.b", "foo")));
+ catalog->putMetaData(&opCtx, NamespaceString("a.b"), md);
+ ASSERT_STRING_CONTAINS(catalog->getIndexIdent(&opCtx, NamespaceString("a.b"), "foo"),
+ "a/index/");
+ ASSERT_TRUE(catalog->isUserDataIdent(
+ catalog->getIndexIdent(&opCtx, NamespaceString("a.b"), "foo")));
uow.commit();
}
}
@@ -600,8 +607,9 @@ TEST_F(KVCatalogTest, RestartForPrefixes) {
WriteUnitOfWork uow(&opCtx);
ASSERT_OK(newCollection(
&opCtx, NamespaceString("a.b"), CollectionOptions(), abCollPrefix, catalog.get()));
- ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent("a.b"));
- ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
+ ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent(NamespaceString("a.b")));
+ ASSERT_TRUE(
+ catalog->isUserDataIdent(catalog->getCollectionIdent(NamespaceString("a.b"))));
uow.commit();
}
@@ -622,7 +630,7 @@ TEST_F(KVCatalogTest, RestartForPrefixes) {
imd.isBackgroundSecondaryBuild = false;
md.indexes.push_back(imd);
md.prefix = abCollPrefix;
- catalog->putMetaData(&opCtx, "a.b", md);
+ catalog->putMetaData(&opCtx, NamespaceString("a.b"), md);
uow.commit();
}
}
@@ -637,7 +645,8 @@ TEST_F(KVCatalogTest, RestartForPrefixes) {
stdx::make_unique<KVCatalog>(rs.get(), false, false, nullptr);
catalog->init(&opCtx);
- const BSONCollectionCatalogEntry::MetaData md = catalog->getMetaData(&opCtx, "a.b");
+ const BSONCollectionCatalogEntry::MetaData md =
+ catalog->getMetaData(&opCtx, NamespaceString("a.b"));
ASSERT_EQ("a.b", md.ns);
ASSERT_EQ(abCollPrefix, md.prefix);
ASSERT_EQ(fooIndexPrefix, md.indexes[md.findIndexOffset("foo")].prefix);
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.cpp b/src/mongo/db/storage/kv/kv_storage_engine.cpp
index 5e409902648..f0a1fb6dc96 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine.cpp
@@ -135,8 +135,7 @@ void KVStorageEngine::loadCatalog(OperationContext* opCtx) {
std::sort(identsKnownToStorageEngine.begin(), identsKnownToStorageEngine.end());
}
- std::vector<std::string> collectionsKnownToCatalog;
- _catalog->getAllCollections(&collectionsKnownToCatalog);
+ auto collectionsKnownToCatalog = _catalog->getAllCollections();
if (_options.forRepair) {
// It's possible that there are collection files on disk that are unknown to the catalog. In
@@ -148,7 +147,8 @@ void KVStorageEngine::loadCatalog(OperationContext* opCtx) {
bool isOrphan = !std::any_of(collectionsKnownToCatalog.begin(),
collectionsKnownToCatalog.end(),
[this, &ident](const auto& coll) {
- return _catalog->getCollectionIdent(coll) == ident;
+ return _catalog->getCollectionIdent(
+ NamespaceString(coll)) == ident;
});
if (isOrphan) {
// If the catalog does not have information about this
@@ -258,9 +258,9 @@ Status KVStorageEngine::_recoverOrphanedCollection(OperationContext* opCtx,
<< collectionIdent;
WriteUnitOfWork wuow(opCtx);
- const auto metadata = _catalog->getMetaData(opCtx, collectionName.toString());
- auto status = _engine->recoverOrphanedIdent(
- opCtx, collectionName.toString(), collectionIdent, metadata.options);
+ const auto metadata = _catalog->getMetaData(opCtx, collectionName);
+ auto status =
+ _engine->recoverOrphanedIdent(opCtx, collectionName, collectionIdent, metadata.options);
bool dataModified = status.code() == ErrorCodes::DataModifiedByRepair;
@@ -361,8 +361,7 @@ KVStorageEngine::reconcileCatalogAndIdents(OperationContext* opCtx) {
// engine. An omission here is fatal. A missing ident could mean a collection drop was rolled
// back. Note that startup already attempts to open tables; this should only catch errors in
// other contexts such as `recoverToStableTimestamp`.
- std::vector<std::string> collections;
- _catalog->getAllCollections(&collections);
+ auto collections = _catalog->getAllCollections();
if (!_options.forRepair) {
for (const auto& coll : collections) {
const auto& identForColl = _catalog->getCollectionIdent(coll);
@@ -398,7 +397,7 @@ KVStorageEngine::reconcileCatalogAndIdents(OperationContext* opCtx) {
if (indexMetaData.ready && !foundIdent) {
log() << "Expected index data is missing, rebuilding. Collection: " << coll
<< " Index: " << indexName;
- ret.emplace_back(coll, indexName);
+ ret.emplace_back(coll.ns(), indexName);
continue;
}
@@ -435,7 +434,7 @@ KVStorageEngine::reconcileCatalogAndIdents(OperationContext* opCtx) {
log()
<< "Expected background index build did not complete, rebuilding. Collection: "
<< coll << " Index: " << indexName;
- ret.emplace_back(coll, indexName);
+ ret.emplace_back(coll.ns(), indexName);
continue;
}
@@ -571,7 +570,7 @@ Status KVStorageEngine::_dropCollectionsNoTimestamp(OperationContext* opCtx,
WriteUnitOfWork untimestampedDropWuow(opCtx);
for (auto& nss : toDrop) {
invariant(getCatalog());
- Status result = getCatalog()->dropCollection(opCtx, nss.ns());
+ Status result = getCatalog()->dropCollection(opCtx, nss);
if (!result.isOK() && firstError.isOK()) {
firstError = result;
}
@@ -627,21 +626,21 @@ SnapshotManager* KVStorageEngine::getSnapshotManager() const {
return _engine->getSnapshotManager();
}
-Status KVStorageEngine::repairRecordStore(OperationContext* opCtx, const std::string& ns) {
+Status KVStorageEngine::repairRecordStore(OperationContext* opCtx, const NamespaceString& nss) {
auto repairObserver = StorageRepairObserver::get(getGlobalServiceContext());
invariant(repairObserver->isIncomplete());
- Status status = _engine->repairIdent(opCtx, _catalog->getCollectionIdent(ns));
+ Status status = _engine->repairIdent(opCtx, _catalog->getCollectionIdent(nss));
bool dataModified = status.code() == ErrorCodes::DataModifiedByRepair;
if (!status.isOK() && !dataModified) {
return status;
}
if (dataModified) {
- repairObserver->onModification(str::stream() << "Collection " << ns << ": "
+ repairObserver->onModification(str::stream() << "Collection " << nss << ": "
<< status.reason());
}
- _catalog->reinitCollectionAfterRepair(opCtx, ns);
+ _catalog->reinitCollectionAfterRepair(opCtx, nss);
return Status::OK();
}
@@ -923,7 +922,7 @@ int64_t KVStorageEngine::sizeOnDiskForDb(OperationContext* opCtx, StringData dbN
for (size_t i = 0; i < indexNames.size(); i++) {
std::string ident =
- _catalog->getIndexIdent(opCtx, catalogEntry->ns().ns(), indexNames[i]);
+ _catalog->getIndexIdent(opCtx, catalogEntry->ns(), indexNames[i]);
size += _engine->getIdentSize(opCtx, ident);
}
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.h b/src/mongo/db/storage/kv/kv_storage_engine.h
index a84d82678a1..8bba8926a9a 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.h
+++ b/src/mongo/db/storage/kv/kv_storage_engine.h
@@ -107,7 +107,7 @@ public:
virtual bool isEphemeral() const;
- virtual Status repairRecordStore(OperationContext* opCtx, const std::string& ns);
+ virtual Status repairRecordStore(OperationContext* opCtx, const NamespaceString& nss);
virtual std::unique_ptr<TemporaryRecordStore> makeTemporaryRecordStore(
OperationContext* opCtx) override;
diff --git a/src/mongo/db/storage/kv/kv_storage_engine_test.cpp b/src/mongo/db/storage/kv/kv_storage_engine_test.cpp
index 33d076d7ce7..370677f8f1c 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine_test.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine_test.cpp
@@ -175,8 +175,7 @@ TEST_F(KVStorageEngineTest, ReconcileDoesNotDropIndexBuildTempTables) {
auto sideWrites = makeTemporary(opCtx.get());
auto constraintViolations = makeTemporary(opCtx.get());
- const auto indexIdent =
- _storageEngine->getCatalog()->getIndexIdent(opCtx.get(), ns.ns(), indexName);
+ const auto indexIdent = _storageEngine->getCatalog()->getIndexIdent(opCtx.get(), ns, indexName);
indexBuildScan(opCtx.get(),
ns,
@@ -217,8 +216,7 @@ TEST_F(KVStorageEngineTest, ReconcileDoesNotDropIndexBuildTempTablesBackgroundSe
auto sideWrites = makeTemporary(opCtx.get());
auto constraintViolations = makeTemporary(opCtx.get());
- const auto indexIdent =
- _storageEngine->getCatalog()->getIndexIdent(opCtx.get(), ns.ns(), indexName);
+ const auto indexIdent = _storageEngine->getCatalog()->getIndexIdent(opCtx.get(), ns, indexName);
indexBuildScan(opCtx.get(),
ns,
diff --git a/src/mongo/db/storage/kv/kv_storage_engine_test_fixture.h b/src/mongo/db/storage/kv/kv_storage_engine_test_fixture.h
index 4a5e00d4547..d530ebb4922 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine_test_fixture.h
+++ b/src/mongo/db/storage/kv/kv_storage_engine_test_fixture.h
@@ -59,7 +59,7 @@ public:
return ret;
}
- return _storageEngine->getCatalog()->getCollectionIdent(ns.ns());
+ return _storageEngine->getCatalog()->getCollectionIdent(ns);
}
std::unique_ptr<TemporaryRecordStore> makeTemporary(OperationContext* opCtx) {
@@ -76,8 +76,7 @@ public:
}
Status dropIndexTable(OperationContext* opCtx, NamespaceString nss, std::string indexName) {
- std::string indexIdent =
- _storageEngine->getCatalog()->getIndexIdent(opCtx, nss.ns(), indexName);
+ std::string indexIdent = _storageEngine->getCatalog()->getIndexIdent(opCtx, nss, indexName);
return dropIdent(opCtx, indexIdent);
}
@@ -95,10 +94,8 @@ public:
}
bool collectionExists(OperationContext* opCtx, const NamespaceString& nss) {
- std::vector<std::string> allCollections;
- _storageEngine->getCatalog()->getAllCollections(&allCollections);
- return std::find(allCollections.begin(), allCollections.end(), nss.toString()) !=
- allCollections.end();
+ auto allCollections = _storageEngine->getCatalog()->getAllCollections();
+ return std::count(allCollections.begin(), allCollections.end(), nss);
}
bool identExists(OperationContext* opCtx, const std::string& ident) {
auto idents = getAllKVEngineIdents(opCtx);
@@ -167,7 +164,7 @@ public:
}
Status removeEntry(OperationContext* opCtx, StringData ns, KVCatalog* catalog) {
- return catalog->_removeEntry(opCtx, ns);
+ return catalog->_removeEntry(opCtx, NamespaceString(ns));
}
KVStorageEngine* _storageEngine;
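Editor's note: the fixture's collectionExists() above also shows the shape of the new getAllCollections(), which returns the namespaces by value instead of filling an out-parameter; a hedged equivalent outside the fixture, with the engine pointer assumed to come from the caller, might look like this.
// Illustration only: the catalog hands back NamespaceStrings directly.
bool hasCollection(KVStorageEngine* engine, const NamespaceString& nss) {
    const std::vector<NamespaceString> all = engine->getCatalog()->getAllCollections();
    return std::find(all.begin(), all.end(), nss) != all.end();
}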
diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h
index d7172eb3d85..c74c7de72ab 100644
--- a/src/mongo/db/storage/storage_engine.h
+++ b/src/mongo/db/storage/storage_engine.h
@@ -285,7 +285,7 @@ public:
* Generally, this method should not be called directly except by the repairDatabase()
* free function.
*/
- virtual Status repairRecordStore(OperationContext* opCtx, const std::string& ns) = 0;
+ virtual Status repairRecordStore(OperationContext* opCtx, const NamespaceString& nss) = 0;
/**
* Creates a temporary RecordStore on the storage engine. This record store will drop itself
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 862545bb8c3..561469471dc 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -1118,7 +1118,7 @@ Status WiredTigerKVEngine::createGroupedRecordStore(OperationContext* opCtx,
}
Status WiredTigerKVEngine::recoverOrphanedIdent(OperationContext* opCtx,
- StringData ns,
+ const NamespaceString& nss,
StringData ident,
const CollectionOptions& options) {
#ifdef _WIN32
@@ -1148,10 +1148,9 @@ Status WiredTigerKVEngine::recoverOrphanedIdent(OperationContext* opCtx,
return status;
}
- log() << "Creating new RecordStore for collection " + ns + " with UUID: " +
- (options.uuid ? options.uuid->toString() : "none");
+ log() << "Creating new RecordStore for collection " << nss << " with UUID: " << options.uuid;
- status = createGroupedRecordStore(opCtx, ns, ident, options, KVPrefix::kNotPrefixed);
+ status = createGroupedRecordStore(opCtx, nss.ns(), ident, options, KVPrefix::kNotPrefixed);
if (!status.isOK()) {
return status;
}
@@ -1176,7 +1175,8 @@ Status WiredTigerKVEngine::recoverOrphanedIdent(OperationContext* opCtx,
WiredTigerSession sessionWrapper(_conn);
WT_SESSION* session = sessionWrapper.getSession();
- status = wtRCToStatus(session->salvage(session, _uri(ident).c_str(), NULL), "Salvage failed: ");
+ status =
+ wtRCToStatus(session->salvage(session, _uri(ident).c_str(), nullptr), "Salvage failed: ");
if (status.isOK()) {
return {ErrorCodes::DataModifiedByRepair,
str::stream() << "Salvaged data for ident " << ident};
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index 1b1c9af1648..1f07e81debc 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -181,7 +181,7 @@ public:
Status repairIdent(OperationContext* opCtx, StringData ident) override;
Status recoverOrphanedIdent(OperationContext* opCtx,
- StringData ns,
+ const NamespaceString& nss,
StringData ident,
const CollectionOptions& options) override;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
index ef37d8dc573..c6e0da77906 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
@@ -119,14 +119,14 @@ public:
TEST_F(WiredTigerKVEngineRepairTest, OrphanedDataFilesCanBeRecovered) {
auto opCtxPtr = makeOperationContext();
- std::string ns = "a.b";
+ NamespaceString nss("a.b");
std::string ident = "collection-1234";
std::string record = "abcd";
CollectionOptions options;
std::unique_ptr<RecordStore> rs;
- ASSERT_OK(_engine->createRecordStore(opCtxPtr.get(), ns, ident, options));
- rs = _engine->getRecordStore(opCtxPtr.get(), ns, ident, options);
+ ASSERT_OK(_engine->createRecordStore(opCtxPtr.get(), nss.ns(), ident, options));
+ rs = _engine->getRecordStore(opCtxPtr.get(), nss.ns(), ident, options);
ASSERT(rs);
RecordId loc;
@@ -149,7 +149,7 @@ TEST_F(WiredTigerKVEngineRepairTest, OrphanedDataFilesCanBeRecovered) {
ASSERT(!boost::filesystem::exists(tmpFile));
#ifdef _WIN32
- auto status = _engine->recoverOrphanedIdent(opCtxPtr.get(), ns, ident, options);
+ auto status = _engine->recoverOrphanedIdent(opCtxPtr.get(), nss, ident, options);
ASSERT_EQ(ErrorCodes::CommandNotSupported, status.code());
#else
// Move the data file out of the way so the ident can be dropped. This is not permitted on Windows

@@ -166,7 +166,7 @@ TEST_F(WiredTigerKVEngineRepairTest, OrphanedDataFilesCanBeRecovered) {
boost::filesystem::rename(tmpFile, *dataFilePath, err);
ASSERT(!err) << err.message();
- auto status = _engine->recoverOrphanedIdent(opCtxPtr.get(), ns, ident, options);
+ auto status = _engine->recoverOrphanedIdent(opCtxPtr.get(), nss, ident, options);
ASSERT_EQ(ErrorCodes::DataModifiedByRepair, status.code());
#endif
}
@@ -174,14 +174,14 @@ TEST_F(WiredTigerKVEngineRepairTest, OrphanedDataFilesCanBeRecovered) {
TEST_F(WiredTigerKVEngineRepairTest, UnrecoverableOrphanedDataFilesAreRebuilt) {
auto opCtxPtr = makeOperationContext();
- std::string ns = "a.b";
+ NamespaceString nss("a.b");
std::string ident = "collection-1234";
std::string record = "abcd";
CollectionOptions options;
std::unique_ptr<RecordStore> rs;
- ASSERT_OK(_engine->createRecordStore(opCtxPtr.get(), ns, ident, options));
- rs = _engine->getRecordStore(opCtxPtr.get(), ns, ident, options);
+ ASSERT_OK(_engine->createRecordStore(opCtxPtr.get(), nss.ns(), ident, options));
+ rs = _engine->getRecordStore(opCtxPtr.get(), nss.ns(), ident, options);
ASSERT(rs);
RecordId loc;
@@ -203,7 +203,7 @@ TEST_F(WiredTigerKVEngineRepairTest, UnrecoverableOrphanedDataFilesAreRebuilt) {
ASSERT_OK(_engine->dropIdent(opCtxPtr.get(), ident));
#ifdef _WIN32
- auto status = _engine->recoverOrphanedIdent(opCtxPtr.get(), ns, ident, options);
+ auto status = _engine->recoverOrphanedIdent(opCtxPtr.get(), nss, ident, options);
ASSERT_EQ(ErrorCodes::CommandNotSupported, status.code());
#else
// The ident may not get immediately dropped, so ensure it is completely gone.
@@ -221,13 +221,13 @@ TEST_F(WiredTigerKVEngineRepairTest, UnrecoverableOrphanedDataFilesAreRebuilt) {
// This should recreate an empty data file successfully and move the old one to a name that ends
// in ".corrupt".
- auto status = _engine->recoverOrphanedIdent(opCtxPtr.get(), ns, ident, options);
+ auto status = _engine->recoverOrphanedIdent(opCtxPtr.get(), nss, ident, options);
ASSERT_EQ(ErrorCodes::DataModifiedByRepair, status.code()) << status.reason();
boost::filesystem::path corruptFile = (dataFilePath->string() + ".corrupt");
ASSERT(boost::filesystem::exists(corruptFile));
- rs = _engine->getRecordStore(opCtxPtr.get(), ns, ident, options);
+ rs = _engine->getRecordStore(opCtxPtr.get(), nss.ns(), ident, options);
RecordData data;
ASSERT_FALSE(rs->findRecord(opCtxPtr.get(), loc, &data));
#endif
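Editor's note: at the engine level, a hedged sketch of a repair path calling the changed recoverOrphanedIdent(); the wrapper, its error handling, and the log message are illustrative assumptions, not part of this commit.
// Illustration only: the namespace travels as a NamespaceString and is only
// flattened to a string where lower layers still expect one.
Status recoverCollection(OperationContext* opCtx,
                         KVEngine* engine,
                         const NamespaceString& nss,
                         StringData ident,
                         const CollectionOptions& options) {
    Status status = engine->recoverOrphanedIdent(opCtx, nss, ident, options);
    if (status.code() == ErrorCodes::DataModifiedByRepair) {
        log() << "recovered orphaned ident for " << nss;  // salvaged or rebuilt
        return Status::OK();
    }
    return status;
}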
diff --git a/src/mongo/db/transaction_participant_test.cpp b/src/mongo/db/transaction_participant_test.cpp
index 24c495ea340..cf638610bdf 100644
--- a/src/mongo/db/transaction_participant_test.cpp
+++ b/src/mongo/db/transaction_participant_test.cpp
@@ -247,7 +247,7 @@ protected:
WriteUnitOfWork wuow(opCtx());
CollectionOptions options;
options.uuid = _uuid;
- db->createCollection(opCtx(), kNss.ns(), options);
+ db->createCollection(opCtx(), kNss, options);
wuow.commit();
}
@@ -545,7 +545,7 @@ TEST_F(TxnParticipantTest, PrepareFailsOnTemporaryCollection) {
CollectionOptions options;
options.uuid = tempCollUUID;
options.temp = true;
- db->createCollection(opCtx(), tempCollNss.ns(), options);
+ db->createCollection(opCtx(), tempCollNss, options);
wuow.commit();
}
@@ -3640,7 +3640,7 @@ TEST_F(TxnParticipantTest, OldestActiveTransactionTimestamp) {
AutoGetOrCreateDb autoDb(opCtx(), nss.db(), MODE_X);
WriteUnitOfWork wuow(opCtx());
- auto coll = autoDb.getDb()->getCollection(opCtx(), nss.ns());
+ auto coll = autoDb.getDb()->getCollection(opCtx(), nss);
ASSERT(coll);
OpDebug* const nullOpDebug = nullptr;
ASSERT_OK(
@@ -3652,7 +3652,7 @@ TEST_F(TxnParticipantTest, OldestActiveTransactionTimestamp) {
Timestamp ts(1, i);
AutoGetOrCreateDb autoDb(opCtx(), nss.db(), MODE_X);
WriteUnitOfWork wuow(opCtx());
- auto coll = autoDb.getDb()->getCollection(opCtx(), nss.ns());
+ auto coll = autoDb.getDb()->getCollection(opCtx(), nss);
ASSERT(coll);
auto cursor = coll->getCursor(opCtx());
while (auto record = cursor->next()) {
diff --git a/src/mongo/dbtests/commandtests.cpp b/src/mongo/dbtests/commandtests.cpp
index a149bc7f85f..a5c7ce5fbf5 100644
--- a/src/mongo/dbtests/commandtests.cpp
+++ b/src/mongo/dbtests/commandtests.cpp
@@ -47,13 +47,13 @@ TEST(CommandTests, InputDocumentSequeceWorksEndToEnd) {
const auto opCtxHolder = cc().makeOperationContext();
auto opCtx = opCtxHolder.get();
- NamespaceString ns("test", "doc_seq");
+ NamespaceString nss("test", "doc_seq");
DBDirectClient db(opCtx);
- db.dropCollection(ns.ns());
- ASSERT_EQ(db.count(ns.ns()), 0u);
+ db.dropCollection(nss.ns());
+ ASSERT_EQ(db.count(nss.ns()), 0u);
OpMsgRequest request;
- request.body = BSON("insert" << ns.coll() << "$db" << ns.db());
+ request.body = BSON("insert" << nss.coll() << "$db" << nss.db());
request.sequences = {{"documents",
{
BSON("_id" << 1),
@@ -66,7 +66,7 @@ TEST(CommandTests, InputDocumentSequeceWorksEndToEnd) {
const auto reply = db.runCommand(std::move(request));
ASSERT_EQ(int(reply->getProtocol()), int(rpc::Protocol::kOpMsg));
ASSERT_BSONOBJ_EQ(reply->getCommandReply(), BSON("n" << 5 << "ok" << 1.0));
- ASSERT_EQ(db.count(ns.ns()), 5u);
+ ASSERT_EQ(db.count(nss.ns()), 5u);
}
using std::string;
@@ -77,11 +77,11 @@ using std::string;
class Base {
public:
Base() : db(&_opCtx) {
- db.dropCollection(ns());
+ db.dropCollection(nss().ns());
}
- const char* ns() {
- return "test.testCollection";
+ NamespaceString nss() {
+ return NamespaceString("test.testCollection");
}
const char* nsDb() {
return "test";
@@ -99,12 +99,12 @@ public:
namespace FileMD5 {
struct Base {
Base() : db(&_opCtx) {
- db.dropCollection(ns());
- ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("files_id" << 1 << "n" << 1)));
+ db.dropCollection(nss().ns());
+ ASSERT_OK(dbtests::createIndex(&_opCtx, nss().ns(), BSON("files_id" << 1 << "n" << 1)));
}
- const char* ns() {
- return "test.fs.chunks";
+ NamespaceString nss() {
+ return NamespaceString("test.fs.chunks");
}
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
@@ -119,7 +119,7 @@ struct Type0 : Base {
b.append("files_id", 0);
b.append("n", 0);
b.appendBinData("data", 6, BinDataGeneral, "hello ");
- db.insert(ns(), b.obj());
+ db.insert(nss().ns(), b.obj());
}
{
BSONObjBuilder b;
@@ -127,7 +127,7 @@ struct Type0 : Base {
b.append("files_id", 0);
b.append("n", 1);
b.appendBinData("data", 5, BinDataGeneral, "world");
- db.insert(ns(), b.obj());
+ db.insert(nss().ns(), b.obj());
}
BSONObj result;
@@ -143,7 +143,7 @@ struct Type2 : Base {
b.append("files_id", 0);
b.append("n", 0);
b.appendBinDataArrayDeprecated("data", "hello ", 6);
- db.insert(ns(), b.obj());
+ db.insert(nss().ns(), b.obj());
}
{
BSONObjBuilder b;
@@ -151,7 +151,7 @@ struct Type2 : Base {
b.append("files_id", 0);
b.append("n", 1);
b.appendBinDataArrayDeprecated("data", "world", 5);
- db.insert(ns(), b.obj());
+ db.insert(nss().ns(), b.obj());
}
BSONObj result;
@@ -170,7 +170,7 @@ namespace SymbolArgument {
class Drop : Base {
public:
void run() {
- ASSERT(db.createCollection(ns()));
+ ASSERT(db.createCollection(nss().ns()));
{
BSONObjBuilder cmd;
cmd.appendSymbol("drop", nsColl()); // Use Symbol for SERVER-16260
@@ -186,7 +186,7 @@ public:
class DropIndexes : Base {
public:
void run() {
- ASSERT(db.createCollection(ns()));
+ ASSERT(db.createCollection(nss().ns()));
BSONObjBuilder cmd;
cmd.appendSymbol("dropIndexes", nsColl()); // Use Symbol for SERVER-16260
@@ -202,7 +202,7 @@ public:
class CreateIndexWithNoKey : Base {
public:
void run() {
- ASSERT(db.createCollection(ns()));
+ ASSERT(db.createCollection(nss().ns()));
BSONObjBuilder indexSpec;
@@ -223,7 +223,7 @@ public:
class CreateIndexWithDuplicateKey : Base {
public:
void run() {
- ASSERT(db.createCollection(ns()));
+ ASSERT(db.createCollection(nss().ns()));
BSONObjBuilder indexSpec;
indexSpec.append("key", BSON("a" << 1 << "a" << 1 << "b" << 1));
@@ -245,13 +245,13 @@ public:
class FindAndModify : Base {
public:
void run() {
- ASSERT(db.createCollection(ns()));
+ ASSERT(db.createCollection(nss().ns()));
{
BSONObjBuilder b;
b.genOID();
b.append("name", "Tom");
b.append("rating", 0);
- db.insert(ns(), b.obj());
+ db.insert(nss().ns(), b.obj());
}
BSONObjBuilder cmd;
@@ -275,7 +275,8 @@ public:
int n = 0;
for (int x = 0; x < 20; x++) {
for (int y = 0; y < 20; y++) {
- db.insert(ns(), BSON("_id" << n << "loc" << BSON_ARRAY(x << y) << "z" << n % 5));
+ db.insert(nss().ns(),
+ BSON("_id" << n << "loc" << BSON_ARRAY(x << y) << "z" << n % 5));
n++;
}
}
@@ -321,7 +322,7 @@ public:
class Touch : Base {
public:
void run() {
- ASSERT(db.createCollection(ns()));
+ ASSERT(db.createCollection(nss().ns()));
{
BSONObjBuilder cmd;
cmd.appendSymbol("touch", nsColl()); // Use Symbol for SERVER-16260
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 9e8452d929f..36a59a07d90 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -51,11 +51,11 @@ public:
{
WriteUnitOfWork wunit(&_opCtx);
- _collection = _database->getCollection(&_opCtx, ns());
+ _collection = _database->getCollection(&_opCtx, nss());
if (_collection) {
- _database->dropCollection(&_opCtx, ns()).transitional_ignore();
+ _database->dropCollection(&_opCtx, nss()).transitional_ignore();
}
- _collection = _database->createCollection(&_opCtx, ns());
+ _collection = _database->createCollection(&_opCtx, nss());
IndexCatalog* indexCatalog = _collection->getIndexCatalog();
auto indexSpec =
@@ -73,7 +73,7 @@ public:
~Base() {
try {
WriteUnitOfWork wunit(&_opCtx);
- uassertStatusOK(_database->dropCollection(&_opCtx, ns()));
+ uassertStatusOK(_database->dropCollection(&_opCtx, nss()));
wunit.commit();
} catch (...) {
FAIL("Exception while cleaning up collection");
@@ -85,6 +85,10 @@ protected:
return "unittests.counttests";
}
+ static NamespaceString nss() {
+ return NamespaceString(ns());
+ }
+
void insert(const char* s) {
WriteUnitOfWork wunit(&_opCtx);
const BSONObj o = fromjson(s);
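
Note on the recurring fixture pattern above (and in the dbtests files that follow): rather than replacing the character-string ns() helper outright, each fixture gains a thin nss() wrapper, so call sites that now take a NamespaceString use nss() while client-style helpers that still take a raw string keep nss().ns() (as in the db.insert() calls above). A minimal compilable sketch of that shape, using a simplified stand-in for the real NamespaceString class (assumed accessors only, not the actual implementation):

    #include <iostream>
    #include <string>
    #include <utility>

    // Illustrative stand-in for mongo::NamespaceString; only the accessors the
    // tests rely on are sketched here.
    class NamespaceString {
    public:
        explicit NamespaceString(std::string ns) : _ns(std::move(ns)) {}
        const std::string& ns() const { return _ns; }                    // full "db.coll" string
        std::string db() const { return _ns.substr(0, _ns.find('.')); }  // database part
    private:
        std::string _ns;
    };

    // Shape of the fixture change: keep the legacy ns() string for callers that
    // still take a raw namespace, add nss() for APIs that now take NamespaceString.
    struct Base {
        static const char* ns() { return "unittests.counttests"; }
        static NamespaceString nss() { return NamespaceString(ns()); }
    };

    int main() {
        std::cout << Base::nss().db() << '\n';  // prints "unittests"
    }
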
diff --git a/src/mongo/dbtests/extensions_callback_real_test.cpp b/src/mongo/dbtests/extensions_callback_real_test.cpp
index 1272dded6aa..bdc84eb61bd 100644
--- a/src/mongo/dbtests/extensions_callback_real_test.cpp
+++ b/src/mongo/dbtests/extensions_callback_real_test.cpp
@@ -54,7 +54,7 @@ public:
Database* database = autoDb.getDb();
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT(database->createCollection(&_opCtx, _nss.ns()));
+ ASSERT(database->createCollection(&_opCtx, _nss));
wunit.commit();
}
}
@@ -67,7 +67,7 @@ public:
}
{
WriteUnitOfWork wunit(&_opCtx);
- static_cast<void>(database->dropCollection(&_opCtx, _nss.ns()));
+ static_cast<void>(database->dropCollection(&_opCtx, _nss));
wunit.commit();
}
}
diff --git a/src/mongo/dbtests/indexcatalogtests.cpp b/src/mongo/dbtests/indexcatalogtests.cpp
index bd726d202ff..f5aef397bcd 100644
--- a/src/mongo/dbtests/indexcatalogtests.cpp
+++ b/src/mongo/dbtests/indexcatalogtests.cpp
@@ -42,19 +42,19 @@ namespace {
const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
} // namespace
-static const char* const _ns = "unittests.indexcatalog";
+static const NamespaceString _nss("unittests.indexcatalog");
class IndexIteratorTests {
public:
IndexIteratorTests() {
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
- Lock::DBLock lk(&opCtx, nsToDatabaseSubstring(_ns), MODE_X);
- OldClientContext ctx(&opCtx, _ns);
+ Lock::DBLock lk(&opCtx, _nss.db(), MODE_X);
+ OldClientContext ctx(&opCtx, _nss.ns());
WriteUnitOfWork wuow(&opCtx);
_db = ctx.db();
- _coll = _db->createCollection(&opCtx, _ns);
+ _coll = _db->createCollection(&opCtx, _nss);
_catalog = _coll->getIndexCatalog();
wuow.commit();
}
@@ -62,23 +62,23 @@ public:
~IndexIteratorTests() {
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
- Lock::DBLock lk(&opCtx, nsToDatabaseSubstring(_ns), MODE_X);
- OldClientContext ctx(&opCtx, _ns);
+ Lock::DBLock lk(&opCtx, _nss.db(), MODE_X);
+ OldClientContext ctx(&opCtx, _nss.ns());
WriteUnitOfWork wuow(&opCtx);
- _db->dropCollection(&opCtx, _ns).transitional_ignore();
+ _db->dropCollection(&opCtx, _nss).transitional_ignore();
wuow.commit();
}
void run() {
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
- dbtests::WriteContextForTests ctx(&opCtx, _ns);
+ dbtests::WriteContextForTests ctx(&opCtx, _nss.ns());
int numFinishedIndexesStart = _catalog->numIndexesReady(&opCtx);
- dbtests::createIndex(&opCtx, _ns, BSON("x" << 1)).transitional_ignore();
- dbtests::createIndex(&opCtx, _ns, BSON("y" << 1)).transitional_ignore();
+ dbtests::createIndex(&opCtx, _nss.ns(), BSON("x" << 1)).transitional_ignore();
+ dbtests::createIndex(&opCtx, _nss.ns(), BSON("y" << 1)).transitional_ignore();
ASSERT_TRUE(_catalog->numIndexesReady(&opCtx) == numFinishedIndexesStart + 2);
@@ -116,12 +116,12 @@ public:
RefreshEntry() {
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
- Lock::DBLock lk(&opCtx, nsToDatabaseSubstring(_ns), MODE_X);
- OldClientContext ctx(&opCtx, _ns);
+ Lock::DBLock lk(&opCtx, _nss.db(), MODE_X);
+ OldClientContext ctx(&opCtx, _nss.ns());
WriteUnitOfWork wuow(&opCtx);
_db = ctx.db();
- _coll = _db->createCollection(&opCtx, _ns);
+ _coll = _db->createCollection(&opCtx, _nss);
_catalog = _coll->getIndexCatalog();
wuow.commit();
}
@@ -129,24 +129,24 @@ public:
~RefreshEntry() {
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
- Lock::DBLock lk(&opCtx, nsToDatabaseSubstring(_ns), MODE_X);
- OldClientContext ctx(&opCtx, _ns);
+ Lock::DBLock lk(&opCtx, _nss.db(), MODE_X);
+ OldClientContext ctx(&opCtx, _nss.ns());
WriteUnitOfWork wuow(&opCtx);
- _db->dropCollection(&opCtx, _ns).transitional_ignore();
+ _db->dropCollection(&opCtx, _nss).transitional_ignore();
wuow.commit();
}
void run() {
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
- dbtests::WriteContextForTests ctx(&opCtx, _ns);
+ dbtests::WriteContextForTests ctx(&opCtx, _nss.ns());
const std::string indexName = "x_1";
ASSERT_OK(dbtests::createIndexFromSpec(
&opCtx,
- _ns,
- BSON("name" << indexName << "ns" << _ns << "key" << BSON("x" << 1) << "v"
+ _nss.ns(),
+ BSON("name" << indexName << "ns" << _nss.ns() << "key" << BSON("x" << 1) << "v"
<< static_cast<int>(kIndexVersion)
<< "expireAfterSeconds"
<< 5)));
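
The indexcatalogtests hunks above also show the second recurring substitution: where the database name used to be sliced out of the raw string with nsToDatabaseSubstring(_ns) for Lock::DBLock, the NamespaceString's db() accessor is used instead, while _nss.ns() remains for helpers that still expect the full string. A rough standalone sketch of that equivalence, with hypothetical stand-in types rather than the real mongo classes:

    #include <cassert>
    #include <string>
    #include <utility>

    // Stand-in for the legacy helper: slice the database name off "db.coll".
    static std::string nsToDatabaseSubstring(const std::string& ns) {
        return ns.substr(0, ns.find('.'));
    }

    // Stand-in for mongo::NamespaceString; db() yields the same substring directly.
    struct NamespaceString {
        std::string _ns;
        explicit NamespaceString(std::string ns) : _ns(std::move(ns)) {}
        std::string db() const { return _ns.substr(0, _ns.find('.')); }
    };

    int main() {
        const NamespaceString _nss("unittests.indexcatalog");
        assert(nsToDatabaseSubstring(_nss._ns) == _nss.db());  // "unittests" either way
    }
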
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 6b5b7044484..39a32505a57 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -49,6 +49,7 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
} // namespace
static const char* const _ns = "unittests.indexupdate";
+static const NamespaceString _nss = NamespaceString(_ns);
/**
* Test fixture for a write locked test using collection _ns. Includes functionality to
@@ -111,8 +112,8 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
- coll = db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(db->dropCollection(&_opCtx, _nss));
+ coll = db->createCollection(&_opCtx, _nss);
OpDebug* const nullOpDebug = nullptr;
ASSERT_OK(coll->insertDocument(&_opCtx,
@@ -166,8 +167,8 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
- coll = db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(db->dropCollection(&_opCtx, _nss));
+ coll = db->createCollection(&_opCtx, _nss);
OpDebug* const nullOpDebug = nullptr;
ASSERT_OK(coll->insertDocument(&_opCtx,
@@ -223,8 +224,8 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
- coll = db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(db->dropCollection(&_opCtx, _nss));
+ coll = db->createCollection(&_opCtx, _nss);
// Drop all indexes including id index.
coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);
// Insert some documents.
@@ -265,11 +266,11 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+ ASSERT_OK(db->dropCollection(&_opCtx, _nss));
CollectionOptions options;
options.capped = true;
options.cappedSize = 10 * 1024;
- coll = db->createCollection(&_opCtx, _ns, options);
+ coll = db->createCollection(&_opCtx, _nss, options);
coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);
// Insert some documents.
int32_t nDocs = 1000;
diff --git a/src/mongo/dbtests/multikey_paths_test.cpp b/src/mongo/dbtests/multikey_paths_test.cpp
index df0e290fcc5..d12ce069a54 100644
--- a/src/mongo/dbtests/multikey_paths_test.cpp
+++ b/src/mongo/dbtests/multikey_paths_test.cpp
@@ -62,7 +62,7 @@ public:
Database* database = autoDb.getDb();
{
WriteUnitOfWork wuow(_opCtx.get());
- ASSERT(database->createCollection(_opCtx.get(), _nss.ns()));
+ ASSERT(database->createCollection(_opCtx.get(), _nss));
wuow.commit();
}
}
@@ -72,7 +72,7 @@ public:
Database* database = autoDb.getDb();
if (database) {
WriteUnitOfWork wuow(_opCtx.get());
- ASSERT_OK(database->dropCollection(_opCtx.get(), _nss.ns()));
+ ASSERT_OK(database->dropCollection(_opCtx.get(), _nss));
wuow.commit();
}
}
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index 06e50faf063..b4f09c83c52 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -45,22 +45,22 @@ namespace PdfileTests {
namespace Insert {
class Base {
public:
- Base() : _lk(&_opCtx), _context(&_opCtx, ns()) {}
+ Base() : _lk(&_opCtx), _context(&_opCtx, nss().ns()) {}
virtual ~Base() {
if (!collection())
return;
WriteUnitOfWork wunit(&_opCtx);
- _context.db()->dropCollection(&_opCtx, ns()).transitional_ignore();
+ _context.db()->dropCollection(&_opCtx, nss()).transitional_ignore();
wunit.commit();
}
protected:
- const char* ns() {
- return "unittests.pdfiletests.Insert";
+ static NamespaceString nss() {
+ return NamespaceString("unittests.pdfiletests.Insert");
}
Collection* collection() {
- return _context.db()->getCollection(&_opCtx, ns());
+ return _context.db()->getCollection(&_opCtx, nss());
}
const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
@@ -75,8 +75,7 @@ public:
WriteUnitOfWork wunit(&_opCtx);
BSONObj x = BSON("x" << 1);
ASSERT(x["_id"].type() == 0);
- Collection* collection =
- _context.db()->getOrCreateCollection(&_opCtx, NamespaceString(ns()));
+ Collection* collection = _context.db()->getOrCreateCollection(&_opCtx, nss());
OpDebug* const nullOpDebug = nullptr;
ASSERT(!collection->insertDocument(&_opCtx, InsertStatement(x), nullOpDebug, true).isOK());
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index 95a287182dd..26a868c477b 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -159,6 +159,9 @@ public:
static const char* ns() {
return "unittests.QueryStageAnd";
}
+ NamespaceString nss() {
+ return NamespaceString(ns());
+ }
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
@@ -184,7 +187,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -270,7 +273,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -351,7 +354,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -395,7 +398,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -442,7 +445,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -488,7 +491,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -541,7 +544,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -592,7 +595,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -648,7 +651,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -696,7 +699,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -748,7 +751,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -797,7 +800,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -934,7 +937,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -1042,7 +1045,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -1096,7 +1099,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -1135,7 +1138,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -1178,7 +1181,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -1239,7 +1242,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -1291,7 +1294,7 @@ public:
Collection* coll = ctx.getCollection();
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index 8b18df31ef6..6b617565423 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -105,7 +105,7 @@ public:
}
WriteUnitOfWork wuow(&_opCtx);
- database->dropCollection(&_opCtx, nss.ns()).transitional_ignore();
+ database->dropCollection(&_opCtx, nss).transitional_ignore();
wuow.commit();
}
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 31672f2f6af..80770a69e70 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -66,8 +66,8 @@ public:
virtual void setup() {
WriteUnitOfWork wunit(&_opCtx);
- _ctx.db()->dropCollection(&_opCtx, ns()).transitional_ignore();
- _coll = _ctx.db()->createCollection(&_opCtx, ns());
+ _ctx.db()->dropCollection(&_opCtx, nss()).transitional_ignore();
+ _coll = _ctx.db()->createCollection(&_opCtx, nss());
_coll->getIndexCatalog()
->createIndexOnEmptyCollection(&_opCtx,
@@ -230,6 +230,10 @@ public:
return "unittest.QueryStageCount";
}
+ static NamespaceString nss() {
+ return NamespaceString(ns());
+ }
+
protected:
vector<RecordId> _recordIds;
const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
diff --git a/src/mongo/dbtests/query_stage_count_scan.cpp b/src/mongo/dbtests/query_stage_count_scan.cpp
index 5b7babe4a5e..9b6a4f94383 100644
--- a/src/mongo/dbtests/query_stage_count_scan.cpp
+++ b/src/mongo/dbtests/query_stage_count_scan.cpp
@@ -92,7 +92,7 @@ public:
}
const IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
- Collection* collection = db->getCollection(&_opCtx, ns());
+ Collection* collection = db->getCollection(&_opCtx, NamespaceString(ns()));
std::vector<const IndexDescriptor*> indexes;
collection->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes);
return indexes.empty() ? nullptr : indexes[0];
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index bd4bac3a803..4efe707f0cd 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -80,6 +80,9 @@ public:
static const char* ns() {
return "unittests.QueryStageFetch";
}
+ static NamespaceString nss() {
+ return NamespaceString(ns());
+ }
protected:
const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
@@ -96,10 +99,10 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -158,13 +161,13 @@ public:
class FetchStageFilter : public QueryStageFetchBase {
public:
void run() {
- Lock::DBLock lk(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X);
+ Lock::DBLock lk(&_opCtx, nss().db(), MODE_X);
OldClientContext ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index d52bf548a92..d5112419175 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -54,8 +54,8 @@ public:
virtual void setup() {
WriteUnitOfWork wunit(&_opCtx);
- _ctx.db()->dropCollection(&_opCtx, ns()).transitional_ignore();
- _coll = _ctx.db()->createCollection(&_opCtx, ns());
+ _ctx.db()->dropCollection(&_opCtx, nss()).transitional_ignore();
+ _coll = _ctx.db()->createCollection(&_opCtx, nss());
ASSERT_OK(_coll->getIndexCatalog()->createIndexOnEmptyCollection(
&_opCtx,
@@ -140,6 +140,9 @@ public:
static const char* ns() {
return "unittest.QueryStageIxscan";
}
+ static NamespaceString nss() {
+ return NamespaceString(ns());
+ }
protected:
const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 525bfb4059e..05678ce2c64 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -125,6 +125,10 @@ public:
return "unittests.QueryStageMergeSort";
}
+ static NamespaceString nss() {
+ return NamespaceString(ns());
+ }
+
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
@@ -140,10 +144,10 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -205,10 +209,10 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -269,10 +273,10 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -334,10 +338,10 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -403,10 +407,10 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -466,10 +470,10 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -518,10 +522,10 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -635,10 +639,10 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -729,10 +733,10 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -796,10 +800,10 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index de6c5a1a8cb..1c0c9c4de5f 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -219,6 +219,9 @@ public:
static const char* ns() {
return "unittests.QueryStageSort";
}
+ static NamespaceString nss() {
+ return NamespaceString(ns());
+ }
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
@@ -237,10 +240,10 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -259,10 +262,10 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -290,10 +293,10 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -315,10 +318,10 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -424,10 +427,10 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
@@ -522,10 +525,10 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wuow.commit();
}
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 880d94913db..c981c80a888 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -67,11 +67,11 @@ public:
{
WriteUnitOfWork wunit(&_opCtx);
_database = _context.db();
- _collection = _database->getCollection(&_opCtx, ns());
+ _collection = _database->getCollection(&_opCtx, nss());
if (_collection) {
- _database->dropCollection(&_opCtx, ns()).transitional_ignore();
+ _database->dropCollection(&_opCtx, nss()).transitional_ignore();
}
- _collection = _database->createCollection(&_opCtx, ns());
+ _collection = _database->createCollection(&_opCtx, nss());
wunit.commit();
}
@@ -81,7 +81,7 @@ public:
~Base() {
try {
WriteUnitOfWork wunit(&_opCtx);
- uassertStatusOK(_database->dropCollection(&_opCtx, ns()));
+ uassertStatusOK(_database->dropCollection(&_opCtx, nss()));
wunit.commit();
} catch (...) {
FAIL("Exception while cleaning up collection");
@@ -92,6 +92,9 @@ protected:
static const char* ns() {
return "unittests.querytests";
}
+ static NamespaceString nss() {
+ return NamespaceString(ns());
+ }
void addIndex(const IndexSpec& spec) {
BSONObjBuilder builder(spec.toBSON());
@@ -216,11 +219,11 @@ public:
{
WriteUnitOfWork wunit(&_opCtx);
Database* db = ctx.db();
- if (db->getCollection(&_opCtx, ns())) {
+ if (db->getCollection(&_opCtx, nss())) {
_collection = NULL;
- db->dropCollection(&_opCtx, ns()).transitional_ignore();
+ db->dropCollection(&_opCtx, nss()).transitional_ignore();
}
- _collection = db->createCollection(&_opCtx, ns(), CollectionOptions(), false);
+ _collection = db->createCollection(&_opCtx, nss(), CollectionOptions(), false);
wunit.commit();
}
ASSERT(_collection);
@@ -1286,6 +1289,9 @@ public:
const char* ns() {
return _ns.c_str();
}
+ NamespaceString nss() {
+ return NamespaceString(ns());
+ }
private:
string _ns;
@@ -1694,7 +1700,7 @@ public:
Lock::GlobalWrite lk(&_opCtx);
OldClientContext context(&_opCtx, ns());
WriteUnitOfWork wunit(&_opCtx);
- context.db()->createCollection(&_opCtx, ns(), coll_opts, false);
+ context.db()->createCollection(&_opCtx, nss(), coll_opts, false);
wunit.commit();
}
insert(ns(), BSON("a" << 1));
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index fd713ae161a..f008ab9938d 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -139,9 +139,9 @@ public:
dbtests::WriteContextForTests ctx(&_opCtx, ns());
WriteUnitOfWork wuow(&_opCtx);
- Collection* c = ctx.db()->getCollection(&_opCtx, ns());
+ Collection* c = ctx.db()->getCollection(&_opCtx, nss());
if (!c) {
- c = ctx.db()->createCollection(&_opCtx, ns());
+ c = ctx.db()->createCollection(&_opCtx, nss());
}
ASSERT(c->getIndexCatalog()->haveIdIndex(&_opCtx));
@@ -174,6 +174,9 @@ protected:
static const char* ns() {
return "unittests.repltests";
}
+ static NamespaceString nss() {
+ return NamespaceString(ns());
+ }
static const char* cllNS() {
return "local.oplog.rs";
}
@@ -204,10 +207,10 @@ protected:
Lock::GlobalWrite lk(&_opCtx);
OldClientContext ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wunit(&_opCtx);
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
wunit.commit();
}
@@ -255,12 +258,13 @@ protected:
void printAll(const char* ns) {
Lock::GlobalWrite lk(&_opCtx);
OldClientContext ctx(&_opCtx, ns);
+ NamespaceString nss(ns);
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns);
+ Collection* coll = db->getCollection(&_opCtx, nss);
if (!coll) {
WriteUnitOfWork wunit(&_opCtx);
- coll = db->createCollection(&_opCtx, ns);
+ coll = db->createCollection(&_opCtx, nss);
wunit.commit();
}
@@ -273,13 +277,14 @@ protected:
// These deletes don't get logged.
void deleteAll(const char* ns) const {
::mongo::writeConflictRetry(&_opCtx, "deleteAll", ns, [&] {
+ NamespaceString nss(ns);
Lock::GlobalWrite lk(&_opCtx);
OldClientContext ctx(&_opCtx, ns);
WriteUnitOfWork wunit(&_opCtx);
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns);
+ Collection* coll = db->getCollection(&_opCtx, nss);
if (!coll) {
- coll = db->createCollection(&_opCtx, ns);
+ coll = db->createCollection(&_opCtx, nss);
}
ASSERT_OK(coll->truncate(&_opCtx));
@@ -291,9 +296,9 @@ protected:
OldClientContext ctx(&_opCtx, ns());
WriteUnitOfWork wunit(&_opCtx);
Database* db = ctx.db();
- Collection* coll = db->getCollection(&_opCtx, ns());
+ Collection* coll = db->getCollection(&_opCtx, nss());
if (!coll) {
- coll = db->createCollection(&_opCtx, ns());
+ coll = db->createCollection(&_opCtx, nss());
}
OpDebug* const nullOpDebug = nullptr;
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index aa90d235361..c91b941632d 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -225,7 +225,7 @@ public:
{
WriteUnitOfWork uow(&opCtx);
ASSERT(collectionExists(&opCtx, &ctx, ns));
- ASSERT_OK(ctx.db()->dropCollection(&opCtx, ns));
+ ASSERT_OK(ctx.db()->dropCollection(&opCtx, NamespaceString(ns)));
ASSERT(!collectionExists(&opCtx, &ctx, ns));
if (!rollback) {
uow.commit();
diff --git a/src/mongo/dbtests/storage_timestamp_tests.cpp b/src/mongo/dbtests/storage_timestamp_tests.cpp
index d0c1139c437..d916473f0e4 100644
--- a/src/mongo/dbtests/storage_timestamp_tests.cpp
+++ b/src/mongo/dbtests/storage_timestamp_tests.cpp
@@ -237,7 +237,7 @@ public:
AutoGetOrCreateDb dbRaii(_opCtx, nss.db(), LockMode::MODE_X);
WriteUnitOfWork wunit(_opCtx);
- invariant(dbRaii.getDb()->createCollection(_opCtx, nss.ns()));
+ invariant(dbRaii.getDb()->createCollection(_opCtx, nss));
wunit.commit();
});
}
@@ -310,7 +310,7 @@ public:
NamespaceString ns,
const Timestamp& ts) {
OneOffRead oor(_opCtx, ts);
- return kvCatalog->getMetaData(_opCtx, ns.ns());
+ return kvCatalog->getMetaData(_opCtx, ns);
}
StatusWith<BSONObj> doAtomicApplyOps(const std::string& dbName,
@@ -468,7 +468,7 @@ public:
// getAllIdents() actually looks in the RecordStore for a list of all idents, and is thus
// versioned by timestamp. We can expect a namespace to have a consistent ident across
// timestamps, provided the collection does not get renamed.
- auto expectedIdent = kvCatalog->getCollectionIdent(nss.ns());
+ auto expectedIdent = kvCatalog->getCollectionIdent(nss);
auto idents = kvCatalog->getAllIdents(_opCtx);
auto found = std::find(idents.begin(), idents.end(), expectedIdent);
@@ -1795,7 +1795,7 @@ public:
// Drop/rename `kvDropDatabase`. `system.profile` does not get dropped/renamed.
WriteUnitOfWork wuow(_opCtx);
Database* db = coll.getDb();
- ASSERT_OK(db->dropCollection(_opCtx, nss.ns()));
+ ASSERT_OK(db->dropCollection(_opCtx, nss));
wuow.commit();
}
@@ -2684,7 +2684,7 @@ public:
auto kvStorageEngine =
dynamic_cast<KVStorageEngine*>(_opCtx->getServiceContext()->getStorageEngine());
KVCatalog* kvCatalog = kvStorageEngine->getCatalog();
- auto indexIdent = kvCatalog->getIndexIdent(_opCtx, nss.ns(), "user_1_db_1");
+ auto indexIdent = kvCatalog->getIndexIdent(_opCtx, nss, "user_1_db_1");
assertIdentsMissingAtTimestamp(kvCatalog, "", indexIdent, pastTs);
assertIdentsMissingAtTimestamp(kvCatalog, "", indexIdent, presentTs);
assertIdentsMissingAtTimestamp(kvCatalog, "", indexIdent, futureTs);
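
In the storage_timestamp_tests hunks above, the KVCatalog lookups (getMetaData, getCollectionIdent, getIndexIdent) now receive the NamespaceString itself rather than nss.ns(). The following is only a rough sketch of keying a catalog by NamespaceString instead of a flat string; the types, the ident value, and the method bodies are illustrative assumptions, not the real KVCatalog declaration:

    #include <map>
    #include <string>
    #include <utility>

    // Stand-in for mongo::NamespaceString, with ordering so it can key a std::map.
    struct NamespaceString {
        std::string _ns;
        explicit NamespaceString(std::string ns) : _ns(std::move(ns)) {}
        bool operator<(const NamespaceString& o) const { return _ns < o._ns; }
    };

    // Sketch only: ident lookups keyed by NamespaceString, so call sites pass the
    // nss directly instead of flattening it to nss.ns() first.
    class KVCatalogSketch {
    public:
        std::string getCollectionIdent(const NamespaceString& nss) const {
            return _idents.at(nss);
        }
        void put(const NamespaceString& nss, std::string ident) {
            _idents[nss] = std::move(ident);
        }
    private:
        std::map<NamespaceString, std::string> _idents;
    };

    int main() {
        KVCatalogSketch catalog;
        // "collection-0--123" is a made-up ident for illustration.
        catalog.put(NamespaceString("unittests.kvDropDatabase"), "collection-0--123");
        return catalog.getCollectionIdent(NamespaceString("unittests.kvDropDatabase")).empty();
    }
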
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp
index 8ebebbb676a..d49fac99aae 100644
--- a/src/mongo/dbtests/validate_tests.cpp
+++ b/src/mongo/dbtests/validate_tests.cpp
@@ -157,8 +157,8 @@ public:
{
OpDebug* const nullOpDebug = nullptr;
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
- coll = _db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
+ coll = _db->createCollection(&_opCtx, _nss);
ASSERT_OK(coll->insertDocument(
&_opCtx, InsertStatement(BSON("_id" << 1)), nullOpDebug, true));
@@ -218,8 +218,8 @@ public:
{
OpDebug* const nullOpDebug = nullptr;
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
- coll = _db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
+ coll = _db->createCollection(&_opCtx, _nss);
ASSERT_OK(coll->insertDocument(
&_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
id1 = coll->getCursor(&_opCtx)->next()->id;
@@ -292,8 +292,8 @@ public:
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
- coll = _db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
+ coll = _db->createCollection(&_opCtx, _nss);
ASSERT_OK(coll->insertDocument(
&_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
id1 = coll->getCursor(&_opCtx)->next()->id;
@@ -358,8 +358,8 @@ public:
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
- coll = _db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
+ coll = _db->createCollection(&_opCtx, _nss);
ASSERT_OK(coll->insertDocument(
&_opCtx, InsertStatement(BSON("_id" << 1)), nullOpDebug, true));
@@ -446,8 +446,8 @@ public:
auto doc3 = BSON("_id" << 3 << "a" << BSON_ARRAY(BSON("c" << 1)));
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
- coll = _db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
+ coll = _db->createCollection(&_opCtx, _nss);
ASSERT_OK(coll->insertDocument(&_opCtx, InsertStatement(doc1), nullOpDebug, true));
@@ -526,8 +526,8 @@ public:
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
- coll = _db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
+ coll = _db->createCollection(&_opCtx, _nss);
ASSERT_OK(coll->insertDocument(
&_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
@@ -594,8 +594,8 @@ public:
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
- coll = _db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
+ coll = _db->createCollection(&_opCtx, _nss);
ASSERT_OK(coll->insertDocument(
&_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
@@ -668,8 +668,8 @@ public:
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
- coll = _db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
+ coll = _db->createCollection(&_opCtx, _nss);
ASSERT_OK(
coll->insertDocument(&_opCtx,
InsertStatement(BSON("_id" << 1 << "x" << 1 << "a" << 2)),
@@ -736,8 +736,8 @@ public:
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
- coll = _db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
+ coll = _db->createCollection(&_opCtx, _nss);
ASSERT_OK(
coll->insertDocument(&_opCtx,
@@ -827,8 +827,8 @@ public:
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
- coll = _db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
+ coll = _db->createCollection(&_opCtx, _nss);
ASSERT_OK(coll->insertDocument(
&_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
@@ -903,8 +903,8 @@ public:
RecordId id1;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
- coll = _db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
+ coll = _db->createCollection(&_opCtx, _nss);
ASSERT_OK(coll->insertDocument(
&_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
@@ -958,8 +958,8 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
- coll = _db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
+ coll = _db->createCollection(&_opCtx, _nss);
wunit.commit();
}
@@ -1069,8 +1069,8 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(_db->dropCollection(&_opCtx, _ns));
- coll = _db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
+ coll = _db->createCollection(&_opCtx, _nss);
wunit.commit();
}