author    Daniel Gottlieb <daniel.gottlieb@mongodb.com>    2019-11-26 04:48:50 +0000
committer evergreen <evergreen@mongodb.com>    2019-11-26 04:48:50 +0000
commit    d471957fc37ef6cafe9ffeda3e231cdc871c3ce3 (patch)
tree      3435c9d9420243e350da0f3dfbeecb81afbcd264
parent    8b0f534a706005d366e200ee56af5c76217656b2 (diff)
download  mongo-d471957fc37ef6cafe9ffeda3e231cdc871c3ce3.tar.gz
SERVER-43859: Take MODE_IX locks for collection creation.
Two concurrent storage transactions can now create collections with the same collection name. These transactions will conflict at commit time; the first committer wins and registers its collection in the global catalog. The losing transactions bubble up a WriteConflictException. Top-level callers that should fail when the collection already exists must now perform that check themselves and fail with a NamespaceExists error code; previously, those callers could rely on lower-level code returning the NamespaceExists error. Callers that were implicitly creating a collection may retry the operation, using the now-registered collection. These transaction-local collections (UncommittedCollections) are returned by any CollectionCatalog::lookup* call.
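
For orientation (not part of the patch itself), below is a minimal sketch of how a top-level creation path is expected to behave under the new locking scheme. It mirrors the _createCollection() change in src/mongo/db/catalog/create_collection.cpp in this diff; the function name createCollectionSketch and the error text are illustrative only.

    // Illustrative sketch only; follows the APIs as changed by this patch.
    Status createCollectionSketch(OperationContext* opCtx, const NamespaceString& nss) {
        return writeConflictRetry(opCtx, "create", nss.ns(), [&] {
            AutoGetOrCreateDb autoDb(opCtx, nss.db(), MODE_IX);
            Lock::CollectionLock collLock(opCtx, nss, MODE_IX);  // was MODE_X before this patch

            // Lookups now also return uncommitted, transaction-local collections
            // (UncommittedCollections), so a caller that must fail on an existing
            // namespace performs this check itself.
            if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss)) {
                return Status(ErrorCodes::NamespaceExists,
                              str::stream() << "Collection already exists. NS: " << nss);
            }

            WriteUnitOfWork wuow(opCtx);
            invariant(autoDb.getDb()->createCollection(opCtx, nss));
            // If a concurrent transaction committed the same namespace first, commit
            // throws WriteConflictException; writeConflictRetry re-runs this lambda,
            // and the retry reports NamespaceExists via the check above.
            wuow.commit();
            return Status::OK();
        });
    }
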
-rw-r--r--src/mongo/db/SConscript4
-rw-r--r--src/mongo/db/catalog/SConscript19
-rw-r--r--src/mongo/db/catalog/capped_utils.cpp10
-rw-r--r--src/mongo/db/catalog/catalog_control.cpp5
-rw-r--r--src/mongo/db/catalog/coll_mod.cpp2
-rw-r--r--src/mongo/db/catalog/collection_catalog.cpp50
-rw-r--r--src/mongo/db/catalog/collection_catalog.h16
-rw-r--r--src/mongo/db/catalog/collection_catalog_helper.cpp6
-rw-r--r--src/mongo/db/catalog/collection_catalog_test.cpp106
-rw-r--r--src/mongo/db/catalog/collection_compact.cpp2
-rw-r--r--src/mongo/db/catalog/create_collection.cpp22
-rw-r--r--src/mongo/db/catalog/create_collection_test.cpp8
-rw-r--r--src/mongo/db/catalog/database_impl.cpp73
-rw-r--r--src/mongo/db/catalog/database_test.cpp65
-rw-r--r--src/mongo/db/catalog/drop_collection.cpp6
-rw-r--r--src/mongo/db/catalog/drop_database.cpp2
-rw-r--r--src/mongo/db/catalog/index_build_block.cpp4
-rw-r--r--src/mongo/db/catalog/index_builds_manager.cpp2
-rw-r--r--src/mongo/db/catalog/index_catalog_impl.cpp4
-rw-r--r--src/mongo/db/catalog/multi_index_block.cpp5
-rw-r--r--src/mongo/db/catalog/rename_collection.cpp29
-rw-r--r--src/mongo/db/catalog/rename_collection_test.cpp23
-rw-r--r--src/mongo/db/catalog/uncommitted_collections.cpp140
-rw-r--r--src/mongo/db/catalog/uncommitted_collections.h74
-rw-r--r--src/mongo/db/catalog/validate_state.cpp6
-rw-r--r--src/mongo/db/catalog_raii.cpp4
-rw-r--r--src/mongo/db/cloner.cpp15
-rw-r--r--src/mongo/db/commands/count_cmd.cpp2
-rw-r--r--src/mongo/db/commands/create_indexes.cpp12
-rw-r--r--src/mongo/db/commands/dbcheck.cpp3
-rw-r--r--src/mongo/db/commands/dbhash.cpp3
-rw-r--r--src/mongo/db/commands/distinct.cpp2
-rw-r--r--src/mongo/db/commands/find_and_modify.cpp8
-rw-r--r--src/mongo/db/commands/find_cmd.cpp2
-rw-r--r--src/mongo/db/commands/list_collections.cpp2
-rw-r--r--src/mongo/db/commands/list_indexes.cpp2
-rw-r--r--src/mongo/db/commands/mr.cpp16
-rw-r--r--src/mongo/db/commands/mr_test.cpp5
-rw-r--r--src/mongo/db/commands/oplog_application_checks.cpp2
-rw-r--r--src/mongo/db/commands/run_aggregate.cpp5
-rw-r--r--src/mongo/db/commands/test_commands.cpp3
-rw-r--r--src/mongo/db/concurrency/d_concurrency.cpp4
-rw-r--r--src/mongo/db/db.cpp6
-rw-r--r--src/mongo/db/db_raii.cpp2
-rw-r--r--src/mongo/db/dbhelpers.cpp7
-rw-r--r--src/mongo/db/exec/requires_collection_stage.cpp4
-rw-r--r--src/mongo/db/free_mon/free_mon_mongod.cpp2
-rw-r--r--src/mongo/db/index_build_entry_helpers.cpp2
-rw-r--r--src/mongo/db/index_builds_coordinator.cpp33
-rw-r--r--src/mongo/db/introspect.cpp6
-rw-r--r--src/mongo/db/matcher/expression_text.cpp3
-rw-r--r--src/mongo/db/op_observer_impl.cpp2
-rw-r--r--src/mongo/db/ops/update.cpp5
-rw-r--r--src/mongo/db/ops/write_ops_exec.cpp5
-rw-r--r--src/mongo/db/pipeline/document_source_change_stream_test.cpp59
-rw-r--r--src/mongo/db/pipeline/document_source_cursor.cpp2
-rw-r--r--src/mongo/db/pipeline/process_interface_standalone.cpp3
-rw-r--r--src/mongo/db/query/collection_query_info.cpp15
-rw-r--r--src/mongo/db/query/plan_executor_impl.cpp2
-rw-r--r--src/mongo/db/query/query_request.cpp2
-rw-r--r--src/mongo/db/query/query_request_test.cpp4
-rw-r--r--src/mongo/db/repair_database.cpp4
-rw-r--r--src/mongo/db/repair_database_and_check_version.cpp15
-rw-r--r--src/mongo/db/repl/apply_ops.cpp5
-rw-r--r--src/mongo/db/repl/dbcheck.cpp7
-rw-r--r--src/mongo/db/repl/oplog.cpp14
-rw-r--r--src/mongo/db/repl/oplog_applier_impl.cpp4
-rw-r--r--src/mongo/db/repl/oplog_interface_local.cpp2
-rw-r--r--src/mongo/db/repl/replication_recovery.cpp2
-rw-r--r--src/mongo/db/repl/rollback_impl.cpp6
-rw-r--r--src/mongo/db/repl/rs_rollback.cpp32
-rw-r--r--src/mongo/db/repl/rs_rollback_test.cpp6
-rw-r--r--src/mongo/db/repl/storage_interface_impl.cpp13
-rw-r--r--src/mongo/db/s/implicit_create_collection.cpp2
-rw-r--r--src/mongo/db/s/migration_destination_manager.cpp4
-rw-r--r--src/mongo/db/s/set_shard_version_command.cpp2
-rw-r--r--src/mongo/db/s/shardsvr_rename_collection.cpp2
-rw-r--r--src/mongo/db/storage/SConscript3
-rw-r--r--src/mongo/db/storage/durable_catalog_impl.cpp16
-rw-r--r--src/mongo/db/storage/kv/durable_catalog_feature_tracker_test.cpp6
-rw-r--r--src/mongo/db/storage/kv/durable_catalog_test.cpp6
-rw-r--r--src/mongo/db/storage/oplog_cap_maintainer_thread.cpp4
-rw-r--r--src/mongo/db/storage/recovery_unit.cpp16
-rw-r--r--src/mongo/db/storage/recovery_unit.h11
-rw-r--r--src/mongo/db/storage/storage_engine_impl.cpp6
-rw-r--r--src/mongo/db/storage/storage_engine_test_fixture.h17
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp10
-rw-r--r--src/mongo/db/storage/write_unit_of_work.cpp2
-rw-r--r--src/mongo/db/storage/write_unit_of_work.h1
-rw-r--r--src/mongo/db/system_index.cpp4
-rw-r--r--src/mongo/db/transaction_participant.cpp5
-rw-r--r--src/mongo/db/transaction_participant_test.cpp4
-rw-r--r--src/mongo/db/ttl.cpp2
-rw-r--r--src/mongo/db/views/durable_view_catalog.cpp6
-rw-r--r--src/mongo/dbtests/SConscript1
-rw-r--r--src/mongo/dbtests/catalogtests.cpp129
-rw-r--r--src/mongo/dbtests/counttests.cpp3
-rw-r--r--src/mongo/dbtests/dbtests.cpp3
-rw-r--r--src/mongo/dbtests/dbtests.h2
-rw-r--r--src/mongo/dbtests/indexupdatetests.cpp265
-rw-r--r--src/mongo/dbtests/pdfiletests.cpp5
-rw-r--r--src/mongo/dbtests/plan_executor_invalidation_test.cpp14
-rw-r--r--src/mongo/dbtests/query_plan_executor.cpp6
-rw-r--r--src/mongo/dbtests/query_stage_count_scan.cpp4
-rw-r--r--src/mongo/dbtests/query_stage_fetch.cpp6
-rw-r--r--src/mongo/dbtests/query_stage_merge_sort.cpp30
-rw-r--r--src/mongo/dbtests/query_stage_sort.cpp18
-rw-r--r--src/mongo/dbtests/query_stage_update.cpp3
-rw-r--r--src/mongo/dbtests/querytests.cpp5
-rw-r--r--src/mongo/dbtests/repltests.cpp11
-rw-r--r--src/mongo/dbtests/rollbacktests.cpp74
-rw-r--r--src/mongo/dbtests/storage_timestamp_tests.cpp5
112 files changed, 1116 insertions, 644 deletions
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index b35ec2bd3a8..6d5be85cbcc 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -510,8 +510,8 @@ env.Library(
'stats/top',
],
LIBDEPS_PRIVATE=[
- "catalog/database_holder",
- "$BUILD_DIR/mongo/idl/server_parameter",
+ '$BUILD_DIR/mongo/idl/server_parameter',
+ 'catalog/database_holder',
],
)
diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript
index 642a0ffa115..c122e03a271 100644
--- a/src/mongo/db/catalog/SConscript
+++ b/src/mongo/db/catalog/SConscript
@@ -225,10 +225,6 @@ env.Library(
env.Idlc('multi_index_block.idl')[0],
],
LIBDEPS=[
- 'collection',
- 'index_build_block',
- 'index_timestamp_helper',
- 'index_build_block',
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/db/audit',
'$BUILD_DIR/mongo/db/background',
@@ -239,11 +235,15 @@ env.Library(
'$BUILD_DIR/mongo/db/storage/write_unit_of_work',
'$BUILD_DIR/mongo/util/fail_point',
'$BUILD_DIR/mongo/util/progress_meter',
+ 'collection',
+ 'index_build_block',
+ 'index_timestamp_helper',
],
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/db/index/index_build_interceptor',
'$BUILD_DIR/mongo/db/storage/storage_options',
'$BUILD_DIR/mongo/idl/server_parameter',
+ 'collection_catalog',
]
)
@@ -251,14 +251,16 @@ env.Library(
target='collection_catalog',
source=[
'collection_catalog.cpp',
+ 'uncommitted_collections.cpp',
],
LIBDEPS=[
- 'collection',
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/db/namespace_string',
'$BUILD_DIR/mongo/db/service_context',
+ 'collection',
],
LIBDEPS_PRIVATE=[
+ '$BUILD_DIR/mongo/db/concurrency/write_conflict_exception',
'$BUILD_DIR/mongo/idl/server_parameter',
]
)
@@ -343,9 +345,7 @@ env.Library(
'$BUILD_DIR/mongo/db/views/views_mongod',
],
LIBDEPS_PRIVATE=[
- 'index_build_block',
- 'throttle_cursor',
- 'validate_state',
+ '$BUILD_DIR/mongo/db/catalog/collection_catalog',
'$BUILD_DIR/mongo/db/catalog/collection_catalog_helper',
'$BUILD_DIR/mongo/db/commands/server_status_core',
'$BUILD_DIR/mongo/db/index/index_build_interceptor',
@@ -354,6 +354,9 @@ env.Library(
'$BUILD_DIR/mongo/db/repl/repl_settings',
'$BUILD_DIR/mongo/db/storage/storage_engine_common',
'$BUILD_DIR/mongo/db/transaction',
+ 'index_build_block',
+ 'throttle_cursor',
+ 'validate_state',
],
)
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index 05692e11536..6aa842def14 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -72,7 +72,7 @@ Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionNam
uassert(ErrorCodes::NamespaceNotFound, "no such database", db);
Collection* collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(collectionName);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, collectionName);
uassert(ErrorCodes::CommandNotSupportedOnView,
str::stream() << "emptycapped not supported on view: " << collectionName.ns(),
collection || !ViewCatalog::get(db)->lookup(opCtx, collectionName.ns()));
@@ -118,7 +118,8 @@ void cloneCollectionAsCapped(OperationContext* opCtx,
NamespaceString fromNss(db->name(), shortFrom);
NamespaceString toNss(db->name(), shortTo);
- Collection* fromCollection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(fromNss);
+ Collection* fromCollection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, fromNss);
if (!fromCollection) {
uassert(ErrorCodes::CommandNotSupportedOnView,
str::stream() << "cloneCollectionAsCapped not supported for views: " << fromNss,
@@ -136,7 +137,7 @@ void cloneCollectionAsCapped(OperationContext* opCtx,
uassert(ErrorCodes::NamespaceExists,
str::stream() << "cloneCollectionAsCapped failed - destination collection " << toNss
<< " already exists. source collection: " << fromNss,
- !CollectionCatalog::get(opCtx).lookupCollectionByNamespace(toNss));
+ !CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, toNss));
// create new collection
{
@@ -156,7 +157,8 @@ void cloneCollectionAsCapped(OperationContext* opCtx,
uassertStatusOK(createCollection(opCtx, toNss.db().toString(), cmd.done()));
}
- Collection* toCollection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(toNss);
+ Collection* toCollection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, toNss);
invariant(toCollection); // we created above
// how much data to ignore because it won't fit anyway
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index bbcea9f741c..351d709b5d0 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -145,7 +145,7 @@ void openCatalog(OperationContext* opCtx, const MinVisibleTimestampMap& minVisib
for (const auto& entry : nsToIndexNameObjMap) {
NamespaceString collNss(entry.first);
- auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(collNss);
+ auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, collNss);
invariant(collection, str::stream() << "couldn't get collection " << collNss.toString());
for (const auto& indexName : entry.second.first) {
@@ -175,7 +175,8 @@ void openCatalog(OperationContext* opCtx, const MinVisibleTimestampMap& minVisib
for (auto&& collNss :
CollectionCatalog::get(opCtx).getAllCollectionNamesFromDb(opCtx, dbName)) {
// Note that the collection name already includes the database component.
- auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(collNss);
+ auto collection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, collNss);
invariant(collection,
str::stream()
<< "failed to get valid collection pointer for namespace " << collNss);
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 38c0a9b4aa3..c9018e46350 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -267,7 +267,7 @@ Status _collModInternal(OperationContext* opCtx,
AutoGetDb autoDb(opCtx, dbName, MODE_X);
Database* const db = autoDb.getDb();
Collection* coll =
- db ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss) : nullptr;
+ db ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss) : nullptr;
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&hangAfterDatabaseLock, opCtx, "hangAfterDatabaseLock", []() {}, false, nss);
diff --git a/src/mongo/db/catalog/collection_catalog.cpp b/src/mongo/db/catalog/collection_catalog.cpp
index ce1ff844149..1f7a18bc986 100644
--- a/src/mongo/db/catalog/collection_catalog.cpp
+++ b/src/mongo/db/catalog/collection_catalog.cpp
@@ -33,7 +33,9 @@
#include "collection_catalog.h"
#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/uncommitted_collections.h"
#include "mongo/db/concurrency/lock_manager_defs.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/storage/recovery_unit.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
@@ -56,7 +58,7 @@ public:
}
void rollback() override {
- _catalog->registerCollection(_uuid, std::move(_coll));
+ _catalog->registerCollection(_uuid, std::move(&_coll));
}
private:
@@ -233,7 +235,12 @@ void CollectionCatalog::onOpenCatalog(OperationContext* opCtx) {
_shadowCatalog.reset();
}
-Collection* CollectionCatalog::lookupCollectionByUUID(CollectionUUID uuid) const {
+Collection* CollectionCatalog::lookupCollectionByUUID(OperationContext* opCtx,
+ CollectionUUID uuid) const {
+ if (auto coll = UncommittedCollections::getForTxn(opCtx, uuid)) {
+ return coll;
+ }
+
stdx::lock_guard<Latch> lock(_catalogLock);
return _lookupCollectionByUUID(lock, uuid);
}
@@ -243,13 +250,23 @@ Collection* CollectionCatalog::_lookupCollectionByUUID(WithLock, CollectionUUID
return foundIt == _catalog.end() ? nullptr : foundIt->second.get();
}
-Collection* CollectionCatalog::lookupCollectionByNamespace(const NamespaceString& nss) const {
+Collection* CollectionCatalog::lookupCollectionByNamespace(OperationContext* opCtx,
+ const NamespaceString& nss) const {
+ if (auto coll = UncommittedCollections::getForTxn(opCtx, nss)) {
+ return coll;
+ }
+
stdx::lock_guard<Latch> lock(_catalogLock);
auto it = _collections.find(nss);
return it == _collections.end() ? nullptr : it->second;
}
-boost::optional<NamespaceString> CollectionCatalog::lookupNSSByUUID(CollectionUUID uuid) const {
+boost::optional<NamespaceString> CollectionCatalog::lookupNSSByUUID(OperationContext* opCtx,
+ CollectionUUID uuid) const {
+ if (auto coll = UncommittedCollections::getForTxn(opCtx, uuid)) {
+ return coll->ns();
+ }
+
stdx::lock_guard<Latch> lock(_catalogLock);
auto foundIt = _catalog.find(uuid);
if (foundIt != _catalog.end()) {
@@ -270,7 +287,11 @@ boost::optional<NamespaceString> CollectionCatalog::lookupNSSByUUID(CollectionUU
}
boost::optional<CollectionUUID> CollectionCatalog::lookupUUIDByNSS(
- const NamespaceString& nss) const {
+ OperationContext* opCtx, const NamespaceString& nss) const {
+ if (auto coll = UncommittedCollections::getForTxn(opCtx, nss)) {
+ return coll->uuid();
+ }
+
stdx::lock_guard<Latch> lock(_catalogLock);
auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
auto it = _orderedCollections.lower_bound(std::make_pair(nss.db().toString(), minUuid));
@@ -286,7 +307,8 @@ boost::optional<CollectionUUID> CollectionCatalog::lookupUUIDByNSS(
return boost::none;
}
-NamespaceString CollectionCatalog::resolveNamespaceStringOrUUID(NamespaceStringOrUUID nsOrUUID) {
+NamespaceString CollectionCatalog::resolveNamespaceStringOrUUID(OperationContext* opCtx,
+ NamespaceStringOrUUID nsOrUUID) {
if (auto& nss = nsOrUUID.nss()) {
uassert(ErrorCodes::InvalidNamespace,
str::stream() << "Namespace " << *nss << " is not a valid collection name",
@@ -294,7 +316,7 @@ NamespaceString CollectionCatalog::resolveNamespaceStringOrUUID(NamespaceStringO
return std::move(*nss);
}
- auto resolvedNss = lookupNSSByUUID(*nsOrUUID.uuid());
+ auto resolvedNss = lookupNSSByUUID(opCtx, *nsOrUUID.uuid());
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Unable to resolve " << nsOrUUID.toString(),
@@ -365,21 +387,25 @@ std::vector<std::string> CollectionCatalog::getAllDbNames() const {
return ret;
}
-void CollectionCatalog::registerCollection(CollectionUUID uuid, std::unique_ptr<Collection> coll) {
+void CollectionCatalog::registerCollection(CollectionUUID uuid, std::unique_ptr<Collection>* coll) {
+ auto ns = (*coll)->ns();
stdx::lock_guard<Latch> lock(_catalogLock);
+ if (_collections.find(ns) != _collections.end()) {
+ log() << "Conflicted creating a collection. ns: " << (*coll)->ns() << " ("
+ << (*coll)->uuid() << ").";
+ throw WriteConflictException();
+ }
- LOG(1) << "Registering collection " << coll->ns() << " with UUID " << uuid;
+ LOG(1) << "Registering collection " << ns << " with UUID " << uuid;
- auto ns = coll->ns();
auto dbName = ns.db().toString();
auto dbIdPair = std::make_pair(dbName, uuid);
// Make sure no entry related to this uuid.
invariant(_catalog.find(uuid) == _catalog.end());
- invariant(_collections.find(ns) == _collections.end());
invariant(_orderedCollections.find(dbIdPair) == _orderedCollections.end());
- _catalog[uuid] = std::move(coll);
+ _catalog[uuid] = std::move(*coll);
_collections[ns] = _catalog[uuid].get();
_orderedCollections[dbIdPair] = _catalog[uuid].get();
diff --git a/src/mongo/db/catalog/collection_catalog.h b/src/mongo/db/catalog/collection_catalog.h
index c27022c26d9..224d4e11293 100644
--- a/src/mongo/db/catalog/collection_catalog.h
+++ b/src/mongo/db/catalog/collection_catalog.h
@@ -116,7 +116,7 @@ public:
/**
* Register the collection with `uuid`.
*/
- void registerCollection(CollectionUUID uuid, std::unique_ptr<Collection> collection);
+ void registerCollection(CollectionUUID uuid, std::unique_ptr<Collection>* collection);
/**
* Deregister the collection.
@@ -141,7 +141,7 @@ public:
*
* Returns nullptr if the 'uuid' is not known.
*/
- Collection* lookupCollectionByUUID(CollectionUUID uuid) const;
+ Collection* lookupCollectionByUUID(OperationContext* opCtx, CollectionUUID uuid) const;
/**
* This function gets the Collection pointer that corresponds to the NamespaceString.
@@ -150,27 +150,31 @@ public:
*
* Returns nullptr if the namespace is unknown.
*/
- Collection* lookupCollectionByNamespace(const NamespaceString& nss) const;
+ Collection* lookupCollectionByNamespace(OperationContext* opCtx,
+ const NamespaceString& nss) const;
/**
* This function gets the NamespaceString from the collection catalog entry that
* corresponds to CollectionUUID uuid. If no collection exists with the uuid, return
* boost::none. See onCloseCatalog/onOpenCatalog for more info.
*/
- boost::optional<NamespaceString> lookupNSSByUUID(CollectionUUID uuid) const;
+ boost::optional<NamespaceString> lookupNSSByUUID(OperationContext* opCtx,
+ CollectionUUID uuid) const;
/**
* Returns the UUID if `nss` exists in CollectionCatalog. The time complexity of
* this function is linear to the number of collections in `nss.db()`.
*/
- boost::optional<CollectionUUID> lookupUUIDByNSS(const NamespaceString& nss) const;
+ boost::optional<CollectionUUID> lookupUUIDByNSS(OperationContext* opCtx,
+ const NamespaceString& nss) const;
/**
* Without acquiring any locks resolves the given NamespaceStringOrUUID to an actual namespace.
* Throws NamespaceNotFound if the collection UUID cannot be resolved to a name, or if the UUID
* can be resolved, but the resulting collection is in the wrong database.
*/
- NamespaceString resolveNamespaceStringOrUUID(NamespaceStringOrUUID nsOrUUID);
+ NamespaceString resolveNamespaceStringOrUUID(OperationContext* opCtx,
+ NamespaceStringOrUUID nsOrUUID);
/**
* Returns whether the collection with 'uuid' satisfies the provided 'predicate'. If the
diff --git a/src/mongo/db/catalog/collection_catalog_helper.cpp b/src/mongo/db/catalog/collection_catalog_helper.cpp
index 1085d7edb51..d867ed33ea1 100644
--- a/src/mongo/db/catalog/collection_catalog_helper.cpp
+++ b/src/mongo/db/catalog/collection_catalog_helper.cpp
@@ -54,14 +54,14 @@ void forEachCollectionFromDb(OperationContext* opCtx,
boost::optional<Lock::CollectionLock> clk;
Collection* collection = nullptr;
- while (auto nss = catalog.lookupNSSByUUID(uuid)) {
+ while (auto nss = catalog.lookupNSSByUUID(opCtx, uuid)) {
// Get a fresh snapshot for each locked collection to see any catalog changes.
clk.emplace(opCtx, *nss, collLockMode);
opCtx->recoveryUnit()->abandonSnapshot();
- if (catalog.lookupNSSByUUID(uuid) == nss) {
+ if (catalog.lookupNSSByUUID(opCtx, uuid) == nss) {
// Success: locked the namespace and the UUID still maps to it.
- collection = catalog.lookupCollectionByUUID(uuid);
+ collection = catalog.lookupCollectionByUUID(opCtx, uuid);
invariant(collection);
break;
}
diff --git a/src/mongo/db/catalog/collection_catalog_test.cpp b/src/mongo/db/catalog/collection_catalog_test.cpp
index e31e2c9af3e..44409c32807 100644
--- a/src/mongo/db/catalog/collection_catalog_test.cpp
+++ b/src/mongo/db/catalog/collection_catalog_test.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/catalog/collection_mock.h"
#include "mongo/db/concurrency/lock_manager_defs.h"
#include "mongo/db/operation_context_noop.h"
+#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/db/storage/durable_catalog.h"
#include "mongo/unittest/death_test.h"
#include "mongo/unittest/unittest.h"
@@ -45,7 +46,7 @@ using namespace mongo;
/**
* A test fixture that creates a CollectionCatalog and Collection* pointer to store in it.
*/
-class CollectionCatalogTest : public unittest::Test {
+class CollectionCatalogTest : public ServiceContextMongoDTest {
public:
CollectionCatalogTest()
: nss("testdb", "testcol"),
@@ -62,17 +63,18 @@ public:
ASSERT_GT(colUUID, prevUUID);
ASSERT_GT(nextUUID, colUUID);
- auto collection = std::make_unique<CollectionMock>(nss);
+ ServiceContextMongoDTest::setUp();
+ std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
col = collection.get();
// Register dummy collection in catalog.
- catalog.registerCollection(colUUID, std::move(collection));
+ catalog.registerCollection(colUUID, &collection);
}
protected:
CollectionCatalog catalog;
OperationContextNoop opCtx;
NamespaceString nss;
- CollectionMock* col;
+ Collection* col;
CollectionUUID colUUID;
CollectionUUID nextUUID;
CollectionUUID prevUUID;
@@ -86,15 +88,15 @@ public:
NamespaceString barNss("bar", "coll" + std::to_string(counter));
auto fooUuid = CollectionUUID::gen();
- auto fooColl = std::make_unique<CollectionMock>(fooNss);
+ std::unique_ptr<Collection> fooColl = std::make_unique<CollectionMock>(fooNss);
auto barUuid = CollectionUUID::gen();
- auto barColl = std::make_unique<CollectionMock>(barNss);
+ std::unique_ptr<Collection> barColl = std::make_unique<CollectionMock>(barNss);
dbMap["foo"].insert(std::make_pair(fooUuid, fooColl.get()));
dbMap["bar"].insert(std::make_pair(barUuid, barColl.get()));
- catalog.registerCollection(fooUuid, std::move(fooColl));
- catalog.registerCollection(barUuid, std::move(barColl));
+ catalog.registerCollection(fooUuid, &fooColl);
+ catalog.registerCollection(barUuid, &barColl);
}
}
@@ -106,13 +108,13 @@ public:
}
}
- std::map<CollectionUUID, CollectionMock*>::iterator collsIterator(std::string dbName) {
+ std::map<CollectionUUID, Collection*>::iterator collsIterator(std::string dbName) {
auto it = dbMap.find(dbName);
ASSERT(it != dbMap.end());
return it->second.begin();
}
- std::map<CollectionUUID, CollectionMock*>::iterator collsIteratorEnd(std::string dbName) {
+ std::map<CollectionUUID, Collection*>::iterator collsIteratorEnd(std::string dbName) {
auto it = dbMap.find(dbName);
ASSERT(it != dbMap.end());
return it->second.end();
@@ -142,7 +144,7 @@ public:
protected:
CollectionCatalog catalog;
OperationContextNoop opCtx;
- std::map<std::string, std::map<CollectionUUID, CollectionMock*>> dbMap;
+ std::map<std::string, std::map<CollectionUUID, Collection*>> dbMap;
};
class CollectionCatalogResourceMapTest : public unittest::Test {
@@ -268,10 +270,10 @@ public:
void setUp() {
for (int i = 0; i < 5; i++) {
NamespaceString nss("resourceDb", "coll" + std::to_string(i));
- auto coll = std::make_unique<CollectionMock>(nss);
- auto uuid = coll->uuid();
+ std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
+ auto uuid = collection->uuid();
- catalog.registerCollection(uuid, std::move(coll));
+ catalog.registerCollection(uuid, &collection);
}
int numEntries = 0;
@@ -357,7 +359,7 @@ TEST_F(CollectionCatalogResourceTest, LookupMissingCollectionResource) {
TEST_F(CollectionCatalogResourceTest, RemoveCollection) {
const std::string collNs = "resourceDb.coll1";
- auto coll = catalog.lookupCollectionByNamespace(NamespaceString(collNs));
+ auto coll = catalog.lookupCollectionByNamespace(&opCtx, NamespaceString(collNs));
catalog.deregisterCollection(coll->uuid());
auto rid = ResourceId(RESOURCE_COLLECTION, collNs);
ASSERT(!catalog.lookupResourceName(rid));
@@ -471,104 +473,104 @@ TEST_F(CollectionCatalogIterationTest, GetUUIDWontRepositionEvenIfEntryIsDropped
}
TEST_F(CollectionCatalogTest, OnCreateCollection) {
- ASSERT(catalog.lookupCollectionByUUID(colUUID) == col);
+ ASSERT(catalog.lookupCollectionByUUID(&opCtx, colUUID) == col);
}
TEST_F(CollectionCatalogTest, LookupCollectionByUUID) {
// Ensure the string value of the NamespaceString of the obtained Collection is equal to
// nss.ns().
- ASSERT_EQUALS(catalog.lookupCollectionByUUID(colUUID)->ns().ns(), nss.ns());
+ ASSERT_EQUALS(catalog.lookupCollectionByUUID(&opCtx, colUUID)->ns().ns(), nss.ns());
// Ensure lookups of unknown UUIDs result in null pointers.
- ASSERT(catalog.lookupCollectionByUUID(CollectionUUID::gen()) == nullptr);
+ ASSERT(catalog.lookupCollectionByUUID(&opCtx, CollectionUUID::gen()) == nullptr);
}
TEST_F(CollectionCatalogTest, LookupNSSByUUID) {
// Ensure the string value of the obtained NamespaceString is equal to nss.ns().
- ASSERT_EQUALS(catalog.lookupNSSByUUID(colUUID)->ns(), nss.ns());
+ ASSERT_EQUALS(catalog.lookupNSSByUUID(&opCtx, colUUID)->ns(), nss.ns());
// Ensure namespace lookups of unknown UUIDs result in empty NamespaceStrings.
- ASSERT_EQUALS(catalog.lookupNSSByUUID(CollectionUUID::gen()), boost::none);
+ ASSERT_EQUALS(catalog.lookupNSSByUUID(&opCtx, CollectionUUID::gen()), boost::none);
}
TEST_F(CollectionCatalogTest, InsertAfterLookup) {
auto newUUID = CollectionUUID::gen();
NamespaceString newNss(nss.db(), "newcol");
- auto newCollUnique = std::make_unique<CollectionMock>(newNss);
+ std::unique_ptr<Collection> newCollUnique = std::make_unique<CollectionMock>(newNss);
auto newCol = newCollUnique.get();
// Ensure that looking up non-existing UUIDs doesn't affect later registration of those UUIDs.
- ASSERT(catalog.lookupCollectionByUUID(newUUID) == nullptr);
- ASSERT_EQUALS(catalog.lookupNSSByUUID(newUUID), boost::none);
- catalog.registerCollection(newUUID, std::move(newCollUnique));
- ASSERT_EQUALS(catalog.lookupCollectionByUUID(newUUID), newCol);
- ASSERT_EQUALS(*catalog.lookupNSSByUUID(colUUID), nss);
+ ASSERT(catalog.lookupCollectionByUUID(&opCtx, newUUID) == nullptr);
+ ASSERT_EQUALS(catalog.lookupNSSByUUID(&opCtx, newUUID), boost::none);
+ catalog.registerCollection(newUUID, &newCollUnique);
+ ASSERT_EQUALS(catalog.lookupCollectionByUUID(&opCtx, newUUID), newCol);
+ ASSERT_EQUALS(*catalog.lookupNSSByUUID(&opCtx, colUUID), nss);
}
TEST_F(CollectionCatalogTest, OnDropCollection) {
catalog.deregisterCollection(colUUID);
// Ensure the lookup returns a null pointer upon removing the colUUID entry.
- ASSERT(catalog.lookupCollectionByUUID(colUUID) == nullptr);
+ ASSERT(catalog.lookupCollectionByUUID(&opCtx, colUUID) == nullptr);
}
TEST_F(CollectionCatalogTest, RenameCollection) {
auto uuid = CollectionUUID::gen();
NamespaceString oldNss(nss.db(), "oldcol");
- auto collUnique = std::make_unique<CollectionMock>(oldNss);
+ std::unique_ptr<Collection> collUnique = std::make_unique<CollectionMock>(oldNss);
auto collection = collUnique.get();
- catalog.registerCollection(uuid, std::move(collUnique));
- ASSERT_EQUALS(catalog.lookupCollectionByUUID(uuid), collection);
+ catalog.registerCollection(uuid, &collUnique);
+ ASSERT_EQUALS(catalog.lookupCollectionByUUID(&opCtx, uuid), collection);
NamespaceString newNss(nss.db(), "newcol");
catalog.setCollectionNamespace(&opCtx, collection, oldNss, newNss);
ASSERT_EQ(collection->ns(), newNss);
- ASSERT_EQUALS(catalog.lookupCollectionByUUID(uuid), collection);
+ ASSERT_EQUALS(catalog.lookupCollectionByUUID(&opCtx, uuid), collection);
}
TEST_F(CollectionCatalogTest, LookupNSSByUUIDForClosedCatalogReturnsOldNSSIfDropped) {
catalog.onCloseCatalog(&opCtx);
catalog.deregisterCollection(colUUID);
- ASSERT(catalog.lookupCollectionByUUID(colUUID) == nullptr);
- ASSERT_EQUALS(*catalog.lookupNSSByUUID(colUUID), nss);
+ ASSERT(catalog.lookupCollectionByUUID(&opCtx, colUUID) == nullptr);
+ ASSERT_EQUALS(*catalog.lookupNSSByUUID(&opCtx, colUUID), nss);
catalog.onOpenCatalog(&opCtx);
- ASSERT_EQUALS(catalog.lookupNSSByUUID(colUUID), boost::none);
+ ASSERT_EQUALS(catalog.lookupNSSByUUID(&opCtx, colUUID), boost::none);
}
TEST_F(CollectionCatalogTest, LookupNSSByUUIDForClosedCatalogReturnsNewlyCreatedNSS) {
auto newUUID = CollectionUUID::gen();
NamespaceString newNss(nss.db(), "newcol");
- auto newCollUnique = std::make_unique<CollectionMock>(newNss);
+ std::unique_ptr<Collection> newCollUnique = std::make_unique<CollectionMock>(newNss);
auto newCol = newCollUnique.get();
// Ensure that looking up non-existing UUIDs doesn't affect later registration of those UUIDs.
catalog.onCloseCatalog(&opCtx);
- ASSERT(catalog.lookupCollectionByUUID(newUUID) == nullptr);
- ASSERT_EQUALS(catalog.lookupNSSByUUID(newUUID), boost::none);
- catalog.registerCollection(newUUID, std::move(newCollUnique));
- ASSERT_EQUALS(catalog.lookupCollectionByUUID(newUUID), newCol);
- ASSERT_EQUALS(*catalog.lookupNSSByUUID(colUUID), nss);
+ ASSERT(catalog.lookupCollectionByUUID(&opCtx, newUUID) == nullptr);
+ ASSERT_EQUALS(catalog.lookupNSSByUUID(&opCtx, newUUID), boost::none);
+ catalog.registerCollection(newUUID, &newCollUnique);
+ ASSERT_EQUALS(catalog.lookupCollectionByUUID(&opCtx, newUUID), newCol);
+ ASSERT_EQUALS(*catalog.lookupNSSByUUID(&opCtx, colUUID), nss);
// Ensure that collection still exists after opening the catalog again.
catalog.onOpenCatalog(&opCtx);
- ASSERT_EQUALS(catalog.lookupCollectionByUUID(newUUID), newCol);
- ASSERT_EQUALS(*catalog.lookupNSSByUUID(colUUID), nss);
+ ASSERT_EQUALS(catalog.lookupCollectionByUUID(&opCtx, newUUID), newCol);
+ ASSERT_EQUALS(*catalog.lookupNSSByUUID(&opCtx, colUUID), nss);
}
TEST_F(CollectionCatalogTest, LookupNSSByUUIDForClosedCatalogReturnsFreshestNSS) {
NamespaceString newNss(nss.db(), "newcol");
- auto newCollUnique = std::make_unique<CollectionMock>(newNss);
+ std::unique_ptr<Collection> newCollUnique = std::make_unique<CollectionMock>(newNss);
auto newCol = newCollUnique.get();
catalog.onCloseCatalog(&opCtx);
catalog.deregisterCollection(colUUID);
- ASSERT(catalog.lookupCollectionByUUID(colUUID) == nullptr);
- ASSERT_EQUALS(*catalog.lookupNSSByUUID(colUUID), nss);
- catalog.registerCollection(colUUID, std::move(newCollUnique));
- ASSERT_EQUALS(catalog.lookupCollectionByUUID(colUUID), newCol);
- ASSERT_EQUALS(*catalog.lookupNSSByUUID(colUUID), newNss);
+ ASSERT(catalog.lookupCollectionByUUID(&opCtx, colUUID) == nullptr);
+ ASSERT_EQUALS(*catalog.lookupNSSByUUID(&opCtx, colUUID), nss);
+ catalog.registerCollection(colUUID, &newCollUnique);
+ ASSERT_EQUALS(catalog.lookupCollectionByUUID(&opCtx, colUUID), newCol);
+ ASSERT_EQUALS(*catalog.lookupNSSByUUID(&opCtx, colUUID), newNss);
// Ensure that collection still exists after opening the catalog again.
catalog.onOpenCatalog(&opCtx);
- ASSERT_EQUALS(catalog.lookupCollectionByUUID(colUUID), newCol);
- ASSERT_EQUALS(*catalog.lookupNSSByUUID(colUUID), newNss);
+ ASSERT_EQUALS(catalog.lookupCollectionByUUID(&opCtx, colUUID), newCol);
+ ASSERT_EQUALS(*catalog.lookupNSSByUUID(&opCtx, colUUID), newNss);
}
DEATH_TEST_F(CollectionCatalogResourceTest, AddInvalidResourceType, "invariant") {
@@ -587,9 +589,9 @@ TEST_F(CollectionCatalogTest, GetAllCollectionNamesAndGetAllDbNames) {
std::vector<NamespaceString> nsss = {aColl, b1Coll, b2Coll, cColl, d1Coll, d2Coll, d3Coll};
for (auto& nss : nsss) {
- auto newColl = std::make_unique<CollectionMock>(nss);
+ std::unique_ptr<Collection> newColl = std::make_unique<CollectionMock>(nss);
auto uuid = CollectionUUID::gen();
- catalog.registerCollection(uuid, std::move(newColl));
+ catalog.registerCollection(uuid, &newColl);
}
std::vector<NamespaceString> dCollList = {d1Coll, d2Coll, d3Coll};
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index 21606d7a9cc..13d91880c82 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -56,7 +56,7 @@ Collection* getCollectionForCompact(OperationContext* opCtx,
invariant(opCtx->lockState()->isCollectionLockedForMode(collectionNss, MODE_IX));
CollectionCatalog& collectionCatalog = CollectionCatalog::get(opCtx);
- Collection* collection = collectionCatalog.lookupCollectionByNamespace(collectionNss);
+ Collection* collection = collectionCatalog.lookupCollectionByNamespace(opCtx, collectionNss);
if (!collection) {
std::shared_ptr<ViewDefinition> view =
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index 2bddb98ee6e..dc841b95b2c 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -46,6 +46,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/ops/insert.h"
#include "mongo/db/repl/replication_coordinator.h"
+#include "mongo/db/views/view_catalog.h"
#include "mongo/logger/redaction.h"
#include "mongo/util/log.h"
@@ -82,7 +83,7 @@ Status _createView(OperationContext* opCtx,
// Create 'system.views' in a separate WUOW if it does not exist.
WriteUnitOfWork wuow(opCtx);
Collection* coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
- NamespaceString(db->getSystemViewsName()));
+ opCtx, NamespaceString(db->getSystemViewsName()));
if (!coll) {
coll = db->createCollection(opCtx, NamespaceString(db->getSystemViewsName()));
}
@@ -106,7 +107,18 @@ Status _createCollection(OperationContext* opCtx,
const BSONObj& idIndex) {
return writeConflictRetry(opCtx, "create", nss.ns(), [&] {
AutoGetOrCreateDb autoDb(opCtx, nss.db(), MODE_IX);
- Lock::CollectionLock collLock(opCtx, nss, MODE_X);
+ Lock::CollectionLock collLock(opCtx, nss, MODE_IX);
+ // This is a top-level handler for collection creation name conflicts. New commands coming
+ // in, or commands that generated a WriteConflict must return a NamespaceExists error here
+ // on conflict.
+ if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss) != nullptr) {
+ return Status(ErrorCodes::NamespaceExists,
+ str::stream() << "Collection already exists. NS: " << nss);
+ }
+ if (ViewCatalog::get(autoDb.getDb())->lookup(opCtx, nss.ns())) {
+ return Status(ErrorCodes::NamespaceExists,
+ str::stream() << "A view already exists. NS: " << nss);
+ }
AutoStatsTracker statsTracker(opCtx,
nss,
@@ -229,7 +241,7 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
uuid.isRFC4122v4());
auto& catalog = CollectionCatalog::get(opCtx);
- const auto currentName = catalog.lookupNSSByUUID(uuid);
+ const auto currentName = catalog.lookupNSSByUUID(opCtx, uuid);
auto serviceContext = opCtx->getServiceContext();
auto opObserver = serviceContext->getOpObserver();
if (currentName && *currentName == newCollName)
@@ -256,7 +268,7 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
// node.
const bool stayTemp = true;
auto futureColl = db
- ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(newCollName)
+ ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, newCollName)
: nullptr;
bool needsRenaming = static_cast<bool>(futureColl);
for (int tries = 0; needsRenaming && tries < 10; ++tries) {
@@ -304,7 +316,7 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
// If the collection with the requested UUID already exists, but with a different
// name, just rename it to 'newCollName'.
- if (catalog.lookupCollectionByUUID(uuid)) {
+ if (catalog.lookupCollectionByUUID(opCtx, uuid)) {
invariant(currentName);
uassert(40655,
str::stream() << "Invalid name " << newCollName << " for UUID " << uuid,
diff --git a/src/mongo/db/catalog/create_collection_test.cpp b/src/mongo/db/catalog/create_collection_test.cpp
index a75abe4e1f0..fafc8229a5a 100644
--- a/src/mongo/db/catalog/create_collection_test.cpp
+++ b/src/mongo/db/catalog/create_collection_test.cpp
@@ -33,6 +33,7 @@
#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/catalog/create_collection.h"
+#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/jsobj.h"
@@ -134,7 +135,7 @@ TEST_F(CreateCollectionTest,
auto opCtx = makeOpCtx();
auto uuid = UUID::gen();
- Lock::DBLock lock(opCtx.get(), newNss.db(), MODE_IX);
+ Lock::GlobalLock lk(opCtx.get(), MODE_X); // Satisfy low-level locking invariants.
// Create existing collection using StorageInterface.
{
@@ -160,7 +161,7 @@ TEST_F(CreateCollectionTest,
auto opCtx = makeOpCtx();
auto uuid = UUID::gen();
- Lock::DBLock lock(opCtx.get(), newNss.db(), MODE_IX);
+ Lock::GlobalLock lk(opCtx.get(), MODE_X); // Satisfy low-level locking invariants.
// Create existing collection with same name but different UUID using StorageInterface.
auto existingCollectionUuid = UUID::gen();
@@ -181,7 +182,7 @@ TEST_F(CreateCollectionTest,
// Check that old collection that was renamed out of the way still exists.
auto& catalog = CollectionCatalog::get(opCtx.get());
- auto renamedCollectionNss = catalog.lookupNSSByUUID(existingCollectionUuid);
+ auto renamedCollectionNss = catalog.lookupNSSByUUID(opCtx.get(), existingCollectionUuid);
ASSERT(renamedCollectionNss);
ASSERT_TRUE(collectionExists(opCtx.get(), *renamedCollectionNss))
<< "old renamed collection with UUID " << existingCollectionUuid
@@ -217,5 +218,4 @@ TEST_F(CreateCollectionTest,
ASSERT_TRUE(collectionExists(opCtx.get(), dropPendingNss));
ASSERT_FALSE(collectionExists(opCtx.get(), newNss));
}
-
} // namespace
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index 430b540b034..efb175b079b 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -49,6 +49,7 @@
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/drop_indexes.h"
#include "mongo/db/catalog/index_catalog.h"
+#include "mongo/db/catalog/uncommitted_collections.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/commands/feature_compatibility_version_parser.h"
#include "mongo/db/concurrency/d_concurrency.h"
@@ -83,6 +84,7 @@ namespace mongo {
namespace {
MONGO_FAIL_POINT_DEFINE(hangBeforeLoggingCreateCollection);
MONGO_FAIL_POINT_DEFINE(hangAndFailAfterCreateCollectionReservesOpTime);
+MONGO_FAIL_POINT_DEFINE(openCreateCollectionWindowFp);
Status validateDBNameForWindows(StringData dbname) {
const std::vector<std::string> windowsReservedNames = {
@@ -142,7 +144,7 @@ void DatabaseImpl::init(OperationContext* const opCtx) const {
auto& catalog = CollectionCatalog::get(opCtx);
for (const auto& uuid : catalog.getAllCollectionUUIDsFromDb(_name)) {
- auto collection = catalog.lookupCollectionByUUID(uuid);
+ auto collection = catalog.lookupCollectionByUUID(opCtx, uuid);
invariant(collection);
// If this is called from the repair path, the collection is already initialized.
if (!collection->isInitialized())
@@ -307,7 +309,10 @@ Status DatabaseImpl::dropView(OperationContext* opCtx, NamespaceString viewName)
Status DatabaseImpl::dropCollection(OperationContext* opCtx,
NamespaceString nss,
repl::OpTime dropOpTime) const {
- if (!CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss)) {
+ // Cannot drop uncommitted collections.
+ invariant(!UncommittedCollections::getForTxn(opCtx, nss));
+
+ if (!CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss)) {
// Collection doesn't exist so don't bother validating if it can be dropped.
return Status::OK();
}
@@ -344,7 +349,7 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
"dropCollection() cannot accept a valid drop optime when writes are replicated.");
}
- Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
if (!collection) {
return Status::OK(); // Post condition already met.
@@ -484,13 +489,14 @@ Status DatabaseImpl::renameCollection(OperationContext* opCtx,
invariant(fromNss.db() == _name);
invariant(toNss.db() == _name);
- if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(toNss)) {
+ if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, toNss)) {
return Status(ErrorCodes::NamespaceExists,
str::stream() << "Cannot rename '" << fromNss << "' to '" << toNss
<< "' because the destination namespace already exists");
}
- Collection* collToRename = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(fromNss);
+ Collection* collToRename =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, fromNss);
if (!collToRename) {
return Status(ErrorCodes::NamespaceNotFound, "collection not found to rename");
}
@@ -499,7 +505,7 @@ Status DatabaseImpl::renameCollection(OperationContext* opCtx,
"collection "
<< fromNss);
- Collection* toColl = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(toNss);
+ Collection* toColl = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, toNss);
if (toColl) {
invariant(
!toColl->getIndexCatalog()->haveAnyIndexesInProgress(),
@@ -517,9 +523,8 @@ Status DatabaseImpl::renameCollection(OperationContext* opCtx,
opCtx, collToRename->getCatalogId(), toNss, stayTemp);
// Set the namespace of 'collToRename' from within the CollectionCatalog. This is necessary
- // because
- // the CollectionCatalog mutex synchronizes concurrent access to the collection's namespace for
- // callers that may not hold a collection lock.
+ // because the CollectionCatalog mutex synchronizes concurrent access to the collection's
+ // namespace for callers that may not hold a collection lock.
CollectionCatalog::get(opCtx).setCollectionNamespace(opCtx, collToRename, fromNss, toNss);
opCtx->recoveryUnit()->onCommit([collToRename](auto commitTime) {
@@ -535,9 +540,15 @@ Status DatabaseImpl::renameCollection(OperationContext* opCtx,
void DatabaseImpl::_checkCanCreateCollection(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionOptions& options) const {
- massert(17399,
- str::stream() << "Cannot create collection " << nss << " - collection already exists.",
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss) == nullptr);
+ if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss) != nullptr) {
+ if (options.isView()) {
+ uasserted(17399,
+ str::stream()
+ << "Cannot create collection " << nss << " - collection already exists.");
+ } else {
+ throw WriteConflictException();
+ }
+ }
uassert(14037,
"can't create user databases on a --configsvr instance",
@@ -589,7 +600,7 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
const BSONObj& idIndex) const {
invariant(!options.isView());
- invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_IX));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX));
uassert(CannotImplicitlyCreateCollectionInfo(nss),
"request doesn't allow collection to be created implicitly",
@@ -624,8 +635,12 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
// transaction, we reserve an opTime before the collection creation, then pass it to the
// opObserver. Reserving the optime automatically sets the storage timestamp.
OplogSlot createOplogSlot;
+ Timestamp createTime;
if (canAcceptWrites && supportsDocLocking() && !coordinator->isOplogDisabledFor(opCtx, nss)) {
createOplogSlot = repl::getNextOpTime(opCtx);
+ createTime = createOplogSlot.getTimestamp();
+ } else {
+ createTime = opCtx->recoveryUnit()->getCommitTimestamp();
}
if (MONGO_unlikely(hangAndFailAfterCreateCollectionReservesOpTime.shouldFail())) {
@@ -653,17 +668,12 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
std::move(catalogIdRecordStorePair.second));
auto collection = ownedCollection.get();
ownedCollection->init(opCtx);
-
- opCtx->recoveryUnit()->onCommit([collection](auto commitTime) {
- // Ban reading from this collection on committed reads on snapshots before now.
- if (commitTime)
- collection->setMinimumVisibleSnapshot(commitTime.get());
- });
-
- auto& catalog = CollectionCatalog::get(opCtx);
- auto uuid = ownedCollection->uuid();
- catalog.registerCollection(uuid, std::move(ownedCollection));
- opCtx->recoveryUnit()->onRollback([uuid, &catalog] { catalog.deregisterCollection(uuid); });
+ UncommittedCollections::addToTxn(opCtx, std::move(ownedCollection), createTime);
+ openCreateCollectionWindowFp.executeIf([&](const BSONObj& data) { sleepsecs(3); },
+ [&](const BSONObj& data) {
+ const auto collElem = data["collectionNS"];
+ return !collElem || nss.toString() == collElem.str();
+ });
BSONObj fullIdIndexSpec;
@@ -740,7 +750,7 @@ StatusWith<NamespaceString> DatabaseImpl::makeUniqueCollectionNamespace(
replacePercentSign);
NamespaceString nss(_name, collectionName);
- if (!CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss)) {
+ if (!CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss)) {
return nss;
}
}
@@ -772,7 +782,7 @@ void DatabaseImpl::checkForIdIndexesAndDropPendingCollections(OperationContext*
if (nss.isSystem())
continue;
- Collection* coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ Collection* coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
if (!coll)
continue;
@@ -793,20 +803,9 @@ Status DatabaseImpl::userCreateNS(OperationContext* opCtx,
bool createDefaultIndexes,
const BSONObj& idIndex) const {
LOG(1) << "create collection " << nss << ' ' << collectionOptions.toBSON();
-
if (!NamespaceString::validCollectionComponent(nss.ns()))
return Status(ErrorCodes::InvalidNamespace, str::stream() << "invalid ns: " << nss);
- Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
-
- if (collection)
- return Status(ErrorCodes::NamespaceExists,
- str::stream() << "a collection '" << nss << "' already exists");
-
- if (ViewCatalog::get(this)->lookup(opCtx, nss.ns()))
- return Status(ErrorCodes::NamespaceExists,
- str::stream() << "a view '" << nss << "' already exists");
-
// Validate the collation, if there is one.
std::unique_ptr<CollatorInterface> collator;
if (!collectionOptions.collation.isEmpty()) {
diff --git a/src/mongo/db/catalog/database_test.cpp b/src/mongo/db/catalog/database_test.cpp
index efcb5fec842..05f96baefcb 100644
--- a/src/mongo/db/catalog/database_test.cpp
+++ b/src/mongo/db/catalog/database_test.cpp
@@ -38,6 +38,7 @@
#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/catalog/index_build_block.h"
#include "mongo/db/catalog/index_catalog.h"
+#include "mongo/db/catalog/uncommitted_collections.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
@@ -175,21 +176,29 @@ void _testDropCollection(OperationContext* opCtx,
bool createCollectionBeforeDrop,
const repl::OpTime& dropOpTime = {},
const CollectionOptions& collOpts = {}) {
+ if (createCollectionBeforeDrop) {
+ writeConflictRetry(opCtx, "testDropCollection", nss.ns(), [=] {
+ WriteUnitOfWork wuow(opCtx);
+ AutoGetOrCreateDb autoDb(opCtx, nss.db(), MODE_X);
+ auto db = autoDb.getDb();
+ ASSERT_TRUE(db);
+ ASSERT_TRUE(db->createCollection(opCtx, nss, collOpts));
+ wuow.commit();
+ });
+ }
+
writeConflictRetry(opCtx, "testDropCollection", nss.ns(), [=] {
AutoGetOrCreateDb autoDb(opCtx, nss.db(), MODE_X);
auto db = autoDb.getDb();
ASSERT_TRUE(db);
WriteUnitOfWork wuow(opCtx);
- if (createCollectionBeforeDrop) {
- ASSERT_TRUE(db->createCollection(opCtx, nss, collOpts));
- } else {
- ASSERT_FALSE(CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss));
+ if (!createCollectionBeforeDrop) {
+ ASSERT_FALSE(CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss));
}
ASSERT_OK(db->dropCollection(opCtx, nss, dropOpTime));
-
- ASSERT_FALSE(CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss));
+ ASSERT_FALSE(CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss));
wuow.commit();
});
}
@@ -244,17 +253,19 @@ TEST_F(DatabaseTest, DropCollectionRejectsProvidedDropOpTimeIfWritesAreReplicate
auto opCtx = _opCtx.get();
auto nss = _nss;
- writeConflictRetry(opCtx, "testDropOpTimeWithReplicated", nss.ns(), [opCtx, nss] {
- AutoGetOrCreateDb autoDb(opCtx, nss.db(), MODE_X);
- auto db = autoDb.getDb();
+ AutoGetOrCreateDb autoDb(opCtx, nss.db(), MODE_X);
+ auto db = autoDb.getDb();
+ writeConflictRetry(opCtx, "testDropOpTimeWithReplicated", nss.ns(), [&] {
ASSERT_TRUE(db);
WriteUnitOfWork wuow(opCtx);
ASSERT_TRUE(db->createCollection(opCtx, nss));
-
- repl::OpTime dropOpTime(Timestamp(Seconds(100), 0), 1LL);
- ASSERT_EQUALS(ErrorCodes::BadValue, db->dropCollection(opCtx, nss, dropOpTime));
+ wuow.commit();
});
+
+ WriteUnitOfWork wuow(opCtx);
+ repl::OpTime dropOpTime(Timestamp(Seconds(100), 0), 1LL);
+ ASSERT_EQUALS(ErrorCodes::BadValue, db->dropCollection(opCtx, nss, dropOpTime));
}
TEST_F(
@@ -345,27 +356,30 @@ TEST_F(DatabaseTest, RenameCollectionPreservesUuidOfSourceCollectionAndUpdatesUu
auto toNss = NamespaceString(fromNss.getSisterNS("bar"));
ASSERT_NOT_EQUALS(fromNss, toNss);
- writeConflictRetry(opCtx, "testRenameCollection", fromNss.ns(), [=] {
- AutoGetOrCreateDb autoDb(opCtx, fromNss.db(), MODE_X);
- auto db = autoDb.getDb();
- ASSERT_TRUE(db);
+ AutoGetOrCreateDb autoDb(opCtx, fromNss.db(), MODE_X);
+ auto db = autoDb.getDb();
+ ASSERT_TRUE(db);
- auto fromUuid = UUID::gen();
-
- auto&& catalog = CollectionCatalog::get(opCtx);
- ASSERT_EQUALS(boost::none, catalog.lookupNSSByUUID(fromUuid));
+ auto fromUuid = UUID::gen();
+ auto& catalog = CollectionCatalog::get(opCtx);
+ writeConflictRetry(opCtx, "create", fromNss.ns(), [&] {
+ ASSERT_EQUALS(boost::none, catalog.lookupNSSByUUID(opCtx, fromUuid));
WriteUnitOfWork wuow(opCtx);
CollectionOptions fromCollectionOptions;
fromCollectionOptions.uuid = fromUuid;
ASSERT_TRUE(db->createCollection(opCtx, fromNss, fromCollectionOptions));
- ASSERT_EQUALS(fromNss, *catalog.lookupNSSByUUID(fromUuid));
+ ASSERT_EQUALS(fromNss, *catalog.lookupNSSByUUID(opCtx, fromUuid));
+ wuow.commit();
+ });
+ writeConflictRetry(opCtx, "rename", fromNss.ns(), [&] {
+ WriteUnitOfWork wuow(opCtx);
auto stayTemp = false;
ASSERT_OK(db->renameCollection(opCtx, fromNss, toNss, stayTemp));
- ASSERT_FALSE(CollectionCatalog::get(opCtx).lookupCollectionByNamespace(fromNss));
- auto toCollection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(toNss);
+ ASSERT_FALSE(CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, fromNss));
+ auto toCollection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, toNss);
ASSERT_TRUE(toCollection);
auto toCollectionOptions =
@@ -375,7 +389,7 @@ TEST_F(DatabaseTest, RenameCollectionPreservesUuidOfSourceCollectionAndUpdatesUu
ASSERT_TRUE(toUuid);
ASSERT_EQUALS(fromUuid, *toUuid);
- ASSERT_EQUALS(toNss, *catalog.lookupNSSByUUID(*toUuid));
+ ASSERT_EQUALS(toNss, *catalog.lookupNSSByUUID(opCtx, *toUuid));
wuow.commit();
});
@@ -390,9 +404,6 @@ TEST_F(DatabaseTest,
ASSERT_EQUALS(
ErrorCodes::FailedToParse,
db->makeUniqueCollectionNamespace(_opCtx.get(), "CollectionModelWithoutPercentSign"));
-
- std::string longCollModel(8192, '%');
- ASSERT_OK(db->makeUniqueCollectionNamespace(_opCtx.get(), StringData(longCollModel)));
});
}
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index da14e62084b..5c374c2d98c 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/background.h"
#include "mongo/db/catalog/index_catalog.h"
+#include "mongo/db/catalog/uncommitted_collections.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/curop.h"
@@ -108,7 +109,8 @@ Status _dropCollection(OperationContext* opCtx,
DropCollectionSystemCollectionMode systemCollectionMode,
BSONObjBuilder& result) {
Lock::CollectionLock collLock(opCtx, collectionName, MODE_X);
- Collection* coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(collectionName);
+ Collection* coll =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, collectionName);
if (!coll) {
return Status(ErrorCodes::NamespaceNotFound, "ns not found");
}
@@ -173,7 +175,7 @@ Status dropCollection(OperationContext* opCtx,
}
Collection* coll =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(collectionName);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, collectionName);
if (!coll) {
return _dropView(opCtx, db, collectionName, result);
} else {
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index ba78b5d8d24..f598148c68a 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -193,7 +193,7 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
BackgroundOperation::assertNoBgOpInProgForNs(nss.ns());
IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss)->uuid());
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss)->uuid());
writeConflictRetry(opCtx, "dropDatabase_collection", nss.ns(), [&] {
WriteUnitOfWork wunit(opCtx);
diff --git a/src/mongo/db/catalog/index_build_block.cpp b/src/mongo/db/catalog/index_build_block.cpp
index a8cb11c8823..85e184b7036 100644
--- a/src/mongo/db/catalog/index_build_block.cpp
+++ b/src/mongo/db/catalog/index_build_block.cpp
@@ -36,6 +36,7 @@
#include <vector>
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/uncommitted_collections.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/index/index_descriptor.h"
@@ -156,7 +157,8 @@ void IndexBuildBlock::success(OperationContext* opCtx, Collection* collection) {
// Being in a WUOW means all timestamping responsibility can be pushed up to the caller.
invariant(opCtx->lockState()->inAWriteUnitOfWork());
- invariant(opCtx->lockState()->isCollectionLockedForMode(_nss, MODE_X));
+ invariant(
+ UncommittedCollections::get(opCtx).hasExclusiveAccessToCollection(opCtx, collection->ns()));
if (_indexBuildInterceptor) {
// An index build should never be completed with writes remaining in the interceptor.
diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp
index 0c85b725664..3c222bd16cf 100644
--- a/src/mongo/db/catalog/index_builds_manager.cpp
+++ b/src/mongo/db/catalog/index_builds_manager.cpp
@@ -135,7 +135,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsManager::startBuildingInd
OperationContext* opCtx, NamespaceString ns, const UUID& buildUUID) {
auto builder = _getBuilder(buildUUID);
- auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(ns);
+ auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, ns);
auto rs = coll ? coll->getRecordStore() : nullptr;
// Iterate all records in the collection. Delete them if they aren't valid BSON. Index them
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index c06724c85c4..d9a275f1921 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -44,6 +44,7 @@
#include "mongo/db/catalog/index_build_block.h"
#include "mongo/db/catalog/index_catalog_entry_impl.h"
#include "mongo/db/catalog/index_key_validate.h"
+#include "mongo/db/catalog/uncommitted_collections.h"
#include "mongo/db/client.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
@@ -394,7 +395,8 @@ IndexCatalogEntry* IndexCatalogImpl::createIndexEntry(OperationContext* opCtx,
StatusWith<BSONObj> IndexCatalogImpl::createIndexOnEmptyCollection(OperationContext* opCtx,
BSONObj spec) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
+ invariant(UncommittedCollections::get(opCtx).hasExclusiveAccessToCollection(opCtx,
+ _collection->ns()));
invariant(_collection->numRecords(opCtx) == 0,
str::stream() << "Collection must be empty. Collection: " << _collection->ns()
<< " UUID: " << _collection->uuid()
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index 91f4d9e383d..d2722cb0368 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/index_timestamp_helper.h"
#include "mongo/db/catalog/multi_index_block_gen.h"
+#include "mongo/db/catalog/uncommitted_collections.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/index/multikey_paths.h"
@@ -116,8 +117,8 @@ void MultiIndexBlock::cleanUpAfterBuild(OperationContext* opCtx,
}
auto nss = collection->ns();
-
- invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X), nss.toString());
+ invariant(UncommittedCollections::get(opCtx).hasExclusiveAccessToCollection(opCtx, nss),
+ nss.toString());
while (true) {
try {
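A note on the invariant swap in this hunk and the two preceding ones: the direct MODE_X checks are loosened to the check implemented by UncommittedCollections::hasExclusiveAccessToCollection later in this patch. A minimal sketch of what the new check accepts, with an illustrative wrapper name, assuming only what that implementation shows:

#include "mongo/db/catalog/uncommitted_collections.h"
#include "mongo/db/operation_context.h"

namespace mongo {
// Illustrative equivalent of the relaxed invariant: exclusive access is satisfied either by
// holding the collection X lock, or by the collection being transaction-local (created earlier
// by this operation and not yet committed), in which case only MODE_IX is held for it.
inline bool hasExclusiveAccessSketch(OperationContext* opCtx, const NamespaceString& nss) {
    return opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X) ||
        UncommittedCollections::getForTxn(opCtx, nss) != nullptr;
}
}  // namespace mongo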
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index dc1d2208914..d63b2940626 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -71,7 +71,7 @@ namespace {
MONGO_FAIL_POINT_DEFINE(writeConflictInRenameCollCopyToTmp);
boost::optional<NamespaceString> getNamespaceFromUUID(OperationContext* opCtx, const UUID& uuid) {
- return CollectionCatalog::get(opCtx).lookupNSSByUUID(uuid);
+ return CollectionCatalog::get(opCtx).lookupNSSByUUID(opCtx, uuid);
}
bool isCollectionSharded(OperationContext* opCtx, const NamespaceString& nss) {
@@ -117,7 +117,7 @@ Status checkSourceAndTargetNamespaces(OperationContext* opCtx,
return Status(ErrorCodes::NamespaceNotFound, "source namespace does not exist");
Collection* const sourceColl =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(source);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, source);
if (!sourceColl) {
if (ViewCatalog::get(db)->lookup(opCtx, source.ns()))
return Status(ErrorCodes::CommandNotSupportedOnView,
@@ -128,7 +128,8 @@ Status checkSourceAndTargetNamespaces(OperationContext* opCtx,
BackgroundOperation::assertNoBgOpInProgForNs(source.ns());
IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(sourceColl->uuid());
- Collection* targetColl = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(target);
+ Collection* targetColl =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, target);
if (!targetColl) {
if (ViewCatalog::get(db)->lookup(opCtx, target.ns()))
@@ -314,9 +315,9 @@ Status renameCollectionWithinDB(OperationContext* opCtx,
auto db = DatabaseHolder::get(opCtx)->getDb(opCtx, source.db());
Collection* const sourceColl =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(source);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, source);
Collection* const targetColl =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(target);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, target);
AutoStatsTracker statsTracker(opCtx,
source,
@@ -356,7 +357,7 @@ Status renameCollectionWithinDBForApplyOps(OperationContext* opCtx,
auto db = DatabaseHolder::get(opCtx)->getDb(opCtx, source.db());
Collection* const sourceColl =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(source);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, source);
AutoStatsTracker statsTracker(opCtx,
source,
@@ -365,7 +366,8 @@ Status renameCollectionWithinDBForApplyOps(OperationContext* opCtx,
db->getProfilingLevel());
return writeConflictRetry(opCtx, "renameCollection", target.ns(), [&] {
- Collection* targetColl = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(target);
+ Collection* targetColl =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, target);
WriteUnitOfWork wuow(opCtx);
if (targetColl) {
if (sourceColl->uuid() == targetColl->uuid()) {
@@ -410,7 +412,7 @@ Status renameCollectionWithinDBForApplyOps(OperationContext* opCtx,
if (collToDropBasedOnUUID && !collToDropBasedOnUUID->isDropPendingNamespace()) {
invariant(collToDropBasedOnUUID->db() == target.db());
targetColl = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
- *collToDropBasedOnUUID);
+ opCtx, *collToDropBasedOnUUID);
}
}
@@ -482,7 +484,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
sourceDB->getProfilingLevel());
Collection* const sourceColl =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(source);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, source);
if (!sourceColl) {
if (sourceDB && ViewCatalog::get(sourceDB)->lookup(opCtx, source.ns()))
return Status(ErrorCodes::CommandNotSupportedOnView,
@@ -508,8 +510,9 @@ Status renameBetweenDBs(OperationContext* opCtx,
// Check if the target namespace exists and if dropTarget is true.
// Return a non-OK status if target exists and dropTarget is not true or if the collection
// is sharded.
- Collection* targetColl =
- targetDB ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(target) : nullptr;
+ Collection* targetColl = targetDB
+ ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, target)
+ : nullptr;
if (targetColl) {
if (sourceColl->uuid() == targetColl->uuid()) {
invariant(source == target);
@@ -716,7 +719,7 @@ void doLocalRenameIfOptionsAndIndexesHaveNotChanged(OperationContext* opCtx,
BSONObj originalCollectionOptions) {
AutoGetDb dbLock(opCtx, targetNs.db(), MODE_X);
auto collection = dbLock.getDb()
- ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(targetNs)
+ ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, targetNs)
: nullptr;
BSONObj collectionOptions = {};
if (collection) {
@@ -851,7 +854,7 @@ Status renameCollectionForApplyOps(OperationContext* opCtx,
NamespaceString sourceNss(sourceNsElt.valueStringData());
NamespaceString targetNss(targetNsElt.valueStringData());
if (uuidToRename) {
- auto nss = CollectionCatalog::get(opCtx).lookupNSSByUUID(uuidToRename.get());
+ auto nss = CollectionCatalog::get(opCtx).lookupNSSByUUID(opCtx, uuidToRename.get());
if (nss)
sourceNss = *nss;
}
diff --git a/src/mongo/db/catalog/rename_collection_test.cpp b/src/mongo/db/catalog/rename_collection_test.cpp
index ebef7bd241e..465e84e39d7 100644
--- a/src/mongo/db/catalog/rename_collection_test.cpp
+++ b/src/mongo/db/catalog/rename_collection_test.cpp
@@ -446,7 +446,7 @@ CollectionUUID _getCollectionUuid(OperationContext* opCtx, const NamespaceString
* Get collection namespace by UUID.
*/
NamespaceString _getCollectionNssFromUUID(OperationContext* opCtx, const UUID& uuid) {
- Collection* source = CollectionCatalog::get(opCtx).lookupCollectionByUUID(uuid);
+ Collection* source = CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, uuid);
return source ? source->ns() : NamespaceString();
}
@@ -514,7 +514,7 @@ Collection* _getCollection_inlock(OperationContext* opCtx, const NamespaceString
if (!db) {
return nullptr;
}
- return CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ return CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
}
TEST_F(RenameCollectionTest, RenameCollectionReturnsNamespaceNotFoundIfDatabaseDoesNotExist) {
@@ -548,7 +548,7 @@ TEST_F(RenameCollectionTest, RenameCollectionReturnsNotMasterIfNotPrimary) {
TEST_F(RenameCollectionTest, TargetCollectionNameLong) {
_createCollection(_opCtx.get(), _sourceNss);
- const std::string targetCollectionName(8192, 'a');
+ const std::string targetCollectionName(500, 'a');
NamespaceString longTargetNss(_sourceNss.db(), targetCollectionName);
ASSERT_OK(renameCollection(_opCtx.get(), _sourceNss, longTargetNss, {}));
}
@@ -557,7 +557,7 @@ TEST_F(RenameCollectionTest, LongIndexNameAllowedForTargetCollection) {
ASSERT_GREATER_THAN(_targetNssDifferentDb.size(), _sourceNss.size());
_createCollection(_opCtx.get(), _sourceNss);
- std::size_t longIndexLength = 8192;
+ std::size_t longIndexLength = 500;
const std::string indexName(longIndexLength, 'a');
_createIndexOnEmptyCollection(_opCtx.get(), _sourceNss, indexName);
ASSERT_OK(renameCollection(_opCtx.get(), _sourceNss, _targetNssDifferentDb, {}));
@@ -571,7 +571,7 @@ TEST_F(RenameCollectionTest, LongIndexNameAllowedForTemporaryCollectionForRename
const NamespaceString tempNss(_targetNssDifferentDb.getSisterNS("tmpXXXXX.renameCollection"));
_createCollection(_opCtx.get(), _sourceNss);
- std::size_t longIndexLength = 8192;
+ std::size_t longIndexLength = 500;
const std::string indexName(longIndexLength, 'a');
_createIndexOnEmptyCollection(_opCtx.get(), _sourceNss, indexName);
ASSERT_OK(renameCollection(_opCtx.get(), _sourceNss, _targetNssDifferentDb, {}));
@@ -691,7 +691,8 @@ TEST_F(RenameCollectionTest, RenameCollectionForApplyOpsDropTargetByUUIDTargetEx
// B (originally A) should exist
ASSERT_TRUE(_collectionExists(_opCtx.get(), collB));
// The original B should exist too, but with a temporary name
- const auto& tmpB = CollectionCatalog::get(_opCtx.get()).lookupNSSByUUID(collBUUID);
+ const auto& tmpB =
+ CollectionCatalog::get(_opCtx.get()).lookupNSSByUUID(_opCtx.get(), collBUUID);
ASSERT(tmpB);
ASSERT_TRUE(tmpB->coll().startsWith("tmp"));
ASSERT_TRUE(*tmpB != collB);
@@ -723,7 +724,8 @@ TEST_F(RenameCollectionTest,
// B (originally A) should exist
ASSERT_TRUE(_collectionExists(_opCtx.get(), collB));
// The original B should exist too, but with a temporary name
- const auto& tmpB = CollectionCatalog::get(_opCtx.get()).lookupNSSByUUID(collBUUID);
+ const auto& tmpB =
+ CollectionCatalog::get(_opCtx.get()).lookupNSSByUUID(_opCtx.get(), collBUUID);
ASSERT(tmpB);
ASSERT_TRUE(*tmpB != collB);
ASSERT_TRUE(tmpB->coll().startsWith("tmp"));
@@ -748,7 +750,8 @@ TEST_F(RenameCollectionTest,
// B (originally A) should exist
ASSERT_TRUE(_collectionExists(_opCtx.get(), collB));
// The original B should exist too, but with a temporary name
- const auto& tmpB = CollectionCatalog::get(_opCtx.get()).lookupNSSByUUID(collBUUID);
+ const auto& tmpB =
+ CollectionCatalog::get(_opCtx.get()).lookupNSSByUUID(_opCtx.get(), collBUUID);
ASSERT(tmpB);
ASSERT_TRUE(*tmpB != collB);
ASSERT_TRUE(tmpB->coll().startsWith("tmp"));
@@ -1243,9 +1246,9 @@ TEST_F(RenameCollectionTest, CollectionCatalogMappingRemainsIntactThroughRename)
auto& catalog = CollectionCatalog::get(_opCtx.get());
Collection* sourceColl = _getCollection_inlock(_opCtx.get(), _sourceNss);
ASSERT(sourceColl);
- ASSERT_EQ(sourceColl, catalog.lookupCollectionByUUID(sourceColl->uuid()));
+ ASSERT_EQ(sourceColl, catalog.lookupCollectionByUUID(_opCtx.get(), sourceColl->uuid()));
ASSERT_OK(renameCollection(_opCtx.get(), _sourceNss, _targetNss, {}));
- ASSERT_EQ(sourceColl, catalog.lookupCollectionByUUID(sourceColl->uuid()));
+ ASSERT_EQ(sourceColl, catalog.lookupCollectionByUUID(_opCtx.get(), sourceColl->uuid()));
}
TEST_F(RenameCollectionTest, FailRenameCollectionFromReplicatedToUnreplicatedDB) {
diff --git a/src/mongo/db/catalog/uncommitted_collections.cpp b/src/mongo/db/catalog/uncommitted_collections.cpp
new file mode 100644
index 00000000000..db532536744
--- /dev/null
+++ b/src/mongo/db/catalog/uncommitted_collections.cpp
@@ -0,0 +1,140 @@
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/catalog/collection_catalog.h"
+#include "mongo/db/catalog/uncommitted_collections.h"
+#include "mongo/db/storage/durable_catalog.h"
+#include "mongo/util/assert_util.h"
+#include "mongo/util/log.h"
+
+namespace mongo {
+namespace {
+const auto getUncommittedCollections =
+ OperationContext::declareDecoration<UncommittedCollections>();
+} // namespace
+
+UncommittedCollections& UncommittedCollections::get(OperationContext* opCtx) {
+ return getUncommittedCollections(opCtx);
+}
+
+void UncommittedCollections::addToTxn(OperationContext* opCtx,
+ std::unique_ptr<Collection> coll,
+ Timestamp createTime) {
+ UncommittedCollections* collList = &getUncommittedCollections(opCtx);
+ auto existingColl = collList->_collections.find(coll->uuid());
+ uassert(31370,
+ str::stream() << "collection already exists. ns: " << coll->ns(),
+ existingColl == collList->_collections.end());
+
+ auto nss = coll->ns();
+ auto uuid = coll->uuid();
+ auto collPtr = coll.get();
+ collList->_collections[uuid] = std::move(coll);
+ collList->_nssIndex.insert({nss, uuid});
+
+ opCtx->recoveryUnit()->registerPreCommitHook(
+ [collList, uuid, createTime](OperationContext* opCtx) {
+ collList->commit(opCtx, uuid, createTime);
+ });
+ opCtx->recoveryUnit()->onCommit(
+ [collList, collPtr, createTime](boost::optional<Timestamp> commitTs) {
+ // Verify that the collection was given a minVisibleTimestamp equal to the transaction's
+ // commit timestamp.
+ invariant(collPtr->getMinimumVisibleSnapshot() == createTime);
+ collList->clear();
+ });
+ opCtx->recoveryUnit()->onRollback([collList]() { collList->clear(); });
+}
+
+Collection* UncommittedCollections::getForTxn(OperationContext* opCtx,
+ const NamespaceStringOrUUID& id) {
+ if (id.nss()) {
+ return getForTxn(opCtx, id.nss().get());
+ } else {
+ return getForTxn(opCtx, id.uuid().get());
+ }
+}
+
+Collection* UncommittedCollections::getForTxn(OperationContext* opCtx, const NamespaceString& nss) {
+ auto& collList = getUncommittedCollections(opCtx);
+ auto it = collList._nssIndex.find(nss);
+ if (it == collList._nssIndex.end()) {
+ return nullptr;
+ }
+
+ return collList._collections[it->second].get();
+}
+
+Collection* UncommittedCollections::getForTxn(OperationContext* opCtx, const UUID& uuid) {
+ auto& collList = getUncommittedCollections(opCtx);
+ auto it = collList._collections.find(uuid);
+ if (it == collList._collections.end()) {
+ return nullptr;
+ }
+
+ return it->second.get();
+}
+
+void UncommittedCollections::commit(OperationContext* opCtx, UUID uuid, Timestamp createTs) {
+ if (_collections.count(uuid) == 0) {
+ return;
+ }
+
+ auto it = _collections.find(uuid);
+ // The uncommitted entry must still hold a valid Collection pointer.
+ invariant(it->second.get(), uuid.toString());
+ it->second->setMinimumVisibleSnapshot(createTs);
+
+ auto nss = it->second->ns();
+ CollectionCatalog::get(opCtx).registerCollection(uuid, &(it->second));
+ _collections.erase(it);
+ _nssIndex.erase(nss);
+}
+
+bool UncommittedCollections::hasExclusiveAccessToCollection(OperationContext* opCtx,
+ const NamespaceString& nss) const {
+ if (opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X)) {
+ return true;
+ }
+
+ if (_nssIndex.count(nss) == 1) {
+ // If the collection is found in the local catalog, the appropriate locks must have already
+ // been taken.
+ invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX), nss.toString());
+ return true;
+ }
+
+ return false;
+}
+
+} // namespace mongo
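Taken together, the hooks registered in addToTxn() describe the intended lifecycle of a transaction-local collection. The sketch below shows that lifecycle from a creating caller's point of view; the createCollectionSketch wrapper and the way the Collection object and timestamp are obtained are illustrative, not part of this patch.

#include "mongo/db/catalog/uncommitted_collections.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/operation_context.h"

namespace mongo {
// Sketch: a creating operation parks its new Collection on the OperationContext.
void createCollectionSketch(OperationContext* opCtx,
                            const NamespaceString& nss,
                            std::unique_ptr<Collection> newColl,  // built by the caller
                            Timestamp createTime) {
    WriteUnitOfWork wuow(opCtx);

    // Until commit, the collection is visible only through this operation's lookups.
    UncommittedCollections::addToTxn(opCtx, std::move(newColl), createTime);
    invariant(UncommittedCollections::getForTxn(opCtx, nss));

    // Committing the unit of work runs the pre-commit hook, which calls commit() above and
    // registers the collection with the global CollectionCatalog. Per commit()'s documentation,
    // a name conflict with a concurrent creator surfaces as a WriteConflictException for the
    // losing transaction; the onRollback hook simply clears the transaction-local state.
    wuow.commit();
}
}  // namespace mongo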
diff --git a/src/mongo/db/catalog/uncommitted_collections.h b/src/mongo/db/catalog/uncommitted_collections.h
new file mode 100644
index 00000000000..6b2fe60954f
--- /dev/null
+++ b/src/mongo/db/catalog/uncommitted_collections.h
@@ -0,0 +1,74 @@
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "mongo/bson/timestamp.h"
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/operation_context.h"
+
+namespace mongo {
+class UncommittedCollections {
+public:
+ UncommittedCollections() = default;
+ ~UncommittedCollections() = default;
+
+ static UncommittedCollections& get(OperationContext* opCtx);
+
+ static void addToTxn(OperationContext* opCtx,
+ std::unique_ptr<Collection> coll,
+ Timestamp createTime);
+
+ static Collection* getForTxn(OperationContext* opCtx, const NamespaceStringOrUUID& nss);
+ static Collection* getForTxn(OperationContext* opCtx, const NamespaceString& nss);
+ static Collection* getForTxn(OperationContext* opCtx, const UUID& uuid);
+
+ /**
+ * Registers the uncommitted collection identified by 'uuid' with the CollectionCatalog and
+ * stamps it with 'createTs' as its minimum visible snapshot. If the collection name conflicts
+ * with an existing catalog entry, this method throws a `WriteConflictException`.
+ */
+ void commit(OperationContext* opCtx, UUID uuid, Timestamp createTs);
+
+ bool hasExclusiveAccessToCollection(OperationContext* opCtx, const NamespaceString& nss) const;
+
+ void clear() {
+ _collections.clear();
+ _nssIndex.clear();
+ }
+
+private:
+ std::map<UUID, std::unique_ptr<Collection>> _collections;
+ std::map<NamespaceString, UUID> _nssIndex;
+};
+} // namespace mongo
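Every lookup*/resolveNamespaceStringOrUUID call site touched in this patch now passes the OperationContext, which is what lets the catalog see the per-operation decoration defined above. A plausible reading of that layering, sketched with an illustrative helper name (the real CollectionCatalog implementation is not shown in this diff):

#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/catalog/uncommitted_collections.h"

namespace mongo {
// Illustrative only: collections created earlier in this operation are served from the
// per-operation list first; everything already committed comes from the global catalog.
Collection* lookupByNamespaceSketch(OperationContext* opCtx, const NamespaceString& nss) {
    if (auto localColl = UncommittedCollections::getForTxn(opCtx, nss)) {
        return localColl;
    }
    return CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
}
}  // namespace mongo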
diff --git a/src/mongo/db/catalog/validate_state.cpp b/src/mongo/db/catalog/validate_state.cpp
index 19a20a178b5..f1924d254f9 100644
--- a/src/mongo/db/catalog/validate_state.cpp
+++ b/src/mongo/db/catalog/validate_state.cpp
@@ -67,8 +67,8 @@ ValidateState::ValidateState(OperationContext* opCtx,
}
_database = _databaseLock->getDb() ? _databaseLock->getDb() : nullptr;
- _collection =
- _database ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(_nss) : nullptr;
+ _collection = _database ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, _nss)
+ : nullptr;
if (!_collection) {
if (_database && ViewCatalog::get(_database)->lookup(opCtx, _nss.ns())) {
@@ -289,7 +289,7 @@ void ValidateState::_relockDatabaseAndCollection(OperationContext* opCtx) {
uasserted(ErrorCodes::Interrupted, collErrMsg);
}
- _collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(*_uuid);
+ _collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, *_uuid);
uassert(ErrorCodes::Interrupted, collErrMsg, _collection);
// The namespace of the collection can be changed during a same database collection rename.
diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index cd69d4d4274..156727191dd 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -70,7 +70,7 @@ AutoGetCollection::AutoGetCollection(OperationContext* opCtx,
}
_collLock.emplace(opCtx, nsOrUUID, modeColl, deadline);
- _resolvedNss = CollectionCatalog::get(opCtx).resolveNamespaceStringOrUUID(nsOrUUID);
+ _resolvedNss = CollectionCatalog::get(opCtx).resolveNamespaceStringOrUUID(opCtx, nsOrUUID);
// Wait for a configured amount of time after acquiring locks if the failpoint is enabled
setAutoGetCollectionWait.execute(
@@ -95,7 +95,7 @@ AutoGetCollection::AutoGetCollection(OperationContext* opCtx,
if (!db)
return;
- _coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(_resolvedNss);
+ _coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, _resolvedNss);
invariant(!nsOrUUID.uuid() || _coll,
str::stream() << "Collection for " << _resolvedNss.ns()
<< " disappeared after successufully resolving "
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index c6475cbe0e9..410def3bffa 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -117,7 +117,8 @@ struct Cloner::Fun {
bool createdCollection = false;
Collection* collection = nullptr;
- collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(to_collection);
+ collection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, to_collection);
if (!collection) {
massert(17321,
str::stream() << "collection dropped during clone [" << to_collection.ns()
@@ -139,7 +140,7 @@ struct Cloner::Fun {
<< to_collection.ns() << "]");
wunit.commit();
collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(to_collection);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, to_collection);
invariant(collection,
str::stream()
<< "Missing collection during clone [" << to_collection.ns() << "]");
@@ -181,7 +182,7 @@ struct Cloner::Fun {
db != nullptr);
collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(to_collection);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, to_collection);
uassert(28594,
str::stream()
<< "Collection " << to_collection.ns() << " dropped while cloning",
@@ -329,7 +330,7 @@ void Cloner::copyIndexes(OperationContext* opCtx,
auto db = databaseHolder->openDb(opCtx, toDBName);
Collection* collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(to_collection);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, to_collection);
if (!collection) {
writeConflictRetry(opCtx, "createCollection", to_collection.ns(), [&] {
opCtx->checkForInterrupt();
@@ -347,7 +348,8 @@ void Cloner::copyIndexes(OperationContext* opCtx,
<< "Collection creation failed while copying indexes from "
<< from_collection.ns() << " to " << to_collection.ns() << " (Cloner)");
wunit.commit();
- collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(to_collection);
+ collection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, to_collection);
invariant(collection,
str::stream() << "Missing collection " << to_collection.ns() << " (Cloner)");
});
@@ -586,7 +588,8 @@ Status Cloner::createCollectionsForDb(
opCtx->checkForInterrupt();
WriteUnitOfWork wunit(opCtx);
- Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ Collection* collection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
if (collection) {
if (!params.shardedColl) {
// If the collection is unsharded then we want to fail when a collection
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index b31bb5822cc..74826de1215 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -112,7 +112,7 @@ public:
const auto hasTerm = false;
return authSession->checkAuthForFind(
CollectionCatalog::get(opCtx).resolveNamespaceStringOrUUID(
- CommandHelpers::parseNsOrUUID(dbname, cmdObj)),
+ opCtx, CommandHelpers::parseNsOrUUID(dbname, cmdObj)),
hasTerm);
}
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index ada7fa43a7f..e85ec4911fd 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -370,7 +370,7 @@ Collection* getOrCreateCollection(OperationContext* opCtx,
const BSONObj& cmdObj,
std::string* errmsg,
BSONObjBuilder* result) {
- if (auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(ns)) {
+ if (auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, ns)) {
result->appendBool(kCreateCollectionAutomaticallyFieldName, false);
return collection;
}
@@ -549,7 +549,7 @@ bool runCreateIndexesForMobile(OperationContext* opCtx,
Lock::CollectionLock colLock(opCtx, {dbName, collectionUUID}, MODE_IS);
// Reaquire the collection pointer because we momentarily released the collection lock.
- collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(collectionUUID);
+ collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, collectionUUID);
invariant(collection);
// Reaquire the 'ns' string in case the collection was renamed while we momentarily released
@@ -570,7 +570,7 @@ bool runCreateIndexesForMobile(OperationContext* opCtx,
Lock::CollectionLock colLock(opCtx, {dbName, collectionUUID}, MODE_IS);
// Reaquire the collection pointer because we momentarily released the collection lock.
- collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(collectionUUID);
+ collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, collectionUUID);
invariant(collection);
// Reaquire the 'ns' string in case the collection was renamed while we momentarily released
@@ -594,7 +594,7 @@ bool runCreateIndexesForMobile(OperationContext* opCtx,
Lock::CollectionLock colLock(opCtx, {dbName, collectionUUID}, MODE_S);
// Reaquire the collection pointer because we momentarily released the collection lock.
- collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(collectionUUID);
+ collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, collectionUUID);
invariant(collection);
// Reaquire the 'ns' string in case the collection was renamed while we momentarily released
@@ -619,7 +619,7 @@ bool runCreateIndexesForMobile(OperationContext* opCtx,
opCtx, NamespaceStringOrUUID(dbName, collectionUUID), MODE_X);
// Reaquire the collection pointer because we momentarily released the collection lock.
- collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(collectionUUID);
+ collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, collectionUUID);
invariant(collection);
// Reaquire the 'ns' string in case the collection was renamed while we momentarily released
@@ -629,7 +629,7 @@ bool runCreateIndexesForMobile(OperationContext* opCtx,
auto databaseHolder = DatabaseHolder::get(opCtx);
db = databaseHolder->getDb(opCtx, ns.db());
- invariant(CollectionCatalog::get(opCtx).lookupCollectionByNamespace(ns));
+ invariant(CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, ns));
// Perform the third and final drain while holding the exclusive collection lock.
uassertStatusOK(
diff --git a/src/mongo/db/commands/dbcheck.cpp b/src/mongo/db/commands/dbcheck.cpp
index 8e87e6b6463..c933dd6ad34 100644
--- a/src/mongo/db/commands/dbcheck.cpp
+++ b/src/mongo/db/commands/dbcheck.cpp
@@ -334,7 +334,8 @@ private:
return false;
}
- auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(info.nss);
+ auto collection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, info.nss);
if (!collection) {
return false;
}
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 2e65ced1211..da181689560 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -325,7 +325,8 @@ public:
private:
std::string _hashCollection(OperationContext* opCtx, Database* db, const NamespaceString& nss) {
- Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ Collection* collection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
invariant(collection);
boost::optional<Lock::CollectionLock> collLock;
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 4da2af80abf..6dadccff56b 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -110,7 +110,7 @@ public:
const auto hasTerm = false;
return authSession->checkAuthForFind(
CollectionCatalog::get(opCtx).resolveNamespaceStringOrUUID(
- CommandHelpers::parseNsOrUUID(dbname, cmdObj)),
+ opCtx, CommandHelpers::parseNsOrUUID(dbname, cmdObj)),
hasTerm);
}
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 7a0c7f038e1..d0a36274cc1 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -462,10 +462,10 @@ public:
assertCanWrite(
opCtx,
nsString,
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nsString));
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nsString));
collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nsString);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nsString);
;
// If someone else beat us to creating the collection, do nothing
@@ -478,8 +478,8 @@ public:
db->userCreateNS(opCtx, nsString, defaultCollectionOptions));
wuow.commit();
- collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nsString);
+ collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
+ opCtx, nsString);
}
invariant(collection);
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index bd24d034e29..9f583615036 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -207,7 +207,7 @@ public:
const auto hasTerm = _request.body.hasField(kTermField);
uassertStatusOK(authSession->checkAuthForFind(
CollectionCatalog::get(opCtx).resolveNamespaceStringOrUUID(
- CommandHelpers::parseNsOrUUID(_dbName, _request.body)),
+ opCtx, CommandHelpers::parseNsOrUUID(_dbName, _request.body)),
hasTerm));
}
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index ca0dab0d375..7e2e90c8d86 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -312,7 +312,7 @@ public:
Lock::CollectionLock clk(opCtx, nss, MODE_IS);
Collection* collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
BSONObj collBson =
buildCollectionBson(opCtx, collection, includePendingDrops, nameOnly);
if (!collBson.isEmpty()) {
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index 55d3c6b0a65..dafefa6f1ee 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -118,7 +118,7 @@ public:
// Check for the listIndexes ActionType on the database.
const auto nss = CollectionCatalog::get(opCtx).resolveNamespaceStringOrUUID(
- CommandHelpers::parseNsOrUUID(dbname, cmdObj));
+ opCtx, CommandHelpers::parseNsOrUUID(dbname, cmdObj));
if (authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(nss),
ActionType::listIndexes)) {
return Status::OK();
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index f7bc154dfb8..df22782ff0b 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -114,7 +114,7 @@ unsigned long long collectionCount(OperationContext* opCtx,
auto databaseHolder = DatabaseHolder::get(opCtx);
auto db = databaseHolder->getDb(opCtx, nss.ns());
if (db) {
- coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
}
} else {
ctx.emplace(opCtx, nss);
@@ -183,8 +183,9 @@ void dropTempCollections(OperationContext* cleanupOpCtx,
[cleanupOpCtx, &tempNamespace] {
AutoGetDb autoDb(cleanupOpCtx, tempNamespace.db(), MODE_X);
if (auto db = autoDb.getDb()) {
- if (auto collection = CollectionCatalog::get(cleanupOpCtx)
- .lookupCollectionByNamespace(tempNamespace)) {
+ if (auto collection =
+ CollectionCatalog::get(cleanupOpCtx)
+ .lookupCollectionByNamespace(cleanupOpCtx, tempNamespace)) {
uassert(ErrorCodes::PrimarySteppedDown,
str::stream() << "no longer primary while dropping temporary "
"collection for mapReduce: "
@@ -210,7 +211,7 @@ void dropTempCollections(OperationContext* cleanupOpCtx,
auto databaseHolder = DatabaseHolder::get(cleanupOpCtx);
if (auto db = databaseHolder->getDb(cleanupOpCtx, incLong.ns())) {
if (auto collection = CollectionCatalog::get(cleanupOpCtx)
- .lookupCollectionByNamespace(incLong)) {
+ .lookupCollectionByNamespace(cleanupOpCtx, incLong)) {
BackgroundOperation::assertNoBgOpInProgForNs(incLong.ns());
IndexBuildsCoordinator::get(cleanupOpCtx)
->assertNoIndexBuildInProgForCollection(collection->uuid());
@@ -525,7 +526,8 @@ void State::prepTempCollection() {
writeConflictRetry(_opCtx, "M/R prepTempCollection", _config.incLong.ns(), [this] {
AutoGetOrCreateDb autoGetIncCollDb(_opCtx, _config.incLong.db(), MODE_X);
auto const db = autoGetIncCollDb.getDb();
- invariant(!CollectionCatalog::get(_opCtx).lookupCollectionByNamespace(_config.incLong));
+ invariant(!CollectionCatalog::get(_opCtx).lookupCollectionByNamespace(_opCtx,
+ _config.incLong));
CollectionOptions options;
options.setNoIdIndex();
@@ -587,8 +589,8 @@ void State::prepTempCollection() {
// Create temp collection and insert the indexes from temporary storage
AutoGetOrCreateDb autoGetFinalDb(_opCtx, _config.tempNamespace.db(), MODE_X);
auto const db = autoGetFinalDb.getDb();
- invariant(
- !CollectionCatalog::get(_opCtx).lookupCollectionByNamespace(_config.tempNamespace));
+ invariant(!CollectionCatalog::get(_opCtx).lookupCollectionByNamespace(
+ _opCtx, _config.tempNamespace));
uassert(
ErrorCodes::PrimarySteppedDown,
diff --git a/src/mongo/db/commands/mr_test.cpp b/src/mongo/db/commands/mr_test.cpp
index 623fbe109bc..c65f492893d 100644
--- a/src/mongo/db/commands/mr_test.cpp
+++ b/src/mongo/db/commands/mr_test.cpp
@@ -586,8 +586,9 @@ TEST_F(MapReduceCommandTest, ReplacingExistingOutputCollectionPreservesIndexes)
<< BSON("indexesCreated" << _opObserver->indexesCreated);
ASSERT_BSONOBJ_EQ(indexSpec, _opObserver->indexesCreated[0]);
- ASSERT_NOT_EQUALS(*options.uuid,
- *CollectionCatalog::get(_opCtx.get()).lookupUUIDByNSS(outputNss))
+ ASSERT_NOT_EQUALS(
+ *options.uuid,
+ *CollectionCatalog::get(_opCtx.get()).lookupUUIDByNSS(_opCtx.get(), outputNss))
<< "Output collection " << outputNss << " was not replaced";
_assertTemporaryCollectionsAreDropped();
diff --git a/src/mongo/db/commands/oplog_application_checks.cpp b/src/mongo/db/commands/oplog_application_checks.cpp
index 783d1002b35..3cb9dafb0ac 100644
--- a/src/mongo/db/commands/oplog_application_checks.cpp
+++ b/src/mongo/db/commands/oplog_application_checks.cpp
@@ -67,7 +67,7 @@ Status OplogApplicationChecks::checkOperationAuthorization(OperationContext* opC
// ns by UUID overrides the ns specified if they are different.
auto& catalog = CollectionCatalog::get(opCtx);
boost::optional<NamespaceString> uuidCollNS =
- catalog.lookupNSSByUUID(getUUIDFromOplogEntry(oplogEntry));
+ catalog.lookupNSSByUUID(opCtx, getUUIDFromOplogEntry(oplogEntry));
if (uuidCollNS && *uuidCollNS != ns)
ns = *uuidCollNS;
}
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index ef7394df62a..8569562dd1d 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -292,7 +292,8 @@ StatusWith<StringMap<ExpressionContext::ResolvedNamespace>> resolveInvolvedNames
// from a $merge to a collection in a different database. Since we cannot write to
// views, simply assume that the namespace is a collection.
resolvedNamespaces[involvedNs.coll()] = {involvedNs, std::vector<BSONObj>{}};
- } else if (!db || CollectionCatalog::get(opCtx).lookupCollectionByNamespace(involvedNs)) {
+ } else if (!db ||
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, involvedNs)) {
// If the aggregation database exists and 'involvedNs' refers to a collection namespace,
// then we resolve it as an empty pipeline in order to read directly from the underlying
// collection. If the database doesn't exist, then we still resolve it as an empty
@@ -343,7 +344,7 @@ Status collatorCompatibleWithPipeline(OperationContext* opCtx,
return Status::OK();
}
for (auto&& potentialViewNs : liteParsedPipeline.getInvolvedNamespaces()) {
- if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(potentialViewNs)) {
+ if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, potentialViewNs)) {
continue;
}
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 3e94951bb5b..81b7332b31a 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -89,7 +89,8 @@ public:
WriteUnitOfWork wunit(opCtx);
UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
- Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ Collection* collection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
if (!collection) {
collection = db->createCollection(opCtx, nss);
if (!collection) {
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index 689b7a46833..59a09012695 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -272,7 +272,7 @@ Lock::CollectionLock::CollectionLock(OperationContext* opCtx,
// 'nsOrUUID' must be a UUID and dbName.
auto& collectionCatalog = CollectionCatalog::get(opCtx);
- auto nss = collectionCatalog.resolveNamespaceStringOrUUID(nssOrUUID);
+ auto nss = collectionCatalog.resolveNamespaceStringOrUUID(opCtx, nssOrUUID);
// The UUID cannot move between databases so this one dassert is sufficient.
dassert(_opCtx->lockState()->isDbLockedForMode(nss.db(),
@@ -295,7 +295,7 @@ Lock::CollectionLock::CollectionLock(OperationContext* opCtx,
// We looked up UUID without a collection lock so it's possible that the
// collection name changed now. Look it up again.
prevResolvedNss = nss;
- nss = collectionCatalog.resolveNamespaceStringOrUUID(nssOrUUID);
+ nss = collectionCatalog.resolveNamespaceStringOrUUID(opCtx, nssOrUUID);
} while (nss != prevResolvedNss);
}
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index c5501b733be..bc669196f8c 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -231,7 +231,7 @@ void logStartup(OperationContext* opCtx) {
AutoGetOrCreateDb autoDb(opCtx, startupLogCollectionName.db(), mongo::MODE_X);
Database* db = autoDb.getDb();
Collection* collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(startupLogCollectionName);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, startupLogCollectionName);
WriteUnitOfWork wunit(opCtx);
if (!collection) {
BSONObj options = BSON("capped" << true << "size" << 10 * 1024 * 1024);
@@ -239,8 +239,8 @@ void logStartup(OperationContext* opCtx) {
CollectionOptions collectionOptions = uassertStatusOK(
CollectionOptions::parse(options, CollectionOptions::ParseKind::parseForCommand));
uassertStatusOK(db->userCreateNS(opCtx, startupLogCollectionName, collectionOptions));
- collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(startupLogCollectionName);
+ collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
+ opCtx, startupLogCollectionName);
}
invariant(collection);
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index 4c328cec41e..dd98f9f5ee7 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -364,7 +364,7 @@ OldClientContext::OldClientContext(OperationContext* opCtx, const std::string& n
->checkShardVersionOrThrow(
_opCtx,
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
- NamespaceString(ns)));
+ opCtx, NamespaceString(ns)));
break;
}
}
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 7c1e045f497..e7748aa1e9a 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -139,7 +139,7 @@ bool Helpers::findById(OperationContext* opCtx,
invariant(database);
Collection* collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(NamespaceString(ns));
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, NamespaceString(ns));
if (!collection) {
return false;
}
@@ -263,8 +263,9 @@ BSONObj Helpers::inferKeyPattern(const BSONObj& o) {
void Helpers::emptyCollection(OperationContext* opCtx, const NamespaceString& nss) {
OldClientContext context(opCtx, nss.ns());
repl::UnreplicatedWritesBlock uwb(opCtx);
- Collection* collection =
- context.db() ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss) : nullptr;
+ Collection* collection = context.db()
+ ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss)
+ : nullptr;
deleteObjects(opCtx, collection, nss, BSONObj(), false);
}
diff --git a/src/mongo/db/exec/requires_collection_stage.cpp b/src/mongo/db/exec/requires_collection_stage.cpp
index 060722dbe14..27a1e478fd4 100644
--- a/src/mongo/db/exec/requires_collection_stage.cpp
+++ b/src/mongo/db/exec/requires_collection_stage.cpp
@@ -52,7 +52,7 @@ void RequiresCollectionStageBase<CollectionT>::doRestoreState() {
dassert(getOpCtx()->lockState()->isCollectionLockedForMode(_nss, MODE_IS));
const CollectionCatalog& catalog = CollectionCatalog::get(getOpCtx());
- auto newNss = catalog.lookupNSSByUUID(_collectionUUID);
+ auto newNss = catalog.lookupNSSByUUID(getOpCtx(), _collectionUUID);
uassert(ErrorCodes::QueryPlanKilled,
str::stream() << "collection dropped. UUID " << _collectionUUID,
newNss);
@@ -68,7 +68,7 @@ void RequiresCollectionStageBase<CollectionT>::doRestoreState() {
// restored locks on the correct name. It is now safe to restore the Collection pointer. The
// collection must exist, since we already successfully looked up the namespace string by UUID
// under the correct lock manager locks.
- _collection = catalog.lookupCollectionByUUID(_collectionUUID);
+ _collection = catalog.lookupCollectionByUUID(getOpCtx(), _collectionUUID);
invariant(_collection);
uassert(ErrorCodes::QueryPlanKilled,
diff --git a/src/mongo/db/free_mon/free_mon_mongod.cpp b/src/mongo/db/free_mon/free_mon_mongod.cpp
index fbf9255ddce..79b2769068e 100644
--- a/src/mongo/db/free_mon/free_mon_mongod.cpp
+++ b/src/mongo/db/free_mon/free_mon_mongod.cpp
@@ -236,7 +236,7 @@ public:
void collect(OperationContext* opCtx, BSONObjBuilder& builder) {
auto& catalog = CollectionCatalog::get(opCtx);
for (auto& nss : _namespaces) {
- auto optUUID = catalog.lookupUUIDByNSS(nss);
+ auto optUUID = catalog.lookupUUIDByNSS(opCtx, nss);
if (optUUID) {
builder << nss.toString() << optUUID.get();
}
diff --git a/src/mongo/db/index_build_entry_helpers.cpp b/src/mongo/db/index_build_entry_helpers.cpp
index dd918677412..497be488d47 100644
--- a/src/mongo/db/index_build_entry_helpers.cpp
+++ b/src/mongo/db/index_build_entry_helpers.cpp
@@ -104,7 +104,7 @@ void ensureIndexBuildEntriesNamespaceExists(OperationContext* opCtx) {
// Create the collection if it doesn't exist.
if (!CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
- NamespaceString::kIndexBuildEntryNamespace)) {
+ opCtx, NamespaceString::kIndexBuildEntryNamespace)) {
WriteUnitOfWork wuow(opCtx);
CollectionOptions defaultCollectionOptions;
Collection* collection =
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index 77b7ce01fd9..206390fd74f 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -38,6 +38,7 @@
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/index_build_entry_gen.h"
#include "mongo/db/catalog/index_timestamp_helper.h"
+#include "mongo/db/catalog/uncommitted_collections.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/concurrency/locker.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
@@ -314,7 +315,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::rebuildIndex
}
auto& collectionCatalog = CollectionCatalog::get(getGlobalServiceContext());
- Collection* collection = collectionCatalog.lookupCollectionByNamespace(nss);
+ Collection* collection = collectionCatalog.lookupCollectionByNamespace(opCtx, nss);
// Complete the index build.
return _runIndexRebuildForRecovery(opCtx, collection, buildUUID);
@@ -340,7 +341,7 @@ Status IndexBuildsCoordinator::_startIndexBuildForRecovery(OperationContext* opC
}
auto& collectionCatalog = CollectionCatalog::get(getGlobalServiceContext());
- Collection* collection = collectionCatalog.lookupCollectionByNamespace(nss);
+ Collection* collection = collectionCatalog.lookupCollectionByNamespace(opCtx, nss);
auto indexCatalog = collection->getIndexCatalog();
{
// These steps are combined into a single WUOW to ensure there are no commits without
@@ -567,7 +568,7 @@ void IndexBuildsCoordinator::restartIndexBuildsForRecovery(
const std::map<UUID, StorageEngine::IndexBuildToRestart>& buildsToRestart) {
for (auto& [buildUUID, build] : buildsToRestart) {
boost::optional<NamespaceString> nss =
- CollectionCatalog::get(opCtx).lookupNSSByUUID(build.collUUID);
+ CollectionCatalog::get(opCtx).lookupNSSByUUID(opCtx, build.collUUID);
invariant(nss);
log() << "Restarting index build for collection: " << *nss
@@ -693,7 +694,7 @@ void IndexBuildsCoordinator::createIndexes(OperationContext* opCtx,
const std::vector<BSONObj>& specs,
IndexBuildsManager::IndexConstraints indexConstraints,
bool fromMigrate) {
- auto collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(collectionUUID);
+ auto collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, collectionUUID);
invariant(collection,
str::stream() << "IndexBuildsCoordinator::createIndexes: " << collectionUUID);
auto nss = collection->ns();
@@ -744,14 +745,14 @@ void IndexBuildsCoordinator::createIndexesOnEmptyCollection(OperationContext* op
UUID collectionUUID,
const std::vector<BSONObj>& specs,
bool fromMigrate) {
- auto collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(collectionUUID);
+ auto collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, collectionUUID);
+
invariant(collection, str::stream() << collectionUUID);
invariant(0U == collection->numRecords(opCtx), str::stream() << collectionUUID);
auto nss = collection->ns();
- invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X),
- str::stream() << collectionUUID);
-
+ invariant(
+ UncommittedCollections::get(opCtx).hasExclusiveAccessToCollection(opCtx, collection->ns()));
// Emit startIndexBuild and commitIndexBuild oplog entries if supported by the current FCV.
auto opObserver = opCtx->getServiceContext()->getOpObserver();
@@ -933,7 +934,7 @@ Status IndexBuildsCoordinator::_registerAndSetUpIndexBuildForTwoPhaseRecovery(
// case when an index builds is restarted during recovery.
Lock::DBLock dbLock(opCtx, dbName, MODE_IX);
Lock::CollectionLock collLock(opCtx, nssOrUuid, MODE_X);
- auto collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(collectionUUID);
+ auto collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, collectionUUID);
invariant(collection);
const auto& nss = collection->ns();
const auto protocol = IndexBuildProtocol::kTwoPhase;
@@ -1270,7 +1271,7 @@ void IndexBuildsCoordinator::_runIndexBuildInner(OperationContext* opCtx,
// If _buildIndex returned normally, then we should have the collection X lock. It is not
// required to safely access the collection, though, because an index build is registerd.
auto collection =
- CollectionCatalog::get(opCtx).lookupCollectionByUUID(replState->collectionUUID);
+ CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, replState->collectionUUID);
invariant(collection);
replState->stats.numIndexesAfter = _getNumIndexesTotal(opCtx, collection);
} catch (const DBException& ex) {
@@ -1282,7 +1283,7 @@ void IndexBuildsCoordinator::_runIndexBuildInner(OperationContext* opCtx,
// tearDownIndexBuild is called. The collection can be renamed, but it is OK for the name to
// be stale just for logging purposes.
auto collection =
- CollectionCatalog::get(opCtx).lookupCollectionByUUID(replState->collectionUUID);
+ CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, replState->collectionUUID);
invariant(collection,
str::stream() << "Collection with UUID " << replState->collectionUUID
<< " should exist because an index build is in progress: "
@@ -1352,7 +1353,7 @@ void IndexBuildsCoordinator::_buildIndexTwoPhase(
const IndexBuildOptions& indexBuildOptions,
boost::optional<Lock::CollectionLock>* exclusiveCollectionLock) {
- auto nss = *CollectionCatalog::get(opCtx).lookupNSSByUUID(replState->collectionUUID);
+ auto nss = *CollectionCatalog::get(opCtx).lookupNSSByUUID(opCtx, replState->collectionUUID);
auto preAbortStatus = Status::OK();
try {
_scanCollectionAndInsertKeysIntoSorter(
@@ -1394,7 +1395,7 @@ void IndexBuildsCoordinator::_scanCollectionAndInsertKeysIntoSorter(
boost::optional<Lock::CollectionLock>* exclusiveCollectionLock) {
{
- auto nss = CollectionCatalog::get(opCtx).lookupNSSByUUID(replState->collectionUUID);
+ auto nss = CollectionCatalog::get(opCtx).lookupNSSByUUID(opCtx, replState->collectionUUID);
invariant(nss);
invariant(opCtx->lockState()->isDbLockedForMode(replState->dbName, MODE_IX));
invariant(opCtx->lockState()->isCollectionLockedForMode(*nss, MODE_X));
@@ -1423,7 +1424,7 @@ void IndexBuildsCoordinator::_scanCollectionAndInsertKeysIntoSorter(
// The collection object should always exist while an index build is registered.
auto collection =
- CollectionCatalog::get(opCtx).lookupCollectionByUUID(replState->collectionUUID);
+ CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, replState->collectionUUID);
invariant(collection);
uassertStatusOK(
@@ -1475,7 +1476,7 @@ NamespaceString IndexBuildsCoordinator::_insertKeysFromSideTablesWithoutBlocking
RecoveryUnit::ReadSource::kUnset,
IndexBuildInterceptor::DrainYieldPolicy::kNoYield));
- nss = *CollectionCatalog::get(opCtx).lookupNSSByUUID(replState->collectionUUID);
+ nss = *CollectionCatalog::get(opCtx).lookupNSSByUUID(opCtx, replState->collectionUUID);
}
if (MONGO_unlikely(hangAfterIndexBuildSecondDrain.shouldFail())) {
@@ -1550,7 +1551,7 @@ void IndexBuildsCoordinator::_insertKeysFromSideTablesAndCommit(
// The collection object should always exist while an index build is registered.
auto collection =
- CollectionCatalog::get(opCtx).lookupCollectionByUUID(replState->collectionUUID);
+ CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, replState->collectionUUID);
invariant(collection,
str::stream() << "Collection not found after relocking. Index build: "
<< replState->buildUUID
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index c88e9ac3d6b..82993e676c9 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -176,8 +176,8 @@ void profile(OperationContext* opCtx, NetworkOp op) {
// not allowed while performing writes, so temporarily enforce prepare conflicts.
EnforcePrepareConflictsBlock enforcePrepare(opCtx);
- Collection* const coll =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(db->getProfilingNS());
+ Collection* const coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
+ opCtx, db->getProfilingNS());
if (coll) {
invariant(!opCtx->shouldParticipateInFlowControl());
@@ -220,7 +220,7 @@ Status createProfileCollection(OperationContext* opCtx, Database* db) {
auto& dbProfilingNS = db->getProfilingNS();
Collection* const collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(dbProfilingNS);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, dbProfilingNS);
if (collection) {
if (!collection->isCapped()) {
return Status(ErrorCodes::NamespaceExists,
diff --git a/src/mongo/db/matcher/expression_text.cpp b/src/mongo/db/matcher/expression_text.cpp
index eb650fabbb9..7220f1ce153 100644
--- a/src/mongo/db/matcher/expression_text.cpp
+++ b/src/mongo/db/matcher/expression_text.cpp
@@ -66,7 +66,8 @@ TextMatchExpression::TextMatchExpression(OperationContext* opCtx,
<< nss.ns() << "')",
db);
- Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ Collection* collection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
uassert(ErrorCodes::IndexNotFound,
str::stream() << "text index required for $text query (no such collection '"
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index bc004104381..1b012b4a8ad 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -612,7 +612,7 @@ void OpObserverImpl::onCollMod(OperationContext* opCtx,
if (!db) {
return;
}
- Collection* coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ Collection* coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
invariant(coll->uuid() == uuid);
invariant(DurableCatalog::get(opCtx)->isEqualToMetadataUUID(opCtx, coll->getCatalogId(), uuid));
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 6f12b861cc4..38cf8eb8c12 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -62,7 +62,8 @@ UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest&
invariant(!request.isExplain());
const NamespaceString& nsString = request.getNamespaceString();
- Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nsString);
+ Collection* collection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nsString);
// The update stage does not create its own collection. As such, if the update is
// an upsert, create the collection that the update stage inserts into beforehand.
@@ -71,7 +72,7 @@ UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest&
// Callers should either get an X or create the collection.
const Locker* locker = opCtx->lockState();
invariant(locker->isW() ||
- locker->isLockHeldForMode(ResourceId(RESOURCE_DATABASE, nsString.db()), MODE_X));
+ locker->isLockHeldForMode(ResourceId(RESOURCE_DATABASE, nsString.db()), MODE_IX));
writeConflictRetry(opCtx, "createCollection", nsString.ns(), [&] {
Lock::DBLock lk(opCtx, nsString.db(), MODE_X);
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 3b1c09b6a62..7c93bc3303a 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -211,11 +211,12 @@ void makeCollection(OperationContext* opCtx, const NamespaceString& ns) {
writeConflictRetry(opCtx, "implicit collection creation", ns.ns(), [&opCtx, &ns] {
AutoGetOrCreateDb db(opCtx, ns.db(), MODE_IX);
- Lock::CollectionLock collLock(opCtx, ns, MODE_X);
+ Lock::CollectionLock collLock(opCtx, ns, MODE_IX);
assertCanWrite_inlock(
- opCtx, ns, CollectionCatalog::get(opCtx).lookupCollectionByNamespace(ns));
+ opCtx, ns, CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, ns));
if (!CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
+ opCtx,
ns)) { // someone else may have beat us to it.
uassertStatusOK(userAllowedCreateNS(ns.db(), ns.coll()));
WriteUnitOfWork wuow(opCtx);
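
The implicit-creation path above now takes only intent locks and re-checks the catalog inside the retry loop. A minimal caller-side sketch of the resulting behavior, assuming the same helpers this hunk already uses (writeConflictRetry, AutoGetOrCreateDb, CollectionCatalog); the function name and the default CollectionOptions are illustrative only:

    // Sketch only: two writers may race into this under MODE_IX. The loser's
    // commit throws a WriteConflictException, writeConflictRetry re-runs the
    // lambda, and the second pass finds the winner's collection in the catalog
    // and skips creation.
    void implicitlyCreateIfMissing(OperationContext* opCtx, const NamespaceString& ns) {
        writeConflictRetry(opCtx, "implicit collection creation", ns.ns(), [&] {
            AutoGetOrCreateDb db(opCtx, ns.db(), MODE_IX);
            Lock::CollectionLock collLock(opCtx, ns, MODE_IX);
            if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, ns)) {
                return;  // Someone else (or an earlier retry) already created it.
            }
            uassertStatusOK(userAllowedCreateNS(ns.db(), ns.coll()));
            WriteUnitOfWork wuow(opCtx);
            invariant(db.getDb()->createCollection(opCtx, ns, CollectionOptions()));
            wuow.commit();
        });
    }
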
diff --git a/src/mongo/db/pipeline/document_source_change_stream_test.cpp b/src/mongo/db/pipeline/document_source_change_stream_test.cpp
index b918a53ca43..1e37a101baf 100644
--- a/src/mongo/db/pipeline/document_source_change_stream_test.cpp
+++ b/src/mongo/db/pipeline/document_source_change_stream_test.cpp
@@ -414,9 +414,8 @@ TEST_F(ChangeStreamStageTest, ShouldRejectBothStartAtOperationTimeAndResumeAfter
auto expCtx = getExpCtx();
// Need to put the collection in the collection catalog so the resume token is valid.
- auto collection = std::make_unique<CollectionMock>(nss);
- CollectionCatalog::get(getExpCtx()->opCtx)
- .registerCollection(testUuid(), std::move(collection));
+ std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
+ CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(testUuid(), &collection);
ASSERT_THROWS_CODE(
DSChangeStream::createFromBson(
@@ -435,9 +434,9 @@ TEST_F(ChangeStreamStageTest, ShouldRejectBothStartAfterAndResumeAfterOptions) {
auto opCtx = expCtx->opCtx;
// Need to put the collection in the collection catalog so the resume token is valid.
- auto collection = std::make_unique<CollectionMock>(nss);
+ std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
auto& catalog = CollectionCatalog::get(opCtx);
- catalog.registerCollection(testUuid(), std::move(collection));
+ catalog.registerCollection(testUuid(), &collection);
ASSERT_THROWS_CODE(
DSChangeStream::createFromBson(
@@ -457,9 +456,9 @@ TEST_F(ChangeStreamStageTest, ShouldRejectBothStartAtOperationTimeAndStartAfterO
auto opCtx = expCtx->opCtx;
// Need to put the collection in the collection catalog so the resume token is valid.
- auto collection = std::make_unique<CollectionMock>(nss);
+ std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
auto& catalog = CollectionCatalog::get(opCtx);
- catalog.registerCollection(testUuid(), std::move(collection));
+ catalog.registerCollection(testUuid(), &collection);
ASSERT_THROWS_CODE(
DSChangeStream::createFromBson(
@@ -478,9 +477,9 @@ TEST_F(ChangeStreamStageTest, ShouldRejectResumeAfterWithResumeTokenMissingUUID)
auto opCtx = expCtx->opCtx;
// Need to put the collection in the collection catalog so the resume token is valid.
- auto collection = std::make_unique<CollectionMock>(nss);
+ std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
auto& catalog = CollectionCatalog::get(opCtx);
- catalog.registerCollection(testUuid(), std::move(collection));
+ catalog.registerCollection(testUuid(), &collection);
ASSERT_THROWS_CODE(
DSChangeStream::createFromBson(
@@ -1510,8 +1509,8 @@ TEST_F(ChangeStreamStageTest, DocumentKeyShouldIncludeShardKeyFromResumeToken) {
const auto opTime = repl::OpTime(ts, term);
const auto uuid = testUuid();
- auto collection = std::make_unique<CollectionMock>(nss);
- CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(uuid, std::move(collection));
+ std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
+ CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(uuid, &collection);
BSONObj o2 = BSON("_id" << 1 << "shardKey" << 2);
auto resumeToken = makeResumeToken(ts, uuid, o2);
@@ -1555,8 +1554,8 @@ TEST_F(ChangeStreamStageTest, DocumentKeyShouldNotIncludeShardKeyFieldsIfNotPres
const auto opTime = repl::OpTime(ts, term);
const auto uuid = testUuid();
- auto collection = std::make_unique<CollectionMock>(nss);
- CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(uuid, std::move(collection));
+ std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
+ CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(uuid, &collection);
BSONObj o2 = BSON("_id" << 1 << "shardKey" << 2);
auto resumeToken = makeResumeToken(ts, uuid, o2);
@@ -1597,8 +1596,8 @@ TEST_F(ChangeStreamStageTest, ResumeAfterFailsIfResumeTokenDoesNotContainUUID) {
const Timestamp ts(3, 45);
const auto uuid = testUuid();
- auto collection = std::make_unique<CollectionMock>(nss);
- CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(uuid, std::move(collection));
+ std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
+ CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(uuid, &collection);
// Create a resume token from only the timestamp.
auto resumeToken = makeResumeToken(ts);
@@ -1650,9 +1649,8 @@ TEST_F(ChangeStreamStageTest, ResumeAfterWithTokenFromInvalidateShouldFail) {
auto expCtx = getExpCtx();
// Need to put the collection in the collection catalog so the resume token is valid.
- auto collection = std::make_unique<CollectionMock>(nss);
- CollectionCatalog::get(getExpCtx()->opCtx)
- .registerCollection(testUuid(), std::move(collection));
+ std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
+ CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(testUuid(), &collection);
const auto resumeTokenInvalidate =
makeResumeToken(kDefaultTs,
@@ -2035,8 +2033,8 @@ TEST_F(ChangeStreamStageDBTest, DocumentKeyShouldIncludeShardKeyFromResumeToken)
const auto opTime = repl::OpTime(ts, term);
const auto uuid = testUuid();
- auto collection = std::make_unique<CollectionMock>(nss);
- CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(uuid, std::move(collection));
+ std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
+ CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(uuid, &collection);
BSONObj o2 = BSON("_id" << 1 << "shardKey" << 2);
auto resumeToken = makeResumeToken(ts, uuid, o2);
@@ -2071,8 +2069,8 @@ TEST_F(ChangeStreamStageDBTest, DocumentKeyShouldNotIncludeShardKeyFieldsIfNotPr
const auto opTime = repl::OpTime(ts, term);
const auto uuid = testUuid();
- auto collection = std::make_unique<CollectionMock>(nss);
- CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(uuid, std::move(collection));
+ std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
+ CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(uuid, &collection);
BSONObj o2 = BSON("_id" << 1 << "shardKey" << 2);
auto resumeToken = makeResumeToken(ts, uuid, o2);
@@ -2108,8 +2106,8 @@ TEST_F(ChangeStreamStageDBTest, DocumentKeyShouldNotIncludeShardKeyIfResumeToken
const auto opTime = repl::OpTime(ts, term);
const auto uuid = testUuid();
- auto collection = std::make_unique<CollectionMock>(nss);
- CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(uuid, std::move(collection));
+ std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
+ CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(uuid, &collection);
// Create a resume token from only the timestamp.
auto resumeToken = makeResumeToken(ts);
@@ -2144,9 +2142,8 @@ TEST_F(ChangeStreamStageDBTest, ResumeAfterWithTokenFromInvalidateShouldFail) {
auto expCtx = getExpCtx();
// Need to put the collection in the collection catalog so the resume token is valid.
- auto collection = std::make_unique<CollectionMock>(nss);
- CollectionCatalog::get(getExpCtx()->opCtx)
- .registerCollection(testUuid(), std::move(collection));
+ std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
+ CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(testUuid(), &collection);
const auto resumeTokenInvalidate =
makeResumeToken(kDefaultTs,
@@ -2166,8 +2163,8 @@ TEST_F(ChangeStreamStageDBTest, ResumeAfterWithTokenFromInvalidateShouldFail) {
TEST_F(ChangeStreamStageDBTest, ResumeAfterWithTokenFromDropDatabase) {
const auto uuid = testUuid();
- auto collection = std::make_unique<CollectionMock>(nss);
- CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(uuid, std::move(collection));
+ std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
+ CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(uuid, &collection);
// Create a resume token from only the timestamp, similar to a 'dropDatabase' entry.
auto resumeToken = makeResumeToken(
@@ -2195,8 +2192,8 @@ TEST_F(ChangeStreamStageDBTest, ResumeAfterWithTokenFromDropDatabase) {
TEST_F(ChangeStreamStageDBTest, StartAfterSucceedsEvenIfResumeTokenDoesNotContainUUID) {
const auto uuid = testUuid();
- auto collection = std::make_unique<CollectionMock>(nss);
- CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(uuid, std::move(collection));
+ std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
+ CollectionCatalog::get(getExpCtx()->opCtx).registerCollection(uuid, &collection);
// Create a resume token from only the timestamp, similar to a 'dropDatabase' entry.
auto resumeToken = makeResumeToken(kDefaultTs);
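
These test changes reflect the new registerCollection() signature: the catalog now receives a pointer to the owning std::unique_ptr<Collection> rather than taking the unique_ptr by value, presumably so registration can take (or decline) ownership through the caller's pointer. A minimal setup sketch under that assumption, reusing the CollectionMock and testUuid() helpers from these tests:

    // Sketch of the test-setup idiom only; the ownership semantics of the
    // pointer argument are assumed, not spelled out in this diff.
    std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
    auto& catalog = CollectionCatalog::get(opCtx);
    catalog.registerCollection(testUuid(), &collection);
    // The tests above rely on the registered mock being immediately visible:
    ASSERT(catalog.lookupCollectionByUUID(opCtx, testUuid()));
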
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index c2f9c9d1a8d..bf576960b20 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -185,7 +185,7 @@ Value DocumentSourceCursor::serialize(boost::optional<ExplainOptions::Verbosity>
AutoGetDb dbLock(opCtx, _exec->nss().db(), lockMode);
Lock::CollectionLock collLock(opCtx, _exec->nss(), lockMode);
auto collection = dbLock.getDb()
- ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(_exec->nss())
+ ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, _exec->nss())
: nullptr;
Explain::explainStages(_exec.get(),
diff --git a/src/mongo/db/pipeline/process_interface_standalone.cpp b/src/mongo/db/pipeline/process_interface_standalone.cpp
index aebdebc1ffb..9e70d9cec0a 100644
--- a/src/mongo/db/pipeline/process_interface_standalone.cpp
+++ b/src/mongo/db/pipeline/process_interface_standalone.cpp
@@ -575,7 +575,8 @@ bool MongoInterfaceStandalone::fieldsHaveSupportingUniqueIndex(
Lock::CollectionLock collLock(opCtx, nss, MODE_IS);
auto databaseHolder = DatabaseHolder::get(opCtx);
auto db = databaseHolder->getDb(opCtx, nss.db());
- auto collection = db ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss) : nullptr;
+ auto collection =
+ db ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss) : nullptr;
if (!collection) {
return fieldPaths == std::set<FieldPath>{"_id"};
}
diff --git a/src/mongo/db/query/collection_query_info.cpp b/src/mongo/db/query/collection_query_info.cpp
index 9f24696ff21..d65773a230c 100644
--- a/src/mongo/db/query/collection_query_info.cpp
+++ b/src/mongo/db/query/collection_query_info.cpp
@@ -82,9 +82,6 @@ CollectionQueryInfo::CollectionQueryInfo()
_indexUsageTracker(getGlobalServiceContext()->getPreciseClockSource()) {}
const UpdateIndexData& CollectionQueryInfo::getIndexKeys(OperationContext* opCtx) const {
- const Collection* coll = get.owner(this);
- // This requires "some" lock, and MODE_IS is an expression for that, for now.
- dassert(opCtx->lockState()->isCollectionLockedForMode(coll->ns(), MODE_IS));
invariant(_keysComputed);
return _indexedPaths;
}
@@ -172,7 +169,7 @@ void CollectionQueryInfo::notifyOfQuery(OperationContext* opCtx,
for (auto it = indexesUsed.begin(); it != indexesUsed.end(); ++it) {
// This index should still exist, since the PlanExecutor would have been killed if the
// index was dropped (and we would not get here).
- dassert(nullptr != coll->getIndexCatalog()->findIndexByName(opCtx, *it));
+ invariant(nullptr != coll->getIndexCatalog()->findIndexByName(opCtx, *it));
_indexUsageTracker.recordIndexAccess(*it);
}
@@ -213,8 +210,6 @@ void CollectionQueryInfo::updatePlanCacheIndexEntries(OperationContext* opCtx) {
void CollectionQueryInfo::init(OperationContext* opCtx) {
const Collection* coll = get.owner(this);
- // Requires exclusive collection lock.
- invariant(opCtx->lockState()->isCollectionLockedForMode(coll->ns(), MODE_X));
const bool includeUnfinishedIndexes = false;
std::unique_ptr<IndexCatalog::IndexIterator> ii =
@@ -228,21 +223,13 @@ void CollectionQueryInfo::init(OperationContext* opCtx) {
}
void CollectionQueryInfo::addedIndex(OperationContext* opCtx, const IndexDescriptor* desc) {
- const Collection* coll = get.owner(this);
- // Requires exclusive collection lock.
- invariant(opCtx->lockState()->isCollectionLockedForMode(coll->ns(), MODE_X));
invariant(desc);
rebuildIndexData(opCtx);
-
_indexUsageTracker.registerIndex(desc->indexName(), desc->keyPattern());
}
void CollectionQueryInfo::droppedIndex(OperationContext* opCtx, StringData indexName) {
- const Collection* coll = get.owner(this);
- // Requires exclusive collection lock.
- invariant(opCtx->lockState()->isCollectionLockedForMode(coll->ns(), MODE_X));
-
rebuildIndexData(opCtx);
_indexUsageTracker.unregisterIndex(indexName);
}
diff --git a/src/mongo/db/query/plan_executor_impl.cpp b/src/mongo/db/query/plan_executor_impl.cpp
index db5e31ff329..533fae432e3 100644
--- a/src/mongo/db/query/plan_executor_impl.cpp
+++ b/src/mongo/db/query/plan_executor_impl.cpp
@@ -466,7 +466,7 @@ std::shared_ptr<CappedInsertNotifier> PlanExecutorImpl::_getCappedInsertNotifier
auto databaseHolder = DatabaseHolder::get(_opCtx);
auto db = databaseHolder->getDb(_opCtx, _nss.db());
invariant(db);
- auto collection = CollectionCatalog::get(_opCtx).lookupCollectionByNamespace(_nss);
+ auto collection = CollectionCatalog::get(_opCtx).lookupCollectionByNamespace(_opCtx, _nss);
invariant(collection);
return collection->getCappedInsertNotifier();
diff --git a/src/mongo/db/query/query_request.cpp b/src/mongo/db/query/query_request.cpp
index ab6c2344285..fcfa9915611 100644
--- a/src/mongo/db/query/query_request.cpp
+++ b/src/mongo/db/query/query_request.cpp
@@ -122,7 +122,7 @@ QueryRequest::QueryRequest(NamespaceStringOrUUID nssOrUuid)
void QueryRequest::refreshNSS(OperationContext* opCtx) {
if (_uuid) {
const CollectionCatalog& catalog = CollectionCatalog::get(opCtx);
- auto foundColl = catalog.lookupCollectionByUUID(_uuid.get());
+ auto foundColl = catalog.lookupCollectionByUUID(opCtx, _uuid.get());
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "UUID " << _uuid.get() << " specified in query request not found",
foundColl);
diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp
index 95a08b366a2..1bd5d0a2d62 100644
--- a/src/mongo/db/query/query_request_test.cpp
+++ b/src/mongo/db/query/query_request_test.cpp
@@ -1661,9 +1661,9 @@ TEST_F(QueryRequestTest, ParseFromUUID) {
// Register a UUID/Collection pair in the CollectionCatalog.
const CollectionUUID uuid = UUID::gen();
const NamespaceString nss("test.testns");
- auto coll = std::make_unique<CollectionMock>(nss);
+ std::unique_ptr<Collection> collection = std::make_unique<CollectionMock>(nss);
CollectionCatalog& catalog = CollectionCatalog::get(opCtx.get());
- catalog.registerCollection(uuid, std::move(coll));
+ catalog.registerCollection(uuid, &collection);
QueryRequest qr(NamespaceStringOrUUID("test", uuid));
// Ensure a call to refreshNSS succeeds.
qr.refreshNSS(opCtx.get());
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index ced21031a10..533e0df0c6d 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -147,7 +147,7 @@ Status repairCollections(OperationContext* opCtx,
log() << "Repairing collection " << nss;
- auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
Status status = engine->repairRecordStore(opCtx, collection->getCatalogId(), nss);
if (!status.isOK())
return status;
@@ -155,7 +155,7 @@ Status repairCollections(OperationContext* opCtx,
for (const auto& nss : colls) {
opCtx->checkForInterrupt();
- auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
auto swIndexNameObjs = getIndexNameObjs(opCtx, collection->getCatalogId());
if (!swIndexNameObjs.isOK())
return swIndexNameObjs.getStatus();
diff --git a/src/mongo/db/repair_database_and_check_version.cpp b/src/mongo/db/repair_database_and_check_version.cpp
index bf8d07ed074..741976143eb 100644
--- a/src/mongo/db/repair_database_and_check_version.cpp
+++ b/src/mongo/db/repair_database_and_check_version.cpp
@@ -96,7 +96,7 @@ Status restoreMissingFeatureCompatibilityVersionDocument(OperationContext* opCtx
// If the server configuration collection, which contains the FCV document, does not exist, then
// create it.
if (!CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
- NamespaceString::kServerConfigurationNamespace)) {
+ opCtx, NamespaceString::kServerConfigurationNamespace)) {
log() << "Re-creating the server configuration collection (admin.system.version) that was "
"dropped.";
uassertStatusOK(
@@ -104,7 +104,7 @@ Status restoreMissingFeatureCompatibilityVersionDocument(OperationContext* opCtx
}
Collection* fcvColl = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
- NamespaceString::kServerConfigurationNamespace);
+ opCtx, NamespaceString::kServerConfigurationNamespace);
invariant(fcvColl);
// Restore the featureCompatibilityVersion document if it is missing.
@@ -256,7 +256,7 @@ void checkForCappedOplog(OperationContext* opCtx, Database* db) {
const NamespaceString oplogNss(NamespaceString::kRsOplogNamespace);
invariant(opCtx->lockState()->isDbLockedForMode(oplogNss.db(), MODE_IS));
Collection* oplogCollection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(oplogNss);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, oplogNss);
if (oplogCollection && !oplogCollection->isCapped()) {
severe() << "The oplog collection " << oplogNss
<< " is not capped; a capped oplog is a requirement for replication to function.";
@@ -306,7 +306,7 @@ void rebuildIndexes(OperationContext* opCtx, StorageEngine* storageEngine) {
for (const auto& entry : nsToIndexNameObjMap) {
NamespaceString collNss(entry.first);
- auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(collNss);
+ auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, collNss);
for (const auto& indexName : entry.second.first) {
log() << "Rebuilding index. Collection: " << collNss << " Index: " << indexName;
}
@@ -338,7 +338,7 @@ void setReplSetMemberInStandaloneMode(OperationContext* opCtx) {
invariant(opCtx->lockState()->isW());
Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
- NamespaceString::kSystemReplSetNamespace);
+ opCtx, NamespaceString::kSystemReplSetNamespace);
if (collection && collection->numRecords(opCtx) > 0) {
setReplSetMemberInStandaloneMode(opCtx->getServiceContext(), true);
return;
@@ -410,7 +410,8 @@ bool repairDatabasesAndCheckVersion(OperationContext* opCtx) {
Collection* versionColl;
BSONObj featureCompatibilityVersion;
if (!db ||
- !(versionColl = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(fcvNSS)) ||
+ !(versionColl =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, fcvNSS)) ||
!Helpers::findOne(opCtx,
versionColl,
BSON("_id" << FeatureCompatibilityVersionParser::kParameterName),
@@ -517,7 +518,7 @@ bool repairDatabasesAndCheckVersion(OperationContext* opCtx) {
// featureCompatibilityVersion document, cache it in-memory as a server parameter.
if (dbName == "admin") {
if (Collection* versionColl = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
- NamespaceString::kServerConfigurationNamespace)) {
+ opCtx, NamespaceString::kServerConfigurationNamespace)) {
BSONObj featureCompatibilityVersion;
if (Helpers::findOne(
opCtx,
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index 8ab2dffb19b..297934bbbca 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -151,7 +151,7 @@ Status _applyOps(OperationContext* opCtx,
// NamespaceNotFound.
// Additionally for inserts, we fail early on non-existent collections.
Lock::CollectionLock collectionLock(opCtx, nss, MODE_IX);
- auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
if (!collection && (*opType == 'i' || *opType == 'u')) {
uasserted(
ErrorCodes::AtomicityFailure,
@@ -311,7 +311,8 @@ Status _checkPrecondition(OperationContext* opCtx,
if (!database) {
return {ErrorCodes::NamespaceNotFound, "database in ns does not exist: " + nss.ns()};
}
- Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ Collection* collection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
if (!collection) {
return {ErrorCodes::NamespaceNotFound, "collection in ns does not exist: " + nss.ns()};
}
diff --git a/src/mongo/db/repl/dbcheck.cpp b/src/mongo/db/repl/dbcheck.cpp
index c9e822b0cc3..083dd74edc1 100644
--- a/src/mongo/db/repl/dbcheck.cpp
+++ b/src/mongo/db/repl/dbcheck.cpp
@@ -388,8 +388,9 @@ AutoGetCollectionForDbCheck::AutoGetCollectionForDbCheck(OperationContext* opCtx
: _agd(opCtx, nss), _collLock(opCtx, nss, MODE_S) {
std::string msg;
- _collection =
- _agd.getDb() ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss) : nullptr;
+ _collection = _agd.getDb()
+ ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss)
+ : nullptr;
// If the collection gets deleted after the check is launched, record that in the health log.
if (!_collection) {
@@ -464,7 +465,7 @@ Status dbCheckDatabaseOnSecondary(OperationContext* opCtx,
const DbCheckOplogCollection& entry) {
// dbCheckCollectionResult-specific stuff.
auto uuid = uassertStatusOK(UUID::parse(entry.getUuid().toString()));
- auto collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(uuid);
+ auto collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, uuid);
if (!collection) {
Status status(ErrorCodes::NamespaceNotFound, "Could not find collection for dbCheck");
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 8f345e4e023..3661989d417 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -239,7 +239,7 @@ void createIndexForApplyOps(OperationContext* opCtx,
auto databaseHolder = DatabaseHolder::get(opCtx);
auto db = databaseHolder->getDb(opCtx, indexNss.ns());
auto indexCollection =
- db ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(indexNss) : nullptr;
+ db ? CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, indexNss) : nullptr;
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Failed to create index due to missing collection: " << indexNss.ns(),
indexCollection);
@@ -585,7 +585,7 @@ void createOplog(OperationContext* opCtx,
OldClientContext ctx(opCtx, oplogCollectionName.ns());
Collection* collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(oplogCollectionName);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, oplogCollectionName);
if (collection) {
if (replSettings.getOplogSizeBytes() != 0) {
@@ -669,7 +669,7 @@ std::pair<OptionalCollectionUUID, NamespaceString> extractCollModUUIDAndNss(
}
CollectionUUID uuid = ui.get();
auto& catalog = CollectionCatalog::get(opCtx);
- const auto nsByUUID = catalog.lookupNSSByUUID(uuid);
+ const auto nsByUUID = catalog.lookupNSSByUUID(opCtx, uuid);
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Failed to apply operation due to missing collection (" << uuid
<< "): " << redact(cmd.toString()),
@@ -681,7 +681,7 @@ NamespaceString extractNsFromUUID(OperationContext* opCtx, const boost::optional
invariant(ui);
auto uuid = ui.get();
auto& catalog = CollectionCatalog::get(opCtx);
- auto nss = catalog.lookupNSSByUUID(uuid);
+ auto nss = catalog.lookupNSSByUUID(opCtx, uuid);
uassert(ErrorCodes::NamespaceNotFound, "No namespace with UUID " + uuid.toString(), nss);
return *nss;
}
@@ -1134,7 +1134,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
Collection* collection = nullptr;
if (auto uuid = op.getUuid()) {
CollectionCatalog& catalog = CollectionCatalog::get(opCtx);
- collection = catalog.lookupCollectionByUUID(uuid.get());
+ collection = catalog.lookupCollectionByUUID(opCtx, uuid.get());
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Failed to apply operation due to missing collection ("
<< uuid.get() << "): " << redact(opOrGroupedInserts.toBSON()),
@@ -1148,7 +1148,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
dassert(opCtx->lockState()->isCollectionLockedForMode(
requestNss, supportsDocLocking() ? MODE_IX : MODE_X),
requestNss.ns());
- collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(requestNss);
+ collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, requestNss);
}
BSONObj o = op.getObject();
@@ -1537,7 +1537,7 @@ Status applyCommand_inlock(OperationContext* opCtx,
Lock::DBLock lock(opCtx, nss.db(), MODE_IS);
auto databaseHolder = DatabaseHolder::get(opCtx);
auto db = databaseHolder->getDb(opCtx, nss.ns());
- if (db && !CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss) &&
+ if (db && !CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss) &&
ViewCatalog::get(db)->lookup(opCtx, nss.ns())) {
return {ErrorCodes::CommandNotSupportedOnView,
str::stream() << "applyOps not supported on view:" << nss.ns()};
diff --git a/src/mongo/db/repl/oplog_applier_impl.cpp b/src/mongo/db/repl/oplog_applier_impl.cpp
index f5716c2a49b..b036bad722b 100644
--- a/src/mongo/db/repl/oplog_applier_impl.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl.cpp
@@ -79,7 +79,7 @@ NamespaceString parseUUIDOrNs(OperationContext* opCtx, const OplogEntry& oplogEn
const auto& uuid = optionalUuid.get();
auto& catalog = CollectionCatalog::get(opCtx);
- auto nss = catalog.lookupNSSByUUID(uuid);
+ auto nss = catalog.lookupNSSByUUID(opCtx, uuid);
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "No namespace with UUID " << uuid.toString(),
nss);
@@ -159,7 +159,7 @@ private:
const NamespaceString& nss) {
CollectionProperties collProperties;
- auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
if (!collection) {
return collProperties;
diff --git a/src/mongo/db/repl/oplog_interface_local.cpp b/src/mongo/db/repl/oplog_interface_local.cpp
index 499e8976aa5..e8965ec81b4 100644
--- a/src/mongo/db/repl/oplog_interface_local.cpp
+++ b/src/mongo/db/repl/oplog_interface_local.cpp
@@ -64,7 +64,7 @@ OplogIteratorLocal::OplogIteratorLocal(OperationContext* opCtx)
InternalPlanner::collectionScan(opCtx,
NamespaceString::kRsOplogNamespace.ns(),
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
- NamespaceString::kRsOplogNamespace),
+ opCtx, NamespaceString::kRsOplogNamespace),
PlanExecutor::NO_YIELD,
InternalPlanner::BACKWARD)) {}
diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp
index 5be259ad537..7e7545ef4f7 100644
--- a/src/mongo/db/repl/replication_recovery.cpp
+++ b/src/mongo/db/repl/replication_recovery.cpp
@@ -424,7 +424,7 @@ void ReplicationRecoveryImpl::_truncateOplogTo(OperationContext* opCtx,
AutoGetDb autoDb(opCtx, oplogNss.db(), MODE_IX);
Lock::CollectionLock oplogCollectionLoc(opCtx, oplogNss, MODE_X);
Collection* oplogCollection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(oplogNss);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, oplogNss);
if (!oplogCollection) {
fassertFailedWithStatusNoTrace(
34418,
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index 24c6e1e8b63..4bfdb8dd2d8 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -549,7 +549,7 @@ void RollbackImpl::_correctRecordStoreCounts(OperationContext* opCtx) {
const auto& catalog = CollectionCatalog::get(opCtx);
for (const auto& uiCount : _newCounts) {
const auto uuid = uiCount.first;
- const auto coll = catalog.lookupCollectionByUUID(uuid);
+ const auto coll = catalog.lookupCollectionByUUID(opCtx, uuid);
invariant(coll,
str::stream() << "The collection with UUID " << uuid
<< " is unexpectedly missing in the CollectionCatalog");
@@ -629,7 +629,7 @@ Status RollbackImpl::_findRecordStoreCounts(OperationContext* opCtx) {
continue;
}
- auto nss = catalog.lookupNSSByUUID(uuid);
+ auto nss = catalog.lookupNSSByUUID(opCtx, uuid);
StorageInterface::CollectionCount oldCount = 0;
// Drop-pending collections are not visible to rollback via the catalog when they are
@@ -1056,7 +1056,7 @@ Status RollbackImpl::_writeRollbackFiles(OperationContext* opCtx) {
auto storageEngine = opCtx->getServiceContext()->getStorageEngine();
for (auto&& entry : _observerInfo.rollbackDeletedIdsMap) {
const auto& uuid = entry.first;
- const auto nss = catalog.lookupNSSByUUID(uuid);
+ const auto nss = catalog.lookupNSSByUUID(opCtx, uuid);
// Drop-pending collections are not visible to rollback via the catalog when they are
// managed by the storage engine. See StorageEngine::supportsPendingDrops().
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 87f3448c504..37c5b82ef1e 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -713,10 +713,11 @@ void dropIndex(OperationContext* opCtx,
*/
void rollbackCreateIndexes(OperationContext* opCtx, UUID uuid, std::set<std::string> indexNames) {
- boost::optional<NamespaceString> nss = CollectionCatalog::get(opCtx).lookupNSSByUUID(uuid);
+ boost::optional<NamespaceString> nss =
+ CollectionCatalog::get(opCtx).lookupNSSByUUID(opCtx, uuid);
invariant(nss);
Lock::DBLock dbLock(opCtx, nss->db(), MODE_X);
- Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(uuid);
+ Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, uuid);
// If we cannot find the collection, we skip over dropping the index.
if (!collection) {
@@ -753,11 +754,12 @@ void rollbackCreateIndexes(OperationContext* opCtx, UUID uuid, std::set<std::str
void rollbackDropIndexes(OperationContext* opCtx,
UUID uuid,
std::map<std::string, BSONObj> indexNames) {
- boost::optional<NamespaceString> nss = CollectionCatalog::get(opCtx).lookupNSSByUUID(uuid);
+ boost::optional<NamespaceString> nss =
+ CollectionCatalog::get(opCtx).lookupNSSByUUID(opCtx, uuid);
invariant(nss);
Lock::DBLock dbLock(opCtx, nss->db(), MODE_IX);
Lock::CollectionLock collLock(opCtx, *nss, MODE_X);
- Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(*nss);
+ Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, *nss);
// If we cannot find the collection, we skip over dropping the index.
if (!collection) {
@@ -850,7 +852,8 @@ void dropCollection(OperationContext* opCtx,
void renameOutOfTheWay(OperationContext* opCtx, RenameCollectionInfo info, Database* db) {
// Finds the UUID of the collection that we are renaming out of the way.
- auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(info.renameTo);
+ auto collection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, info.renameTo);
invariant(collection);
// The generated unique collection name is only guaranteed to exist if the database is
@@ -1049,7 +1052,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
invariant(!doc._id.eoo()); // This is checked when we insert to the set.
UUID uuid = doc.uuid;
- boost::optional<NamespaceString> nss = catalog.lookupNSSByUUID(uuid);
+ boost::optional<NamespaceString> nss = catalog.lookupNSSByUUID(opCtx, uuid);
try {
if (nss) {
@@ -1164,7 +1167,8 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
invariant(!fixUpInfo.collectionsToRename.count(uuid));
invariant(!fixUpInfo.collectionsToResyncMetadata.count(uuid));
- boost::optional<NamespaceString> nss = CollectionCatalog::get(opCtx).lookupNSSByUUID(uuid);
+ boost::optional<NamespaceString> nss =
+ CollectionCatalog::get(opCtx).lookupNSSByUUID(opCtx, uuid);
// Do not attempt to acquire the database lock with an empty namespace. We should survive
// an attempt to drop a non-existent collection.
if (!nss) {
@@ -1175,7 +1179,8 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
Database* db = dbLock.getDb();
if (db) {
- Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(uuid);
+ Collection* collection =
+ CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, uuid);
dropCollection(opCtx, *nss, collection, db);
LOG(1) << "Dropped collection: " << *nss << ", UUID: " << uuid;
}
@@ -1223,7 +1228,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
// collection, we will not be able to retrieve the collection's catalog entries.
for (auto uuid : fixUpInfo.collectionsToResyncMetadata) {
boost::optional<NamespaceString> nss =
- CollectionCatalog::get(opCtx).lookupNSSByUUID(uuid);
+ CollectionCatalog::get(opCtx).lookupNSSByUUID(opCtx, uuid);
invariant(nss);
log() << "Resyncing collection metadata for collection: " << *nss << ", UUID: " << uuid;
@@ -1234,7 +1239,8 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
auto db = databaseHolder->openDb(opCtx, nss->db().toString());
invariant(db);
- Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(uuid);
+ Collection* collection =
+ CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, uuid);
invariant(collection);
auto infoResult = rollbackSource.getCollectionInfoByUUID(nss->db().toString(), uuid);
@@ -1333,7 +1339,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
unique_ptr<RemoveSaver> removeSaver;
invariant(!fixUpInfo.collectionsToDrop.count(uuid));
- boost::optional<NamespaceString> nss = catalog.lookupNSSByUUID(uuid);
+ boost::optional<NamespaceString> nss = catalog.lookupNSSByUUID(opCtx, uuid);
if (!nss) {
nss = NamespaceString();
}
@@ -1362,7 +1368,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
const NamespaceString docNss(doc.ns);
Lock::DBLock docDbLock(opCtx, docNss.db(), MODE_X);
OldClientContext ctx(opCtx, doc.ns.toString());
- Collection* collection = catalog.lookupCollectionByUUID(uuid);
+ Collection* collection = catalog.lookupCollectionByUUID(opCtx, uuid);
// Adds the doc to our rollback file if the collection was not dropped while
// rolling back createCollection operations. Does not log an error when
@@ -1530,7 +1536,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
Lock::CollectionLock oplogCollectionLoc(opCtx, oplogNss, MODE_X);
OldClientContext ctx(opCtx, oplogNss.ns());
Collection* oplogCollection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(oplogNss);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, oplogNss);
if (!oplogCollection) {
fassertFailedWithStatusNoTrace(
40495,
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index 9380cbe51ad..81988675ec6 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -326,7 +326,7 @@ int _testRollbackDelete(OperationContext* opCtx,
auto db = databaseHolder->getDb(opCtx, "test");
ASSERT_TRUE(db);
auto collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(NamespaceString("test.t"));
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, NamespaceString("test.t"));
if (!collection) {
return -1;
}
@@ -1514,7 +1514,7 @@ TEST_F(RSRollbackTest, RollbackApplyOpsCommand) {
AutoGetOrCreateDb autoDb(_opCtx.get(), "test", MODE_X);
mongo::WriteUnitOfWork wuow(_opCtx.get());
coll = CollectionCatalog::get(_opCtx.get())
- .lookupCollectionByNamespace(NamespaceString("test.t"));
+ .lookupCollectionByNamespace(_opCtx.get(), NamespaceString("test.t"));
if (!coll) {
coll =
autoDb.getDb()->createCollection(_opCtx.get(), NamespaceString("test.t"), options);
@@ -1660,7 +1660,7 @@ TEST_F(RSRollbackTest, RollbackCreateCollectionCommand) {
auto db = databaseHolder->getDb(_opCtx.get(), "test");
ASSERT_TRUE(db);
ASSERT_FALSE(CollectionCatalog::get(_opCtx.get())
- .lookupCollectionByNamespace(NamespaceString("test.t")));
+ .lookupCollectionByNamespace(_opCtx.get(), NamespaceString("test.t")));
}
}
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index ef77a529a56..237a07e02e2 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -223,8 +223,8 @@ StorageInterfaceImpl::createCollectionForBulkLoading(
UnreplicatedWritesBlock uwb(opCtx.get());
// Get locks and create the collection.
- AutoGetOrCreateDb db(opCtx.get(), nss.db(), MODE_X);
- AutoGetCollection coll(opCtx.get(), nss, fixLockModeForSystemDotViewsChanges(nss, MODE_IX));
+ AutoGetOrCreateDb db(opCtx.get(), nss.db(), MODE_IX);
+ AutoGetCollection coll(opCtx.get(), nss, fixLockModeForSystemDotViewsChanges(nss, MODE_X));
if (coll.getCollection()) {
return Status(ErrorCodes::NamespaceExists,
@@ -438,13 +438,14 @@ Status StorageInterfaceImpl::createCollection(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionOptions& options) {
return writeConflictRetry(opCtx, "StorageInterfaceImpl::createCollection", nss.ns(), [&] {
- AutoGetOrCreateDb databaseWriteGuard(opCtx, nss.db(), MODE_X);
+ AutoGetOrCreateDb databaseWriteGuard(opCtx, nss.db(), MODE_IX);
auto db = databaseWriteGuard.getDb();
invariant(db);
- if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss)) {
+ if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss)) {
return Status(ErrorCodes::NamespaceExists,
str::stream() << "Collection " << nss.ns() << " already exists.");
}
+ Lock::CollectionLock lk(opCtx, nss, MODE_IX);
WriteUnitOfWork wuow(opCtx);
try {
auto coll = db->createCollection(opCtx, nss, options);
@@ -1111,12 +1112,12 @@ Status StorageInterfaceImpl::isAdminDbValid(OperationContext* opCtx) {
}
Collection* const usersCollection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
- AuthorizationManager::usersCollectionNamespace);
+ opCtx, AuthorizationManager::usersCollectionNamespace);
const bool hasUsers =
usersCollection && !Helpers::findOne(opCtx, usersCollection, BSONObj(), false).isNull();
Collection* const adminVersionCollection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
- AuthorizationManager::versionCollectionNamespace);
+ opCtx, AuthorizationManager::versionCollectionNamespace);
BSONObj authSchemaVersionDocument;
if (!adminVersionCollection ||
!Helpers::findOne(opCtx,
diff --git a/src/mongo/db/s/implicit_create_collection.cpp b/src/mongo/db/s/implicit_create_collection.cpp
index 62b086eebc5..b146eeb5076 100644
--- a/src/mongo/db/s/implicit_create_collection.cpp
+++ b/src/mongo/db/s/implicit_create_collection.cpp
@@ -96,7 +96,7 @@ public:
auto db = databaseHolder->getDb(opCtx, _ns.db());
if (db) {
Lock::CollectionLock collLock(opCtx, _ns, MODE_IS);
- if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(_ns)) {
+ if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, _ns)) {
// Collection already created, no more work needs to be done.
return Status::OK();
}
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 0df33635636..075f109f59f 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -652,7 +652,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
AutoGetOrCreateDb autoCreateDb(opCtx, nss.db(), MODE_X);
auto db = autoCreateDb.getDb();
- auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
if (collection) {
checkUUIDsMatch(collection);
} else {
@@ -665,7 +665,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
uassertStatusOK(db->userCreateNS(
opCtx, nss, collectionOptions, createDefaultIndexes, donorIdIndexSpec));
wuow.commit();
- collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
}
auto indexSpecs = checkEmptyOrGetMissingIndexesFromDonor(collection);
diff --git a/src/mongo/db/s/set_shard_version_command.cpp b/src/mongo/db/s/set_shard_version_command.cpp
index 43fa7f3d12e..839c87ea602 100644
--- a/src/mongo/db/s/set_shard_version_command.cpp
+++ b/src/mongo/db/s/set_shard_version_command.cpp
@@ -226,7 +226,7 @@ public:
// Views do not require a shard version check. We do not care about invalid system views
// for this check, only to validate if a view already exists for this namespace.
if (autoDb->getDb() &&
- !CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss) &&
+ !CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss) &&
ViewCatalog::get(autoDb->getDb())
->lookupWithoutValidatingDurableViews(opCtx, nss.ns())) {
return true;
diff --git a/src/mongo/db/s/shardsvr_rename_collection.cpp b/src/mongo/db/s/shardsvr_rename_collection.cpp
index 41971a44c6e..ec61672284e 100644
--- a/src/mongo/db/s/shardsvr_rename_collection.cpp
+++ b/src/mongo/db/s/shardsvr_rename_collection.cpp
@@ -62,7 +62,7 @@ public:
void typedRun(OperationContext* opCtx) {
auto incomingRequest = request();
auto sourceCollUUID = request().getUuid();
- auto nssFromUUID = CollectionCatalog::get(opCtx).lookupNSSByUUID(sourceCollUUID);
+ auto nssFromUUID = CollectionCatalog::get(opCtx).lookupNSSByUUID(opCtx, sourceCollUUID);
if (nssFromUUID == incomingRequest.getTo()) {
repl::ReplClientInfo::forClient(opCtx->getClient())
.setLastOpToSystemLastOpTime(opCtx);
diff --git a/src/mongo/db/storage/SConscript b/src/mongo/db/storage/SConscript
index 1a7d6e575f9..6f69bf51f59 100644
--- a/src/mongo/db/storage/SConscript
+++ b/src/mongo/db/storage/SConscript
@@ -91,7 +91,7 @@ env.Library(
'$BUILD_DIR/mongo/base',
]
)
-
+
env.Library(
target='oplog_cap_maintainer_thread',
source=[
@@ -424,6 +424,7 @@ env.Library(
],
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/db/catalog/index_timestamp_helper',
+ '$BUILD_DIR/mongo/db/concurrency/write_conflict_exception',
'$BUILD_DIR/mongo/db/server_options_core',
],
)
diff --git a/src/mongo/db/storage/durable_catalog_impl.cpp b/src/mongo/db/storage/durable_catalog_impl.cpp
index 42a1207672b..49e4c3fa52b 100644
--- a/src/mongo/db/storage/durable_catalog_impl.cpp
+++ b/src/mongo/db/storage/durable_catalog_impl.cpp
@@ -39,6 +39,7 @@
#include "mongo/bson/util/builder.h"
#include "mongo/db/catalog/index_timestamp_helper.h"
#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
@@ -459,9 +460,13 @@ void DurableCatalogImpl::init(OperationContext* opCtx) {
}
if (!_featureTracker) {
- // If there wasn't a feature document, then just an initialize a feature tracker that
- // doesn't manage a feature document yet.
+ // If there wasn't a feature document, commit a default one to disk. All deployments will
+ // end up with `kPathLevelMultikeyTracking` as every `_id` index build sets this.
+ WriteUnitOfWork wuow(opCtx);
_featureTracker = DurableCatalogImpl::FeatureTracker::create(opCtx, this);
+ _featureTracker->markRepairableFeatureAsInUse(
+ opCtx, FeatureTracker::RepairableFeature::kPathLevelMultikeyTracking);
+ wuow.commit();
}
// In the unlikely event that we have used this _rand before generate a new one.
@@ -777,12 +782,11 @@ StatusWith<std::pair<RecordId, std::unique_ptr<RecordStore>>> DurableCatalogImpl
const NamespaceString& nss,
const CollectionOptions& options,
bool allocateDefaultSpace) {
- invariant(opCtx->lockState()->isDbLockedForMode(nss.db(), MODE_IX));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX));
invariant(nss.coll().size() > 0);
- if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss)) {
- return Status(ErrorCodes::NamespaceExists,
- str::stream() << "collection already exists " << nss);
+ if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss)) {
+ throw WriteConflictException();
}
KVPrefix prefix = KVPrefix::getNextPrefix(nss);
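
This is the core behavioral change: DurableCatalogImpl::createCollection() no longer reports NamespaceExists itself; a name clash detected here surfaces as a WriteConflictException, and top-level callers that must fail on an existing collection are now responsible for the NamespaceExists check, as described in the commit message. A minimal sketch of that caller pattern, assuming the same helpers used elsewhere in this patch; the function name explicitCreate is illustrative:

    // Sketch only: an explicit create that must fail if the namespace exists.
    // A commit-time race surfaces as a WriteConflictException, which
    // writeConflictRetry turns into another attempt; that attempt then sees
    // the winner's collection and returns NamespaceExists.
    Status explicitCreate(OperationContext* opCtx, const NamespaceString& nss) {
        return writeConflictRetry(opCtx, "explicitCreate", nss.ns(), [&] {
            AutoGetOrCreateDb db(opCtx, nss.db(), MODE_IX);
            Lock::CollectionLock collLock(opCtx, nss, MODE_IX);
            if (CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss)) {
                return Status(ErrorCodes::NamespaceExists,
                              str::stream() << "Collection " << nss.ns() << " already exists.");
            }
            WriteUnitOfWork wuow(opCtx);
            db.getDb()->createCollection(opCtx, nss, CollectionOptions());
            wuow.commit();
            return Status::OK();
        });
    }
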
diff --git a/src/mongo/db/storage/kv/durable_catalog_feature_tracker_test.cpp b/src/mongo/db/storage/kv/durable_catalog_feature_tracker_test.cpp
index 82d69cf454d..d5a2594b37c 100644
--- a/src/mongo/db/storage/kv/durable_catalog_feature_tracker_test.cpp
+++ b/src/mongo/db/storage/kv/durable_catalog_feature_tracker_test.cpp
@@ -107,12 +107,6 @@ private:
std::unique_ptr<DurableCatalogImpl::FeatureTracker> _featureTracker;
};
-TEST_F(DurableCatalogFeatureTrackerTest, FeatureDocumentIsNotEagerlyCreated) {
- auto opCtx = newOperationContext();
- auto cursor = getRecordStore()->getCursor(opCtx.get());
- ASSERT_FALSE(static_cast<bool>(cursor->next()));
-}
-
TEST_F(DurableCatalogFeatureTrackerTest, CanMarkNonRepairableFeatureAsInUse) {
{
auto opCtx = newOperationContext();
diff --git a/src/mongo/db/storage/kv/durable_catalog_test.cpp b/src/mongo/db/storage/kv/durable_catalog_test.cpp
index 5a2da09c0ea..c5b5b469f09 100644
--- a/src/mongo/db/storage/kv/durable_catalog_test.cpp
+++ b/src/mongo/db/storage/kv/durable_catalog_test.cpp
@@ -88,9 +88,9 @@ public:
ASSERT_OK(swColl.getStatus());
std::pair<RecordId, std::unique_ptr<RecordStore>> coll = std::move(swColl.getValue());
_catalogId = coll.first;
- auto collection = std::make_unique<CollectionMock>(_nss);
- CollectionCatalog::get(opCtx.get())
- .registerCollection(options.uuid.get(), std::move(collection));
+ std::unique_ptr<Collection> collection =
+ std::make_unique<CollectionMock>(_nss, _catalogId);
+ CollectionCatalog::get(opCtx.get()).registerCollection(options.uuid.get(), &collection);
wuow.commit();
}
}
diff --git a/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp b/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp
index 6c5cfec31e6..a418badd579 100644
--- a/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp
+++ b/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp
@@ -87,8 +87,8 @@ bool OplogCapMaintainerThread::_deleteExcessDocuments() {
// We need to hold the database lock while getting the collection. Otherwise a
// concurrent collection creation would write to the map in the Database object
// while we concurrently read the map.
- Collection* collection =
- CollectionCatalog::get(opCtx.get()).lookupCollectionByNamespace(oplogNss);
+ Collection* collection = CollectionCatalog::get(opCtx.get())
+ .lookupCollectionByNamespace(opCtx.get(), oplogNss);
if (!collection) {
LOG(2) << "no collection " << oplogNss;
return false;
diff --git a/src/mongo/db/storage/recovery_unit.cpp b/src/mongo/db/storage/recovery_unit.cpp
index 70471edc713..bc11798a185 100644
--- a/src/mongo/db/storage/recovery_unit.cpp
+++ b/src/mongo/db/storage/recovery_unit.cpp
@@ -33,6 +33,7 @@
#include "mongo/db/storage/recovery_unit.h"
#include "mongo/util/log.h"
+#include "mongo/util/scopeguard.h"
namespace mongo {
namespace {
@@ -50,12 +51,26 @@ void RecoveryUnit::assignNextSnapshotId() {
_mySnapshotId = nextSnapshotId.fetchAndAdd(1);
}
+void RecoveryUnit::registerPreCommitHook(std::function<void(OperationContext*)> callback) {
+ _preCommitHooks.push_back(std::move(callback));
+}
+
+void RecoveryUnit::runPreCommitHooks(OperationContext* opCtx) {
+ ON_BLOCK_EXIT([&] { _preCommitHooks.clear(); });
+ for (auto& hook : _preCommitHooks) {
+ hook(opCtx);
+ }
+}
+
void RecoveryUnit::registerChange(std::unique_ptr<Change> change) {
invariant(_inUnitOfWork(), toString(_getState()));
_changes.push_back(std::move(change));
}
void RecoveryUnit::commitRegisteredChanges(boost::optional<Timestamp> commitTimestamp) {
+ // Getting to this method implies `runPreCommitHooks` completed successfully, resulting in
+ // having its contents cleared.
+ invariant(_preCommitHooks.empty());
for (auto& change : _changes) {
try {
LOG(2) << "CUSTOM COMMIT " << redact(demangleName(typeid(*change)));
@@ -68,6 +83,7 @@ void RecoveryUnit::commitRegisteredChanges(boost::optional<Timestamp> commitTime
}
void RecoveryUnit::abortRegisteredChanges() {
+ _preCommitHooks.clear();
try {
for (Changes::const_reverse_iterator it = _changes.rbegin(), end = _changes.rend();
it != end;
diff --git a/src/mongo/db/storage/recovery_unit.h b/src/mongo/db/storage/recovery_unit.h
index 521dd75232b..5158340a399 100644
--- a/src/mongo/db/storage/recovery_unit.h
+++ b/src/mongo/db/storage/recovery_unit.h
@@ -454,6 +454,15 @@ public:
virtual bool inActiveTxn() const = 0;
/**
+ * Registers a callback to be called prior to a WriteUnitOfWork committing the storage
+ * transaction. This callback may throw a WriteConflictException which will abort the
+ * transaction.
+ */
+ virtual void registerPreCommitHook(std::function<void(OperationContext*)> callback);
+
+ virtual void runPreCommitHooks(OperationContext* opCtx);
+
+ /**
* A Change is an action that is registerChange()'d while a WriteUnitOfWork exists. The
* change is either rollback()'d or commit()'d when the WriteUnitOfWork goes out of scope.
*
@@ -650,6 +659,8 @@ private:
virtual void doCommitUnitOfWork() = 0;
virtual void doAbortUnitOfWork() = 0;
+ std::vector<std::function<void(OperationContext*)>> _preCommitHooks;
+
typedef std::vector<std::unique_ptr<Change>> Changes;
Changes _changes;
State _state = State::kInactive;
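
The pre-commit hook is the mechanism that lets commit-time conflict detection abort the losing storage transaction. A minimal sketch of the hook mechanics, assuming a surrounding writeConflictRetry; the conflict condition itself is a placeholder (in this patch the real check lives in the UncommittedCollections machinery):

    WriteUnitOfWork wuow(opCtx);
    // ... stage writes / catalog changes ...
    opCtx->recoveryUnit()->registerPreCommitHook([](OperationContext* opCtx) {
        // Re-validate whatever precondition this unit of work depends on.
        // Throwing here aborts the transaction; a surrounding
        // writeConflictRetry() then re-runs the whole operation.
        bool preconditionStillHolds = true;  // placeholder
        if (!preconditionStillHolds) {
            throw WriteConflictException();
        }
    });
    wuow.commit();  // WriteUnitOfWork::commit() runs the hooks, then commits.
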
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index 367e526ed66..35c838c474a 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -262,7 +262,7 @@ void StorageEngineImpl::_initCollection(OperationContext* opCtx,
auto collection = collectionFactory->make(opCtx, nss, catalogId, uuid, std::move(rs));
auto& collectionCatalog = CollectionCatalog::get(getGlobalServiceContext());
- collectionCatalog.registerCollection(uuid, std::move(collection));
+ collectionCatalog.registerCollection(uuid, &collection);
}
void StorageEngineImpl::closeCatalog(OperationContext* opCtx) {
@@ -654,7 +654,7 @@ Status StorageEngineImpl::_dropCollectionsNoTimestamp(OperationContext* opCtx,
WriteUnitOfWork untimestampedDropWuow(opCtx);
for (auto& nss : toDrop) {
invariant(getCatalog());
- auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
Status result = getCatalog()->dropCollection(opCtx, coll->getCatalogId());
if (!result.isOK() && firstError.isOK()) {
@@ -741,7 +741,7 @@ Status StorageEngineImpl::repairRecordStore(OperationContext* opCtx,
// After repairing, re-initialize the collection with a valid RecordStore.
auto& collectionCatalog = CollectionCatalog::get(getGlobalServiceContext());
- auto uuid = collectionCatalog.lookupUUIDByNSS(nss).get();
+ auto uuid = collectionCatalog.lookupUUIDByNSS(opCtx, nss).get();
collectionCatalog.deregisterCollection(uuid);
_initCollection(opCtx, catalogId, nss, false);
return Status::OK();
diff --git a/src/mongo/db/storage/storage_engine_test_fixture.h b/src/mongo/db/storage/storage_engine_test_fixture.h
index ffda8356902..3495fd893e8 100644
--- a/src/mongo/db/storage/storage_engine_test_fixture.h
+++ b/src/mongo/db/storage/storage_engine_test_fixture.h
@@ -57,8 +57,9 @@ public:
std::unique_ptr<RecordStore> rs;
std::tie(catalogId, rs) = unittest::assertGet(
_storageEngine->getCatalog()->createCollection(opCtx, ns, options, true));
- CollectionCatalog::get(opCtx).registerCollection(
- options.uuid.get(), std::make_unique<CollectionMock>(ns, catalogId));
+
+ std::unique_ptr<Collection> coll = std::make_unique<CollectionMock>(ns, catalogId);
+ CollectionCatalog::get(opCtx).registerCollection(options.uuid.get(), &coll);
return {{_storageEngine->getCatalog()->getEntry(catalogId)}};
}
@@ -78,7 +79,7 @@ public:
Status dropIndexTable(OperationContext* opCtx, NamespaceString nss, std::string indexName) {
RecordId catalogId =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss)->getCatalogId();
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss)->getCatalogId();
std::string indexIdent =
_storageEngine->getCatalog()->getIndexIdent(opCtx, catalogId, indexName);
return dropIdent(opCtx, indexIdent);
@@ -138,7 +139,8 @@ public:
}
BSONObj spec = builder.append("name", key).append("v", 2).done();
- Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(collNs);
+ Collection* collection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, collNs);
auto descriptor =
std::make_unique<IndexDescriptor>(collection, IndexNames::findPluginName(spec), spec);
@@ -151,13 +153,14 @@ public:
}
void indexBuildSuccess(OperationContext* opCtx, NamespaceString collNs, std::string key) {
- Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(collNs);
+ Collection* collection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, collNs);
DurableCatalog::get(opCtx)->indexBuildSuccess(opCtx, collection->getCatalogId(), key);
}
Status removeEntry(OperationContext* opCtx, StringData collNs, DurableCatalog* catalog) {
- Collection* collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(NamespaceString(collNs));
+ Collection* collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
+ opCtx, NamespaceString(collNs));
return dynamic_cast<DurableCatalogImpl*>(catalog)->_removeEntry(opCtx,
collection->getCatalogId());
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 625d2b0991c..efe60ad6653 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -333,13 +333,21 @@ void WiredTigerRecordStore::OplogStones::_calculateStones(OperationContext* opCt
log() << "The size storer reports that the oplog contains " << numRecords
<< " records totaling to " << dataSize << " bytes";
+ // Don't calculate stones if this is a new collection. This is to prevent standalones from
+ // attempting to get a forward scanning oplog cursor on an explicit create of the oplog
+ // collection. These values can be wrong. The assumption is that if they are both observed to be
+ // zero, there must be very little data in the oplog; the cost of being wrong is imperceptible.
+ if (numRecords == 0 && dataSize == 0) {
+ return;
+ }
+
// Only use sampling to estimate where to place the oplog stones if the number of samples drawn
// is less than 5% of the collection.
const uint64_t kMinSampleRatioForRandCursor = 20;
// If the oplog doesn't contain enough records to make sampling more efficient, then scan the
// oplog to determine where to put down stones.
- if (numRecords <= 0 || dataSize <= 0 ||
+ if (numRecords < 0 || dataSize < 0 ||
uint64_t(numRecords) <
kMinSampleRatioForRandCursor * kRandomSamplesPerStone * numStonesToKeep) {
_calculateStonesByScanning(opCtx);
diff --git a/src/mongo/db/storage/write_unit_of_work.cpp b/src/mongo/db/storage/write_unit_of_work.cpp
index f09cd50cd24..bcc0ee742f4 100644
--- a/src/mongo/db/storage/write_unit_of_work.cpp
+++ b/src/mongo/db/storage/write_unit_of_work.cpp
@@ -31,6 +31,7 @@
#include "mongo/db/storage/write_unit_of_work.h"
+#include "mongo/db/catalog/uncommitted_collections.h"
#include "mongo/db/operation_context.h"
#include "mongo/util/fail_point.h"
#include "mongo/util/time_support.h"
@@ -105,6 +106,7 @@ void WriteUnitOfWork::commit() {
sleepFor(Milliseconds(100));
}
+ _opCtx->recoveryUnit()->runPreCommitHooks(_opCtx);
_opCtx->recoveryUnit()->commitUnitOfWork();
_opCtx->_ruState = RecoveryUnitState::kNotInUnitOfWork;
}
diff --git a/src/mongo/db/storage/write_unit_of_work.h b/src/mongo/db/storage/write_unit_of_work.h
index 297d1f65f6e..3d8130c290e 100644
--- a/src/mongo/db/storage/write_unit_of_work.h
+++ b/src/mongo/db/storage/write_unit_of_work.h
@@ -31,7 +31,6 @@
#include <memory>
-
namespace mongo {
class OperationContext;
diff --git a/src/mongo/db/system_index.cpp b/src/mongo/db/system_index.cpp
index 66e07743167..dfafa0e46a1 100644
--- a/src/mongo/db/system_index.cpp
+++ b/src/mongo/db/system_index.cpp
@@ -139,7 +139,7 @@ Status verifySystemIndexes(OperationContext* opCtx) {
}
Collection* collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(systemUsers);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, systemUsers);
if (collection) {
IndexCatalog* indexCatalog = collection->getIndexCatalog();
invariant(indexCatalog);
@@ -169,7 +169,7 @@ Status verifySystemIndexes(OperationContext* opCtx) {
}
// Ensure that system indexes exist for the roles collection, if it exists.
- collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(systemRoles);
+ collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, systemRoles);
if (collection) {
IndexCatalog* indexCatalog = collection->getIndexCatalog();
invariant(indexCatalog);
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index 73c348c8d30..e2ee6cf41c2 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -347,7 +347,8 @@ TransactionParticipant::getOldestActiveTimestamp(Timestamp stableTimestamp) {
return boost::none;
}
- auto collection = CollectionCatalog::get(opCtx.get()).lookupCollectionByNamespace(nss);
+ auto collection =
+ CollectionCatalog::get(opCtx.get()).lookupCollectionByNamespace(opCtx.get(), nss);
if (!collection) {
return boost::none;
}
@@ -1081,7 +1082,7 @@ Timestamp TransactionParticipant::Participant::prepareTransaction(
transactionOperationUuids.insert(transactionOp.getUuid().get());
}
for (const auto& uuid : transactionOperationUuids) {
- auto collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(uuid);
+ auto collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, uuid);
uassert(ErrorCodes::OperationNotSupportedInTransaction,
str::stream() << "prepareTransaction failed because one of the transaction "
"operations was done against a temporary collection '"
diff --git a/src/mongo/db/transaction_participant_test.cpp b/src/mongo/db/transaction_participant_test.cpp
index 46a0b8f700e..90de929be2d 100644
--- a/src/mongo/db/transaction_participant_test.cpp
+++ b/src/mongo/db/transaction_participant_test.cpp
@@ -3787,7 +3787,7 @@ TEST_F(TxnParticipantTest, OldestActiveTransactionTimestamp) {
AutoGetOrCreateDb autoDb(opCtx(), nss.db(), MODE_X);
WriteUnitOfWork wuow(opCtx());
- auto coll = CollectionCatalog::get(opCtx()).lookupCollectionByNamespace(nss);
+ auto coll = CollectionCatalog::get(opCtx()).lookupCollectionByNamespace(opCtx(), nss);
ASSERT(coll);
OpDebug* const nullOpDebug = nullptr;
ASSERT_OK(
@@ -3799,7 +3799,7 @@ TEST_F(TxnParticipantTest, OldestActiveTransactionTimestamp) {
Timestamp ts(1, i);
AutoGetOrCreateDb autoDb(opCtx(), nss.db(), MODE_X);
WriteUnitOfWork wuow(opCtx());
- auto coll = CollectionCatalog::get(opCtx()).lookupCollectionByNamespace(nss);
+ auto coll = CollectionCatalog::get(opCtx()).lookupCollectionByNamespace(opCtx(), nss);
ASSERT(coll);
auto cursor = coll->getCursor(opCtx());
while (auto record = cursor->next()) {
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index 0e514afb651..be0ca3fd9c2 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -144,7 +144,7 @@ private:
auto uuid = ttlInfo.first;
auto indexName = ttlInfo.second;
- auto nss = CollectionCatalog::get(opCtxPtr.get()).lookupNSSByUUID(uuid);
+ auto nss = CollectionCatalog::get(opCtxPtr.get()).lookupNSSByUUID(&opCtx, uuid);
if (!nss) {
ttlCollectionCache.deregisterTTLInfo(ttlInfo);
continue;
diff --git a/src/mongo/db/views/durable_view_catalog.cpp b/src/mongo/db/views/durable_view_catalog.cpp
index 86afedc12a7..5d3e410cd41 100644
--- a/src/mongo/db/views/durable_view_catalog.cpp
+++ b/src/mongo/db/views/durable_view_catalog.cpp
@@ -112,7 +112,7 @@ void DurableViewCatalogImpl::_iterate(OperationContext* opCtx,
invariant(opCtx->lockState()->isCollectionLockedForMode(_db->getSystemViewsName(), MODE_IS));
Collection* systemViews =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(_db->getSystemViewsName());
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, _db->getSystemViewsName());
if (!systemViews) {
return;
}
@@ -187,7 +187,7 @@ void DurableViewCatalogImpl::upsert(OperationContext* opCtx,
dassert(opCtx->lockState()->isCollectionLockedForMode(systemViewsNs, MODE_X));
Collection* systemViews =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(systemViewsNs);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, systemViewsNs);
invariant(systemViews);
const bool requireIndex = false;
@@ -215,7 +215,7 @@ void DurableViewCatalogImpl::remove(OperationContext* opCtx, const NamespaceStri
dassert(opCtx->lockState()->isCollectionLockedForMode(name, MODE_IX));
Collection* systemViews =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespace(_db->getSystemViewsName());
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, _db->getSystemViewsName());
dassert(opCtx->lockState()->isCollectionLockedForMode(systemViews->ns(), MODE_X));
if (!systemViews)
diff --git a/src/mongo/dbtests/SConscript b/src/mongo/dbtests/SConscript
index ab5707a053e..0e440b47168 100644
--- a/src/mongo/dbtests/SConscript
+++ b/src/mongo/dbtests/SConscript
@@ -63,6 +63,7 @@ if not has_option('noshell') and usemozjs:
target="dbtest",
source=[
'basictests.cpp',
+ 'catalogtests.cpp',
'clienttests.cpp',
'commandtests.cpp',
'counttests.cpp',
diff --git a/src/mongo/dbtests/catalogtests.cpp b/src/mongo/dbtests/catalogtests.cpp
new file mode 100644
index 00000000000..56db54cc9f8
--- /dev/null
+++ b/src/mongo/dbtests/catalogtests.cpp
@@ -0,0 +1,129 @@
+/**
+ * Copyright (C) 2019-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/database_holder.h"
+#include "mongo/db/catalog/index_catalog.h"
+#include "mongo/db/catalog/uncommitted_collections.h"
+#include "mongo/db/catalog_raii.h"
+#include "mongo/db/client.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/db/service_context.h"
+#include "mongo/db/storage/durable_catalog.h"
+#include "mongo/dbtests/dbtests.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+namespace {
+
+using unittest::log;
+
+bool collectionExists(OperationContext* opCtx, NamespaceString nss) {
+ return CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss) != nullptr;
+}
+
+class ConcurrentCreateCollectionTest {
+public:
+ void run() {
+ auto serviceContext = getGlobalServiceContext();
+ if (!serviceContext->getStorageEngine()->supportsDocLocking()) {
+ return;
+ }
+
+ // TODO SERVER-44138: The commented-out lines can be reinstated when committing new
+ // collections to the catalog is transactional (all succeed or fail).
+
+ // NamespaceString op1UniqueNss("test.uniqueCollection");
+ NamespaceString competingNss("test.competingCollection");
+
+ auto client1 = serviceContext->makeClient("client1");
+ auto client2 = serviceContext->makeClient("client2");
+
+ auto op1 = client1->makeOperationContext();
+ auto op2 = client2->makeOperationContext();
+
+
+ Lock::DBLock dbLk1(op1.get(), competingNss.db(), LockMode::MODE_IX);
+ Lock::CollectionLock collLk1(op1.get(), competingNss, LockMode::MODE_IX);
+ // Lock::CollectionLock uniqueNssLk(op1.get(), op1UniqueNss, LockMode::MODE_IX);
+ Lock::DBLock dbLk2(op2.get(), competingNss.db(), LockMode::MODE_IX);
+ Lock::CollectionLock collLk2(op2.get(), competingNss, LockMode::MODE_IX);
+
+ Database* db =
+ DatabaseHolder::get(op1.get())->openDb(op1.get(), competingNss.db(), nullptr);
+
+ {
+ WriteUnitOfWork wuow1(op1.get());
+ // ASSERT_TRUE(db->createCollection(op1.get(), op1UniqueNss) != nullptr);
+ ASSERT_TRUE(db->createCollection(op1.get(), competingNss) != nullptr);
+ ASSERT_TRUE(collectionExists(op1.get(), competingNss));
+ ASSERT_FALSE(collectionExists(op2.get(), competingNss));
+ {
+ WriteUnitOfWork wuow2(op2.get());
+ ASSERT_FALSE(collectionExists(op2.get(), competingNss));
+ ASSERT_TRUE(db->createCollection(op2.get(), competingNss) != nullptr);
+
+ ASSERT_TRUE(collectionExists(op1.get(), competingNss));
+ ASSERT_TRUE(collectionExists(op2.get(), competingNss));
+ ASSERT_NOT_EQUALS(UncommittedCollections::getForTxn(op1.get(), competingNss),
+ UncommittedCollections::getForTxn(op2.get(), competingNss));
+ wuow2.commit();
+ }
+ ASSERT_THROWS(wuow1.commit(), WriteConflictException);
+ }
+
+ ASSERT_TRUE(collectionExists(op1.get(), competingNss));
+ ASSERT_TRUE(collectionExists(op2.get(), competingNss));
+ // ASSERT_FALSE(collectionExists(op1.get(), op1UniqueNss));
+ // ASSERT_FALSE(collectionExists(op2.get(), op1UniqueNss));
+ }
+};
+
+} // namespace
+
+class AllCatalogTests : public unittest::OldStyleSuiteSpecification {
+public:
+ AllCatalogTests() : unittest::OldStyleSuiteSpecification("CatalogTests") {}
+
+ template <typename T>
+ void add() {
+ addNameCallback(nameForTestClass<T>(), [] { T().run(); });
+ }
+
+ void setupTests() {
+ add<ConcurrentCreateCollectionTest>();
+ }
+};
+
+unittest::OldStyleSuiteInitializer<AllCatalogTests> allCatalogTests;
+
+} // namespace mongo
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 9329f88245f..0506f843572 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -51,7 +51,8 @@ public:
{
WriteUnitOfWork wunit(&_opCtx);
- _collection = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ _collection =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (_collection) {
_database->dropCollection(&_opCtx, nss()).transitional_ignore();
}
diff --git a/src/mongo/dbtests/dbtests.cpp b/src/mongo/dbtests/dbtests.cpp
index e094c2ed223..56612d5c9d5 100644
--- a/src/mongo/dbtests/dbtests.cpp
+++ b/src/mongo/dbtests/dbtests.cpp
@@ -99,7 +99,8 @@ Status createIndexFromSpec(OperationContext* opCtx, StringData ns, const BSONObj
Collection* coll;
{
WriteUnitOfWork wunit(opCtx);
- coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(NamespaceString(ns));
+ coll =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, NamespaceString(ns));
if (!coll) {
coll = autoDb.getDb()->createCollection(opCtx, NamespaceString(ns));
}
diff --git a/src/mongo/dbtests/dbtests.h b/src/mongo/dbtests/dbtests.h
index b40c46b8fb1..617a4f64520 100644
--- a/src/mongo/dbtests/dbtests.h
+++ b/src/mongo/dbtests/dbtests.h
@@ -80,7 +80,7 @@ public:
}
Collection* getCollection() const {
- return CollectionCatalog::get(_opCtx).lookupCollectionByNamespace(_nss);
+ return CollectionCatalog::get(_opCtx).lookupCollectionByNamespace(_opCtx, _nss);
}
private:
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 19b1a22a8c5..21e45f0adbd 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -34,6 +34,8 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/catalog/multi_index_block.h"
+#include "mongo/db/catalog/uncommitted_collections.h"
+#include "mongo/db/catalog_raii.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"
@@ -58,33 +60,46 @@ static const NamespaceString _nss = NamespaceString(_ns);
*/
class IndexBuildBase {
public:
- IndexBuildBase() : _ctx(&_opCtx, _ns), _client(&_opCtx) {
- _client.createCollection(_ns);
+ IndexBuildBase() {
+ regenOpCtx();
+
+ AutoGetOrCreateDb db(_opCtx, _nss.db(), LockMode::MODE_IX);
+ WriteUnitOfWork wuow(_opCtx);
+ Lock::CollectionLock lk(_opCtx, _nss, LockMode::MODE_IX);
+ db.getDb()->createCollection(_opCtx, _nss);
+ wuow.commit();
}
+
~IndexBuildBase() {
- _client.dropCollection(_ns);
getGlobalServiceContext()->unsetKillAllOperations();
+
+ AutoGetOrCreateDb db(_opCtx, _nss.db(), LockMode::MODE_IX);
+ WriteUnitOfWork wuow(_opCtx);
+ Lock::CollectionLock lk(_opCtx, _nss, LockMode::MODE_X);
+ ASSERT_OK(db.getDb()->dropCollection(_opCtx, _nss, {}));
+ wuow.commit();
}
+
Collection* collection() {
- return _ctx.getCollection();
+ return CollectionCatalog::get(_opCtx).lookupCollectionByNamespace(_opCtx, _nss);
}
protected:
- Status createIndex(const std::string& dbname, const BSONObj& indexSpec);
+ Status createIndex(const BSONObj& indexSpec);
bool buildIndexInterrupted(const BSONObj& key) {
try {
MultiIndexBlock indexer;
ON_BLOCK_EXIT([&] {
- indexer.cleanUpAfterBuild(&_opCtx, collection(), MultiIndexBlock::kNoopOnCleanUpFn);
+ indexer.cleanUpAfterBuild(_opCtx, collection(), MultiIndexBlock::kNoopOnCleanUpFn);
});
uassertStatusOK(
- indexer.init(&_opCtx, collection(), key, MultiIndexBlock::kNoopOnInitFn));
- uassertStatusOK(indexer.insertAllDocumentsInCollection(&_opCtx, collection()));
- WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(indexer.commit(&_opCtx,
+ indexer.init(_opCtx, collection(), key, MultiIndexBlock::kNoopOnInitFn));
+ uassertStatusOK(indexer.insertAllDocumentsInCollection(_opCtx, collection()));
+ WriteUnitOfWork wunit(_opCtx);
+ ASSERT_OK(indexer.commit(_opCtx,
collection(),
MultiIndexBlock::kNoopOnCreateEachFn,
MultiIndexBlock::kNoopOnCommitFn));
@@ -98,10 +113,14 @@ protected:
return false;
}
- const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _opCtx = *_txnPtr;
- dbtests::WriteContextForTests _ctx;
- DBDirectClient _client;
+ void regenOpCtx() {
+ _txnPtr = nullptr;
+ _txnPtr = cc().makeOperationContext();
+ _opCtx = _txnPtr.get();
+ }
+
+ ServiceContext::UniqueOperationContext _txnPtr; // = cc().makeOperationContext();
+ OperationContext* _opCtx; // = _txnPtr.get();
};
/** Index creation ignores unique constraints when told to. */
@@ -109,21 +128,18 @@ template <bool background>
class InsertBuildIgnoreUnique : public IndexBuildBase {
public:
void run() {
- // Create a new collection.
- Database* db = _ctx.db();
- Collection* coll;
+ AutoGetOrCreateDb dbRaii(_opCtx, _nss.db(), LockMode::MODE_IX);
+ Lock::CollectionLock collLk(_opCtx, _nss, LockMode::MODE_X);
+ Collection* coll = collection();
{
- WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _nss));
- coll = db->createCollection(&_opCtx, _nss);
-
+ WriteUnitOfWork wunit(_opCtx);
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(coll->insertDocument(&_opCtx,
+ ASSERT_OK(coll->insertDocument(_opCtx,
InsertStatement(BSON("_id" << 1 << "a"
<< "dup")),
nullOpDebug,
true));
- ASSERT_OK(coll->insertDocument(&_opCtx,
+ ASSERT_OK(coll->insertDocument(_opCtx,
InsertStatement(BSON("_id" << 2 << "a"
<< "dup")),
nullOpDebug,
@@ -141,15 +157,15 @@ public:
<< "background" << background);
ON_BLOCK_EXIT(
- [&] { indexer.cleanUpAfterBuild(&_opCtx, coll, MultiIndexBlock::kNoopOnCleanUpFn); });
+ [&] { indexer.cleanUpAfterBuild(_opCtx, coll, MultiIndexBlock::kNoopOnCleanUpFn); });
- ASSERT_OK(indexer.init(&_opCtx, coll, spec, MultiIndexBlock::kNoopOnInitFn).getStatus());
- ASSERT_OK(indexer.insertAllDocumentsInCollection(&_opCtx, coll));
- ASSERT_OK(indexer.checkConstraints(&_opCtx));
+ ASSERT_OK(indexer.init(_opCtx, coll, spec, MultiIndexBlock::kNoopOnInitFn).getStatus());
+ ASSERT_OK(indexer.insertAllDocumentsInCollection(_opCtx, coll));
+ ASSERT_OK(indexer.checkConstraints(_opCtx));
- WriteUnitOfWork wunit(&_opCtx);
+ WriteUnitOfWork wunit(_opCtx);
ASSERT_OK(indexer.commit(
- &_opCtx, coll, MultiIndexBlock::kNoopOnCreateEachFn, MultiIndexBlock::kNoopOnCommitFn));
+ _opCtx, coll, MultiIndexBlock::kNoopOnCreateEachFn, MultiIndexBlock::kNoopOnCommitFn));
wunit.commit();
}
};
@@ -160,20 +176,19 @@ class InsertBuildEnforceUnique : public IndexBuildBase {
public:
void run() {
// Create a new collection.
- Database* db = _ctx.db();
- Collection* coll;
+ AutoGetOrCreateDb dbRaii(_opCtx, _nss.db(), LockMode::MODE_IX);
+ boost::optional<Lock::CollectionLock> collLk;
+ collLk.emplace(_opCtx, _nss, LockMode::MODE_IX);
+ Collection* coll = collection();
{
- WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _nss));
- coll = db->createCollection(&_opCtx, _nss);
-
+ WriteUnitOfWork wunit(_opCtx);
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(coll->insertDocument(&_opCtx,
+ ASSERT_OK(coll->insertDocument(_opCtx,
InsertStatement(BSON("_id" << 1 << "a"
<< "dup")),
nullOpDebug,
true));
- ASSERT_OK(coll->insertDocument(&_opCtx,
+ ASSERT_OK(coll->insertDocument(_opCtx,
InsertStatement(BSON("_id" << 2 << "a"
<< "dup")),
nullOpDebug,
@@ -189,19 +204,20 @@ public:
<< static_cast<int>(kIndexVersion) << "unique" << true
<< "background" << background);
+ collLk.emplace(_opCtx, _nss, LockMode::MODE_X);
ON_BLOCK_EXIT(
- [&] { indexer.cleanUpAfterBuild(&_opCtx, coll, MultiIndexBlock::kNoopOnCleanUpFn); });
+ [&] { indexer.cleanUpAfterBuild(_opCtx, coll, MultiIndexBlock::kNoopOnCleanUpFn); });
- ASSERT_OK(indexer.init(&_opCtx, coll, spec, MultiIndexBlock::kNoopOnInitFn).getStatus());
+ ASSERT_OK(indexer.init(_opCtx, coll, spec, MultiIndexBlock::kNoopOnInitFn).getStatus());
auto desc =
- coll->getIndexCatalog()->findIndexByName(&_opCtx, "a", true /* includeUnfinished */);
+ coll->getIndexCatalog()->findIndexByName(_opCtx, "a", true /* includeUnfinished */);
ASSERT(desc);
// Hybrid index builds check duplicates explicitly.
- ASSERT_OK(indexer.insertAllDocumentsInCollection(&_opCtx, coll));
+ ASSERT_OK(indexer.insertAllDocumentsInCollection(_opCtx, coll));
- auto status = indexer.checkConstraints(&_opCtx);
+ auto status = indexer.checkConstraints(_opCtx);
ASSERT_EQUALS(status.code(), ErrorCodes::DuplicateKey);
}
};
@@ -210,35 +226,42 @@ public:
class InsertBuildIndexInterrupt : public IndexBuildBase {
public:
void run() {
- // Create a new collection.
- Database* db = _ctx.db();
- Collection* coll;
{
- WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _nss));
- coll = db->createCollection(&_opCtx, _nss);
- // Drop all indexes including id index.
- coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);
- // Insert some documents.
- int32_t nDocs = 1000;
- OpDebug* const nullOpDebug = nullptr;
- for (int32_t i = 0; i < nDocs; ++i) {
- ASSERT_OK(
- coll->insertDocument(&_opCtx, InsertStatement(BSON("a" << i)), nullOpDebug));
+ AutoGetOrCreateDb dbRaii(_opCtx, _nss.db(), LockMode::MODE_IX);
+ boost::optional<Lock::CollectionLock> collLk;
+ collLk.emplace(_opCtx, _nss, LockMode::MODE_X);
+
+ Collection* coll = collection();
+ {
+ WriteUnitOfWork wunit(_opCtx);
+ // Drop all indexes including id index.
+ coll->getIndexCatalog()->dropAllIndexes(_opCtx, true);
+ // Insert some documents.
+ int32_t nDocs = 1000;
+ OpDebug* const nullOpDebug = nullptr;
+ for (int32_t i = 0; i < nDocs; ++i) {
+ ASSERT_OK(
+ coll->insertDocument(_opCtx, InsertStatement(BSON("a" << i)), nullOpDebug));
+ }
+ wunit.commit();
}
- wunit.commit();
+ // Request an interrupt.
+ getGlobalServiceContext()->setKillAllOperations();
+ BSONObj indexInfo = BSON("key" << BSON("a" << 1) << "name"
+ << "a_1"
+ << "v" << static_cast<int>(kIndexVersion));
+ // The call is interrupted because mayInterrupt == true.
+ ASSERT_TRUE(buildIndexInterrupted(indexInfo));
+ // We only want to interrupt the index build.
+ getGlobalServiceContext()->unsetKillAllOperations();
}
- // Request an interrupt.
- getGlobalServiceContext()->setKillAllOperations();
- BSONObj indexInfo = BSON("key" << BSON("a" << 1) << "name"
- << "a_1"
- << "v" << static_cast<int>(kIndexVersion));
- // The call is interrupted because mayInterrupt == true.
- ASSERT_TRUE(buildIndexInterrupted(indexInfo));
- // only want to interrupt the index build
- getGlobalServiceContext()->unsetKillAllOperations();
+
+ regenOpCtx();
+ AutoGetDb dbRaii(_opCtx, _nss.db(), LockMode::MODE_IX);
+ boost::optional<Lock::CollectionLock> collLk;
+ collLk.emplace(_opCtx, _nss, LockMode::MODE_IX);
// The new index is not listed in the index catalog because the index build failed.
- ASSERT(!coll->getIndexCatalog()->findIndexByName(&_opCtx, "a_1"));
+ ASSERT(!collection()->getIndexCatalog()->findIndexByName(_opCtx, "a_1"));
}
};
@@ -251,62 +274,73 @@ public:
return;
}
- // Recreate the collection as capped, without an _id index.
- Database* db = _ctx.db();
- Collection* coll;
{
- WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(db->dropCollection(&_opCtx, _nss));
+ // Recreate the collection as capped, without an _id index.
+ AutoGetOrCreateDb dbRaii(_opCtx, _nss.db(), LockMode::MODE_IX);
+ Database* db = dbRaii.getDb();
+ boost::optional<Lock::CollectionLock> collLk;
+ collLk.emplace(_opCtx, _nss, LockMode::MODE_X);
+
+ WriteUnitOfWork wunit(_opCtx);
+ ASSERT_OK(db->dropCollection(_opCtx, _nss));
CollectionOptions options;
options.capped = true;
options.cappedSize = 10 * 1024;
- coll = db->createCollection(&_opCtx, _nss, options);
- coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);
+ Collection* coll = db->createCollection(_opCtx, _nss, options);
+ coll->getIndexCatalog()->dropAllIndexes(_opCtx, true);
// Insert some documents.
int32_t nDocs = 1000;
OpDebug* const nullOpDebug = nullptr;
for (int32_t i = 0; i < nDocs; ++i) {
ASSERT_OK(coll->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << i)), nullOpDebug, true));
+ _opCtx, InsertStatement(BSON("_id" << i)), nullOpDebug, true));
}
wunit.commit();
+ // Request an interrupt.
+ getGlobalServiceContext()->setKillAllOperations();
+ BSONObj indexInfo = BSON("key" << BSON("_id" << 1) << "name"
+ << "_id_"
+ << "v" << static_cast<int>(kIndexVersion));
+ ASSERT_TRUE(buildIndexInterrupted(indexInfo));
+ // We only want to interrupt the index build.
+ getGlobalServiceContext()->unsetKillAllOperations();
}
- // Request an interrupt.
- getGlobalServiceContext()->setKillAllOperations();
- BSONObj indexInfo = BSON("key" << BSON("_id" << 1) << "name"
- << "_id_"
- << "v" << static_cast<int>(kIndexVersion));
- ASSERT_TRUE(buildIndexInterrupted(indexInfo));
- // only want to interrupt the index build
- getGlobalServiceContext()->unsetKillAllOperations();
+ regenOpCtx();
+ AutoGetOrCreateDb dbRaii(_opCtx, _nss.db(), LockMode::MODE_IX);
+ boost::optional<Lock::CollectionLock> collLk;
+ collLk.emplace(_opCtx, _nss, LockMode::MODE_IX);
+
// The new index is not listed in the index catalog because the index build failed.
- ASSERT(!coll->getIndexCatalog()->findIndexByName(&_opCtx, "_id_"));
+ ASSERT(!collection()->getIndexCatalog()->findIndexByName(_opCtx, "_id_"));
}
-};
+}; // namespace IndexUpdateTests
+
+Status IndexBuildBase::createIndex(const BSONObj& indexSpec) {
+ Lock::DBLock dbLk(_opCtx, _nss.db(), MODE_IX);
+ Lock::CollectionLock collLk(_opCtx, _nss, MODE_X);
-Status IndexBuildBase::createIndex(const std::string& dbname, const BSONObj& indexSpec) {
MultiIndexBlock indexer;
ON_BLOCK_EXIT([&] {
- indexer.cleanUpAfterBuild(&_opCtx, collection(), MultiIndexBlock::kNoopOnCleanUpFn);
+ indexer.cleanUpAfterBuild(_opCtx, collection(), MultiIndexBlock::kNoopOnCleanUpFn);
});
Status status =
- indexer.init(&_opCtx, collection(), indexSpec, MultiIndexBlock::kNoopOnInitFn).getStatus();
+ indexer.init(_opCtx, collection(), indexSpec, MultiIndexBlock::kNoopOnInitFn).getStatus();
if (status == ErrorCodes::IndexAlreadyExists) {
return Status::OK();
}
if (!status.isOK()) {
return status;
}
- status = indexer.insertAllDocumentsInCollection(&_opCtx, collection());
+ status = indexer.insertAllDocumentsInCollection(_opCtx, collection());
if (!status.isOK()) {
return status;
}
- status = indexer.checkConstraints(&_opCtx);
+ status = indexer.checkConstraints(_opCtx);
if (!status.isOK()) {
return status;
}
- WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(indexer.commit(&_opCtx,
+ WriteUnitOfWork wunit(_opCtx);
+ ASSERT_OK(indexer.commit(_opCtx,
collection(),
MultiIndexBlock::kNoopOnCreateEachFn,
MultiIndexBlock::kNoopOnCommitFn));
@@ -320,8 +354,7 @@ Status IndexBuildBase::createIndex(const std::string& dbname, const BSONObj& ind
class SimpleCompoundIndex : public IndexBuildBase {
public:
SimpleCompoundIndex() {
- ASSERT_OK(createIndex("unittest",
- BSON("name"
+ ASSERT_OK(createIndex(BSON("name"
<< "x"
<< "key" << BSON("x" << 1 << "y" << 1) << "v"
<< static_cast<int>(kIndexVersion))));
@@ -333,8 +366,7 @@ public:
void run() {
// Cannot have same key spec with an option different from the existing one.
ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict,
- createIndex("unittest",
- BSON("name"
+ createIndex(BSON("name"
<< "x"
<< "unique" << true << "key" << BSON("x" << 1 << "y" << 1)
<< "v" << static_cast<int>(kIndexVersion))));
@@ -344,8 +376,7 @@ public:
class SameSpecSameOptions : public SimpleCompoundIndex {
public:
void run() {
- ASSERT_OK(createIndex("unittest",
- BSON("name"
+ ASSERT_OK(createIndex(BSON("name"
<< "x"
<< "key" << BSON("x" << 1 << "y" << 1) << "v"
<< static_cast<int>(kIndexVersion))));
@@ -357,8 +388,7 @@ public:
void run() {
// Cannot create a different index with the same name as the existing one.
ASSERT_EQUALS(ErrorCodes::IndexKeySpecsConflict,
- createIndex("unittest",
- BSON("name"
+ createIndex(BSON("name"
<< "x"
<< "key" << BSON("y" << 1 << "x" << 1) << "v"
<< static_cast<int>(kIndexVersion))));
@@ -371,8 +401,7 @@ public:
class ComplexIndex : public IndexBuildBase {
public:
ComplexIndex() {
- ASSERT_OK(createIndex("unittests",
- BSON("name"
+ ASSERT_OK(createIndex(BSON("name"
<< "super"
<< "unique" << 1 << "sparse" << true << "expireAfterSeconds"
<< 3600 << "key"
@@ -389,8 +418,7 @@ public:
// the original. This will throw an IndexOptionsConflict as the index already exists under
// another name.
ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict,
- createIndex("unittests",
- BSON("name"
+ createIndex(BSON("name"
<< "super2"
<< "expireAfterSeconds" << 3600 << "sparse" << true
<< "unique" << 1 << "key"
@@ -405,8 +433,7 @@ public:
void run() {
// Exactly the same specs with the existing one, only specified in a different order than
// the original, but with the same name.
- ASSERT_OK(createIndex("unittests",
- BSON("name"
+ ASSERT_OK(createIndex(BSON("name"
<< "super"
<< "expireAfterSeconds" << 3600 << "sparse" << true << "unique"
<< 1 << "key"
@@ -423,8 +450,7 @@ class SameSpecDifferentUnique : public ComplexIndex {
public:
void run() {
ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict,
- createIndex("unittest",
- BSON("name"
+ createIndex(BSON("name"
<< "super2"
<< "unique" << false << "sparse" << true
<< "expireAfterSeconds" << 3600 << "key"
@@ -438,8 +464,7 @@ class SameSpecDifferentSparse : public ComplexIndex {
public:
void run() {
ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict,
- createIndex("unittest",
- BSON("name"
+ createIndex(BSON("name"
<< "super2"
<< "unique" << 1 << "sparse" << false << "background" << true
<< "expireAfterSeconds" << 3600 << "key"
@@ -453,8 +478,7 @@ class SameSpecDifferentTTL : public ComplexIndex {
public:
void run() {
ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict,
- createIndex("unittest",
- BSON("name"
+ createIndex(BSON("name"
<< "super2"
<< "unique" << 1 << "sparse" << true << "expireAfterSeconds"
<< 2400 << "key"
@@ -468,13 +492,13 @@ class StorageEngineOptions : public IndexBuildBase {
public:
void run() {
// "storageEngine" field has to be an object if present.
- ASSERT_NOT_OK(createIndex("unittest", _createSpec(12345)));
+ ASSERT_NOT_OK(createIndex(_createSpec(12345)));
// 'storageEngine' must not be empty.
- ASSERT_NOT_OK(createIndex("unittest", _createSpec(BSONObj())));
+ ASSERT_NOT_OK(createIndex(_createSpec(BSONObj())));
// Every field under "storageEngine" must match a registered storage engine.
- ASSERT_NOT_OK(createIndex("unittest", _createSpec(BSON("unknownEngine" << BSONObj()))));
+ ASSERT_NOT_OK(createIndex(_createSpec(BSON("unknownEngine" << BSONObj()))));
// Testing with 'wiredTiger' because the registered storage engine factory
// supports custom index options under 'storageEngine'.
@@ -483,20 +507,19 @@ public:
// Run 'wiredTiger' tests if the storage engine is supported.
if (isRegisteredStorageEngine(getGlobalServiceContext(), storageEngineName)) {
// Every field under "storageEngine" has to be an object.
- ASSERT_NOT_OK(createIndex("unittest", _createSpec(BSON(storageEngineName << 1))));
+ ASSERT_NOT_OK(createIndex(_createSpec(BSON(storageEngineName << 1))));
// Storage engine options must pass validation by the storage engine factory.
// For 'wiredTiger', embedded document must contain 'configString'.
- ASSERT_NOT_OK(createIndex(
- "unittest", _createSpec(BSON(storageEngineName << BSON("unknown" << 1)))));
+ ASSERT_NOT_OK(
+ createIndex(_createSpec(BSON(storageEngineName << BSON("unknown" << 1)))));
// Configuration string for 'wiredTiger' must be a string.
- ASSERT_NOT_OK(createIndex(
- "unittest", _createSpec(BSON(storageEngineName << BSON("configString" << 1)))));
+ ASSERT_NOT_OK(
+ createIndex(_createSpec(BSON(storageEngineName << BSON("configString" << 1)))));
// Valid 'wiredTiger' configuration.
ASSERT_OK(createIndex(
- "unittest",
_createSpec(BSON(storageEngineName << BSON("configString"
<< "block_compressor=zlib")))));
}
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index 0ed15a37d23..ac55a11f63b 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -60,7 +60,7 @@ protected:
return NamespaceString("unittests.pdfiletests.Insert");
}
Collection* collection() {
- return CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ return CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
}
const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
@@ -75,7 +75,8 @@ public:
WriteUnitOfWork wunit(&_opCtx);
BSONObj x = BSON("x" << 1);
ASSERT(x["_id"].type() == 0);
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
coll = _context.db()->createCollection(&_opCtx, nss());
}
diff --git a/src/mongo/dbtests/plan_executor_invalidation_test.cpp b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
index 8cb1e1401d9..79577d0b7df 100644
--- a/src/mongo/dbtests/plan_executor_invalidation_test.cpp
+++ b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
@@ -85,12 +85,12 @@ public:
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
// Takes ownership of 'ws', 'scan', and 'cq'.
- auto statusWithPlanExecutor =
- PlanExecutor::make(std::move(cq),
- std::move(ws),
- std::move(scan),
- CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss),
- PlanExecutor::YIELD_MANUAL);
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ std::move(cq),
+ std::move(ws),
+ std::move(scan),
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss),
+ PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
return std::move(statusWithPlanExecutor.getValue());
@@ -117,7 +117,7 @@ public:
}
Collection* collection() {
- return CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss);
+ return CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss);
}
void truncateCollection(Collection* collection) const {
diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp
index 1726b5c994f..92e99e1d3ae 100644
--- a/src/mongo/dbtests/query_plan_executor.cpp
+++ b/src/mongo/dbtests/query_plan_executor.cpp
@@ -148,7 +148,8 @@ public:
ixparams.bounds.endKey = BSON("" << end);
ixparams.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
- const Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss);
+ const Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss);
unique_ptr<WorkingSet> ws(new WorkingSet());
@@ -175,7 +176,8 @@ protected:
private:
const IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
- Collection* collection = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss);
+ Collection* collection =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss);
std::vector<const IndexDescriptor*> indexes;
collection->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes);
ASSERT_LTE(indexes.size(), 1U);
diff --git a/src/mongo/dbtests/query_stage_count_scan.cpp b/src/mongo/dbtests/query_stage_count_scan.cpp
index 53390fca861..913baf62ba9 100644
--- a/src/mongo/dbtests/query_stage_count_scan.cpp
+++ b/src/mongo/dbtests/query_stage_count_scan.cpp
@@ -90,8 +90,8 @@ public:
}
const IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
- Collection* collection =
- CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(NamespaceString(ns()));
+ Collection* collection = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(
+ &_opCtx, NamespaceString(ns()));
std::vector<const IndexDescriptor*> indexes;
collection->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes);
return indexes.empty() ? nullptr : indexes[0];
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index fbc35ccbc47..13781d15b3a 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -99,7 +99,8 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -165,7 +166,8 @@ public:
Lock::DBLock lk(&_opCtx, nss().db(), MODE_X);
OldClientContext ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 3e296b1e07b..b0df7ed51fc 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -145,7 +145,8 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -210,7 +211,8 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -274,7 +276,8 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -339,7 +342,8 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -408,7 +412,8 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -471,7 +476,8 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -523,7 +529,8 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -640,7 +647,8 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -742,7 +750,8 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -808,7 +817,8 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index 69f8582b54c..9ae8cd972eb 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -249,7 +249,8 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -271,7 +272,8 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -302,7 +304,8 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -327,7 +330,8 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -436,7 +440,8 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -534,7 +539,8 @@ public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wuow(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index 7b8a360a113..2f501af6690 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -273,7 +273,8 @@ public:
OpDebug* opDebug = &curOp.debug();
const CollatorInterface* collator = nullptr;
UpdateDriver driver(new ExpressionContext(&_opCtx, collator));
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss);
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss);
ASSERT(coll);
// Get the RecordIds that would be returned by an in-order scan.
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 136573a521d..c7936436f49 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -68,7 +68,8 @@ public:
{
WriteUnitOfWork wunit(&_opCtx);
_database = _context.db();
- _collection = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ _collection =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (_collection) {
_database->dropCollection(&_opCtx, nss()).transitional_ignore();
}
@@ -224,7 +225,7 @@ public:
{
WriteUnitOfWork wunit(&_opCtx);
Database* db = ctx.db();
- if (CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss())) {
+ if (CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss())) {
_collection = nullptr;
db->dropCollection(&_opCtx, nss()).transitional_ignore();
}
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 7de0b3ccb7a..9b38d9d863d 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -141,7 +141,7 @@ public:
dbtests::WriteContextForTests ctx(&_opCtx, ns());
WriteUnitOfWork wuow(&_opCtx);
- Collection* c = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* c = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!c) {
c = ctx.db()->createCollection(&_opCtx, nss());
}
@@ -211,7 +211,8 @@ protected:
Lock::GlobalWrite lk(&_opCtx);
OldClientContext ctx(&_opCtx, ns());
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
WriteUnitOfWork wunit(&_opCtx);
coll = db->createCollection(&_opCtx, nss());
@@ -268,7 +269,8 @@ protected:
OldClientContext ctx(&_opCtx, ns);
WriteUnitOfWork wunit(&_opCtx);
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss);
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss);
if (!coll) {
coll = db->createCollection(&_opCtx, nss);
}
@@ -282,7 +284,8 @@ protected:
OldClientContext ctx(&_opCtx, ns());
WriteUnitOfWork wunit(&_opCtx);
Database* db = ctx.db();
- Collection* coll = CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(nss());
+ Collection* coll =
+ CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss());
if (!coll) {
coll = db->createCollection(&_opCtx, nss());
}
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index 0276ef65381..72affc48683 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -62,10 +62,7 @@ void dropDatabase(OperationContext* opCtx, const NamespaceString& nss) {
}
}
bool collectionExists(OperationContext* opCtx, OldClientContext* ctx, const string& ns) {
- auto nss = NamespaceString(ns);
- std::vector<NamespaceString> collections = CollectionCatalog::get(getGlobalServiceContext())
- .getAllCollectionNamesFromDb(opCtx, nss.db());
- return std::count(collections.begin(), collections.end(), nss) > 0;
+ return CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, NamespaceString(ns));
}
void createCollection(OperationContext* opCtx, const NamespaceString& nss) {
@@ -87,17 +84,17 @@ Status renameCollection(OperationContext* opCtx,
return renameCollection(opCtx, source, target, {});
}
Status truncateCollection(OperationContext* opCtx, const NamespaceString& nss) {
- auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
return coll->truncate(opCtx);
}
void insertRecord(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& data) {
- auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
OpDebug* const nullOpDebug = nullptr;
ASSERT_OK(coll->insertDocument(opCtx, InsertStatement(data), nullOpDebug, false));
}
void assertOnlyRecord(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& data) {
- auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
auto cursor = coll->getCursor(opCtx);
auto record = cursor->next();
@@ -107,15 +104,15 @@ void assertOnlyRecord(OperationContext* opCtx, const NamespaceString& nss, const
ASSERT(!cursor->next());
}
void assertEmpty(OperationContext* opCtx, const NamespaceString& nss) {
- auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
ASSERT(!coll->getCursor(opCtx)->next());
}
bool indexExists(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) {
- auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, true) != nullptr;
}
bool indexReady(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) {
- auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, false) != nullptr;
}
size_t getNumIndexEntries(OperationContext* opCtx,
@@ -123,7 +120,7 @@ size_t getNumIndexEntries(OperationContext* opCtx,
const string& idxName) {
size_t numEntries = 0;
- auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
IndexCatalog* catalog = coll->getIndexCatalog();
auto desc = catalog->findIndexByName(opCtx, idxName, false);
@@ -145,7 +142,7 @@ size_t getNumIndexEntries(OperationContext* opCtx,
}
void dropIndex(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) {
- auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(nss);
+ auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
auto desc = coll->getIndexCatalog()->findIndexByName(opCtx, idxName);
ASSERT(desc);
ASSERT_OK(coll->getIndexCatalog()->dropIndex(opCtx, desc));
@@ -429,49 +426,6 @@ public:
};
template <bool rollback, bool defaultIndexes>
-class CreateDropCollection {
-public:
- void run() {
- NamespaceString nss("unittests.rollback_create_drop_collection");
- const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
- OperationContext& opCtx = *opCtxPtr;
- dropDatabase(&opCtx, nss);
-
- Lock::DBLock dbXLock(&opCtx, nss.db(), MODE_X);
- OldClientContext ctx(&opCtx, nss.ns());
-
- BSONObj doc = BSON("_id"
- << "example string");
-
- ASSERT(!collectionExists(&opCtx, &ctx, nss.ns()));
- {
- WriteUnitOfWork uow(&opCtx);
-
- CollectionOptions collectionOptions =
- assertGet(CollectionOptions::parse(BSONObj(), CollectionOptions::parseForCommand));
- ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, defaultIndexes));
- ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
- insertRecord(&opCtx, nss, doc);
- assertOnlyRecord(&opCtx, nss, doc);
-
- BSONObjBuilder result;
- ASSERT_OK(
- dropCollection(&opCtx,
- nss,
- result,
- {},
- DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops));
- ASSERT(!collectionExists(&opCtx, &ctx, nss.ns()));
-
- if (!rollback) {
- uow.commit();
- }
- }
- ASSERT(!collectionExists(&opCtx, &ctx, nss.ns()));
- }
-};
-
-template <bool rollback, bool defaultIndexes>
class TruncateCollection {
public:
void run() {
@@ -535,7 +489,7 @@ public:
AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);
- Collection* coll = CollectionCatalog::get(&opCtx).lookupCollectionByNamespace(nss);
+ Collection* coll = CollectionCatalog::get(&opCtx).lookupCollectionByNamespace(&opCtx, nss);
IndexCatalog* catalog = coll->getIndexCatalog();
string idxName = "a";
@@ -576,7 +530,7 @@ public:
AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);
- Collection* coll = CollectionCatalog::get(&opCtx).lookupCollectionByNamespace(nss);
+ Collection* coll = CollectionCatalog::get(&opCtx).lookupCollectionByNamespace(&opCtx, nss);
IndexCatalog* catalog = coll->getIndexCatalog();
string idxName = "a";
@@ -629,7 +583,7 @@ public:
AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);
- Collection* coll = CollectionCatalog::get(&opCtx).lookupCollectionByNamespace(nss);
+ Collection* coll = CollectionCatalog::get(&opCtx).lookupCollectionByNamespace(&opCtx, nss);
IndexCatalog* catalog = coll->getIndexCatalog();
string idxName = "a";
@@ -692,7 +646,8 @@ public:
assertGet(CollectionOptions::parse(BSONObj(), CollectionOptions::parseForCommand));
ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, false));
ASSERT(collectionExists(&opCtx, &ctx, nss.ns()));
- Collection* coll = CollectionCatalog::get(&opCtx).lookupCollectionByNamespace(nss);
+ Collection* coll =
+ CollectionCatalog::get(&opCtx).lookupCollectionByNamespace(&opCtx, nss);
IndexCatalog* catalog = coll->getIndexCatalog();
ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, specA));
@@ -750,7 +705,6 @@ public:
addAll<DropCollection>();
addAll<RenameDropTargetCollection>();
addAll<ReplaceCollection>();
- addAll<CreateDropCollection>();
addAll<TruncateCollection>();
addAll<CreateIndex>();
addAll<DropIndex>();
diff --git a/src/mongo/dbtests/storage_timestamp_tests.cpp b/src/mongo/dbtests/storage_timestamp_tests.cpp
index 8670b784130..b210aeb5fd6 100644
--- a/src/mongo/dbtests/storage_timestamp_tests.cpp
+++ b/src/mongo/dbtests/storage_timestamp_tests.cpp
@@ -2306,8 +2306,9 @@ public:
durableCatalog, origIdents, /*expectedNewIndexIdents*/ 3, indexCommitTs);
// Assert the 'a_1' and `b_1` indexes becomes ready at the last oplog entry time.
- RecordId renamedCatalogId =
- CollectionCatalog::get(_opCtx).lookupCollectionByNamespace(renamedNss)->getCatalogId();
+ RecordId renamedCatalogId = CollectionCatalog::get(_opCtx)
+ .lookupCollectionByNamespace(_opCtx, renamedNss)
+ ->getCatalogId();
ASSERT_TRUE(getIndexMetaData(
getMetaDataAtTime(durableCatalog, renamedCatalogId, indexCommitTs), "a_1")
.ready);