author    Henrik Edin <henrik.edin@mongodb.com>    2023-03-01 20:06:19 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>    2023-03-02 00:59:02 +0000
commit    7a80f01c3a56292904a87856ea60124beab5c44a (patch)
tree      d24fedcca6eeb0493b801047cdf41f1efa11ea00 /src
parent    1d664e7af7039a8c88802642e0fd8113a9d2a3c0 (diff)
download  mongo-7a80f01c3a56292904a87856ea60124beab5c44a.tar.gz
SERVER-55280 Remove the interface that returns Collection as shared_ptr from CollectionCatalog
Clean up and clarify the requirements for Collection instances to remain valid in the CollectionCatalog header
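The pattern the changed call sites converge on is to hold the catalog shared_ptr returned by CollectionCatalog::get() and use the raw const Collection* it hands out, instead of keeping a per-collection shared_ptr. A minimal, hypothetical call-site sketch (not part of this commit; the function name and the time-series check are illustrative, the catalog API is the one declared in collection_catalog.h below):

    #include "mongo/db/catalog/collection_catalog.h"

    bool isTimeseriesBucketsExample(OperationContext* opCtx, const NamespaceString& bucketNss) {
        // Hold reference to the catalog for collection lookup without locks to be safe.
        auto catalog = CollectionCatalog::get(opCtx);
        const Collection* coll = catalog->lookupCollectionByNamespace(opCtx, bucketNss);
        // 'coll' remains valid only while 'catalog' (or a collection lock of at least MODE_IS)
        // and the current storage snapshot are held; do not cache it across query yields.
        return coll && coll->getTimeseriesOptions().has_value();
    }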
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/catalog/collection_catalog.cpp  12
-rw-r--r--  src/mongo/db/catalog/collection_catalog.h  96
-rw-r--r--  src/mongo/db/catalog/collection_catalog_test.cpp  129
-rw-r--r--  src/mongo/db/catalog/collection_writer_test.cpp  8
-rw-r--r--  src/mongo/db/catalog/collection_yield_restore.cpp  9
-rw-r--r--  src/mongo/db/catalog_raii.cpp  2
-rw-r--r--  src/mongo/db/commands/list_collections.cpp  4
-rw-r--r--  src/mongo/db/commands/write_commands.cpp  4
-rw-r--r--  src/mongo/db/db_raii.cpp  2
-rw-r--r--  src/mongo/db/index_builds_coordinator.cpp  11
-rw-r--r--  src/mongo/db/op_observer/op_observer_impl.cpp  5
-rw-r--r--  src/mongo/db/ops/write_ops_exec.cpp  11
-rw-r--r--  src/mongo/db/pipeline/document_source_out.cpp  7
-rw-r--r--  src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp  5
-rw-r--r--  src/mongo/db/query/plan_insert_listener.cpp  5
-rw-r--r--  src/mongo/db/repl/replication_info.cpp  6
-rw-r--r--  src/mongo/db/s/create_collection_coordinator.cpp  5
-rw-r--r--  src/mongo/db/s/shardsvr_create_collection_command.cpp  5
-rw-r--r--  src/mongo/db/service_entry_point_common.cpp  5
-rw-r--r--  src/mongo/db/stats/storage_stats.cpp  4
-rw-r--r--  src/mongo/db/storage/wiredtiger/oplog_truncate_markers_server_status_section.cpp  6
-rw-r--r--  src/mongo/db/timeseries/catalog_helper.cpp  5
22 files changed, 179 insertions, 167 deletions
diff --git a/src/mongo/db/catalog/collection_catalog.cpp b/src/mongo/db/catalog/collection_catalog.cpp
index ca4ff5a2c0d..15effb9bdaf 100644
--- a/src/mongo/db/catalog/collection_catalog.cpp
+++ b/src/mongo/db/catalog/collection_catalog.cpp
@@ -883,9 +883,9 @@ const Collection* CollectionCatalog::_openCollectionAtLatestByNamespaceOrUUID(
auto latestCollection = [&]() -> std::shared_ptr<const Collection> {
if (const auto& nss = nssOrUUID.nss()) {
- return lookupCollectionByNamespaceForRead(opCtx, *nss);
+ return _getCollectionByNamespace(opCtx, *nss);
}
- return lookupCollectionByUUIDForRead(opCtx, *nssOrUUID.uuid());
+ return _getCollectionByUUID(opCtx, *nssOrUUID.uuid());
}();
// At least one of latest and pending should be a valid pointer.
@@ -944,7 +944,7 @@ const Collection* CollectionCatalog::_openCollectionAtLatestByNamespaceOrUUID(
} else {
// If pending by UUID does not contain the right namespace, a regular lookup in
// the catalog by UUID should have it.
- auto latestCollectionByUUID = lookupCollectionByUUIDForRead(opCtx, uuid);
+ auto latestCollectionByUUID = _getCollectionByUUID(opCtx, uuid);
invariant(latestCollectionByUUID && latestCollectionByUUID->ns() == nsInDurableCatalog);
openedCollections.store(latestCollectionByUUID, latestCollectionByUUID->ns(), uuid);
}
@@ -1336,8 +1336,8 @@ uint64_t CollectionCatalog::getEpoch() const {
return _epoch;
}
-std::shared_ptr<const Collection> CollectionCatalog::lookupCollectionByUUIDForRead(
- OperationContext* opCtx, const UUID& uuid) const {
+std::shared_ptr<const Collection> CollectionCatalog::_getCollectionByUUID(OperationContext* opCtx,
+ const UUID& uuid) const {
auto [found, uncommittedColl, newColl] =
UncommittedCatalogUpdates::lookupCollection(opCtx, uuid);
if (uncommittedColl) {
@@ -1445,7 +1445,7 @@ std::shared_ptr<Collection> CollectionCatalog::_lookupCollectionByUUID(UUID uuid
return foundIt == _catalog.end() ? nullptr : foundIt->second;
}
-std::shared_ptr<const Collection> CollectionCatalog::lookupCollectionByNamespaceForRead(
+std::shared_ptr<const Collection> CollectionCatalog::_getCollectionByNamespace(
OperationContext* opCtx, const NamespaceString& nss) const {
auto [found, uncommittedColl, newColl] =
diff --git a/src/mongo/db/catalog/collection_catalog.h b/src/mongo/db/catalog/collection_catalog.h
index 934fd6afe72..3b4541fdb12 100644
--- a/src/mongo/db/catalog/collection_catalog.h
+++ b/src/mongo/db/catalog/collection_catalog.h
@@ -367,66 +367,73 @@ public:
void notifyIdentDropped(const std::string& ident);
/**
- * This function gets the Collection pointer that corresponds to the UUID.
+ * Returns a Collection pointer that corresponds to the provided
+ * NamespaceString/UUID/NamespaceOrUUID.
*
- * The required locks must be obtained prior to calling this function, or else the found
- * Collection pointer might no longer be valid when the call returns.
* For the returned collection instance to remain valid, one of two preconditions
+ * needs to be met:
+ * 1. A collection lock of at least MODE_IS is being held.
+ * 2. A reference to this catalog instance is held or stashed AND the storage snapshot remains
+ * open.
*
- * 'lookupCollectionByUUIDForMetadataWrite' requires a MODE_X collection lock, returns a copy to
- * the caller because catalog updates are copy-on-write.
+ * Releasing the collection lock, catalog instance or storage snapshot will invalidate the
+ * returned collection instance.
*
- * 'lookupCollectionByUUID' requires a MODE_IS collection lock.
+ * A read or write AutoGetCollection style RAII object meets the requirements and ensures
+ * validity for collection instances during its lifetime.
*
- * 'lookupCollectionByUUIDForRead' does not require locks and should only be used in the context
- * of a lock-free read wherein we also have a consistent storage snapshot.
+ * It is NOT safe to cache this pointer or any pointer obtained from this instance across
+ * storage snapshots such as query yield.
*
- * Returns nullptr if the 'uuid' is not known.
+ * Returns nullptr if no collection is known.
*/
- Collection* lookupCollectionByUUIDForMetadataWrite(OperationContext* opCtx,
- const UUID& uuid) const;
const Collection* lookupCollectionByUUID(OperationContext* opCtx, UUID uuid) const;
- std::shared_ptr<const Collection> lookupCollectionByUUIDForRead(OperationContext* opCtx,
- const UUID& uuid) const;
-
- /**
- * Returns true if the collection has been registered in the CollectionCatalog but not yet made
- * visible.
- */
- bool isCollectionAwaitingVisibility(UUID uuid) const;
+ const Collection* lookupCollectionByNamespace(OperationContext* opCtx,
+ const NamespaceString& nss) const;
+ const Collection* lookupCollectionByNamespaceOrUUID(
+ OperationContext* opCtx, const NamespaceStringOrUUID& nssOrUUID) const;
/**
- * These functions fetch a Collection pointer that corresponds to the NamespaceString.
+ * Returns a non-const Collection pointer that corresponds to the provided NamespaceString/UUID
+ * for a DDL operation.
*
- * The required locks must be obtained prior to calling this function, or else the found
- * Collection pointer may no longer be valid when the call returns.
+ * A MODE_X collection lock is required to call this function, unless the namespace/UUID
+ * corresponds to an uncommitted collection creation, in which case a MODE_IX lock is sufficient.
*
- * 'lookupCollectionByNamespaceForMetadataWrite' requires a MODE_X collection lock, returns a
- * copy to the caller because catalog updates are copy-on-write.
+ * A WriteUnitOfWork must be active, and the instance returned will be created using
+ * copy-on-write, so it will differ from instances returned by prior calls to lookupCollection.
+ * However, subsequent calls to lookupCollection will return this same instance for as long as
+ * the WriteUnitOfWork remains active.
*
- * 'lookupCollectionByNamespace' requires a MODE_IS collection lock.
+ * When the WriteUnitOfWork commits, future versions of the CollectionCatalog will return this
+ * instance. If the WriteUnitOfWork rolls back, the instance will be discarded.
*
- * 'lookupCollectionByNamespaceForRead' does not require locks and should only be used in the
- * context of a lock-free read wherein we also have a consistent storage snapshot.
+ * It is safe to write to the returned instance in onCommit handlers but not in onRollback
+ * handlers.
*
- * Returns nullptr if the namespace is unknown.
+ * Returns nullptr if the 'uuid' is not known.
*/
+ Collection* lookupCollectionByUUIDForMetadataWrite(OperationContext* opCtx,
+ const UUID& uuid) const;
Collection* lookupCollectionByNamespaceForMetadataWrite(OperationContext* opCtx,
const NamespaceString& nss) const;
- const Collection* lookupCollectionByNamespace(OperationContext* opCtx,
- const NamespaceString& nss) const;
- std::shared_ptr<const Collection> lookupCollectionByNamespaceForRead(
- OperationContext* opCtx, const NamespaceString& nss) const;
/**
- * Fetches a collection pointer that corresponds to the NamespaceString or UUID.
- *
- * Requires a MODE_IS collection lock to be obtained prior to calling this function, or else the
- * found Collection pointer might no longer be valid when the call returns.
- *
- * Returns nullptr is the namespace or uuid is unknown.
+ * Returns true if the collection has been registered in the CollectionCatalog but not yet made
+ * visible.
*/
- const Collection* lookupCollectionByNamespaceOrUUID(
- OperationContext* opCtx, const NamespaceStringOrUUID& nssOrUUID) const;
+ bool isCollectionAwaitingVisibility(UUID uuid) const;
+
+ // TODO SERVER-74468: Remove this function
+ std::shared_ptr<const Collection> lookupCollectionByNamespaceForRead_DONT_USE(
+ OperationContext* opCtx, const NamespaceString& nss) const {
+ return _getCollectionByNamespace(opCtx, nss);
+ }
+ // TODO SERVER-74468: Remove this function
+ std::shared_ptr<const Collection> lookupCollectionByUUIDForRead_DONT_USE(
+ OperationContext* opCtx, const UUID& uuid) const {
+ return _getCollectionByUUID(opCtx, uuid);
+ }
/**
* This function gets the NamespaceString from the collection catalog entry that
@@ -686,6 +693,15 @@ private:
class PublishCatalogUpdates;
/**
+ * Gets a shared_ptr to a Collection by UUID/Namespace.
+ */
+ std::shared_ptr<const Collection> _getCollectionByNamespace(OperationContext* opCtx,
+ const NamespaceString& nss) const;
+
+ std::shared_ptr<const Collection> _getCollectionByUUID(OperationContext* opCtx,
+ const UUID& uuid) const;
+
+ /**
* Register the collection with `uuid`.
*
* If 'twoPhase' is true, this call must be followed by 'onCreateCollection' which continues the
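For DDL code paths, the remaining ForMetadataWrite lookups hand out a writable clone under copy-on-write, as the new header comment above spells out. A hypothetical usage sketch (not part of this commit; the function body is illustrative, while AutoGetCollection, WriteUnitOfWork and the lookup declaration come from the tree, and the include paths are approximate for this point in time):

    #include "mongo/db/catalog/collection_catalog.h"
    #include "mongo/db/catalog_raii.h"
    #include "mongo/db/storage/write_unit_of_work.h"

    void touchCollectionMetadataExample(OperationContext* opCtx, const NamespaceString& nss) {
        // MODE_X is required unless the collection creation is still uncommitted.
        AutoGetCollection autoColl(opCtx, nss, MODE_X);

        // A WriteUnitOfWork must be active; the lookup clones the Collection copy-on-write.
        WriteUnitOfWork wuow(opCtx);
        Collection* writable =
            CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForMetadataWrite(opCtx, nss);
        if (!writable) {
            return;  // namespace not known to the catalog
        }

        // ... mutate metadata on 'writable'; subsequent lookups in this WUOW return the
        // same instance ...

        wuow.commit();  // on commit, future catalog versions publish this instance
    }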
diff --git a/src/mongo/db/catalog/collection_catalog_test.cpp b/src/mongo/db/catalog/collection_catalog_test.cpp
index 11ba4ae194b..343a10ae9f3 100644
--- a/src/mongo/db/catalog/collection_catalog_test.cpp
+++ b/src/mongo/db/catalog/collection_catalog_test.cpp
@@ -1275,19 +1275,17 @@ private:
t.join();
+ auto catalog = CollectionCatalog::get(opCtx);
if (expectedExistence) {
ASSERT(coll);
- NamespaceString nss =
- CollectionCatalog::get(opCtx)->resolveNamespaceStringOrUUID(opCtx, nssOrUUID);
+ NamespaceString nss = catalog->resolveNamespaceStringOrUUID(opCtx, nssOrUUID);
ASSERT_EQ(coll->ns(), nss);
// Check that lookup returns the same instance as openCollection above
- ASSERT_EQ(CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, coll->ns()),
- coll);
- ASSERT_EQ(CollectionCatalog::get(opCtx)->lookupCollectionByUUID(opCtx, coll->uuid()),
- coll);
- ASSERT_EQ(CollectionCatalog::get(opCtx)->lookupNSSByUUID(opCtx, coll->uuid()), nss);
+ ASSERT_EQ(catalog->lookupCollectionByNamespace(opCtx, coll->ns()), coll);
+ ASSERT_EQ(catalog->lookupCollectionByUUID(opCtx, coll->uuid()), coll);
+ ASSERT_EQ(catalog->lookupNSSByUUID(opCtx, coll->uuid()), nss);
ASSERT_EQ(coll->getIndexCatalog()->numIndexesTotal(), expectedNumIndexes);
auto catalogEntry =
@@ -1297,14 +1295,8 @@ private:
coll->isMetadataEqual(DurableCatalog::getMetadataFromCatalogEntry(catalogEntry)));
// Lookups from the catalog should return the newly opened collection.
- ASSERT_EQ(CollectionCatalog::get(opCtx)
- ->lookupCollectionByNamespaceForRead(opCtx, coll->ns())
- .get(),
- coll);
- ASSERT_EQ(CollectionCatalog::get(opCtx)
- ->lookupCollectionByUUIDForRead(opCtx, coll->uuid())
- .get(),
- coll);
+ ASSERT_EQ(catalog->lookupCollectionByNamespace(opCtx, coll->ns()), coll);
+ ASSERT_EQ(catalog->lookupCollectionByUUID(opCtx, coll->uuid()), coll);
} else {
ASSERT(!coll);
if (auto nss = nssOrUUID.nss()) {
@@ -1314,10 +1306,7 @@ private:
// Lookups from the catalog should return the newly opened collection (in this case
// nullptr).
- ASSERT_EQ(CollectionCatalog::get(opCtx)
- ->lookupCollectionByNamespaceForRead(opCtx, *nss)
- .get(),
- coll);
+ ASSERT_EQ(catalog->lookupCollectionByNamespace(opCtx, *nss), coll);
} else if (auto uuid = nssOrUUID.uuid()) {
auto catalogEntry =
DurableCatalog::get(opCtx)->scanForCatalogEntryByUUID(opCtx, *uuid);
@@ -1325,10 +1314,7 @@ private:
// Lookups from the catalog should return the newly opened collection (in this case
// nullptr).
- ASSERT_EQ(CollectionCatalog::get(opCtx)
- ->lookupCollectionByUUIDForRead(opCtx, *uuid)
- .get(),
- coll);
+ ASSERT_EQ(catalog->lookupCollectionByUUID(opCtx, *uuid), coll);
}
}
@@ -1407,9 +1393,7 @@ TEST_F(CollectionCatalogTimestampTest, OpenCollectionBeforeCreateTimestamp) {
ASSERT(!coll);
// Lookups from the catalog should return the newly opened collection (in this case nullptr).
- ASSERT_EQ(CollectionCatalog::get(opCtx.get())
- ->lookupCollectionByNamespaceForRead(opCtx.get(), nss)
- .get(),
+ ASSERT_EQ(CollectionCatalog::get(opCtx.get())->lookupCollectionByNamespace(opCtx.get(), nss),
coll);
}
@@ -1444,13 +1428,13 @@ TEST_F(CollectionCatalogTimestampTest, OpenEarlierCollection) {
auto newClient = opCtx->getServiceContext()->makeClient("AlternativeClient");
AlternativeClientRegion acr(newClient);
auto newOpCtx = cc().makeOperationContext();
- auto latestColl = CollectionCatalog::get(newOpCtx.get())
- ->lookupCollectionByNamespaceForRead(newOpCtx.get(), nss);
+ auto latestColl =
+ CollectionCatalog::get(newOpCtx.get())->lookupCollectionByNamespace(newOpCtx.get(), nss);
ASSERT(latestColl);
ASSERT_EQ(1, latestColl->getIndexCatalog()->numIndexesTotal());
// Ensure the idents are shared between the collection instances.
- ASSERT_NE(coll, latestColl.get());
+ ASSERT_NE(coll, latestColl);
ASSERT_EQ(coll->getSharedIdent(), latestColl->getSharedIdent());
}
@@ -1491,13 +1475,13 @@ TEST_F(CollectionCatalogTimestampTest, OpenEarlierCollectionWithIndex) {
auto newClient = opCtx->getServiceContext()->makeClient("AlternativeClient");
AlternativeClientRegion acr(newClient);
auto newOpCtx = cc().makeOperationContext();
- auto latestColl = CollectionCatalog::get(newOpCtx.get())
- ->lookupCollectionByNamespaceForRead(newOpCtx.get(), nss);
+ auto latestColl =
+ CollectionCatalog::get(newOpCtx.get())->lookupCollectionByNamespace(newOpCtx.get(), nss);
ASSERT(latestColl);
ASSERT_EQ(2, latestColl->getIndexCatalog()->numIndexesTotal());
// Ensure the idents are shared between the collection and index instances.
- ASSERT_NE(coll, latestColl.get());
+ ASSERT_NE(coll, latestColl);
ASSERT_EQ(coll->getSharedIdent(), latestColl->getSharedIdent());
auto indexDescPast = coll->getIndexCatalog()->findIndexByName(opCtx.get(), "x_1");
@@ -1534,8 +1518,8 @@ TEST_F(CollectionCatalogTimestampTest, OpenLatestCollectionWithIndex) {
// Verify that the CollectionCatalog returns the latest collection.
auto currentColl =
- CollectionCatalog::get(opCtx.get())->lookupCollectionByNamespaceForRead(opCtx.get(), nss);
- ASSERT_EQ(coll, currentColl.get());
+ CollectionCatalog::get(opCtx.get())->lookupCollectionByNamespace(opCtx.get(), nss);
+ ASSERT_EQ(coll, currentColl);
// Ensure the idents are shared between the collection and index instances.
ASSERT_EQ(coll->getSharedIdent(), currentColl->getSharedIdent());
@@ -1573,8 +1557,8 @@ TEST_F(CollectionCatalogTimestampTest, OpenEarlierCollectionWithDropPendingIndex
// Maintain a shared_ptr to "x_1", so it's not expired in drop pending map, but not for "y_1".
std::shared_ptr<const IndexCatalogEntry> index;
{
- auto latestColl = CollectionCatalog::get(opCtx.get())
- ->lookupCollectionByNamespaceForRead(opCtx.get(), nss);
+ auto latestColl =
+ CollectionCatalog::get(opCtx.get())->lookupCollectionByNamespace(opCtx.get(), nss);
auto desc = latestColl->getIndexCatalog()->findIndexByName(opCtx.get(), "x_1");
index = latestColl->getIndexCatalog()->getEntryShared(desc);
}
@@ -1596,9 +1580,9 @@ TEST_F(CollectionCatalogTimestampTest, OpenEarlierCollectionWithDropPendingIndex
auto newClient = opCtx->getServiceContext()->makeClient("AlternativeClient");
AlternativeClientRegion acr(newClient);
auto newOpCtx = cc().makeOperationContext();
- auto latestColl = CollectionCatalog::get(newOpCtx.get())
- ->lookupCollectionByNamespaceForRead(newOpCtx.get(), nss);
- ASSERT_NE(coll, latestColl.get());
+ auto latestColl =
+ CollectionCatalog::get(newOpCtx.get())->lookupCollectionByNamespace(newOpCtx.get(), nss);
+ ASSERT_NE(coll, latestColl);
auto indexDescX = coll->getIndexCatalog()->findIndexByName(opCtx.get(), "x_1");
auto indexDescY = coll->getIndexCatalog()->findIndexByName(opCtx.get(), "y_1");
@@ -1655,8 +1639,8 @@ TEST_F(CollectionCatalogTimestampTest,
// Maintain a shared_ptr to "z_1", so it's not expired in drop pending map. This is required so
// that this index entry's ident will be re-used when openCollection is called.
std::shared_ptr<const IndexCatalogEntry> index = [&] {
- auto latestColl = CollectionCatalog::get(opCtx.get())
- ->lookupCollectionByNamespaceForRead(opCtx.get(), nss);
+ auto latestColl =
+ CollectionCatalog::get(opCtx.get())->lookupCollectionByNamespace(opCtx.get(), nss);
auto desc = latestColl->getIndexCatalog()->findIndexByName(opCtx.get(), zIndexName);
return latestColl->getIndexCatalog()->getEntryShared(desc);
}();
@@ -1692,10 +1676,10 @@ TEST_F(CollectionCatalogTimestampTest,
auto newClient = opCtx->getServiceContext()->makeClient("AlternativeClient");
AlternativeClientRegion acr(newClient);
auto newOpCtx = cc().makeOperationContext();
- auto latestColl = CollectionCatalog::get(newOpCtx.get())
- ->lookupCollectionByNamespaceForRead(newOpCtx.get(), nss);
+ auto latestColl =
+ CollectionCatalog::get(newOpCtx.get())->lookupCollectionByNamespace(newOpCtx.get(), nss);
- ASSERT_NE(coll, latestColl.get());
+ ASSERT_NE(coll, latestColl);
auto indexDescZ = coll->getIndexCatalog()->findIndexByName(opCtx.get(), zIndexName);
auto indexEntryZ = coll->getIndexCatalog()->getEntryShared(indexDescZ);
@@ -1716,11 +1700,12 @@ TEST_F(CollectionCatalogTimestampTest, OpenEarlierAlreadyDropPendingCollection)
createCollection(opCtx.get(), firstNss, createCollectionTs);
createCollection(opCtx.get(), secondNss, createCollectionTs);
- // Maintain a shared_ptr to "a.b" so that it isn't expired in the drop pending map after we drop
- // the collections.
- std::shared_ptr<const Collection> coll =
- CollectionCatalog::get(opCtx.get())
- ->lookupCollectionByNamespaceForRead(opCtx.get(), firstNss);
+ // Maintain a shared_ptr to the catalog so that collection "a.b" isn't expired in the drop
+ // pending map after we drop the collections.
+ auto catalog = CollectionCatalog::get(opCtx.get());
+ auto coll = catalog->lookupCollectionByNamespace(opCtx.get(), firstNss);
ASSERT(coll);
// Make the collections drop pending.
@@ -1787,10 +1772,10 @@ TEST_F(CollectionCatalogTimestampTest, OpenNewCollectionUsingDropPendingCollecti
<< "key" << BSON("x" << 1)),
createIndexTs);
- // Maintain a shared_ptr to "a.b" so that it isn't expired in the drop pending map after we drop
- // it.
- auto coll =
- CollectionCatalog::get(opCtx.get())->lookupCollectionByNamespaceForRead(opCtx.get(), nss);
+ // Maintain a shared_ptr to the catalog so that the collection "a.b" isn't expired in the drop
+ // pending map after we drop it.
+ auto catalog = CollectionCatalog::get(opCtx.get());
+ auto coll = catalog->lookupCollectionByNamespace(opCtx.get(), nss);
ASSERT(coll);
ASSERT_EQ(coll->getMinimumValidSnapshot(), createIndexTs);
@@ -1808,7 +1793,7 @@ TEST_F(CollectionCatalogTimestampTest, OpenNewCollectionUsingDropPendingCollecti
auto openedColl = CollectionCatalog::get(opCtx.get())
->establishConsistentCollection(opCtx.get(), nss, readTimestamp);
ASSERT(openedColl);
- ASSERT_NE(coll.get(), openedColl);
+ ASSERT_NE(coll, openedColl);
// Ensure the idents are shared between the opened collection and the drop pending collection.
ASSERT_EQ(coll->getSharedIdent(), openedColl->getSharedIdent());
opCtx->recoveryUnit()->abandonSnapshot();
@@ -1826,9 +1811,9 @@ TEST_F(CollectionCatalogTimestampTest, OpenExistingCollectionWithReaper) {
auto storageEngine = opCtx->getServiceContext()->getStorageEngine();
- // Maintain a shared_ptr so that the reaper cannot drop the collection ident.
- std::shared_ptr<const Collection> coll =
- CollectionCatalog::get(opCtx.get())->lookupCollectionByNamespaceForRead(opCtx.get(), nss);
+ // Maintain a shared_ptr to the catalog so that the reaper cannot drop the collection ident.
+ auto catalog = CollectionCatalog::get(opCtx.get());
+ auto coll = catalog->lookupCollectionByNamespace(opCtx.get(), nss);
ASSERT(coll);
// Mark the collection as drop pending. The dropToken in the ident reaper is not expired as we
@@ -1857,7 +1842,7 @@ TEST_F(CollectionCatalogTimestampTest, OpenExistingCollectionWithReaper) {
ASSERT_EQ(coll->getSharedIdent(), openedColl->getSharedIdent());
// The ident is now expired and should be removed the next time the ident reaper runs.
- coll.reset();
+ catalog.reset();
}
{
@@ -1959,9 +1944,9 @@ TEST_F(CollectionCatalogTimestampTest, OpenExistingCollectionAndIndexesWithReape
dropIndex(opCtx.get(), nss, "x_1", dropXIndexTs);
dropIndex(opCtx.get(), nss, "y_1", dropYIndexTs);
- // Maintain a shared_ptr to the collection so that the reaper cannot drop the collection ident.
- std::shared_ptr<const Collection> coll =
- CollectionCatalog::get(opCtx.get())->lookupCollectionByNamespaceForRead(opCtx.get(), nss);
+ // Maintain a shared_ptr to the catalog so that the reaper cannot drop the collection ident.
+ auto catalog = CollectionCatalog::get(opCtx.get());
+ auto coll = catalog->lookupCollectionByNamespace(opCtx.get(), nss);
ASSERT(coll);
dropCollection(opCtx.get(), nss, dropCollectionTs);
@@ -2033,7 +2018,7 @@ TEST_F(CollectionCatalogTimestampTest, OpenExistingCollectionAndIndexesWithReape
{
// Drop all remaining idents.
- coll.reset();
+ catalog.reset();
storageEngine->dropIdentsOlderThan(opCtx.get(), Timestamp::max());
ASSERT_EQ(0, storageEngine->getDropPendingIdents().size());
@@ -3655,13 +3640,11 @@ TEST_F(CollectionCatalogTimestampTest, OpenCollectionBetweenIndexBuildInProgress
// Lookups from the catalog should return the newly opened collection.
ASSERT_EQ(CollectionCatalog::get(opCtx.get())
- ->lookupCollectionByNamespaceForRead(opCtx.get(), coll->ns())
- .get(),
- coll);
- ASSERT_EQ(CollectionCatalog::get(opCtx.get())
- ->lookupCollectionByUUIDForRead(opCtx.get(), coll->uuid())
- .get(),
+ ->lookupCollectionByNamespace(opCtx.get(), coll->ns()),
coll);
+ ASSERT_EQ(
+ CollectionCatalog::get(opCtx.get())->lookupCollectionByUUID(opCtx.get(), coll->uuid()),
+ coll);
}
finishIndexBuild(opCtx.get(), nss, std::move(indexBuildBlock), indexReadyTs);
@@ -3679,13 +3662,11 @@ TEST_F(CollectionCatalogTimestampTest, OpenCollectionBetweenIndexBuildInProgress
// Lookups from the catalog should return the newly opened collection.
ASSERT_EQ(CollectionCatalog::get(opCtx.get())
- ->lookupCollectionByNamespaceForRead(opCtx.get(), coll->ns())
- .get(),
- coll);
- ASSERT_EQ(CollectionCatalog::get(opCtx.get())
- ->lookupCollectionByUUIDForRead(opCtx.get(), coll->uuid())
- .get(),
+ ->lookupCollectionByNamespace(opCtx.get(), coll->ns()),
coll);
+ ASSERT_EQ(
+ CollectionCatalog::get(opCtx.get())->lookupCollectionByUUID(opCtx.get(), coll->uuid()),
+ coll);
}
}
} // namespace
diff --git a/src/mongo/db/catalog/collection_writer_test.cpp b/src/mongo/db/catalog/collection_writer_test.cpp
index 979801f62fc..15368437ff4 100644
--- a/src/mongo/db/catalog/collection_writer_test.cpp
+++ b/src/mongo/db/catalog/collection_writer_test.cpp
@@ -72,8 +72,7 @@ protected:
const Collection* lookupCollectionFromCatalogForRead() {
return CollectionCatalog::get(operationContext())
- ->lookupCollectionByNamespaceForRead(operationContext(), kNss)
- .get();
+ ->lookupCollectionByNamespace(operationContext(), kNss);
}
void verifyCollectionInCatalogUsingDifferentClient(const Collection* expected) {
@@ -198,9 +197,8 @@ TEST_F(CollectionWriterTest, CatalogWrite) {
// We should see a different catalog instance than a reader would
ASSERT_NE(&writableCatalog, catalog.get());
// However, it should be a shallow copy. The collection instance should be the same
- ASSERT_EQ(
- writableCatalog.lookupCollectionByNamespaceForRead(operationContext(), kNss).get(),
- catalog->lookupCollectionByNamespaceForRead(operationContext(), kNss).get());
+ ASSERT_EQ(writableCatalog.lookupCollectionByNamespace(operationContext(), kNss),
+ catalog->lookupCollectionByNamespace(operationContext(), kNss));
});
auto after = CollectionCatalog::latest(getServiceContext());
ASSERT_NE(&catalog, &after);
diff --git a/src/mongo/db/catalog/collection_yield_restore.cpp b/src/mongo/db/catalog/collection_yield_restore.cpp
index 1c5c2f39536..0e6d5cad146 100644
--- a/src/mongo/db/catalog/collection_yield_restore.cpp
+++ b/src/mongo/db/catalog/collection_yield_restore.cpp
@@ -50,9 +50,12 @@ const Collection* LockedCollectionYieldRestore::operator()(OperationContext* opC
// Confirm that we are holding the necessary collection level lock.
invariant(opCtx->lockState()->isCollectionLockedForMode(_nss, MODE_IS));
+ // Hold reference to the catalog for collection lookup without locks to be safe.
+ auto catalog = CollectionCatalog::get(opCtx);
+
// Fetch the Collection by UUID. A rename could have occurred which means we might not be
// holding the collection-level lock on the right namespace.
- auto collection = CollectionCatalog::get(opCtx)->lookupCollectionByUUIDForRead(opCtx, uuid);
+ auto collection = catalog->lookupCollectionByUUID(opCtx, uuid);
// Collection dropped during yielding.
if (!collection) {
@@ -68,7 +71,7 @@ const Collection* LockedCollectionYieldRestore::operator()(OperationContext* opC
// Non-lock-free readers use this path and need to re-establish their capped snapshot.
if (collection->usesCappedSnapshots()) {
- CappedSnapshots::get(opCtx).establish(opCtx, collection.get());
+ CappedSnapshots::get(opCtx).establish(opCtx, collection);
}
// After yielding and reacquiring locks, the preconditions that were used to select our
@@ -78,7 +81,7 @@ const Collection* LockedCollectionYieldRestore::operator()(OperationContext* opC
// necessary.
SnapshotHelper::changeReadSourceIfNeeded(opCtx, collection->ns());
- return collection.get();
+ return collection;
}
} // namespace mongo
diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index 4533ec2a7d7..20f5bb2f0ce 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -452,7 +452,7 @@ AutoGetCollectionLockFree::AutoGetCollectionLockFree(OperationContext* opCtx,
auto catalog = CollectionCatalog::get(opCtx);
_resolvedNss = catalog->resolveNamespaceStringOrUUID(opCtx, nsOrUUID);
- _collection = catalog->lookupCollectionByNamespaceForRead(opCtx, _resolvedNss);
+ _collection = catalog->lookupCollectionByNamespaceForRead_DONT_USE(opCtx, _resolvedNss);
// When we restore from yield on this CollectionPtr we will update _collection above and use its
// new pointer in the CollectionPtr
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index 5cc543c334f..b0ecdfcbcab 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -465,9 +465,7 @@ public:
}
if (view.timeseries()) {
- if (!CollectionCatalog::get(opCtx)
- ->lookupCollectionByNamespaceForRead(opCtx,
- view.viewOn())) {
+ if (!catalog->lookupCollectionByNamespace(opCtx, view.viewOn())) {
// There is no buckets collection backing this time-series view,
// which means that it was not already added along with the
// buckets collection above.
diff --git a/src/mongo/db/commands/write_commands.cpp b/src/mongo/db/commands/write_commands.cpp
index 02daf23fd07..ee4deb5dd7c 100644
--- a/src/mongo/db/commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands.cpp
@@ -131,7 +131,9 @@ bool isTimeseries(OperationContext* opCtx, const Request& request) {
// collection does not yet exist, this check may return false unnecessarily. As a result, an
// insert attempt into the time-series namespace will either succeed or fail, depending on who
// wins the race.
- auto coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForRead(opCtx, bucketNss);
+ // Hold reference to the catalog for collection lookup without locks to be safe.
+ auto catalog = CollectionCatalog::get(opCtx);
+ auto coll = catalog->lookupCollectionByNamespace(opCtx, bucketNss);
return (coll && coll->getTimeseriesOptions());
}
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index ecb21f3bb7d..d1c6b552edf 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -935,7 +935,7 @@ void AutoGetCollectionForReadLockFreeLegacy::EmplaceHelper::emplace(
// should never be nested.
invariant(!isLockFreeReadSubOperation);
- auto coll = catalog.lookupCollectionByUUIDForRead(opCtx, uuid);
+ auto coll = catalog.lookupCollectionByUUIDForRead_DONT_USE(opCtx, uuid);
if (coll) {
if (coll->usesCappedSnapshots()) {
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index 7c1803ea794..c4532d35d17 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -2322,9 +2322,9 @@ Status IndexBuildsCoordinator::_setUpIndexBuild(OperationContext* opCtx,
_setUpIndexBuildInner(opCtx, replState, startTimestamp, indexBuildOptions);
} catch (const DBException& ex) {
auto status = ex.toStatus();
- auto collectionSharedPtr = CollectionCatalog::get(opCtx)->lookupCollectionByUUIDForRead(
- opCtx, replState->collectionUUID);
- CollectionPtr collection(collectionSharedPtr.get());
+ // Hold reference to the catalog for collection lookup without locks to be safe.
+ auto catalog = CollectionCatalog::get(opCtx);
+ CollectionPtr collection(catalog->lookupCollectionByUUID(opCtx, replState->collectionUUID));
invariant(collection,
str::stream() << "Collection with UUID " << replState->collectionUUID
<< " should exist because an index build is in progress: "
@@ -2635,9 +2635,8 @@ void IndexBuildsCoordinator::_runIndexBuildInner(
// dropped while the index build is still registered for the collection -- until abortIndexBuild
// is called. The collection can be renamed, but it is OK for the name to be stale just for
// logging purposes.
- auto collectionSharedPtr = CollectionCatalog::get(opCtx)->lookupCollectionByUUIDForRead(
- opCtx, replState->collectionUUID);
- CollectionPtr collection(collectionSharedPtr.get());
+ auto catalog = CollectionCatalog::get(opCtx);
+ CollectionPtr collection(catalog->lookupCollectionByUUID(opCtx, replState->collectionUUID));
invariant(collection,
str::stream() << "Collection with UUID " << replState->collectionUUID
<< " should exist because an index build is in progress: "
diff --git a/src/mongo/db/op_observer/op_observer_impl.cpp b/src/mongo/db/op_observer/op_observer_impl.cpp
index f02f57c5b92..f29fe5bd56d 100644
--- a/src/mongo/db/op_observer/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer/op_observer_impl.cpp
@@ -725,8 +725,9 @@ void OpObserverImpl::onInserts(OperationContext* opCtx,
// to insert measurements with dates outside the standard range, chances are they will do so
// again, and we will have only set the flag a little early.
invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX));
- auto bucketsColl =
- CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForRead(opCtx, nss);
+ // Hold reference to the catalog for collection lookup without locks to be safe.
+ auto catalog = CollectionCatalog::get(opCtx);
+ auto bucketsColl = catalog->lookupCollectionByNamespace(opCtx, nss);
tassert(6905201, "Could not find collection for write", bucketsColl);
auto timeSeriesOptions = bucketsColl->getTimeseriesOptions();
if (timeSeriesOptions.has_value()) {
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index c1df76b8525..f40a849d842 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -2070,10 +2070,11 @@ TimeseriesSingleWriteResult performTimeseriesBucketCompression(
compressionStats.result = result.result.getStatus();
// Report stats for the bucket collection
- auto coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForRead(
- opCtx, compressionOp.getNamespace());
+ // Hold reference to the catalog for collection lookup without locks to be safe.
+ auto catalog = CollectionCatalog::get(opCtx);
+ auto coll = catalog->lookupCollectionByNamespace(opCtx, compressionOp.getNamespace());
if (coll) {
- const auto& stats = TimeseriesStats::get(coll.get());
+ const auto& stats = TimeseriesStats::get(coll);
stats.onBucketClosed(*beforeSize, compressionStats);
}
}
@@ -2379,8 +2380,8 @@ insertIntoBucketCatalog(OperationContext* opCtx,
auto bucketsNs = makeTimeseriesBucketsNamespace(ns(request));
// Holding this shared pointer to the collection guarantees that the collator is not
// invalidated.
- auto bucketsColl =
- CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForRead(opCtx, bucketsNs);
+ auto catalog = CollectionCatalog::get(opCtx);
+ auto bucketsColl = catalog->lookupCollectionByNamespace(opCtx, bucketsNs);
uassert(ErrorCodes::NamespaceNotFound,
"Could not find time-series buckets collection for write",
bucketsColl);
diff --git a/src/mongo/db/pipeline/document_source_out.cpp b/src/mongo/db/pipeline/document_source_out.cpp
index 76ace15b0e5..fe441f9e2c2 100644
--- a/src/mongo/db/pipeline/document_source_out.cpp
+++ b/src/mongo/db/pipeline/document_source_out.cpp
@@ -138,12 +138,13 @@ void DocumentSourceOut::validateTimeseries() {
} else {
// if a time-series collection doesn't exist, the namespace should not have a
// collection nor a conflicting view.
- auto collection = CollectionCatalog::get(pExpCtx->opCtx)
- ->lookupCollectionByNamespaceForRead(pExpCtx->opCtx, outNs);
+ // Hold reference to the catalog for collection lookup without locks to be safe.
+ auto catalog = CollectionCatalog::get(pExpCtx->opCtx);
+ auto collection = catalog->lookupCollectionByNamespace(pExpCtx->opCtx, outNs);
uassert(7268700,
"Cannot create a time-series collection from a non time-series collection.",
!collection);
- auto view = CollectionCatalog::get(pExpCtx->opCtx)->lookupView(pExpCtx->opCtx, outNs);
+ auto view = catalog->lookupView(pExpCtx->opCtx, outNs);
uassert(
7268703, "Cannot create a time-series collection from a non time-series view.", !view);
}
diff --git a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
index 143043d3b1f..5988ee9d979 100644
--- a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
@@ -282,8 +282,9 @@ std::deque<BSONObj> CommonMongodProcessInterface::listCatalog(OperationContext*
}
for (const auto& svns : systemViewsNamespaces) {
- auto collection = CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForRead(
- opCtx, *svns.nss());
+ // Hold reference to the catalog for collection lookup without locks to be safe.
+ auto catalog = CollectionCatalog::get(opCtx);
+ auto collection = catalog->lookupCollectionByNamespace(opCtx, *svns.nss());
if (!collection) {
continue;
}
diff --git a/src/mongo/db/query/plan_insert_listener.cpp b/src/mongo/db/query/plan_insert_listener.cpp
index 6e31be68b0b..c9458234402 100644
--- a/src/mongo/db/query/plan_insert_listener.cpp
+++ b/src/mongo/db/query/plan_insert_listener.cpp
@@ -92,8 +92,9 @@ std::unique_ptr<Notifier> getCappedInsertNotifier(OperationContext* opCtx,
if (opCtx->recoveryUnit()->getTimestampReadSource() == RecoveryUnit::kMajorityCommitted) {
return std::make_unique<MajorityCommittedPointNotifier>();
} else {
- auto collection =
- CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForRead(opCtx, nss);
+ // Hold reference to the catalog for collection lookup without locks to be safe.
+ auto catalog = CollectionCatalog::get(opCtx);
+ auto collection = catalog->lookupCollectionByNamespace(opCtx, nss);
invariant(collection);
return std::make_unique<LocalCappedInsertNotifier>(
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index 58c92ee9a00..8debebb6ece 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -253,8 +253,10 @@ public:
result.append("latestOptime", replCoord->getMyLastAppliedOpTime().getTimestamp());
auto earliestOplogTimestampFetch = [&]() -> Timestamp {
- auto oplog = CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForRead(
- opCtx, NamespaceString::kRsOplogNamespace);
+ // Hold reference to the catalog for collection lookup without locks to be safe.
+ auto catalog = CollectionCatalog::get(opCtx);
+ auto oplog =
+ catalog->lookupCollectionByNamespace(opCtx, NamespaceString::kRsOplogNamespace);
if (!oplog) {
return Timestamp();
}
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index 592117f7fa9..afb0c40c024 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -887,8 +887,9 @@ TranslatedRequestParams CreateCollectionCoordinator::_translateRequestParameters
};
auto bucketsNs = originalNss().makeTimeseriesBucketsNamespace();
- auto existingBucketsColl =
- CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForRead(opCtx, bucketsNs);
+ // Hold reference to the catalog for collection lookup without locks to be safe.
+ auto catalog = CollectionCatalog::get(opCtx);
+ auto existingBucketsColl = catalog->lookupCollectionByNamespace(opCtx, bucketsNs);
auto targetingStandardCollection = !_request.getTimeseries() && !existingBucketsColl;
diff --git a/src/mongo/db/s/shardsvr_create_collection_command.cpp b/src/mongo/db/s/shardsvr_create_collection_command.cpp
index 29453c185c8..34091418cea 100644
--- a/src/mongo/db/s/shardsvr_create_collection_command.cpp
+++ b/src/mongo/db/s/shardsvr_create_collection_command.cpp
@@ -52,8 +52,9 @@ void translateToTimeseriesCollection(OperationContext* opCtx,
NamespaceString* nss,
CreateCollectionRequest* createCmdRequest) {
auto bucketsNs = nss->makeTimeseriesBucketsNamespace();
- auto bucketsColl =
- CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForRead(opCtx, bucketsNs);
+ // Hold reference to the catalog for collection lookup without locks to be safe.
+ auto catalog = CollectionCatalog::get(opCtx);
+ auto bucketsColl = catalog->lookupCollectionByNamespace(opCtx, bucketsNs);
// If the 'system.buckets' exists or 'timeseries' parameters are passed in, we know that
// we are trying to shard a timeseries collection.
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 62d953e82e7..282e62f4301 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -1712,8 +1712,9 @@ void ExecCommandDatabase::_initiateCommand() {
// possible that a stale mongos may send the request over a view namespace. In this case, we
// initialize the 'OperationShardingState' with buckets namespace.
auto bucketNss = _invocation->ns().makeTimeseriesBucketsNamespace();
- auto coll =
- CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForRead(opCtx, bucketNss);
+ // Hold reference to the catalog for collection lookup without locks to be safe.
+ auto catalog = CollectionCatalog::get(opCtx);
+ auto coll = catalog->lookupCollectionByNamespace(opCtx, bucketNss);
auto namespaceForSharding =
(coll && coll->getTimeseriesOptions()) ? bucketNss : _invocation->ns();
boost::optional<ShardVersion> shardVersion;
diff --git a/src/mongo/db/stats/storage_stats.cpp b/src/mongo/db/stats/storage_stats.cpp
index f96cb04bc81..d6d089606c1 100644
--- a/src/mongo/db/stats/storage_stats.cpp
+++ b/src/mongo/db/stats/storage_stats.cpp
@@ -241,8 +241,10 @@ Status appendCollectionStorageStats(OperationContext* opCtx,
static constexpr auto kStorageStatsField = "storageStats"_sd;
const auto bucketNss = nss.makeTimeseriesBucketsNamespace();
+ // Hold reference to the catalog for collection lookup without locks to be safe.
+ auto catalog = CollectionCatalog::get(opCtx);
const auto isTimeseries = nss.isTimeseriesBucketsCollection() ||
- CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForRead(opCtx, bucketNss);
+ catalog->lookupCollectionByNamespace(opCtx, bucketNss);
const auto collNss =
(isTimeseries && !nss.isTimeseriesBucketsCollection()) ? std::move(bucketNss) : nss;
diff --git a/src/mongo/db/storage/wiredtiger/oplog_truncate_markers_server_status_section.cpp b/src/mongo/db/storage/wiredtiger/oplog_truncate_markers_server_status_section.cpp
index 4254ccf2921..3b296685965 100644
--- a/src/mongo/db/storage/wiredtiger/oplog_truncate_markers_server_status_section.cpp
+++ b/src/mongo/db/storage/wiredtiger/oplog_truncate_markers_server_status_section.cpp
@@ -58,8 +58,10 @@ public:
return builder.obj();
}
- auto oplogCollection = CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForRead(
- opCtx, NamespaceString::kRsOplogNamespace);
+ // Hold reference to the catalog for collection lookup without locks to be safe.
+ auto catalog = CollectionCatalog::get(opCtx);
+ auto oplogCollection =
+ catalog->lookupCollectionByNamespace(opCtx, NamespaceString::kRsOplogNamespace);
if (oplogCollection) {
oplogCollection->getRecordStore()->getOplogTruncateStats(builder);
}
diff --git a/src/mongo/db/timeseries/catalog_helper.cpp b/src/mongo/db/timeseries/catalog_helper.cpp
index 4b577d55d30..ca58a5aa7a6 100644
--- a/src/mongo/db/timeseries/catalog_helper.cpp
+++ b/src/mongo/db/timeseries/catalog_helper.cpp
@@ -41,8 +41,9 @@ boost::optional<TimeseriesOptions> getTimeseriesOptions(OperationContext* opCtx,
const NamespaceString& nss,
bool convertToBucketsNamespace) {
auto bucketsNs = convertToBucketsNamespace ? nss.makeTimeseriesBucketsNamespace() : nss;
- auto bucketsColl =
- CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForRead(opCtx, bucketsNs);
+ // Hold reference to the catalog for collection lookup without locks to be safe.
+ auto catalog = CollectionCatalog::get(opCtx);
+ auto bucketsColl = catalog->lookupCollectionByNamespace(opCtx, bucketsNs);
if (!bucketsColl) {
return boost::none;
}