author     Henrik Edin <henrik.edin@mongodb.com>            2022-04-05 13:25:46 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-04-05 14:17:12 +0000
commit     fa32d665bd63de7a9d246fa99df5e30840a931de (patch)
tree       fbe101ea2f8888e6361acfb14f220d2fa8eeace4 /src/mongo/db/catalog
parent     4a594750742b3620dba3696cb6e91d60aa8d0df6 (diff)
download   mongo-fa32d665bd63de7a9d246fa99df5e30840a931de.tar.gz
SERVER-52877 Unify how writable Collections instances are handled
CollectionCatalog::LifetimeMode has been removed. Catalog writes now require an active WriteUnitOfWork (WUOW).

WriteUnitOfWork may now be used when the server is in readOnly mode. In that mode it does not open storage sessions; it only allows registration of RecoveryUnit callbacks that are executed when commit() is called. This allows unification of the code that initializes Collection instances, even in readOnly mode.

Enforcement of readOnly mode has been pushed down to the RecordStore: all interfaces that perform writes now check whether we are in readOnly mode and throw if we are.

Catalog updates made through the BatchedCollectionCatalogWriter class bypass the Collection cloning if the batched CollectionCatalog instance already holds a uniquely owned copy (i.e. a previous write to this collection has already been requested in the batch). An active WUOW is also not required while the BatchedCollectionCatalogWriter is in use.
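For orientation, a rough sketch of the two write paths described above, based only on the patterns visible in this patch and its tests. This is not code from the tree: the header paths, free-function names and namespace arguments are assumptions.

// Sketch only; header paths below are assumed, not taken from this patch.
#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/concurrency/d_concurrency.h"

namespace mongo {

// Regular path: a writable Collection now always requires an active WUOW. In
// readOnly mode the WUOW opens no storage session; it only registers
// RecoveryUnit callbacks that run on commit().
void updateOneCollection(OperationContext* opCtx, const NamespaceString& nss) {
    CollectionWriter writer(opCtx, nss);
    WriteUnitOfWork wuow(opCtx);
    Collection* writable = writer.getWritableCollection();
    // ... mutate in-memory metadata on 'writable' ...
    wuow.commit();  // the modified instance is installed in the catalog here
}

// Batched path: many collections are modified against one cloned catalog
// instance, no WUOW is required, and the catalog is copied only once.
void updateManyCollections(OperationContext* opCtx,
                           const std::vector<NamespaceString>& namespaces) {
    Lock::GlobalWrite lock(opCtx);
    BatchedCollectionCatalogWriter batched(opCtx);
    for (const auto& nss : namespaces) {
        Collection* writable =
            batched->lookupCollectionByNamespaceForMetadataWrite(opCtx, nss);
        // ... mutate 'writable'; repeated lookups return the same clone ...
    }
    // The destructor publishes the batched catalog copy atomically.
}

}  // namespace mongo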
Diffstat (limited to 'src/mongo/db/catalog')
-rw-r--r--  src/mongo/db/catalog/SConscript                    1
-rw-r--r--  src/mongo/db/catalog/catalog_control.cpp          31
-rw-r--r--  src/mongo/db/catalog/collection.h                  2
-rw-r--r--  src/mongo/db/catalog/collection_catalog.cpp       96
-rw-r--r--  src/mongo/db/catalog/collection_catalog.h         38
-rw-r--r--  src/mongo/db/catalog/collection_catalog_test.cpp  27
-rw-r--r--  src/mongo/db/catalog/collection_impl.cpp           4
-rw-r--r--  src/mongo/db/catalog/collection_impl.h             2
-rw-r--r--  src/mongo/db/catalog/collection_mock.h             2
-rw-r--r--  src/mongo/db/catalog/collection_writer_test.cpp   46
-rw-r--r--  src/mongo/db/catalog/database_impl.cpp             8
11 files changed, 177 insertions, 80 deletions
diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript
index e33238fd2d1..86393b8f51b 100644
--- a/src/mongo/db/catalog/SConscript
+++ b/src/mongo/db/catalog/SConscript
@@ -139,6 +139,7 @@ env.Library(
'$BUILD_DIR/mongo/db/concurrency/write_conflict_exception',
'$BUILD_DIR/mongo/db/curop',
'$BUILD_DIR/mongo/db/query/query_knobs',
+ '$BUILD_DIR/mongo/db/storage/record_store_base',
'$BUILD_DIR/mongo/db/storage/storage_repair_observer',
'index_repair',
'multi_index_block',
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index 510f45537a9..dadfed598c0 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -51,14 +51,19 @@ namespace catalog {
namespace {
void reopenAllDatabasesAndReloadCollectionCatalog(
OperationContext* opCtx,
- std::shared_ptr<const CollectionCatalog> catalog,
StorageEngine* storageEngine,
const MinVisibleTimestampMap& minVisibleTimestampMap,
Timestamp stableTimestamp) {
+
// Open all databases and repopulate the CollectionCatalog.
LOGV2(20276, "openCatalog: reopening all databases");
- boost::optional<BatchedCollectionCatalogWriter> catalogBatchWriter;
- catalogBatchWriter.emplace(opCtx);
+
+ // Applies all Collection writes to the in-memory catalog in a single copy-on-write to the
+ // catalog. This avoids quadratic behavior where we iterate over every collection and perform
+ // writes that would each copy the catalog. boost::optional is used so that we can finish the
+ // write batch when encountering the oplog, as other systems expect immediate visibility for
+ // the oplog.
+ boost::optional<BatchedCollectionCatalogWriter> catalogWriter(opCtx);
auto databaseHolder = DatabaseHolder::get(opCtx);
std::vector<TenantDatabaseName> databasesToOpen = storageEngine->listDatabases();
@@ -67,10 +72,10 @@ void reopenAllDatabasesAndReloadCollectionCatalog(
23992, 1, "openCatalog: dbholder reopening database", "db"_attr = tenantDbName);
auto db = databaseHolder->openDb(opCtx, tenantDbName);
invariant(db, str::stream() << "failed to reopen database " << tenantDbName.toString());
- for (auto&& collNss : catalog->getAllCollectionNamesFromDb(opCtx, tenantDbName)) {
+ for (auto&& collNss :
+ catalogWriter.get()->getAllCollectionNamesFromDb(opCtx, tenantDbName)) {
// Note that the collection name already includes the database component.
- auto collection = catalog->lookupCollectionByNamespaceForMetadataWrite(
- opCtx, CollectionCatalog::LifetimeMode::kInplace, collNss);
+ auto collection = catalogWriter.get()->lookupCollectionByNamespace(opCtx, collNss);
invariant(collection,
str::stream()
<< "failed to get valid collection pointer for namespace " << collNss);
@@ -87,7 +92,10 @@ void reopenAllDatabasesAndReloadCollectionCatalog(
// cost/effort.
auto minVisible = std::min(stableTimestamp,
minVisibleTimestampMap.find(collection->uuid())->second);
- collection->setMinimumVisibleSnapshot(minVisible);
+ auto writableCollection =
+ catalogWriter.get()->lookupCollectionByUUIDForMetadataWrite(opCtx,
+ collection->uuid());
+ writableCollection->setMinimumVisibleSnapshot(minVisible);
}
// If this is the oplog collection, re-establish the replication system's cached pointer
@@ -97,9 +105,9 @@ void reopenAllDatabasesAndReloadCollectionCatalog(
// The oplog collection must be visible when establishing for repl. Finish our
// batched catalog write and continue on a new batch afterwards.
- catalogBatchWriter.reset();
+ catalogWriter.reset();
collection->establishOplogCollectionForLogging(opCtx);
- catalogBatchWriter.emplace(opCtx);
+ catalogWriter.emplace(opCtx);
}
}
}
@@ -246,15 +254,14 @@ void openCatalog(OperationContext* opCtx,
opCtx, reconcileResult.indexBuildsToRestart, reconcileResult.indexBuildsToResume);
reopenAllDatabasesAndReloadCollectionCatalog(
- opCtx, catalog, storageEngine, minVisibleTimestampMap, stableTimestamp);
+ opCtx, storageEngine, minVisibleTimestampMap, stableTimestamp);
}
void openCatalogAfterStorageChange(OperationContext* opCtx) {
invariant(opCtx->lockState()->isW());
auto storageEngine = opCtx->getServiceContext()->getStorageEngine();
- auto catalog = CollectionCatalog::get(opCtx);
- reopenAllDatabasesAndReloadCollectionCatalog(opCtx, catalog, storageEngine, {}, {});
+ reopenAllDatabasesAndReloadCollectionCatalog(opCtx, storageEngine, {}, {});
}
} // namespace catalog
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index f4799a02f81..425b02dc9d5 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -826,7 +826,7 @@ public:
* Called by catalog::openCatalog() to re-establish the oplog collection pointer while holding
* onto the global lock in exclusive mode.
*/
- virtual void establishOplogCollectionForLogging(OperationContext* opCtx) = 0;
+ virtual void establishOplogCollectionForLogging(OperationContext* opCtx) const = 0;
/**
* Called when this Collection is deregistered from the catalog
diff --git a/src/mongo/db/catalog/collection_catalog.cpp b/src/mongo/db/catalog/collection_catalog.cpp
index 81d8f6fc616..d052293d23a 100644
--- a/src/mongo/db/catalog/collection_catalog.cpp
+++ b/src/mongo/db/catalog/collection_catalog.cpp
@@ -349,6 +349,16 @@ class CollectionCatalog::PublishCatalogUpdates final : public RecoveryUnit::Chan
public:
static constexpr size_t kNumStaticActions = 2;
+ static void setCollectionInCatalog(CollectionCatalog& catalog,
+ std::shared_ptr<Collection> collection) {
+ catalog._collections[collection->ns()] = collection;
+ catalog._catalog[collection->uuid()] = collection;
+ // TODO SERVER-64608 Use tenantID from ns
+ auto dbIdPair = std::make_pair(TenantDatabaseName(boost::none, collection->ns().db()),
+ collection->uuid());
+ catalog._orderedCollections[dbIdPair] = collection;
+ }
+
PublishCatalogUpdates(OperationContext* opCtx,
UncommittedCatalogUpdates& uncommittedCatalogUpdates)
: _opCtx(opCtx), _uncommittedCatalogUpdates(uncommittedCatalogUpdates) {}
@@ -371,16 +381,10 @@ public:
for (auto&& entry : entries) {
switch (entry.action) {
case UncommittedCatalogUpdates::Entry::Action::kWritableCollection:
- writeJobs.push_back([collection = std::move(entry.collection)](
- CollectionCatalog& catalog) {
- catalog._collections[collection->ns()] = collection;
- catalog._catalog[collection->uuid()] = collection;
- // TODO SERVER-64608 Use tenantID from ns
- auto dbIdPair =
- std::make_pair(TenantDatabaseName(boost::none, collection->ns().db()),
- collection->uuid());
- catalog._orderedCollections[dbIdPair] = collection;
- });
+ writeJobs.push_back(
+ [collection = std::move(entry.collection)](CollectionCatalog& catalog) {
+ setCollectionInCatalog(catalog, std::move(collection));
+ });
break;
case UncommittedCatalogUpdates::Entry::Action::kRenamedCollection:
writeJobs.push_back(
@@ -484,10 +488,9 @@ CollectionCatalog::iterator::value_type CollectionCatalog::iterator::operator*()
return {_opCtx, _mapIter->second.get(), LookupCollectionForYieldRestore()};
}
-Collection* CollectionCatalog::iterator::getWritableCollection(OperationContext* opCtx,
- LifetimeMode mode) {
+Collection* CollectionCatalog::iterator::getWritableCollection(OperationContext* opCtx) {
return CollectionCatalog::get(opCtx)->lookupCollectionByUUIDForMetadataWrite(
- opCtx, mode, operator*()->uuid());
+ opCtx, operator*()->uuid());
}
boost::optional<UUID> CollectionCatalog::iterator::uuid() {
@@ -936,12 +939,7 @@ std::shared_ptr<const Collection> CollectionCatalog::lookupCollectionByUUIDForRe
}
Collection* CollectionCatalog::lookupCollectionByUUIDForMetadataWrite(OperationContext* opCtx,
- LifetimeMode mode,
const UUID& uuid) const {
- if (mode == LifetimeMode::kInplace) {
- return const_cast<Collection*>(lookupCollectionByUUID(opCtx, uuid).get());
- }
-
auto& uncommittedCatalogUpdates = UncommittedCatalogUpdates::get(opCtx);
auto [found, uncommittedPtr] = uncommittedCatalogUpdates.lookupCollection(uuid);
// If UUID is managed by uncommittedCatalogUpdates return the pointer which will be nullptr in
@@ -965,8 +963,25 @@ Collection* CollectionCatalog::lookupCollectionByUUIDForMetadataWrite(OperationC
return coll.get();
invariant(opCtx->lockState()->isCollectionLockedForMode(coll->ns(), MODE_X));
+
+ // Skip cloning and return directly if allowed.
+ if (_alreadyClonedForBatchedWriter(coll)) {
+ return coll.get();
+ }
+
auto cloned = coll->clone();
auto ptr = cloned.get();
+
+ // If we are in a batch write, set this Collection instance in the batched catalog write
+ // instance. We don't want to store it as uncommitted in this case: the write needs to be
+ // observable on the thread doing the batch write, and storing it as uncommitted would trigger
+ // the regular path where we do a copy-on-write on the catalog when committing.
+ if (_isCatalogBatchWriter()) {
+ PublishCatalogUpdates::setCollectionInCatalog(*batchedCatalogWriteInstance,
+ std::move(cloned));
+ return ptr;
+ }
+
uncommittedCatalogUpdates.writableCollection(std::move(cloned));
PublishCatalogUpdates::ensureRegisteredWithRecoveryUnit(opCtx, uncommittedCatalogUpdates);
@@ -1032,8 +1047,10 @@ std::shared_ptr<const Collection> CollectionCatalog::lookupCollectionByNamespace
}
Collection* CollectionCatalog::lookupCollectionByNamespaceForMetadataWrite(
- OperationContext* opCtx, LifetimeMode mode, const NamespaceString& nss) const {
- if (mode == LifetimeMode::kInplace || nss.isOplog()) {
+ OperationContext* opCtx, const NamespaceString& nss) const {
+ // The oplog is special and can only be modified in a few contexts. It is modified in place and
+ // care needs to be taken for concurrency.
+ if (nss.isOplog()) {
return const_cast<Collection*>(lookupCollectionByNamespace(opCtx, nss).get());
}
@@ -1064,8 +1081,25 @@ Collection* CollectionCatalog::lookupCollectionByNamespaceForMetadataWrite(
return nullptr;
invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X));
+
+ // Skip cloning and return directly if allowed.
+ if (_alreadyClonedForBatchedWriter(coll)) {
+ return coll.get();
+ }
+
auto cloned = coll->clone();
auto ptr = cloned.get();
+
+ // If we are in a batch write, set this Collection instance in the batched catalog write
+ // instance. We don't want to store it as uncommitted in this case: the write needs to be
+ // observable on the thread doing the batch write, and storing it as uncommitted would trigger
+ // the regular path where we do a copy-on-write on the catalog when committing.
+ if (_isCatalogBatchWriter()) {
+ PublishCatalogUpdates::setCollectionInCatalog(*batchedCatalogWriteInstance,
+ std::move(cloned));
+ return ptr;
+ }
+
uncommittedCatalogUpdates.writableCollection(std::move(cloned));
PublishCatalogUpdates::ensureRegisteredWithRecoveryUnit(opCtx, uncommittedCatalogUpdates);
@@ -1669,6 +1703,25 @@ Status CollectionCatalog::_createOrUpdateView(
return res;
}
+
+bool CollectionCatalog::_isCatalogBatchWriter() const {
+ return batchedCatalogWriteInstance.get() == this;
+}
+
+bool CollectionCatalog::_alreadyClonedForBatchedWriter(
+ const std::shared_ptr<Collection>& collection) const {
+ // We may skip cloning the Collection instance if and only if we are currently in a batched
+ // catalog write and all references to this Collection are owned by the cloned CollectionCatalog
+ // instance owned by the batch writer, i.e. the Collection is uniquely owned by the batch
+ // writer. When the batch writer initially clones the catalog, all collections will have a
+ // 'use_count' of at least kNumCollectionReferencesStored*2 (because there are at least 2
+ // catalog instances). To check for unique ownership we need to check that the reference count
+ // is exactly kNumCollectionReferencesStored (owned by a single catalog), while also accounting
+ // for the instance that is extracted from the catalog and provided as a parameter to this
+ // function; we therefore need to add 1.
+ return _isCatalogBatchWriter() && collection.use_count() == kNumCollectionReferencesStored + 1;
+}
+
CollectionCatalogStasher::CollectionCatalogStasher(OperationContext* opCtx)
: _opCtx(opCtx), _stashed(false) {}
@@ -1736,9 +1789,11 @@ BatchedCollectionCatalogWriter::BatchedCollectionCatalogWriter(OperationContext*
// copy the collection catalog; this could be expensive, so store it for future writes during this
// batcher
batchedCatalogWriteInstance = std::make_shared<CollectionCatalog>(*_base);
+ _batchedInstance = batchedCatalogWriteInstance.get();
}
BatchedCollectionCatalogWriter::~BatchedCollectionCatalogWriter() {
invariant(_opCtx->lockState()->isW());
+ invariant(_batchedInstance == batchedCatalogWriteInstance.get());
// Publish our batched instance and validate that no other writers have been able to write during
// the batching.
@@ -1747,6 +1802,7 @@ BatchedCollectionCatalogWriter::~BatchedCollectionCatalogWriter() {
atomic_compare_exchange_strong(&storage.catalog, &_base, batchedCatalogWriteInstance));
// Clear out batched pointer so no more attempts of batching are made
+ _batchedInstance = nullptr;
batchedCatalogWriteInstance = nullptr;
}
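The destructor above publishes the batched catalog by compare-exchanging the globally stored shared_ptr against the base it cloned from. A minimal standalone illustration of that copy-on-write publish step follows; it is plain C++, not MongoDB code, and uses the pre-C++20 shared_ptr atomic free functions just as the patch does.

// Standalone sketch (not MongoDB code): the batched copy replaces the globally
// visible catalog only if the pointer still equals the base we cloned from,
// which proves no other writer committed during the batch.
#include <cassert>
#include <memory>

struct Catalog {
    int generation = 0;
};

// Globally visible, copy-on-write catalog pointer.
std::shared_ptr<Catalog> gCatalog = std::make_shared<Catalog>();

int main() {
    // Begin the batch: remember the base instance and work on a private copy.
    std::shared_ptr<Catalog> base = std::atomic_load(&gCatalog);
    auto batched = std::make_shared<Catalog>(*base);
    batched->generation++;  // stands in for many collection writes

    // Publish: succeeds only if nobody replaced the catalog in the meantime.
    bool published = std::atomic_compare_exchange_strong(&gCatalog, &base, batched);
    assert(published);
    assert(std::atomic_load(&gCatalog)->generation == 1);
    return 0;
}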
diff --git a/src/mongo/db/catalog/collection_catalog.h b/src/mongo/db/catalog/collection_catalog.h
index 9bb23cfa5a8..2f43bd1eff3 100644
--- a/src/mongo/db/catalog/collection_catalog.h
+++ b/src/mongo/db/catalog/collection_catalog.h
@@ -55,19 +55,10 @@ public:
using CollectionInfoFn = std::function<bool(const CollectionPtr& collection)>;
using ViewIteratorCallback = std::function<bool(const ViewDefinition& view)>;
- /**
- * Defines lifetime and behavior of writable Collections.
- */
- enum class LifetimeMode {
- // Lifetime of writable Collection is managed by an active write unit of work. The writable
- // collection is installed in the catalog during commit and discarded on rollback.
- kManagedInWriteUnitOfWork,
-
- // Inplace writable access to the Collection currently installed in the catalog. This is
- // only safe when the server is in a state where there can be no concurrent readers. Does
- // not require an active write unit of work.
- kInplace
- };
+ // Number of references to a single Collection that are stored in the catalog. Used to
+ // determine whether there are external references (i.e. whether the Collection is uniquely
+ // owned by the catalog). Needs to be kept in sync with the data structures below.
+ static constexpr size_t kNumCollectionReferencesStored = 3;
class iterator {
public:
@@ -85,7 +76,7 @@ public:
iterator operator++(int);
boost::optional<UUID> uuid();
- Collection* getWritableCollection(OperationContext* opCtx, LifetimeMode mode);
+ Collection* getWritableCollection(OperationContext* opCtx);
/*
* Equality operators == and != do not attempt to reposition the iterators being compared.
@@ -287,7 +278,6 @@ public:
* Returns nullptr if the 'uuid' is not known.
*/
Collection* lookupCollectionByUUIDForMetadataWrite(OperationContext* opCtx,
- LifetimeMode mode,
const UUID& uuid) const;
CollectionPtr lookupCollectionByUUID(OperationContext* opCtx, UUID uuid) const;
std::shared_ptr<const Collection> lookupCollectionByUUIDForRead(OperationContext* opCtx,
@@ -316,7 +306,6 @@ public:
* Returns nullptr if the namespace is unknown.
*/
Collection* lookupCollectionByNamespaceForMetadataWrite(OperationContext* opCtx,
- LifetimeMode mode,
const NamespaceString& nss) const;
CollectionPtr lookupCollectionByNamespace(OperationContext* opCtx,
const NamespaceString& nss) const;
@@ -550,6 +539,18 @@ private:
ViewsForDatabase&& viewsForDb) const;
/**
+ * Returns true if this CollectionCatalog instance is part of an ongoing batched catalog write.
+ */
+ bool _isCatalogBatchWriter() const;
+
+ /**
+ * Returns true if we can safely skip performing copy-on-write on the provided collection
+ * instance.
+ */
+ bool _alreadyClonedForBatchedWriter(const std::shared_ptr<Collection>& collection) const;
+
+
+ /**
* Throws 'WriteConflictException' if given namespace is already registered with the catalog, as
either a view or collection. In the case of a collection drop (by the calling thread) that
* has not been committed yet, it will not throw, but it will return
@@ -678,11 +679,16 @@ public:
BatchedCollectionCatalogWriter(const BatchedCollectionCatalogWriter&) = delete;
BatchedCollectionCatalogWriter(BatchedCollectionCatalogWriter&&) = delete;
+ const CollectionCatalog* operator->() const {
+ return _batchedInstance;
+ }
+
private:
OperationContext* _opCtx;
// Store base when we clone the CollectionCatalog so we can verify that there has been no other
// writers during the batching.
std::shared_ptr<CollectionCatalog> _base = nullptr;
+ const CollectionCatalog* _batchedInstance = nullptr;
};
} // namespace mongo
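A standalone illustration of the kNumCollectionReferencesStored reference-count arithmetic used by _alreadyClonedForBatchedWriter(). This is plain C++, not MongoDB code; the three maps merely stand in for the catalog's _collections, _catalog and _orderedCollections containers.

// Standalone sketch: a Collection held by exactly one catalog plus the local
// shared_ptr handed to the check has use_count() == kNumCollectionReferencesStored + 1.
#include <cassert>
#include <map>
#include <memory>
#include <string>
#include <utility>

struct Collection {};

constexpr long kNumCollectionReferencesStored = 3;

// Stand-ins for the catalog's three Collection containers.
struct Catalog {
    std::map<std::string, std::shared_ptr<Collection>> byNamespace;
    std::map<int, std::shared_ptr<Collection>> byUuid;
    std::map<std::pair<std::string, int>, std::shared_ptr<Collection>> ordered;
};

int main() {
    auto coll = std::make_shared<Collection>();

    Catalog original;
    original.byNamespace["test.coll"] = coll;
    original.byUuid[1] = coll;
    original.ordered[{"test", 1}] = coll;

    // The batched writer starts by copying the whole catalog, so every
    // Collection is now referenced by two catalog instances.
    Catalog batched = original;
    coll.reset();  // drop the local setup reference

    auto extracted = batched.byNamespace["test.coll"];
    // Two catalogs plus 'extracted': not uniquely owned, so the first write in
    // the batch must still clone the Collection.
    assert(extracted.use_count() == 2 * kNumCollectionReferencesStored + 1);

    // After the first write the batched catalog holds a fresh clone that no
    // other catalog references, so subsequent writes may skip the clone.
    batched.byNamespace["test.coll"] = std::make_shared<Collection>(*extracted);
    batched.byUuid[1] = batched.byNamespace["test.coll"];
    batched.ordered[{"test", 1}] = batched.byNamespace["test.coll"];

    extracted = batched.byNamespace["test.coll"];
    assert(extracted.use_count() == kNumCollectionReferencesStored + 1);
    return 0;
}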
diff --git a/src/mongo/db/catalog/collection_catalog_test.cpp b/src/mongo/db/catalog/collection_catalog_test.cpp
index 82e67d758b6..49b0551de40 100644
--- a/src/mongo/db/catalog/collection_catalog_test.cpp
+++ b/src/mongo/db/catalog/collection_catalog_test.cpp
@@ -71,7 +71,12 @@ public:
std::shared_ptr<Collection> collection = std::make_shared<CollectionMock>(nss);
col = CollectionPtr(collection.get(), CollectionPtr::NoYieldTag{});
// Register dummy collection in catalog.
- catalog.registerCollection(opCtx.get(), colUUID, std::move(collection));
+ catalog.registerCollection(opCtx.get(), colUUID, collection);
+
+ // Validate that kNumCollectionReferencesStored is correct; add one reference for the one we
+ // hold in this function.
+ ASSERT_EQUALS(collection.use_count(),
+ CollectionCatalog::kNumCollectionReferencesStored + 1);
}
protected:
@@ -99,6 +104,7 @@ public:
dbMap["foo"].insert(std::make_pair(fooUuid, fooColl.get()));
dbMap["bar"].insert(std::make_pair(barUuid, barColl.get()));
+
catalog.registerCollection(&opCtx, fooUuid, fooColl);
catalog.registerCollection(&opCtx, barUuid, barColl);
}
@@ -620,9 +626,10 @@ TEST_F(CollectionCatalogTest, GetAllCollectionNamesAndGetAllDbNamesWithUncommitt
catalog.registerCollection(opCtx.get(), uuid, std::move(newColl));
}
- // One dbName with only an invisible collection does not appear in dbNames.
- auto invisibleCollA = catalog.lookupCollectionByNamespaceForMetadataWrite(
- opCtx.get(), CollectionCatalog::LifetimeMode::kInplace, aColl);
+ // One dbName with only an invisible collection does not appear in dbNames. Use const_cast to
+ // modify the collection in the catalog in place; this bypasses copy-on-write behavior.
+ auto invisibleCollA =
+ const_cast<Collection*>(catalog.lookupCollectionByNamespace(opCtx.get(), aColl).get());
invisibleCollA->setCommitted(false);
Lock::DBLock dbLock(opCtx.get(), "dbA", MODE_S);
@@ -644,8 +651,10 @@ TEST_F(CollectionCatalogTest, GetAllCollectionNamesAndGetAllDbNamesWithUncommitt
std::vector<NamespaceString> dCollList = dbDNss;
dCollList.erase(std::find(dCollList.begin(), dCollList.end(), nss));
- auto invisibleCollD = catalog.lookupCollectionByNamespaceForMetadataWrite(
- opCtx.get(), CollectionCatalog::LifetimeMode::kInplace, nss);
+ // Use const_cast to modify the collection in the catalog in place; this bypasses
+ // copy-on-write behavior.
+ auto invisibleCollD =
+ const_cast<Collection*>(catalog.lookupCollectionByNamespace(opCtx.get(), nss).get());
invisibleCollD->setCommitted(false);
Lock::DBLock dbLock(opCtx.get(), "dbD", MODE_S);
@@ -662,8 +671,10 @@ TEST_F(CollectionCatalogTest, GetAllCollectionNamesAndGetAllDbNamesWithUncommitt
// If all dbNames consist only of invisible collections, none of these dbs is visible.
for (auto& nss : nsss) {
- auto invisibleColl = catalog.lookupCollectionByNamespaceForMetadataWrite(
- opCtx.get(), CollectionCatalog::LifetimeMode::kInplace, nss);
+ // Use const_cast to modify the collection in the catalog in place; this bypasses
+ // copy-on-write behavior.
+ auto invisibleColl =
+ const_cast<Collection*>(catalog.lookupCollectionByNamespace(opCtx.get(), nss).get());
invisibleColl->setCommitted(false);
}
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index df58fce5962..135a086fa8b 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -2030,8 +2030,8 @@ void CollectionImpl::indexBuildSuccess(OperationContext* opCtx, IndexCatalogEntr
_indexCatalog->indexBuildSuccess(opCtx, this, index);
}
-void CollectionImpl::establishOplogCollectionForLogging(OperationContext* opCtx) {
- repl::establishOplogCollectionForLogging(opCtx, this);
+void CollectionImpl::establishOplogCollectionForLogging(OperationContext* opCtx) const {
+ repl::establishOplogCollectionForLogging(opCtx, {this, CollectionPtr::NoYieldTag{}});
}
StatusWith<int> CollectionImpl::checkMetaDataForIndex(const std::string& indexName,
diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h
index e763250c6b4..d3379568d22 100644
--- a/src/mongo/db/catalog/collection_impl.h
+++ b/src/mongo/db/catalog/collection_impl.h
@@ -428,7 +428,7 @@ public:
void indexBuildSuccess(OperationContext* opCtx, IndexCatalogEntry* index) final;
- void establishOplogCollectionForLogging(OperationContext* opCtx) final;
+ void establishOplogCollectionForLogging(OperationContext* opCtx) const final;
void onDeregisterFromCatalog(OperationContext* opCtx) final;
StatusWith<int> checkMetaDataForIndex(const std::string& indexName,
diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h
index 46fb5ac47d3..303026b8dce 100644
--- a/src/mongo/db/catalog/collection_mock.h
+++ b/src/mongo/db/catalog/collection_mock.h
@@ -374,7 +374,7 @@ public:
std::abort();
}
- void establishOplogCollectionForLogging(OperationContext* opCtx) {
+ void establishOplogCollectionForLogging(OperationContext* opCtx) const {
std::abort();
}
diff --git a/src/mongo/db/catalog/collection_writer_test.cpp b/src/mongo/db/catalog/collection_writer_test.cpp
index 13214b1f738..1f828f38800 100644
--- a/src/mongo/db/catalog/collection_writer_test.cpp
+++ b/src/mongo/db/catalog/collection_writer_test.cpp
@@ -88,20 +88,6 @@ protected:
const NamespaceString kNss{"testdb", "testcol"};
};
-TEST_F(CollectionWriterTest, Inplace) {
- CollectionWriter writer(operationContext(), kNss, CollectionCatalog::LifetimeMode::kInplace);
-
- // CollectionWriter in Inplace mode should operate directly on the Collection instance stored in
- // the catalog. So no Collection copy should be made.
- ASSERT_EQ(writer.get(), lookupCollectionFromCatalog());
- ASSERT_EQ(lookupCollectionFromCatalog().get(), lookupCollectionFromCatalogForRead());
-
-
- auto writable = writer.getWritableCollection();
- ASSERT_EQ(writable, lookupCollectionFromCatalog().get());
- ASSERT_EQ(lookupCollectionFromCatalog().get(), lookupCollectionFromCatalogForRead());
-}
-
TEST_F(CollectionWriterTest, Commit) {
CollectionWriter writer(operationContext(), kNss);
@@ -306,4 +292,36 @@ TEST_F(CatalogReadCopyUpdateTest, ConcurrentCatalogWriteBatchingMayThrow) {
}
}
+class BatchedCollectionCatalogWriterTest : public CollectionWriterTest {
+public:
+ Collection* lookupCollectionFromCatalogForMetadataWrite() {
+ return CollectionCatalog::get(operationContext())
+ ->lookupCollectionByNamespaceForMetadataWrite(operationContext(), kNss);
+ }
+};
+
+TEST_F(BatchedCollectionCatalogWriterTest, BatchedTest) {
+
+ const Collection* before = lookupCollectionFromCatalogForRead();
+ const Collection* after = nullptr;
+ {
+ Lock::GlobalWrite lock(operationContext());
+ BatchedCollectionCatalogWriter batched(operationContext());
+
+ // We should get a unique clone the first time we request a writable collection
+ Collection* firstWritable = lookupCollectionFromCatalogForMetadataWrite();
+ ASSERT_NE(firstWritable, before);
+
+ // Subsequent requests should return the same instance.
+ Collection* secondWritable = lookupCollectionFromCatalogForMetadataWrite();
+ ASSERT_EQ(secondWritable, firstWritable);
+
+ after = firstWritable;
+ }
+
+ // When the batched writer commits, our collection instance should be replaced.
+ ASSERT_NE(lookupCollectionFromCatalogForRead(), before);
+ ASSERT_EQ(lookupCollectionFromCatalogForRead(), after);
+}
+
} // namespace
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index 76371a65f80..7581deb2bff 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -169,15 +169,13 @@ Status DatabaseImpl::init(OperationContext* const opCtx) {
auto catalog = CollectionCatalog::get(opCtx);
for (const auto& uuid : catalog->getAllCollectionUUIDsFromDb(_name)) {
- CollectionWriter collection(
- opCtx,
- uuid,
- opCtx->lockState()->isW() ? CollectionCatalog::LifetimeMode::kInplace
- : CollectionCatalog::LifetimeMode::kManagedInWriteUnitOfWork);
+ CollectionWriter collection(opCtx, uuid);
invariant(collection);
// If this is called from the repair path, the collection is already initialized.
if (!collection->isInitialized()) {
+ WriteUnitOfWork wuow(opCtx);
collection.getWritableCollection()->init(opCtx);
+ wuow.commit();
}
}