author    Henrik Edin <henrik.edin@mongodb.com>  2020-08-31 15:39:53 -0400
committer Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-09-15 14:28:38 +0000
commit    accc7e7cd7e7a984347361d03ee76514c4a54163 (patch)
tree      d01b140af8d8609d19ed1b5f5126511bc06377e3 /src/mongo/db
parent    b3f2165c7cf7d2e4d098f86eaf8bfa3b87b683a1 (diff)
download  mongo-accc7e7cd7e7a984347361d03ee76514c4a54163.tar.gz
SERVER-50349 Getting a writable collection now requires the caller to be inside a WUOW by default
There are three modes when accessing a writable Collection:
* Managed in WUOW (default)
* Unmanaged (users need to commit/rollback)
* Inplace, which provides direct access to the Collection in the catalog (users need to ensure there are no concurrent operations going on)

Added a helper RAII type, CollectionWriter, that abstracts the three modes above and also abstracts the different methods of accessing Collections (AutoGetCollection or manual catalog lookups). The writable Collection is acquired lazily when needed (usually inside a WUOW).
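For orientation, below is a minimal sketch (not part of the commit) of how the three modes might look at call sites, using the CollectionWriter API added by this change. It assumes opCtx, nss, and clusterTime are already in scope and the caller holds the appropriate locks.

    #include "mongo/db/catalog_raii.h"

    // 1) Managed in WUOW (default): the writable Collection is acquired lazily and
    //    the clone is installed in the catalog when the WriteUnitOfWork commits.
    {
        CollectionWriter collection(opCtx, nss);  // kManagedInWriteUnitOfWork by default
        WriteUnitOfWork wuow(opCtx);
        uassertStatusOK(collection.getWritableCollection()->truncate(opCtx));
        wuow.commit();
    }

    // 2) Unmanaged clone: usable outside a WriteUnitOfWork; the caller must commit
    //    the clone to the catalog (or the destructor discards it).
    {
        CollectionWriter collection(
            opCtx, nss, CollectionCatalog::LifetimeMode::kUnmanagedClone);
        collection.getWritableCollection()->setMinimumVisibleSnapshot(clusterTime);
        collection.commitToCatalog();
    }

    // 3) Inplace: direct writable access to the Collection installed in the catalog;
    //    only safe when there can be no concurrent readers (e.g. startup/repair paths).
    {
        auto coll = CollectionCatalog::get(opCtx).lookupCollectionByNamespaceForMetadataWrite(
            opCtx, CollectionCatalog::LifetimeMode::kInplace, nss);
        if (coll && !coll->isInitialized())
            coll->init(opCtx);
    }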
Diffstat (limited to 'src/mongo/db')
-rw-r--r--src/mongo/db/catalog/SConscript1
-rw-r--r--src/mongo/db/catalog/capped_utils.cpp6
-rw-r--r--src/mongo/db/catalog/catalog_control.cpp4
-rw-r--r--src/mongo/db/catalog/collection.h2
-rw-r--r--src/mongo/db/catalog/collection_catalog.cpp56
-rw-r--r--src/mongo/db/catalog/collection_catalog.h32
-rw-r--r--src/mongo/db/catalog/collection_catalog_test.cpp9
-rw-r--r--src/mongo/db/catalog/collection_impl.cpp4
-rw-r--r--src/mongo/db/catalog/collection_impl.h2
-rw-r--r--src/mongo/db/catalog/collection_mock.h2
-rw-r--r--src/mongo/db/catalog/database_impl.cpp24
-rw-r--r--src/mongo/db/catalog/drop_indexes.cpp48
-rw-r--r--src/mongo/db/catalog/index_builds_manager.cpp13
-rw-r--r--src/mongo/db/catalog/index_builds_manager.h9
-rw-r--r--src/mongo/db/catalog/index_builds_manager_test.cpp16
-rw-r--r--src/mongo/db/catalog/index_signature_test.cpp8
-rw-r--r--src/mongo/db/catalog/multi_index_block.cpp18
-rw-r--r--src/mongo/db/catalog/multi_index_block.h7
-rw-r--r--src/mongo/db/catalog/multi_index_block_test.cpp44
-rw-r--r--src/mongo/db/catalog_raii.cpp150
-rw-r--r--src/mongo/db/catalog_raii.h90
-rw-r--r--src/mongo/db/commands/drop_indexes.cpp19
-rw-r--r--src/mongo/db/commands/test_commands.cpp5
-rw-r--r--src/mongo/db/index_builds_coordinator.cpp87
-rw-r--r--src/mongo/db/index_builds_coordinator.h2
-rw-r--r--src/mongo/db/repair.cpp18
-rw-r--r--src/mongo/db/repl/collection_bulk_loader_impl.cpp55
-rw-r--r--src/mongo/db/repl/collection_bulk_loader_impl.h3
-rw-r--r--src/mongo/db/repl/replication_recovery.cpp4
-rw-r--r--src/mongo/db/repl/rs_rollback.cpp39
-rw-r--r--src/mongo/db/startup_recovery.cpp11
31 files changed, 546 insertions, 242 deletions
diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript
index 0c54bb95887..dc247376302 100644
--- a/src/mongo/db/catalog/SConscript
+++ b/src/mongo/db/catalog/SConscript
@@ -229,6 +229,7 @@ env.Library(
LIBDEPS=[
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/db/audit',
+ '$BUILD_DIR/mongo/db/catalog_raii',
'$BUILD_DIR/mongo/db/concurrency/write_conflict_exception',
'$BUILD_DIR/mongo/db/curop',
'$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index 81c9661c819..a96cb39b739 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -71,9 +71,7 @@ Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionNam
Database* db = autoDb.getDb();
uassert(ErrorCodes::NamespaceNotFound, "no such database", db);
- Collection* collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespaceForMetadataWrite(opCtx,
- collectionName);
+ CollectionWriter collection(opCtx, collectionName);
uassert(ErrorCodes::CommandNotSupportedOnView,
str::stream() << "emptycapped not supported on view: " << collectionName.ns(),
collection || !ViewCatalog::get(db)->lookup(opCtx, collectionName.ns()));
@@ -96,7 +94,7 @@ Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionNam
WriteUnitOfWork wuow(opCtx);
- Status status = collection->truncate(opCtx);
+ Status status = collection.getWritableCollection()->truncate(opCtx);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index e12b489f558..a81bf952f4c 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -190,8 +190,8 @@ void openCatalog(OperationContext* opCtx, const MinVisibleTimestampMap& minVisib
CollectionCatalog::get(opCtx).getAllCollectionNamesFromDb(opCtx, dbName)) {
// Note that the collection name already includes the database component.
auto collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespaceForMetadataWrite(opCtx,
- collNss);
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespaceForMetadataWrite(
+ opCtx, CollectionCatalog::LifetimeMode::kInplace, collNss);
invariant(collection,
str::stream()
<< "failed to get valid collection pointer for namespace " << collNss);
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 9914a63f76c..53d2b9900b7 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -438,7 +438,7 @@ public:
*/
virtual void cappedTruncateAfter(OperationContext* const opCtx,
RecordId end,
- const bool inclusive) = 0;
+ const bool inclusive) const = 0;
/**
* Returns a non-ok Status if validator is not legal for this collection.
diff --git a/src/mongo/db/catalog/collection_catalog.cpp b/src/mongo/db/catalog/collection_catalog.cpp
index 35f307e1e5a..34444d2a45f 100644
--- a/src/mongo/db/catalog/collection_catalog.cpp
+++ b/src/mongo/db/catalog/collection_catalog.cpp
@@ -103,6 +103,12 @@ CollectionCatalog::iterator::value_type CollectionCatalog::iterator::operator*()
return _mapIter->second.get();
}
+Collection* CollectionCatalog::iterator::getWritableCollection(OperationContext* opCtx,
+ LifetimeMode mode) {
+ return CollectionCatalog::get(opCtx).lookupCollectionByUUIDForMetadataWrite(
+ opCtx, mode, operator*()->uuid());
+}
+
boost::optional<CollectionUUID> CollectionCatalog::iterator::uuid() {
return _uuid;
}
@@ -269,8 +275,26 @@ std::shared_ptr<const Collection> CollectionCatalog::lookupCollectionByUUIDForRe
}
Collection* CollectionCatalog::lookupCollectionByUUIDForMetadataWrite(OperationContext* opCtx,
+ LifetimeMode mode,
CollectionUUID uuid) {
- return const_cast<Collection*>(lookupCollectionByUUID(opCtx, uuid));
+ if (mode == LifetimeMode::kManagedInWriteUnitOfWork) {
+ // Placeholder to invariant if not in wuow
+ opCtx->recoveryUnit()->onCommit([](boost::optional<Timestamp>) {});
+ }
+
+ if (auto coll = UncommittedCollections::getForTxn(opCtx, uuid)) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(coll->ns(), MODE_IX));
+ return coll.get();
+ }
+
+ stdx::lock_guard<Latch> lock(_catalogLock);
+ auto coll = _lookupCollectionByUUID(lock, uuid);
+ if (coll && coll->isCommitted()) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(coll->ns(), MODE_X));
+ return coll.get();
+ }
+
+ return nullptr;
}
const Collection* CollectionCatalog::lookupCollectionByUUID(OperationContext* opCtx,
@@ -315,8 +339,26 @@ std::shared_ptr<const Collection> CollectionCatalog::lookupCollectionByNamespace
}
Collection* CollectionCatalog::lookupCollectionByNamespaceForMetadataWrite(
- OperationContext* opCtx, const NamespaceString& nss) {
- return const_cast<Collection*>(lookupCollectionByNamespace(opCtx, nss));
+ OperationContext* opCtx, LifetimeMode mode, const NamespaceString& nss) {
+ if (mode == LifetimeMode::kManagedInWriteUnitOfWork) {
+ // Placeholder to invariant if not in wuow
+ opCtx->recoveryUnit()->onCommit([](boost::optional<Timestamp>) {});
+ }
+
+ if (auto coll = UncommittedCollections::getForTxn(opCtx, nss)) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX));
+ return coll.get();
+ }
+
+ stdx::lock_guard<Latch> lock(_catalogLock);
+ auto it = _collections.find(nss);
+ auto coll = (it == _collections.end() ? nullptr : it->second);
+ if (coll && coll->isCommitted()) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X));
+ return coll.get();
+ }
+
+ return nullptr;
}
const Collection* CollectionCatalog::lookupCollectionByNamespace(OperationContext* opCtx,
@@ -649,4 +691,12 @@ void CollectionCatalog::addResource(const ResourceId& rid, const std::string& en
namespaces.insert(entry);
}
+void CollectionCatalog::commitUnmanagedClone(Collection* collection) {
+ // TODO SERVER-50145
+}
+
+void CollectionCatalog::discardUnmanagedClone(Collection* collection) {
+ // TODO SERVER-50145
+}
+
} // namespace mongo
diff --git a/src/mongo/db/catalog/collection_catalog.h b/src/mongo/db/catalog/collection_catalog.h
index f9ad25938f9..32afe461f04 100644
--- a/src/mongo/db/catalog/collection_catalog.h
+++ b/src/mongo/db/catalog/collection_catalog.h
@@ -57,9 +57,23 @@ class CollectionCatalog {
public:
using CollectionInfoFn = std::function<bool(const Collection* collection)>;
+ enum class LifetimeMode {
+ // Lifetime of writable Collection is managed by an active write unit of work. The writable
+ // collection is installed in the catalog during commit.
+ kManagedInWriteUnitOfWork,
+
+ // Unmanaged writable Collection usable outside of write unit of work. Users need to commit
+ // the Collection to the catalog.
+ kUnmanagedClone,
+
+ // Inplace writable access to the Collection currently installed in the catalog. This is
+ // only safe when the server is in a state where there can be no concurrent readers.
+ kInplace
+ };
+
class iterator {
public:
- using value_type = Collection*;
+ using value_type = const Collection*;
iterator(StringData dbName, uint64_t genNum, const CollectionCatalog& catalog);
iterator(std::map<std::pair<std::string, CollectionUUID>,
@@ -69,6 +83,8 @@ public:
iterator operator++(int);
boost::optional<CollectionUUID> uuid();
+ Collection* getWritableCollection(OperationContext* opCtx, LifetimeMode mode);
+
/*
* Equality operators == and != do not attempt to reposition the iterators being compared.
* The behavior for comparing invalid iterators is undefined.
@@ -164,6 +180,7 @@ public:
* Returns nullptr if the 'uuid' is not known.
*/
Collection* lookupCollectionByUUIDForMetadataWrite(OperationContext* opCtx,
+ LifetimeMode mode,
CollectionUUID uuid);
const Collection* lookupCollectionByUUID(OperationContext* opCtx, CollectionUUID uuid) const;
std::shared_ptr<const Collection> lookupCollectionByUUIDForRead(OperationContext* opCtx,
@@ -186,6 +203,7 @@ public:
* Returns nullptr if the namespace is unknown.
*/
Collection* lookupCollectionByNamespaceForMetadataWrite(OperationContext* opCtx,
+ LifetimeMode mode,
const NamespaceString& nss);
const Collection* lookupCollectionByNamespace(OperationContext* opCtx,
const NamespaceString& nss) const;
@@ -326,6 +344,18 @@ public:
*/
void addResource(const ResourceId& rid, const std::string& entry);
+ /**
+ * Commit unmanaged Collection that was acquired by lookupCollectionBy***ForMetadataWrite and
+ * lifetime mode kUnmanagedClone.
+ */
+ void commitUnmanagedClone(Collection* collection);
+
+ /**
+ * Discard unmanaged Collection that was acquired by lookupCollectionBy***ForMetadataWrite and
+ * lifetime mode kUnmanagedClone.
+ */
+ void discardUnmanagedClone(Collection* collection);
+
private:
friend class CollectionCatalog::iterator;
diff --git a/src/mongo/db/catalog/collection_catalog_test.cpp b/src/mongo/db/catalog/collection_catalog_test.cpp
index 3e5f1e906e9..5ac89ed76a8 100644
--- a/src/mongo/db/catalog/collection_catalog_test.cpp
+++ b/src/mongo/db/catalog/collection_catalog_test.cpp
@@ -655,7 +655,8 @@ TEST_F(CollectionCatalogTest, GetAllCollectionNamesAndGetAllDbNamesWithUncommitt
}
// One dbName with only an invisible collection does not appear in dbNames.
- auto invisibleCollA = catalog.lookupCollectionByNamespaceForMetadataWrite(&opCtx, aColl);
+ auto invisibleCollA = catalog.lookupCollectionByNamespaceForMetadataWrite(
+ &opCtx, CollectionCatalog::LifetimeMode::kInplace, aColl);
invisibleCollA->setCommitted(false);
auto res = catalog.getAllCollectionNamesFromDb(&opCtx, "dbA");
@@ -672,7 +673,8 @@ TEST_F(CollectionCatalogTest, GetAllCollectionNamesAndGetAllDbNamesWithUncommitt
std::vector<NamespaceString> dCollList = dbDNss;
dCollList.erase(std::find(dCollList.begin(), dCollList.end(), nss));
- auto invisibleCollD = catalog.lookupCollectionByNamespaceForMetadataWrite(&opCtx, nss);
+ auto invisibleCollD = catalog.lookupCollectionByNamespaceForMetadataWrite(
+ &opCtx, CollectionCatalog::LifetimeMode::kInplace, nss);
invisibleCollD->setCommitted(false);
res = catalog.getAllCollectionNamesFromDb(&opCtx, "dbD");
@@ -687,7 +689,8 @@ TEST_F(CollectionCatalogTest, GetAllCollectionNamesAndGetAllDbNamesWithUncommitt
// If all dbNames consist only of invisible collections, none of these dbs is visible.
for (auto& nss : nsss) {
- auto invisibleColl = catalog.lookupCollectionByNamespaceForMetadataWrite(&opCtx, nss);
+ auto invisibleColl = catalog.lookupCollectionByNamespaceForMetadataWrite(
+ &opCtx, CollectionCatalog::LifetimeMode::kInplace, nss);
invisibleColl->setCommitted(false);
}
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index f30be055410..f6eb83db709 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -1027,7 +1027,9 @@ Status CollectionImpl::truncate(OperationContext* opCtx) {
return Status::OK();
}
-void CollectionImpl::cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) {
+void CollectionImpl::cappedTruncateAfter(OperationContext* opCtx,
+ RecordId end,
+ bool inclusive) const {
dassert(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
invariant(isCapped());
invariant(_indexCatalog->numIndexesInProgress(opCtx) == 0);
diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h
index 41ed1815a90..7f5c03cb018 100644
--- a/src/mongo/db/catalog/collection_impl.h
+++ b/src/mongo/db/catalog/collection_impl.h
@@ -237,7 +237,7 @@ public:
* The caller should hold a collection X lock and ensure there are no index builds in progress
* on the collection.
*/
- void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) final;
+ void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) const final;
/**
* Returns a non-ok Status if validator is not legal for this collection.
diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h
index 0362503e236..23220322696 100644
--- a/src/mongo/db/catalog/collection_mock.h
+++ b/src/mongo/db/catalog/collection_mock.h
@@ -170,7 +170,7 @@ public:
std::abort();
}
- void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) {
+ void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) const {
std::abort();
}
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index beec4fb751a..97be806b2a3 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -172,7 +172,8 @@ void DatabaseImpl::init(OperationContext* const opCtx) const {
auto& catalog = CollectionCatalog::get(opCtx);
for (const auto& uuid : catalog.getAllCollectionUUIDsFromDb(_name)) {
- auto collection = catalog.lookupCollectionByUUIDForMetadataWrite(opCtx, uuid);
+ auto collection = catalog.lookupCollectionByUUIDForMetadataWrite(
+ opCtx, CollectionCatalog::LifetimeMode::kInplace, uuid);
invariant(collection);
// If this is called from the repair path, the collection is already initialized.
if (!collection->isInitialized())
@@ -360,8 +361,7 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
"dropCollection() cannot accept a valid drop optime when writes are replicated.");
}
- Collection* collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespaceForMetadataWrite(opCtx, nss);
+ CollectionWriter collection(opCtx, nss);
if (!collection) {
return Status::OK(); // Post condition already met.
@@ -390,10 +390,10 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
auto opObserver = serviceContext->getOpObserver();
auto isOplogDisabledForNamespace = replCoord->isOplogDisabledFor(opCtx, nss);
if (dropOpTime.isNull() && isOplogDisabledForNamespace) {
- _dropCollectionIndexes(opCtx, nss, collection);
+ _dropCollectionIndexes(opCtx, nss, collection.getWritableCollection());
opObserver->onDropCollection(
opCtx, nss, uuid, numRecords, OpObserver::CollectionDropType::kOnePhase);
- return _finishDropCollection(opCtx, nss, collection);
+ return _finishDropCollection(opCtx, nss, collection.get());
}
// Replicated collections should be dropped in two phases.
@@ -402,7 +402,7 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
// storage engine and will no longer be visible at the catalog layer with 3.6-style
// <db>.system.drop.* namespaces.
if (serviceContext->getStorageEngine()->supportsPendingDrops()) {
- _dropCollectionIndexes(opCtx, nss, collection);
+ _dropCollectionIndexes(opCtx, nss, collection.getWritableCollection());
auto commitTimestamp = opCtx->recoveryUnit()->getCommitTimestamp();
LOGV2(20314,
@@ -430,7 +430,7 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
str::stream() << "OpTime is not null. OpTime: " << opTime.toString());
}
- return _finishDropCollection(opCtx, nss, collection);
+ return _finishDropCollection(opCtx, nss, collection.get());
}
// Old two-phase drop: Replicated collections will be renamed with a special drop-pending
@@ -527,8 +527,7 @@ Status DatabaseImpl::renameCollection(OperationContext* opCtx,
<< "' because the destination namespace already exists");
}
- Collection* collToRename =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespaceForMetadataWrite(opCtx, fromNss);
+ CollectionWriter collToRename(opCtx, fromNss);
if (!collToRename) {
return Status(ErrorCodes::NamespaceNotFound, "collection not found to rename");
}
@@ -550,12 +549,13 @@ Status DatabaseImpl::renameCollection(OperationContext* opCtx,
// Set the namespace of 'collToRename' from within the CollectionCatalog. This is necessary
// because the CollectionCatalog mutex synchronizes concurrent access to the collection's
// namespace for callers that may not hold a collection lock.
- CollectionCatalog::get(opCtx).setCollectionNamespace(opCtx, collToRename, fromNss, toNss);
+ auto writableCollection = collToRename.getWritableCollection();
+ CollectionCatalog::get(opCtx).setCollectionNamespace(opCtx, writableCollection, fromNss, toNss);
- opCtx->recoveryUnit()->onCommit([collToRename](auto commitTime) {
+ opCtx->recoveryUnit()->onCommit([writableCollection](auto commitTime) {
// Ban reading from this collection on committed reads on snapshots before now.
if (commitTime) {
- collToRename->setMinimumVisibleSnapshot(commitTime.get());
+ writableCollection->setMinimumVisibleSnapshot(commitTime.get());
}
});
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index 08def9db579..bf684381922 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -320,20 +320,19 @@ Status dropIndexes(OperationContext* opCtx,
BSONObjBuilder* result) {
// We only need to hold an intent lock to send abort signals to the active index builder(s) we
// intend to abort.
- boost::optional<AutoGetCollection> autoColl;
- autoColl.emplace(opCtx, nss, MODE_IX);
+ boost::optional<AutoGetCollection> collection;
+ collection.emplace(opCtx, nss, MODE_IX);
- Database* db = autoColl->getDb();
- Collection* collection = autoColl->getWritableCollection();
- Status status = checkView(opCtx, nss, db, collection);
+ Database* db = collection->getDb();
+ Status status = checkView(opCtx, nss, db, collection->getCollection());
if (!status.isOK()) {
return status;
}
- const UUID collectionUUID = collection->uuid();
+ const UUID collectionUUID = (*collection)->uuid();
const NamespaceStringOrUUID dbAndUUID = {nss.db().toString(), collectionUUID};
- status = checkReplState(opCtx, dbAndUUID, collection);
+ status = checkReplState(opCtx, dbAndUUID, collection->getCollection());
if (!status.isOK()) {
return status;
}
@@ -346,7 +345,7 @@ Status dropIndexes(OperationContext* opCtx,
"indexes"_attr = cmdObj[kIndexFieldName].toString(false));
}
- result->appendNumber("nIndexesWas", collection->getIndexCatalog()->numIndexesTotal(opCtx));
+ result->appendNumber("nIndexesWas", (*collection)->getIndexCatalog()->numIndexesTotal(opCtx));
// Validate basic user input.
BSONElement indexElem = cmdObj.getField(kIndexFieldName);
@@ -358,7 +357,7 @@ Status dropIndexes(OperationContext* opCtx,
if (indexNameElem.type() != String) {
return Status(ErrorCodes::TypeMismatch,
str::stream()
- << "dropIndexes " << collection->ns() << " (" << collectionUUID
+ << "dropIndexes " << (*collection)->ns() << " (" << collectionUUID
<< ") failed to drop multiple indexes "
<< indexElem.toString(false) << ": index name must be a string");
}
@@ -373,7 +372,7 @@ Status dropIndexes(OperationContext* opCtx,
std::vector<UUID> abortedIndexBuilders;
std::vector<std::string> indexNames;
while (true) {
- auto swIndexNames = getIndexNames(opCtx, collection, indexElem);
+ auto swIndexNames = getIndexNames(opCtx, collection->getCollection(), indexElem);
if (!swIndexNames.isOK()) {
return swIndexNames.getStatus();
}
@@ -381,11 +380,11 @@ Status dropIndexes(OperationContext* opCtx,
indexNames = swIndexNames.getValue();
// Copy the namespace and UUID before dropping locks.
- auto collUUID = collection->uuid();
- auto collNs = collection->ns();
+ auto collUUID = (*collection)->uuid();
+ auto collNs = (*collection)->ns();
// Release locks before aborting index builds. The helper will acquire locks on our behalf.
- autoColl = boost::none;
+ collection = boost::none;
// Send the abort signal to any index builders that match the users request. Waits until all
// aborted builders complete.
@@ -400,22 +399,21 @@ Status dropIndexes(OperationContext* opCtx,
// Take an exclusive lock on the collection now to be able to perform index catalog writes
// when removing ready indexes from disk.
- autoColl.emplace(opCtx, dbAndUUID, MODE_X);
+ collection.emplace(opCtx, dbAndUUID, MODE_X);
// Abandon the snapshot as the index catalog will compare the in-memory state to the disk
// state, which may have changed when we released the lock temporarily.
opCtx->recoveryUnit()->abandonSnapshot();
- db = autoColl->getDb();
- collection = autoColl->getWritableCollection();
- if (!collection) {
+ db = collection->getDb();
+ if (!*collection) {
return Status(ErrorCodes::NamespaceNotFound,
str::stream()
<< "Collection '" << nss << "' with UUID " << dbAndUUID.uuid()
<< " in database " << dbAndUUID.db() << " does not exist.");
}
- status = checkReplState(opCtx, dbAndUUID, collection);
+ status = checkReplState(opCtx, dbAndUUID, collection->getCollection());
if (!status.isOK()) {
return status;
}
@@ -441,12 +439,12 @@ Status dropIndexes(OperationContext* opCtx,
WriteUnitOfWork wuow(opCtx);
// This is necessary to check shard version.
- OldClientContext ctx(opCtx, collection->ns().ns());
+ OldClientContext ctx(opCtx, (*collection)->ns().ns());
// Iterate through all the aborted indexes and drop any indexes that are ready in the
// index catalog. This would indicate that while we yielded our locks during the abort
// phase, a new identical index was created.
- auto indexCatalog = collection->getIndexCatalog();
+ auto indexCatalog = collection->getWritableCollection()->getIndexCatalog();
const bool includeUnfinished = false;
for (const auto& indexName : indexNames) {
auto desc = indexCatalog->findIndexByName(opCtx, indexName, includeUnfinished);
@@ -455,7 +453,8 @@ Status dropIndexes(OperationContext* opCtx,
continue;
}
- Status status = dropIndexByDescriptor(opCtx, collection, indexCatalog, desc);
+ Status status =
+ dropIndexByDescriptor(opCtx, collection->getCollection(), indexCatalog, desc);
if (!status.isOK()) {
return status;
}
@@ -472,7 +471,7 @@ Status dropIndexes(OperationContext* opCtx,
invariant(isWildcard);
invariant(indexNames.size() == 1);
invariant(indexNames.front() == "*");
- invariant(collection->getIndexCatalog()->numIndexesInProgress(opCtx) == 0);
+ invariant((*collection)->getIndexCatalog()->numIndexesInProgress(opCtx) == 0);
} else {
// The index catalog requires that no active index builders are running when dropping
// indexes.
@@ -484,11 +483,12 @@ Status dropIndexes(OperationContext* opCtx,
WriteUnitOfWork wunit(opCtx);
// This is necessary to check shard version.
- OldClientContext ctx(opCtx, collection->ns().ns());
+ OldClientContext ctx(opCtx, (*collection)->ns().ns());
// Use an empty BSONObjBuilder to avoid duplicate appends to result on retry loops.
BSONObjBuilder tempObjBuilder;
- Status status = dropReadyIndexes(opCtx, collection, indexNames, &tempObjBuilder);
+ Status status = dropReadyIndexes(
+ opCtx, collection->getWritableCollection(), indexNames, &tempObjBuilder);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp
index 15f355bf9c0..b6d8c11c8fc 100644
--- a/src/mongo/db/catalog/index_builds_manager.cpp
+++ b/src/mongo/db/catalog/index_builds_manager.cpp
@@ -80,7 +80,7 @@ IndexBuildsManager::~IndexBuildsManager() {
}
Status IndexBuildsManager::setUpIndexBuild(OperationContext* opCtx,
- Collection* collection,
+ CollectionWriter& collection,
const std::vector<BSONObj>& specs,
const UUID& buildUUID,
OnInitFn onInit,
@@ -135,7 +135,7 @@ Status IndexBuildsManager::resumeBuildingIndexFromBulkLoadPhase(OperationContext
}
StatusWith<std::pair<long long, long long>> IndexBuildsManager::startBuildingIndexForRecovery(
- OperationContext* opCtx, Collection* coll, const UUID& buildUUID, RepairData repair) {
+ OperationContext* opCtx, const Collection* coll, const UUID& buildUUID, RepairData repair) {
auto builder = invariant(_getBuilder(buildUUID));
// Iterate all records in the collection. Validate the records and index them
@@ -290,7 +290,7 @@ Status IndexBuildsManager::checkIndexConstraintViolations(OperationContext* opCt
}
Status IndexBuildsManager::commitIndexBuild(OperationContext* opCtx,
- Collection* collection,
+ CollectionWriter& collection,
const NamespaceString& nss,
const UUID& buildUUID,
MultiIndexBlock::OnCreateEachFn onCreateEachFn,
@@ -301,9 +301,10 @@ Status IndexBuildsManager::commitIndexBuild(OperationContext* opCtx,
opCtx,
"IndexBuildsManager::commitIndexBuild",
nss.ns(),
- [this, builder, buildUUID, opCtx, collection, nss, &onCreateEachFn, &onCommitFn] {
+ [this, builder, buildUUID, opCtx, &collection, nss, &onCreateEachFn, &onCommitFn] {
WriteUnitOfWork wunit(opCtx);
- auto status = builder->commit(opCtx, collection, onCreateEachFn, onCommitFn);
+ auto status = builder->commit(
+ opCtx, collection.getWritableCollection(), onCreateEachFn, onCommitFn);
if (!status.isOK()) {
return status;
}
@@ -313,7 +314,7 @@ Status IndexBuildsManager::commitIndexBuild(OperationContext* opCtx,
}
bool IndexBuildsManager::abortIndexBuild(OperationContext* opCtx,
- Collection* collection,
+ CollectionWriter& collection,
const UUID& buildUUID,
OnCleanUpFn onCleanUpFn) {
auto builder = _getBuilder(buildUUID);
diff --git a/src/mongo/db/catalog/index_builds_manager.h b/src/mongo/db/catalog/index_builds_manager.h
index c48902425f2..7ce8b7dd63b 100644
--- a/src/mongo/db/catalog/index_builds_manager.h
+++ b/src/mongo/db/catalog/index_builds_manager.h
@@ -35,6 +35,7 @@
#include <vector>
#include "mongo/db/catalog/multi_index_block.h"
+#include "mongo/db/catalog_raii.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/rebuild_indexes.h"
#include "mongo/db/repl_index_build_state.h"
@@ -80,7 +81,7 @@ public:
*/
using OnInitFn = MultiIndexBlock::OnInitFn;
Status setUpIndexBuild(OperationContext* opCtx,
- Collection* collection,
+ CollectionWriter& collection,
const std::vector<BSONObj>& specs,
const UUID& buildUUID,
OnInitFn onInit,
@@ -109,7 +110,7 @@ public:
* Returns the number of records and the size of the data iterated over.
*/
StatusWith<std::pair<long long, long long>> startBuildingIndexForRecovery(
- OperationContext* opCtx, Collection* coll, const UUID& buildUUID, RepairData repair);
+ OperationContext* opCtx, const Collection* coll, const UUID& buildUUID, RepairData repair);
/**
* Document inserts observed during the scanning/insertion phase of an index build are not
@@ -140,7 +141,7 @@ public:
using OnCreateEachFn = MultiIndexBlock::OnCreateEachFn;
using OnCommitFn = MultiIndexBlock::OnCommitFn;
Status commitIndexBuild(OperationContext* opCtx,
- Collection* collection,
+ CollectionWriter& collection,
const NamespaceString& nss,
const UUID& buildUUID,
OnCreateEachFn onCreateEachFn,
@@ -151,7 +152,7 @@ public:
*/
using OnCleanUpFn = MultiIndexBlock::OnCleanUpFn;
bool abortIndexBuild(OperationContext* opCtx,
- Collection* collection,
+ CollectionWriter& collection,
const UUID& buildUUID,
OnCleanUpFn onCleanUpFn);
diff --git a/src/mongo/db/catalog/index_builds_manager_test.cpp b/src/mongo/db/catalog/index_builds_manager_test.cpp
index b8c1ca36993..649c4b0e749 100644
--- a/src/mongo/db/catalog/index_builds_manager_test.cpp
+++ b/src/mongo/db/catalog/index_builds_manager_test.cpp
@@ -81,18 +81,14 @@ std::vector<BSONObj> makeSpecs(const NamespaceString& nss, std::vector<std::stri
TEST_F(IndexBuildsManagerTest, IndexBuildsManagerSetUpAndTearDown) {
AutoGetCollection autoColl(operationContext(), _nss, MODE_X);
+ CollectionWriter collection(autoColl);
auto specs = makeSpecs(_nss, {"a", "b"});
- ASSERT_OK(_indexBuildsManager.setUpIndexBuild(operationContext(),
- autoColl.getWritableCollection(),
- specs,
- _buildUUID,
- MultiIndexBlock::kNoopOnInitFn));
-
- _indexBuildsManager.abortIndexBuild(operationContext(),
- autoColl.getWritableCollection(),
- _buildUUID,
- MultiIndexBlock::kNoopOnCleanUpFn);
+ ASSERT_OK(_indexBuildsManager.setUpIndexBuild(
+ operationContext(), collection, specs, _buildUUID, MultiIndexBlock::kNoopOnInitFn));
+
+ _indexBuildsManager.abortIndexBuild(
+ operationContext(), collection, _buildUUID, MultiIndexBlock::kNoopOnCleanUpFn);
_indexBuildsManager.unregisterIndexBuild(_buildUUID);
}
} // namespace
diff --git a/src/mongo/db/catalog/index_signature_test.cpp b/src/mongo/db/catalog/index_signature_test.cpp
index ca5ee599308..4aee9d6a052 100644
--- a/src/mongo/db/catalog/index_signature_test.cpp
+++ b/src/mongo/db/catalog/index_signature_test.cpp
@@ -44,10 +44,10 @@ public:
IndexSignatureTest() : CatalogTestFixture() {}
StatusWith<const IndexCatalogEntry*> createIndex(BSONObj spec) {
- // Get the index catalog associated with the test collection.
- auto* indexCatalog = coll()->getIndexCatalog();
// Build the specified index on the collection.
WriteUnitOfWork wuow(opCtx());
+ // Get the index catalog associated with the test collection.
+ auto* indexCatalog = _coll->getWritableCollection()->getIndexCatalog();
auto status = indexCatalog->createIndexOnEmptyCollection(opCtx(), spec);
if (!status.isOK()) {
return status.getStatus();
@@ -68,8 +68,8 @@ public:
return _nss;
}
- Collection* coll() const {
- return _coll->getWritableCollection();
+ const Collection* coll() const {
+ return (*_coll).getCollection();
}
OperationContext* opCtx() {
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index 35c98a16862..ad75878d59f 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -118,7 +118,7 @@ MultiIndexBlock::~MultiIndexBlock() {
MultiIndexBlock::OnCleanUpFn MultiIndexBlock::kNoopOnCleanUpFn = []() {};
void MultiIndexBlock::abortIndexBuild(OperationContext* opCtx,
- Collection* collection,
+ CollectionWriter& collection,
OnCleanUpFn onCleanUp) noexcept {
if (_collectionUUID) {
// init() was previously called with a collection pointer, so ensure that the same
@@ -139,7 +139,7 @@ void MultiIndexBlock::abortIndexBuild(OperationContext* opCtx,
// This cleans up all index builds. Because that may need to write, it is done inside of
// a WUOW. Nothing inside this block can fail, and it is made fatal if it does.
for (size_t i = 0; i < _indexes.size(); i++) {
- _indexes[i].block->fail(opCtx, collection);
+ _indexes[i].block->fail(opCtx, collection.getWritableCollection());
_indexes[i].block->finalizeTemporaryTables(
opCtx, TemporaryRecordStore::FinalizationAction::kDelete);
}
@@ -187,7 +187,7 @@ MultiIndexBlock::OnInitFn MultiIndexBlock::makeTimestampedIndexOnInitFn(Operatio
}
StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(OperationContext* opCtx,
- Collection* collection,
+ CollectionWriter& collection,
const BSONObj& spec,
OnInitFn onInit) {
const auto indexes = std::vector<BSONObj>(1, spec);
@@ -196,7 +196,7 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(OperationContext* opCtx,
StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(
OperationContext* opCtx,
- Collection* collection,
+ CollectionWriter& collection,
const std::vector<BSONObj>& indexSpecs,
OnInitFn onInit,
const boost::optional<ResumeIndexInfo>& resumeInfo) {
@@ -277,7 +277,11 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(
boost::optional<IndexSorterInfo> sorterInfo;
IndexToBuild index;
index.block = std::make_unique<IndexBuildBlock>(
- collection->getIndexCatalog(), collection->ns(), info, _method, _buildUUID);
+ collection.getWritableCollection()->getIndexCatalog(),
+ collection->ns(),
+ info,
+ _method,
+ _buildUUID);
if (resumeInfo) {
auto resumeInfoIndexes = resumeInfo->getIndexes();
// Find the resume information that corresponds to this spec.
@@ -296,9 +300,9 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(
sorterInfo = *sorterInfoIt;
status = index.block->initForResume(
- opCtx, collection, *sorterInfo, resumeInfo->getPhase());
+ opCtx, collection.getWritableCollection(), *sorterInfo, resumeInfo->getPhase());
} else {
- status = index.block->init(opCtx, collection);
+ status = index.block->init(opCtx, collection.getWritableCollection());
}
if (!status.isOK())
return status;
diff --git a/src/mongo/db/catalog/multi_index_block.h b/src/mongo/db/catalog/multi_index_block.h
index 8a9b4b6e27e..7aa68a65776 100644
--- a/src/mongo/db/catalog/multi_index_block.h
+++ b/src/mongo/db/catalog/multi_index_block.h
@@ -43,6 +43,7 @@
#include "mongo/db/catalog/collection_options.h"
#include "mongo/db/catalog/index_build_block.h"
#include "mongo/db/catalog/index_catalog.h"
+#include "mongo/db/catalog_raii.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/index/index_build_interceptor.h"
#include "mongo/db/record_id.h"
@@ -111,12 +112,12 @@ public:
using OnInitFn = std::function<Status(std::vector<BSONObj>& specs)>;
StatusWith<std::vector<BSONObj>> init(
OperationContext* opCtx,
- Collection* collection,
+ CollectionWriter& collection,
const std::vector<BSONObj>& specs,
OnInitFn onInit,
const boost::optional<ResumeIndexInfo>& resumeInfo = boost::none);
StatusWith<std::vector<BSONObj>> init(OperationContext* opCtx,
- Collection* collection,
+ CollectionWriter& collection,
const BSONObj& spec,
OnInitFn onInit);
StatusWith<std::vector<BSONObj>> initForResume(OperationContext* opCtx,
@@ -256,7 +257,7 @@ public:
*/
using OnCleanUpFn = std::function<void()>;
void abortIndexBuild(OperationContext* opCtx,
- Collection* collection,
+ CollectionWriter& collection,
OnCleanUpFn onCleanUp) noexcept;
/**
diff --git a/src/mongo/db/catalog/multi_index_block_test.cpp b/src/mongo/db/catalog/multi_index_block_test.cpp
index 4f379451bec..5b258e4174e 100644
--- a/src/mongo/db/catalog/multi_index_block_test.cpp
+++ b/src/mongo/db/catalog/multi_index_block_test.cpp
@@ -90,12 +90,11 @@ void MultiIndexBlockTest::tearDown() {
TEST_F(MultiIndexBlockTest, CommitWithoutInsertingDocuments) {
auto indexer = getIndexer();
- AutoGetCollection coll(operationContext(), getNSS(), MODE_X);
+ AutoGetCollection autoColl(operationContext(), getNSS(), MODE_X);
+ CollectionWriter coll(autoColl);
- auto specs = unittest::assertGet(indexer->init(operationContext(),
- coll.getWritableCollection(),
- std::vector<BSONObj>(),
- MultiIndexBlock::kNoopOnInitFn));
+ auto specs = unittest::assertGet(indexer->init(
+ operationContext(), coll, std::vector<BSONObj>(), MultiIndexBlock::kNoopOnInitFn));
ASSERT_EQUALS(0U, specs.size());
ASSERT_OK(indexer->dumpInsertsFromBulk(operationContext()));
@@ -114,12 +113,11 @@ TEST_F(MultiIndexBlockTest, CommitWithoutInsertingDocuments) {
TEST_F(MultiIndexBlockTest, CommitAfterInsertingSingleDocument) {
auto indexer = getIndexer();
- AutoGetCollection coll(operationContext(), getNSS(), MODE_X);
+ AutoGetCollection autoColl(operationContext(), getNSS(), MODE_X);
+ CollectionWriter coll(autoColl);
- auto specs = unittest::assertGet(indexer->init(operationContext(),
- coll.getWritableCollection(),
- std::vector<BSONObj>(),
- MultiIndexBlock::kNoopOnInitFn));
+ auto specs = unittest::assertGet(indexer->init(
+ operationContext(), coll, std::vector<BSONObj>(), MultiIndexBlock::kNoopOnInitFn));
ASSERT_EQUALS(0U, specs.size());
ASSERT_OK(indexer->insertSingleDocumentForInitialSyncOrRecovery(operationContext(), {}, {}));
@@ -136,19 +134,17 @@ TEST_F(MultiIndexBlockTest, CommitAfterInsertingSingleDocument) {
}
// abort() should have no effect after the index build is committed.
- indexer->abortIndexBuild(
- operationContext(), coll.getWritableCollection(), MultiIndexBlock::kNoopOnCleanUpFn);
+ indexer->abortIndexBuild(operationContext(), coll, MultiIndexBlock::kNoopOnCleanUpFn);
}
TEST_F(MultiIndexBlockTest, AbortWithoutCleanupAfterInsertingSingleDocument) {
auto indexer = getIndexer();
- AutoGetCollection coll(operationContext(), getNSS(), MODE_X);
+ AutoGetCollection autoColl(operationContext(), getNSS(), MODE_X);
+ CollectionWriter coll(autoColl);
- auto specs = unittest::assertGet(indexer->init(operationContext(),
- coll.getWritableCollection(),
- std::vector<BSONObj>(),
- MultiIndexBlock::kNoopOnInitFn));
+ auto specs = unittest::assertGet(indexer->init(
+ operationContext(), coll, std::vector<BSONObj>(), MultiIndexBlock::kNoopOnInitFn));
ASSERT_EQUALS(0U, specs.size());
ASSERT_OK(indexer->insertSingleDocumentForInitialSyncOrRecovery(operationContext(), {}, {}));
auto isResumable = false;
@@ -158,7 +154,8 @@ TEST_F(MultiIndexBlockTest, AbortWithoutCleanupAfterInsertingSingleDocument) {
TEST_F(MultiIndexBlockTest, InitWriteConflictException) {
auto indexer = getIndexer();
- AutoGetCollection coll(operationContext(), getNSS(), MODE_X);
+ AutoGetCollection autoColl(operationContext(), getNSS(), MODE_X);
+ CollectionWriter coll(autoColl);
BSONObj spec = BSON("key" << BSON("a" << 1) << "name"
<< "a_1"
@@ -167,7 +164,7 @@ TEST_F(MultiIndexBlockTest, InitWriteConflictException) {
{
WriteUnitOfWork wuow(operationContext());
ASSERT_THROWS_CODE(indexer->init(operationContext(),
- coll.getWritableCollection(),
+ coll,
{spec},
[](std::vector<BSONObj>& specs) -> Status {
throw WriteConflictException();
@@ -178,17 +175,12 @@ TEST_F(MultiIndexBlockTest, InitWriteConflictException) {
{
WriteUnitOfWork wuow(operationContext());
- ASSERT_OK(indexer
- ->init(operationContext(),
- coll.getWritableCollection(),
- {spec},
- MultiIndexBlock::kNoopOnInitFn)
+ ASSERT_OK(indexer->init(operationContext(), coll, {spec}, MultiIndexBlock::kNoopOnInitFn)
.getStatus());
wuow.commit();
}
- indexer->abortIndexBuild(
- operationContext(), coll.getWritableCollection(), MultiIndexBlock::kNoopOnCleanUpFn);
+ indexer->abortIndexBuild(operationContext(), coll, MultiIndexBlock::kNoopOnCleanUpFn);
}
} // namespace
diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index 867296068e0..ab90c0a3587 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -149,6 +149,156 @@ AutoGetCollectionBase<CatalogCollectionLookupT>::AutoGetCollectionBase(
!_view || viewMode == AutoGetCollectionViewMode::kViewsPermitted);
}
+AutoGetCollection::AutoGetCollection(OperationContext* opCtx,
+ const NamespaceStringOrUUID& nsOrUUID,
+ LockMode modeColl,
+ AutoGetCollectionViewMode viewMode,
+ Date_t deadline)
+ : AutoGetCollectionBase(opCtx, nsOrUUID, modeColl, viewMode, deadline), _opCtx(opCtx) {}
+
+Collection* AutoGetCollection::getWritableCollection(CollectionCatalog::LifetimeMode mode) {
+ // Acquire writable instance if not already available
+ if (!_writableColl) {
+
+ // Resets the writable Collection when the write unit of work finishes so we re-fetch and
+ // re-clone the Collection if a new write unit of work is opened.
+ class WritableCollectionReset : public RecoveryUnit::Change {
+ public:
+ WritableCollectionReset(AutoGetCollection& autoColl,
+ const Collection* rollbackCollection)
+ : _autoColl(autoColl), _rollbackCollection(rollbackCollection) {}
+ void commit(boost::optional<Timestamp> commitTime) final {
+ _autoColl._writableColl = nullptr;
+ }
+ void rollback() final {
+ _autoColl._coll = _rollbackCollection;
+ _autoColl._writableColl = nullptr;
+ }
+
+ private:
+ AutoGetCollection& _autoColl;
+ const Collection* _rollbackCollection;
+ };
+
+ _writableColl = CollectionCatalog::get(_opCtx).lookupCollectionByNamespaceForMetadataWrite(
+ _opCtx, mode, _resolvedNss);
+ if (mode == CollectionCatalog::LifetimeMode::kManagedInWriteUnitOfWork) {
+ _opCtx->recoveryUnit()->registerChange(
+ std::make_unique<WritableCollectionReset>(*this, _coll));
+ }
+
+ _coll = _writableColl;
+ }
+ return _writableColl;
+}
+
+struct CollectionWriter::SharedImpl {
+ SharedImpl(CollectionWriter* parent) : _parent(parent) {}
+
+ CollectionWriter* _parent;
+ std::function<Collection*(CollectionCatalog::LifetimeMode)> _writableCollectionInitializer;
+};
+
+CollectionWriter::CollectionWriter(OperationContext* opCtx,
+ const CollectionUUID& uuid,
+ CollectionCatalog::LifetimeMode mode)
+ : _opCtx(opCtx), _mode(mode), _sharedImpl(std::make_shared<SharedImpl>(this)) {
+
+ _collection = CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, uuid);
+ _sharedImpl->_writableCollectionInitializer = [opCtx,
+ uuid](CollectionCatalog::LifetimeMode mode) {
+ return CollectionCatalog::get(opCtx).lookupCollectionByUUIDForMetadataWrite(
+ opCtx, mode, uuid);
+ };
+}
+
+CollectionWriter::CollectionWriter(OperationContext* opCtx,
+ const NamespaceString& nss,
+ CollectionCatalog::LifetimeMode mode)
+ : _opCtx(opCtx), _mode(mode), _sharedImpl(std::make_shared<SharedImpl>(this)) {
+ _collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
+ _sharedImpl->_writableCollectionInitializer = [opCtx,
+ nss](CollectionCatalog::LifetimeMode mode) {
+ return CollectionCatalog::get(opCtx).lookupCollectionByNamespaceForMetadataWrite(
+ opCtx, mode, nss);
+ };
+}
+
+CollectionWriter::CollectionWriter(AutoGetCollection& autoCollection,
+ CollectionCatalog::LifetimeMode mode)
+ : _opCtx(autoCollection.getOperationContext()),
+ _mode(mode),
+ _sharedImpl(std::make_shared<SharedImpl>(this)) {
+ _collection = autoCollection.getCollection();
+ _sharedImpl->_writableCollectionInitializer =
+ [&autoCollection](CollectionCatalog::LifetimeMode mode) {
+ return autoCollection.getWritableCollection(mode);
+ };
+}
+
+CollectionWriter::CollectionWriter(Collection* writableCollection)
+ : _collection(writableCollection),
+ _writableCollection(writableCollection),
+ _mode(CollectionCatalog::LifetimeMode::kInplace) {}
+
+CollectionWriter::~CollectionWriter() {
+ // Notify shared state that this instance is destroyed
+ if (_sharedImpl) {
+ _sharedImpl->_parent = nullptr;
+ }
+
+ if (_mode == CollectionCatalog::LifetimeMode::kUnmanagedClone && _writableCollection) {
+ CollectionCatalog::get(_opCtx).discardUnmanagedClone(_writableCollection);
+ }
+}
+
+Collection* CollectionWriter::getWritableCollection() {
+ // Acquire writable instance lazily if not already available
+ if (!_writableCollection) {
+ _writableCollection = _sharedImpl->_writableCollectionInitializer(_mode);
+
+ // Resets the writable Collection when the write unit of work finishes so we re-fetch and
+ // re-clone the Collection if a new write unit of work is opened. Holds the back pointer to
+ // the CollectionWriter via a shared_ptr so we can detect if the instance is already
+ // destroyed.
+ class WritableCollectionReset : public RecoveryUnit::Change {
+ public:
+ WritableCollectionReset(std::shared_ptr<SharedImpl> shared,
+ const Collection* rollbackCollection)
+ : _shared(std::move(shared)), _rollbackCollection(rollbackCollection) {}
+ void commit(boost::optional<Timestamp> commitTime) final {
+ if (_shared->_parent)
+ _shared->_parent->_writableCollection = nullptr;
+ }
+ void rollback() final {
+ if (_shared->_parent) {
+ _shared->_parent->_collection = _rollbackCollection;
+ _shared->_parent->_writableCollection = nullptr;
+ }
+ }
+
+ private:
+ std::shared_ptr<SharedImpl> _shared;
+ const Collection* _rollbackCollection;
+ };
+
+ if (_mode == CollectionCatalog::LifetimeMode::kManagedInWriteUnitOfWork) {
+ _opCtx->recoveryUnit()->registerChange(
+ std::make_unique<WritableCollectionReset>(_sharedImpl, _collection));
+ }
+
+ _collection = _writableCollection;
+ }
+ return _writableCollection;
+}
+
+void CollectionWriter::commitToCatalog() {
+ dassert(_mode == CollectionCatalog::LifetimeMode::kUnmanagedClone);
+ dassert(_writableCollection);
+ CollectionCatalog::get(_opCtx).commitUnmanagedClone(_writableCollection);
+ _writableCollection = nullptr;
+}
+
CatalogCollectionLookup::CollectionStorage CatalogCollectionLookup::lookupCollection(
OperationContext* opCtx, const NamespaceString& nss) {
return CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
diff --git a/src/mongo/db/catalog_raii.h b/src/mongo/db/catalog_raii.h
index 367b87e933b..917bff29707 100644
--- a/src/mongo/db/catalog_raii.h
+++ b/src/mongo/db/catalog_raii.h
@@ -169,7 +169,7 @@ public:
return _resolvedNss;
}
-private:
+protected:
AutoGetDb _autoDb;
// If the object was instantiated with a UUID, contains the resolved namespace, otherwise it is
@@ -205,15 +205,97 @@ struct CatalogCollectionLookupForRead {
class AutoGetCollection : public AutoGetCollectionBase<CatalogCollectionLookup> {
public:
- using AutoGetCollectionBase::AutoGetCollectionBase;
+ AutoGetCollection(
+ OperationContext* opCtx,
+ const NamespaceStringOrUUID& nsOrUUID,
+ LockMode modeColl,
+ AutoGetCollectionViewMode viewMode = AutoGetCollectionViewMode::kViewsForbidden,
+ Date_t deadline = Date_t::max());
/**
* Returns writable Collection. Necessary Collection lock mode is required.
* Any previous Collection that has been returned may be invalidated.
*/
- Collection* getWritableCollection() const {
- return const_cast<Collection*>(getCollection());
+ Collection* getWritableCollection(
+ CollectionCatalog::LifetimeMode mode =
+ CollectionCatalog::LifetimeMode::kManagedInWriteUnitOfWork);
+
+ OperationContext* getOperationContext() const {
+ return _opCtx;
}
+
+private:
+ Collection* _writableColl = nullptr;
+ OperationContext* _opCtx = nullptr;
+};
+
+/**
+ * RAII-style class to handle the lifetime of writable Collections.
+ * It does not take any locks, concurrency needs to be handled separately using explicit locks or
+ * AutoGetCollection. This class can serve as an adaptor to unify different methods of acquiring a
+ * writable collection.
+ *
+ * It is safe to re-use an instance for multiple WriteUnitOfWorks or to destroy it before the active
+ * WriteUnitOfWork finishes.
+ */
+class CollectionWriter final {
+public:
+ // Gets the collection from the catalog for the provided uuid
+ CollectionWriter(OperationContext* opCtx,
+ const CollectionUUID& uuid,
+ CollectionCatalog::LifetimeMode mode =
+ CollectionCatalog::LifetimeMode::kManagedInWriteUnitOfWork);
+ // Gets the collection from the catalog for the provided namespace string
+ CollectionWriter(OperationContext* opCtx,
+ const NamespaceString& nss,
+ CollectionCatalog::LifetimeMode mode =
+ CollectionCatalog::LifetimeMode::kManagedInWriteUnitOfWork);
+ // Acts as an adaptor for AutoGetCollection
+ CollectionWriter(AutoGetCollection& autoCollection,
+ CollectionCatalog::LifetimeMode mode =
+ CollectionCatalog::LifetimeMode::kManagedInWriteUnitOfWork);
+ // Acts as an adaptor for a writable Collection that has been retrieved elsewhere
+ CollectionWriter(Collection* writableCollection);
+
+ ~CollectionWriter();
+
+ // Not allowed to copy or move.
+ CollectionWriter(const CollectionWriter&) = delete;
+ CollectionWriter(CollectionWriter&&) = delete;
+ CollectionWriter& operator=(const CollectionWriter&) = delete;
+ CollectionWriter& operator=(CollectionWriter&&) = delete;
+
+ explicit operator bool() const {
+ return get();
+ }
+
+ const Collection* operator->() const {
+ return get();
+ }
+
+ const Collection& operator*() const {
+ return *get();
+ }
+
+ const Collection* get() const {
+ return _collection;
+ }
+
+ // Returns writable Collection, any previous Collection that has been returned may be
+ // invalidated.
+ Collection* getWritableCollection();
+
+ // Commits unmanaged Collection to the catalog
+ void commitToCatalog();
+
+private:
+ const Collection* _collection = nullptr;
+ Collection* _writableCollection = nullptr;
+ OperationContext* _opCtx = nullptr;
+ CollectionCatalog::LifetimeMode _mode;
+
+ struct SharedImpl;
+ std::shared_ptr<SharedImpl> _sharedImpl;
};
/**
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index c7b1af95ef2..04f31e0d99f 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -145,15 +145,16 @@ public:
<< toReIndexNss << "' while replication is active");
}
- AutoGetCollection collection(opCtx, toReIndexNss, MODE_X);
- if (!collection) {
- auto db = collection.getDb();
+ AutoGetCollection autoColl(opCtx, toReIndexNss, MODE_X);
+ if (!autoColl) {
+ auto db = autoColl.getDb();
if (db && ViewCatalog::get(db)->lookup(opCtx, toReIndexNss.ns()))
uasserted(ErrorCodes::CommandNotSupportedOnView, "can't re-index a view");
else
uasserted(ErrorCodes::NamespaceNotFound, "collection does not exist");
}
+ CollectionWriter collection(autoColl, CollectionCatalog::LifetimeMode::kUnmanagedClone);
IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(
collection->uuid());
@@ -216,21 +217,19 @@ public:
indexer->setIndexBuildMethod(IndexBuildMethod::kForeground);
StatusWith<std::vector<BSONObj>> swIndexesToRebuild(ErrorCodes::UnknownError,
"Uninitialized");
-
writeConflictRetry(opCtx, "dropAllIndexes", toReIndexNss.ns(), [&] {
WriteUnitOfWork wunit(opCtx);
collection.getWritableCollection()->getIndexCatalog()->dropAllIndexes(opCtx, true);
- swIndexesToRebuild = indexer->init(
- opCtx, collection.getWritableCollection(), all, MultiIndexBlock::kNoopOnInitFn);
+ swIndexesToRebuild =
+ indexer->init(opCtx, collection, all, MultiIndexBlock::kNoopOnInitFn);
uassertStatusOK(swIndexesToRebuild.getStatus());
wunit.commit();
});
// The 'indexer' can throw, so ensure build cleanup occurs.
auto abortOnExit = makeGuard([&] {
- indexer->abortIndexBuild(
- opCtx, collection.getWritableCollection(), MultiIndexBlock::kNoopOnCleanUpFn);
+ indexer->abortIndexBuild(opCtx, collection, MultiIndexBlock::kNoopOnCleanUpFn);
});
if (MONGO_unlikely(reIndexCrashAfterDrop.shouldFail())) {
@@ -240,8 +239,7 @@ public:
// The following function performs its own WriteConflict handling, so don't wrap it in a
// writeConflictRetry loop.
- uassertStatusOK(
- indexer->insertAllDocumentsInCollection(opCtx, collection.getWritableCollection()));
+ uassertStatusOK(indexer->insertAllDocumentsInCollection(opCtx, collection.get()));
uassertStatusOK(indexer->checkConstraints(opCtx));
@@ -261,6 +259,7 @@ public:
// snapshot so are unable to be used.
auto clusterTime = LogicalClock::getClusterTimeForReplicaSet(opCtx).asTimestamp();
collection.getWritableCollection()->setMinimumVisibleSnapshot(clusterTime);
+ collection.commitToCatalog();
result.append("nIndexes", static_cast<int>(swIndexesToRebuild.getValue().size()));
result.append("indexes", swIndexesToRebuild.getValue());
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index c3585082655..de86a427505 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -145,8 +145,7 @@ public:
}
// Lock the database in mode IX and lock the collection exclusively.
- AutoGetCollection autoColl(opCtx, fullNs, MODE_X);
- Collection* collection = autoColl.getWritableCollection();
+ AutoGetCollection collection(opCtx, fullNs, MODE_X);
if (!collection) {
uasserted(ErrorCodes::NamespaceNotFound,
str::stream() << "collection " << fullNs.ns() << " does not exist");
@@ -163,7 +162,7 @@ public:
// end.
auto exec = InternalPlanner::collectionScan(opCtx,
fullNs.ns(),
- collection,
+ collection.getCollection(),
PlanYieldPolicy::YieldPolicy::NO_YIELD,
InternalPlanner::BACKWARD);
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index e459b036282..8bee222b62d 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -518,9 +518,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::rebuildIndex
return status;
}
- auto& collectionCatalog = CollectionCatalog::get(opCtx->getServiceContext());
- Collection* collection =
- collectionCatalog.lookupCollectionByNamespaceForMetadataWrite(opCtx, nss);
+ CollectionWriter collection(opCtx, nss);
// Complete the index build.
return _runIndexRebuildForRecovery(opCtx, collection, buildUUID, repair);
@@ -545,16 +543,15 @@ Status IndexBuildsCoordinator::_startIndexBuildForRecovery(OperationContext* opC
indexNames.push_back(name);
}
- auto& collectionCatalog = CollectionCatalog::get(opCtx->getServiceContext());
- Collection* collection =
- collectionCatalog.lookupCollectionByNamespaceForMetadataWrite(opCtx, nss);
- auto indexCatalog = collection->getIndexCatalog();
+ CollectionWriter collection(opCtx, nss);
{
// These steps are combined into a single WUOW to ensure there are no commits without
// the indexes.
// 1) Drop all unfinished indexes.
// 2) Start, but do not complete the index build process.
WriteUnitOfWork wuow(opCtx);
+ auto indexCatalog = collection.getWritableCollection()->getIndexCatalog();
+
for (size_t i = 0; i < indexNames.size(); i++) {
bool includeUnfinished = false;
@@ -621,7 +618,7 @@ Status IndexBuildsCoordinator::_startIndexBuildForRecovery(OperationContext* opC
// We need to initialize the collection to rebuild the indexes. The collection may already
// be initialized when rebuilding indexes with rollback-via-refetch.
if (!collection->isInitialized()) {
- collection->init(opCtx);
+ collection.getWritableCollection()->init(opCtx);
}
auto dbName = nss.db().toString();
@@ -667,9 +664,8 @@ Status IndexBuildsCoordinator::_setUpResumeIndexBuild(OperationContext* opCtx,
Lock::DBLock dbLock(opCtx, dbName, MODE_IX);
Lock::CollectionLock collLock(opCtx, nssOrUuid, MODE_X);
- auto& collectionCatalog = CollectionCatalog::get(opCtx->getServiceContext());
- auto collection = collectionCatalog.lookupCollectionByUUIDForMetadataWrite(
- opCtx, resumeInfo.getCollectionUUID());
+ CollectionWriter collection(
+ opCtx, resumeInfo.getCollectionUUID(), CollectionCatalog::LifetimeMode::kInplace);
invariant(collection);
auto durableCatalog = DurableCatalog::get(opCtx);
@@ -708,7 +704,7 @@ Status IndexBuildsCoordinator::_setUpResumeIndexBuild(OperationContext* opCtx,
}
if (!collection->isInitialized()) {
- collection->init(opCtx);
+ collection.getWritableCollection()->init(opCtx);
}
auto protocol = IndexBuildProtocol::kTwoPhase;
@@ -1335,8 +1331,7 @@ void IndexBuildsCoordinator::_completeAbort(OperationContext* opCtx,
std::shared_ptr<ReplIndexBuildState> replState,
IndexBuildAction signalAction,
Status reason) {
- auto coll = CollectionCatalog::get(opCtx).lookupCollectionByUUIDForMetadataWrite(
- opCtx, replState->collectionUUID);
+ CollectionWriter coll(opCtx, replState->collectionUUID);
const NamespaceStringOrUUID dbAndUUID(replState->dbName, replState->collectionUUID);
auto nss = coll->ns();
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
@@ -1411,7 +1406,7 @@ void IndexBuildsCoordinator::_completeAbort(OperationContext* opCtx,
invariant(replCoord->getMemberState().rollback());
auto isResumable = !replState->lastOpTimeBeforeInterceptors.isNull();
_indexBuildsManager.abortIndexBuildWithoutCleanupForRollback(
- opCtx, coll, replState->buildUUID, isResumable);
+ opCtx, coll.get(), replState->buildUUID, isResumable);
break;
}
case IndexBuildAction::kNoAction:
@@ -1753,8 +1748,8 @@ void IndexBuildsCoordinator::createIndex(OperationContext* opCtx,
const BSONObj& spec,
IndexBuildsManager::IndexConstraints indexConstraints,
bool fromMigrate) {
- auto collection =
- CollectionCatalog::get(opCtx).lookupCollectionByUUIDForMetadataWrite(opCtx, collectionUUID);
+ CollectionWriter collection(opCtx, collectionUUID);
+
invariant(collection,
str::stream() << "IndexBuildsCoordinator::createIndexes: " << collectionUUID);
auto nss = collection->ns();
@@ -1767,7 +1762,7 @@ void IndexBuildsCoordinator::createIndex(OperationContext* opCtx,
ON_BLOCK_EXIT([&] { _indexBuildsManager.unregisterIndexBuild(buildUUID); });
try {
- auto onInitFn = MultiIndexBlock::makeTimestampedIndexOnInitFn(opCtx, collection);
+ auto onInitFn = MultiIndexBlock::makeTimestampedIndexOnInitFn(opCtx, collection.get());
IndexBuildsManager::SetupOptions options;
options.indexConstraints = indexConstraints;
uassertStatusOK(_indexBuildsManager.setUpIndexBuild(
@@ -1794,14 +1789,15 @@ void IndexBuildsCoordinator::createIndex(OperationContext* opCtx,
_indexBuildsManager.abortIndexBuild(
opCtx, collection, buildUUID, MultiIndexBlock::kNoopOnCleanUpFn);
});
- uassertStatusOK(_indexBuildsManager.startBuildingIndex(opCtx, collection, buildUUID));
+ uassertStatusOK(_indexBuildsManager.startBuildingIndex(opCtx, collection.get(), buildUUID));
// Retry indexing records that failed key generation, but only if we are primary. Secondaries
// rely on the primary's decision to commit as assurance that it has checked all key generation
// errors on its behalf.
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
if (replCoord->canAcceptWritesFor(opCtx, nss)) {
- uassertStatusOK(_indexBuildsManager.retrySkippedRecords(opCtx, buildUUID, collection));
+ uassertStatusOK(
+ _indexBuildsManager.retrySkippedRecords(opCtx, buildUUID, collection.get()));
}
uassertStatusOK(_indexBuildsManager.checkIndexConstraintViolations(opCtx, buildUUID));
@@ -1819,8 +1815,7 @@ void IndexBuildsCoordinator::createIndexesOnEmptyCollection(OperationContext* op
UUID collectionUUID,
const std::vector<BSONObj>& specs,
bool fromMigrate) {
- auto collection =
- CollectionCatalog::get(opCtx).lookupCollectionByUUIDForMetadataWrite(opCtx, collectionUUID);
+ CollectionWriter collection(opCtx, collectionUUID);
invariant(collection, str::stream() << collectionUUID);
invariant(collection->isEmpty(opCtx), str::stream() << collectionUUID);
@@ -1832,7 +1827,7 @@ void IndexBuildsCoordinator::createIndexesOnEmptyCollection(OperationContext* op
auto opObserver = opCtx->getServiceContext()->getOpObserver();
- auto indexCatalog = collection->getIndexCatalog();
+ auto indexCatalog = collection.getWritableCollection()->getIndexCatalog();
// Always run a single-phase index build for an empty collection; it will be coordinated using
// the createIndexes oplog entry.
for (const auto& spec : specs) {
@@ -2075,7 +2070,8 @@ IndexBuildsCoordinator::PostSetupAction IndexBuildsCoordinator::_setUpIndexBuild
const IndexBuildOptions& indexBuildOptions) {
const NamespaceStringOrUUID nssOrUuid{replState->dbName, replState->collectionUUID};
- AutoGetCollection collection(opCtx, nssOrUuid, MODE_X);
+ AutoGetCollection coll(opCtx, nssOrUuid, MODE_X);
+ CollectionWriter collection(coll);
CollectionShardingState::get(opCtx, collection->ns())->checkShardVersionOrThrow(opCtx);
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
@@ -2130,7 +2126,7 @@ IndexBuildsCoordinator::PostSetupAction IndexBuildsCoordinator::_setUpIndexBuild
return Status::OK();
};
} else {
- onInitFn = MultiIndexBlock::makeTimestampedIndexOnInitFn(opCtx, collection.getCollection());
+ onInitFn = MultiIndexBlock::makeTimestampedIndexOnInitFn(opCtx, collection.get());
}
IndexBuildsManager::SetupOptions options;
@@ -2145,12 +2141,8 @@ IndexBuildsCoordinator::PostSetupAction IndexBuildsCoordinator::_setUpIndexBuild
if (!replSetAndNotPrimary) {
// On standalones and primaries, call setUpIndexBuild(), which makes the initial catalog
// write. On primaries, this replicates the startIndexBuild oplog entry.
- uassertStatusOK(_indexBuildsManager.setUpIndexBuild(opCtx,
- collection.getWritableCollection(),
- replState->indexSpecs,
- replState->buildUUID,
- onInitFn,
- options));
+ uassertStatusOK(_indexBuildsManager.setUpIndexBuild(
+ opCtx, collection, replState->indexSpecs, replState->buildUUID, onInitFn, options));
} else {
// If we are starting the index build as a secondary, we must suppress calls to write
// our initial oplog entry in setUpIndexBuild().
@@ -2164,18 +2156,12 @@ IndexBuildsCoordinator::PostSetupAction IndexBuildsCoordinator::_setUpIndexBuild
tsBlock.emplace(opCtx, startTimestamp);
}
- uassertStatusOK(_indexBuildsManager.setUpIndexBuild(opCtx,
- collection.getWritableCollection(),
- replState->indexSpecs,
- replState->buildUUID,
- onInitFn,
- options));
+ uassertStatusOK(_indexBuildsManager.setUpIndexBuild(
+ opCtx, collection, replState->indexSpecs, replState->buildUUID, onInitFn, options));
}
} catch (DBException& ex) {
- _indexBuildsManager.abortIndexBuild(opCtx,
- collection.getWritableCollection(),
- replState->buildUUID,
- MultiIndexBlock::kNoopOnCleanUpFn);
+ _indexBuildsManager.abortIndexBuild(
+ opCtx, collection, replState->buildUUID, MultiIndexBlock::kNoopOnCleanUpFn);
const auto& status = ex.toStatus();
if (status == ErrorCodes::IndexAlreadyExists ||
@@ -2781,8 +2767,7 @@ IndexBuildsCoordinator::CommitResult IndexBuildsCoordinator::_insertKeysFromSide
}
// The collection object should always exist while an index build is registered.
- auto collection = CollectionCatalog::get(opCtx).lookupCollectionByUUIDForMetadataWrite(
- opCtx, replState->collectionUUID);
+ CollectionWriter collection(opCtx, replState->collectionUUID);
invariant(collection,
str::stream() << "Collection not found after relocking. Index build: "
<< replState->buildUUID
@@ -2819,8 +2804,8 @@ IndexBuildsCoordinator::CommitResult IndexBuildsCoordinator::_insertKeysFromSide
// Secondaries rely on the primary's decision to commit as assurance that it has checked all
// key generation errors on its behalf.
if (isMaster) {
- uassertStatusOK(
- _indexBuildsManager.retrySkippedRecords(opCtx, replState->buildUUID, collection));
+ uassertStatusOK(_indexBuildsManager.retrySkippedRecords(
+ opCtx, replState->buildUUID, collection.get()));
}
// Duplicate key constraint checking phase. Duplicate key errors are tracked for
@@ -2837,7 +2822,7 @@ IndexBuildsCoordinator::CommitResult IndexBuildsCoordinator::_insertKeysFromSide
}
} catch (const ExceptionForCat<ErrorCategory::ShutdownError>& e) {
logFailure(e.toStatus(), collection->ns(), replState);
- _completeAbortForShutdown(opCtx, replState, collection);
+ _completeAbortForShutdown(opCtx, replState, collection.get());
throw;
} catch (const DBException& e) {
auto status = e.toStatus();
@@ -2890,7 +2875,7 @@ IndexBuildsCoordinator::CommitResult IndexBuildsCoordinator::_insertKeysFromSide
uassertStatusOK(_indexBuildsManager.commitIndexBuild(
opCtx, collection, collection->ns(), replState->buildUUID, onCreateEachFn, onCommitFn));
removeIndexBuildEntryAfterCommitOrAbort(opCtx, dbAndUUID, *replState);
- replState->stats.numIndexesAfter = getNumIndexesTotal(opCtx, collection);
+ replState->stats.numIndexesAfter = getNumIndexesTotal(opCtx, collection.get());
LOGV2(20663,
"Index build: completed successfully",
"buildUUID"_attr = replState->buildUUID,
@@ -2904,7 +2889,7 @@ IndexBuildsCoordinator::CommitResult IndexBuildsCoordinator::_insertKeysFromSide
StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::_runIndexRebuildForRecovery(
OperationContext* opCtx,
- Collection* collection,
+ CollectionWriter& collection,
const UUID& buildUUID,
RepairData repair) noexcept {
invariant(opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_X));
@@ -2922,7 +2907,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::_runIndexReb
long long dataSize = 0;
ReplIndexBuildState::IndexCatalogStats indexCatalogStats;
- indexCatalogStats.numIndexesBefore = getNumIndexesTotal(opCtx, collection);
+ indexCatalogStats.numIndexesBefore = getNumIndexesTotal(opCtx, collection.get());
try {
LOGV2(20673,
@@ -2933,7 +2918,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::_runIndexReb
std::tie(numRecords, dataSize) =
uassertStatusOK(_indexBuildsManager.startBuildingIndexForRecovery(
- opCtx, collection, buildUUID, repair));
+ opCtx, collection.get(), buildUUID, repair));
// Since we are holding an exclusive collection lock to stop new writes, do not yield locks
// while draining.
@@ -2954,7 +2939,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::_runIndexReb
MultiIndexBlock::kNoopOnCreateEachFn,
MultiIndexBlock::kNoopOnCommitFn));
- indexCatalogStats.numIndexesAfter = getNumIndexesTotal(opCtx, collection);
+ indexCatalogStats.numIndexesAfter = getNumIndexesTotal(opCtx, collection.get());
LOGV2(20674,
"Index builds manager completed successfully: {buildUUID}: {namespace}. Index specs "
diff --git a/src/mongo/db/index_builds_coordinator.h b/src/mongo/db/index_builds_coordinator.h
index 12e80653829..a6bd7847534 100644
--- a/src/mongo/db/index_builds_coordinator.h
+++ b/src/mongo/db/index_builds_coordinator.h
@@ -772,7 +772,7 @@ protected:
*/
StatusWith<std::pair<long long, long long>> _runIndexRebuildForRecovery(
OperationContext* opCtx,
- Collection* collection,
+ CollectionWriter& collection,
const UUID& buildUUID,
RepairData repair) noexcept;
diff --git a/src/mongo/db/repair.cpp b/src/mongo/db/repair.cpp
index 0c06a70d3f9..1ef573f0b45 100644
--- a/src/mongo/db/repair.cpp
+++ b/src/mongo/db/repair.cpp
@@ -87,7 +87,7 @@ Status rebuildIndexesForNamespace(OperationContext* opCtx,
}
namespace {
-Status dropUnfinishedIndexes(OperationContext* opCtx, Collection* collection) {
+Status dropUnfinishedIndexes(OperationContext* opCtx, const Collection* collection) {
std::vector<std::string> indexNames;
auto durableCatalog = DurableCatalog::get(opCtx);
durableCatalog->getAllIndexes(opCtx, collection->getCatalogId(), &indexNames);
@@ -173,7 +173,8 @@ Status repairDatabase(OperationContext* opCtx, StorageEngine* engine, const std:
auto clusterTime = LogicalClock::getClusterTimeForReplicaSet(opCtx).asTimestamp();
for (auto collIt = db->begin(opCtx); collIt != db->end(opCtx); ++collIt) {
- auto collection = *collIt;
+ auto collection =
+ collIt.getWritableCollection(opCtx, CollectionCatalog::LifetimeMode::kInplace);
if (collection) {
collection->setMinimumVisibleSnapshot(clusterTime);
}
@@ -201,14 +202,17 @@ Status repairCollection(OperationContext* opCtx,
LOGV2(21027, "Repairing collection", "namespace"_attr = nss);
- auto collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespaceForMetadataWrite(opCtx, nss);
- Status status = engine->repairRecordStore(opCtx, collection->getCatalogId(), nss);
+ Status status = Status::OK();
+ {
+ auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
+ status = engine->repairRecordStore(opCtx, collection->getCatalogId(), nss);
+ }
+
// Need to look up from the catalog again because the old collection object was invalidated by
// repairRecordStore.
- collection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespaceForMetadataWrite(opCtx, nss);
+ auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespaceForMetadataWrite(
+ opCtx, CollectionCatalog::LifetimeMode::kInplace, nss);
// If data was modified during repairRecordStore, we know to rebuild indexes without needing
// to run an expensive collection validation.
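
The repair.cpp hunk splits the old single lookup in two: a read-only catalog lookup feeds repairRecordStore(), and because that call invalidates the Collection object, a second lookup then fetches a writable instance with LifetimeMode::kInplace, with no WriteUnitOfWork around it. A sketch of the two-step pattern, with the function wrapper and the elided data-modified check assumed for illustration:

    // Illustrative sketch, not part of the patch.
    Status exampleRepairCollection(OperationContext* opCtx,
                                   StorageEngine* engine,
                                   const NamespaceString& nss) {
        Status status = Status::OK();
        {
            // A read-only lookup is enough to reach the catalog id for the record store repair.
            auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, nss);
            status = engine->repairRecordStore(opCtx, collection->getCatalogId(), nss);
        }
        if (!status.isOK()) {
            return status;  // the real code also distinguishes a data-modified-by-repair outcome
        }

        // repairRecordStore invalidated the old Collection object, so fetch a fresh
        // writable instance in place (no WriteUnitOfWork in this startup path).
        auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespaceForMetadataWrite(
            opCtx, CollectionCatalog::LifetimeMode::kInplace, nss);
        // ... rebuild indexes on 'collection' ...
        return Status::OK();
    }
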
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
index 23fce736413..13dd6dae785 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
@@ -59,9 +59,8 @@ CollectionBulkLoaderImpl::CollectionBulkLoaderImpl(ServiceContext::UniqueClient&
const BSONObj& idIndexSpec)
: _client{std::move(client)},
_opCtx{std::move(opCtx)},
- _autoColl{std::move(autoColl)},
- _collection{_autoColl->getWritableCollection()},
- _nss{_autoColl->getCollection()->ns()},
+ _collection{std::move(autoColl)},
+ _nss{_collection->getCollection()->ns()},
_idIndexBlock(std::make_unique<MultiIndexBlock>()),
_secondaryIndexesBlock(std::make_unique<MultiIndexBlock>()),
_idIndexSpec(idIndexSpec.getOwned()) {
@@ -75,20 +74,20 @@ CollectionBulkLoaderImpl::~CollectionBulkLoaderImpl() {
}
Status CollectionBulkLoaderImpl::init(const std::vector<BSONObj>& secondaryIndexSpecs) {
- return _runTaskReleaseResourcesOnFailure([coll = _autoColl->getCollection(),
- &secondaryIndexSpecs,
- this]() -> Status {
+ return _runTaskReleaseResourcesOnFailure([&secondaryIndexSpecs, this]() -> Status {
+ WriteUnitOfWork wuow(_opCtx.get());
// All writes in CollectionBulkLoaderImpl should be unreplicated.
// The opCtx is accessed indirectly through _secondaryIndexesBlock.
UnreplicatedWritesBlock uwb(_opCtx.get());
// This enforces the buildIndexes setting in the replica set configuration.
- auto indexCatalog = coll->getIndexCatalog();
+ CollectionWriter collWriter(*_collection);
+ auto indexCatalog = collWriter.getWritableCollection()->getIndexCatalog();
auto specs = indexCatalog->removeExistingIndexesNoChecks(_opCtx.get(), secondaryIndexSpecs);
if (specs.size()) {
_secondaryIndexesBlock->ignoreUniqueConstraint();
auto status =
_secondaryIndexesBlock
- ->init(_opCtx.get(), _collection, specs, MultiIndexBlock::kNoopOnInitFn)
+ ->init(_opCtx.get(), collWriter, specs, MultiIndexBlock::kNoopOnInitFn)
.getStatus();
if (!status.isOK()) {
return status;
@@ -99,7 +98,7 @@ Status CollectionBulkLoaderImpl::init(const std::vector<BSONObj>& secondaryIndex
if (!_idIndexSpec.isEmpty()) {
auto status =
_idIndexBlock
- ->init(_opCtx.get(), _collection, _idIndexSpec, MultiIndexBlock::kNoopOnInitFn)
+ ->init(_opCtx.get(), collWriter, _idIndexSpec, MultiIndexBlock::kNoopOnInitFn)
.getStatus();
if (!status.isOK()) {
return status;
@@ -108,6 +107,7 @@ Status CollectionBulkLoaderImpl::init(const std::vector<BSONObj>& secondaryIndex
_idIndexBlock.reset();
}
+ wuow.commit();
return Status::OK();
});
}
@@ -134,8 +134,9 @@ Status CollectionBulkLoaderImpl::_insertDocumentsForUncappedCollection(
const auto& doc = *insertIter++;
bytesInBlock += doc.objsize();
// This version of insert will not update any indexes.
- const auto status = _autoColl->getCollection()->insertDocumentForBulkLoader(
- _opCtx.get(), doc, onRecordInserted);
+ const auto status =
+ (*_collection)
+ ->insertDocumentForBulkLoader(_opCtx.get(), doc, onRecordInserted);
if (!status.isOK()) {
return status;
}
@@ -181,8 +182,8 @@ Status CollectionBulkLoaderImpl::_insertDocumentsForCappedCollection(
WriteUnitOfWork wunit(_opCtx.get());
// For capped collections, we use regular insertDocument, which
// will update pre-existing indexes.
- const auto status = _autoColl->getCollection()->insertDocument(
- _opCtx.get(), InsertStatement(doc), nullptr);
+ const auto status =
+ (*_collection)->insertDocument(_opCtx.get(), InsertStatement(doc), nullptr);
if (!status.isOK()) {
return status;
}
@@ -235,7 +236,7 @@ Status CollectionBulkLoaderImpl::commit() {
WriteUnitOfWork wunit(_opCtx.get());
auto status =
_secondaryIndexesBlock->commit(_opCtx.get(),
- _collection,
+ _collection->getWritableCollection(),
MultiIndexBlock::kNoopOnCreateEachFn,
MultiIndexBlock::kNoopOnCommitFn);
if (!status.isOK()) {
@@ -262,12 +263,13 @@ Status CollectionBulkLoaderImpl::commit() {
// before committing the index build, the index removal code uses
// 'dupsAllowed', which forces the storage engine to only unindex
// records that match the same key and RecordId.
- _autoColl->getCollection()->deleteDocument(_opCtx.get(),
- kUninitializedStmtId,
- rid,
- nullptr /** OpDebug **/,
- false /* fromMigrate */,
- true /* noWarn */);
+ (*_collection)
+ ->deleteDocument(_opCtx.get(),
+ kUninitializedStmtId,
+ rid,
+ nullptr /** OpDebug **/,
+ false /* fromMigrate */,
+ true /* noWarn */);
wunit.commit();
return Status::OK();
});
@@ -296,7 +298,7 @@ Status CollectionBulkLoaderImpl::commit() {
_opCtx.get(), "CollectionBulkLoaderImpl::commit", _nss.ns(), [this] {
WriteUnitOfWork wunit(_opCtx.get());
auto status = _idIndexBlock->commit(_opCtx.get(),
- _collection,
+ _collection->getWritableCollection(),
MultiIndexBlock::kNoopOnCreateEachFn,
MultiIndexBlock::kNoopOnCommitFn);
if (!status.isOK()) {
@@ -322,7 +324,7 @@ Status CollectionBulkLoaderImpl::commit() {
// _releaseResources.
_idIndexBlock.reset();
_secondaryIndexesBlock.reset();
- _autoColl.reset();
+ _collection.reset();
return Status::OK();
});
}
@@ -330,19 +332,20 @@ Status CollectionBulkLoaderImpl::commit() {
void CollectionBulkLoaderImpl::_releaseResources() {
invariant(&cc() == _opCtx->getClient());
if (_secondaryIndexesBlock) {
+ CollectionWriter collWriter(*_collection);
_secondaryIndexesBlock->abortIndexBuild(
- _opCtx.get(), _collection, MultiIndexBlock::kNoopOnCleanUpFn);
+ _opCtx.get(), collWriter, MultiIndexBlock::kNoopOnCleanUpFn);
_secondaryIndexesBlock.reset();
}
if (_idIndexBlock) {
- _idIndexBlock->abortIndexBuild(
- _opCtx.get(), _collection, MultiIndexBlock::kNoopOnCleanUpFn);
+ CollectionWriter collWriter(*_collection);
+ _idIndexBlock->abortIndexBuild(_opCtx.get(), collWriter, MultiIndexBlock::kNoopOnCleanUpFn);
_idIndexBlock.reset();
}
// release locks.
- _autoColl.reset();
+ _collection.reset();
}
template <typename F>
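
The bulk loader now holds only the AutoGetCollection and wraps it in a short-lived CollectionWriter wherever a writable Collection is needed, with the MultiIndexBlock::init() call and its catalog writes grouped under one WriteUnitOfWork that is committed at the end. A trimmed sketch of that init path follows; the function wrapper and parameters are assumed, and the UnreplicatedWritesBlock and secondary-index handling from the real code are elided.

    // Illustrative sketch, not part of the patch.
    Status exampleInitIdIndexBlock(OperationContext* opCtx,
                                   AutoGetCollection& autoColl,
                                   MultiIndexBlock& idIndexBlock,
                                   const BSONObj& idIndexSpec) {
        WriteUnitOfWork wuow(opCtx);

        // Wrap the already-acquired collection; the writable instance is scoped to this WUOW.
        CollectionWriter collWriter(autoColl);
        auto swSpecs =
            idIndexBlock.init(opCtx, collWriter, idIndexSpec, MultiIndexBlock::kNoopOnInitFn);
        if (!swSpecs.isOK()) {
            return swSpecs.getStatus();
        }

        wuow.commit();
        return Status::OK();
    }
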
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.h b/src/mongo/db/repl/collection_bulk_loader_impl.h
index afb6df03bc2..fab8ed9c922 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.h
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.h
@@ -104,8 +104,7 @@ private:
ServiceContext::UniqueClient _client;
ServiceContext::UniqueOperationContext _opCtx;
- std::unique_ptr<AutoGetCollection> _autoColl;
- Collection* _collection;
+ std::unique_ptr<AutoGetCollection> _collection;
NamespaceString _nss;
std::unique_ptr<MultiIndexBlock> _idIndexBlock;
std::unique_ptr<MultiIndexBlock> _secondaryIndexesBlock;
diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp
index c0c242421f9..ecaf6fb104e 100644
--- a/src/mongo/db/repl/replication_recovery.cpp
+++ b/src/mongo/db/repl/replication_recovery.cpp
@@ -650,8 +650,8 @@ void ReplicationRecoveryImpl::_truncateOplogTo(OperationContext* opCtx,
const NamespaceString oplogNss(NamespaceString::kRsOplogNamespace);
AutoGetDb autoDb(opCtx, oplogNss.db(), MODE_IX);
Lock::CollectionLock oplogCollectionLoc(opCtx, oplogNss, MODE_X);
- Collection* oplogCollection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespaceForMetadataWrite(opCtx, oplogNss);
+ auto oplogCollection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, oplogNss);
if (!oplogCollection) {
fassertFailedWithStatusNoTrace(
34418,
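
For contrast, the replication_recovery.cpp hunk shows the read-only side of the split: a caller that does not need a writable Collection switches to the plain lookupCollectionByNamespace() and never constructs a CollectionWriter. A minimal sketch, with the wrapper function assumed:

    // Illustrative sketch, not part of the patch.
    void exampleReadOnlyOplogAccess(OperationContext* opCtx) {
        auto oplogCollection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(
            opCtx, NamespaceString::kRsOplogNamespace);
        if (!oplogCollection) {
            // ... fassert, as _truncateOplogTo does ...
            return;
        }
        // ... read-only use of oplogCollection, e.g. finding the truncate point ...
    }
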
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 6c3480157e9..fa6b07c6696 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -870,7 +870,7 @@ void dropIndex(OperationContext* opCtx,
"namespace"_attr = nss.toString());
return;
}
- WriteUnitOfWork wunit(opCtx);
+
auto entry = indexCatalog->getEntry(indexDescriptor);
if (entry->isReady(opCtx)) {
auto status = indexCatalog->dropIndex(opCtx, indexDescriptor);
@@ -894,7 +894,6 @@ void dropIndex(OperationContext* opCtx,
"error"_attr = redact(status));
}
}
- wunit.commit();
}
/**
@@ -907,8 +906,7 @@ void rollbackCreateIndexes(OperationContext* opCtx, UUID uuid, std::set<std::str
CollectionCatalog::get(opCtx).lookupNSSByUUID(opCtx, uuid);
invariant(nss);
Lock::DBLock dbLock(opCtx, nss->db(), MODE_X);
- Collection* collection =
- CollectionCatalog::get(opCtx).lookupCollectionByUUIDForMetadataWrite(opCtx, uuid);
+ CollectionWriter collection(opCtx, uuid);
// If we cannot find the collection, we skip over dropping the index.
if (!collection) {
@@ -946,7 +944,9 @@ void rollbackCreateIndexes(OperationContext* opCtx, UUID uuid, std::set<std::str
"uuid"_attr = uuid,
"indexName"_attr = indexName);
- dropIndex(opCtx, indexCatalog, indexName, *nss);
+ WriteUnitOfWork wuow(opCtx);
+ dropIndex(opCtx, collection.getWritableCollection()->getIndexCatalog(), indexName, *nss);
+ wuow.commit();
LOGV2_DEBUG(21673,
1,
@@ -1574,8 +1574,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
auto db = databaseHolder->openDb(opCtx, nss->db().toString());
invariant(db);
- Collection* collection =
- CollectionCatalog::get(opCtx).lookupCollectionByUUIDForMetadataWrite(opCtx, uuid);
+ CollectionWriter collection(opCtx, uuid);
invariant(collection);
auto infoResult = rollbackSource.getCollectionInfoByUUID(nss->db().toString(), uuid);
@@ -1627,7 +1626,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
// Set any document validation options. We update the validator fields without
// parsing/validation, since we fetched the options object directly from the sync
// source, and we should set our validation options to match it exactly.
- auto validatorStatus = collection->updateValidator(
+ auto validatorStatus = collection.getWritableCollection()->updateValidator(
opCtx, options.validator, options.validationLevel, options.validationAction);
if (!validatorStatus.isOK()) {
throw RSFatalException(str::stream()
@@ -1729,8 +1728,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
const NamespaceString docNss(doc.ns);
Lock::DBLock docDbLock(opCtx, docNss.db(), MODE_X);
OldClientContext ctx(opCtx, doc.ns.toString());
- Collection* collection =
- catalog.lookupCollectionByUUIDForMetadataWrite(opCtx, uuid);
+ CollectionWriter collection(opCtx, uuid);
// Adds the doc to our rollback file if the collection was not dropped while
// rolling back createCollection operations. Does not log an error when
@@ -1740,7 +1738,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
if (collection && removeSaver) {
BSONObj obj;
- bool found = Helpers::findOne(opCtx, collection, pattern, obj, false);
+ bool found = Helpers::findOne(opCtx, collection.get(), pattern, obj, false);
if (found) {
auto status = removeSaver->goingToDelete(obj);
if (!status.isOK()) {
@@ -1791,7 +1789,8 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
const auto clock = opCtx->getServiceContext()->getFastClockSource();
const auto findOneStart = clock->now();
- RecordId loc = Helpers::findOne(opCtx, collection, pattern, false);
+ RecordId loc =
+ Helpers::findOne(opCtx, collection.get(), pattern, false);
if (clock->now() - findOneStart > Milliseconds(200))
LOGV2_WARNING(
21726,
@@ -1807,8 +1806,9 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
collection->ns().ns(),
[&] {
WriteUnitOfWork wunit(opCtx);
- collection->cappedTruncateAfter(
- opCtx, loc, true);
+ collection.getWritableCollection()
+ ->cappedTruncateAfter(
+ opCtx, loc, true);
wunit.commit();
});
} catch (const DBException& e) {
@@ -1817,7 +1817,9 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
writeConflictRetry(
opCtx, "truncate", collection->ns().ns(), [&] {
WriteUnitOfWork wunit(opCtx);
- uassertStatusOK(collection->truncate(opCtx));
+ uassertStatusOK(
+ collection.getWritableCollection()
+ ->truncate(opCtx));
wunit.commit();
});
} else {
@@ -1843,7 +1845,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
}
} else {
deleteObjects(opCtx,
- collection,
+ collection.get(),
*nss,
pattern,
true, // justOne
@@ -1947,9 +1949,8 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
Lock::DBLock oplogDbLock(opCtx, oplogNss.db(), MODE_IX);
Lock::CollectionLock oplogCollectionLoc(opCtx, oplogNss, MODE_X);
OldClientContext ctx(opCtx, oplogNss.ns());
- Collection* oplogCollection =
- CollectionCatalog::get(opCtx).lookupCollectionByNamespaceForMetadataWrite(opCtx,
- oplogNss);
+ auto oplogCollection =
+ CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, oplogNss);
if (!oplogCollection) {
fassertFailedWithStatusNoTrace(
40495,
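
In rs_rollback.cpp the WriteUnitOfWork moves out of dropIndex() and into the caller, wrapped around the getWritableCollection() access, so the index drop commits as one unit at the call site. A sketch of the caller side, reusing this file's dropIndex() helper; the function name and parameters are assumed for illustration:

    // Illustrative sketch, not part of the patch. Assumes the MODE_X database lock is held.
    void exampleRollbackOneIndex(OperationContext* opCtx,
                                 const UUID& uuid,
                                 const std::string& indexName) {
        CollectionWriter collection(opCtx, uuid);  // lookup by UUID
        if (!collection) {
            return;  // collection already gone; nothing to drop
        }
        const NamespaceString nss = collection->ns();

        WriteUnitOfWork wuow(opCtx);
        dropIndex(opCtx, collection.getWritableCollection()->getIndexCatalog(), indexName, nss);
        wuow.commit();
    }
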
diff --git a/src/mongo/db/startup_recovery.cpp b/src/mongo/db/startup_recovery.cpp
index 6beb7f8fa56..5817d07bd2c 100644
--- a/src/mongo/db/startup_recovery.cpp
+++ b/src/mongo/db/startup_recovery.cpp
@@ -164,13 +164,16 @@ bool checkIdIndexExists(OperationContext* opCtx, RecordId catalogId) {
Status buildMissingIdIndex(OperationContext* opCtx, Collection* collection) {
LOGV2(4805002, "Building missing _id index", logAttrs(*collection));
MultiIndexBlock indexer;
- auto abortOnExit = makeGuard(
- [&] { indexer.abortIndexBuild(opCtx, collection, MultiIndexBlock::kNoopOnCleanUpFn); });
+ auto abortOnExit = makeGuard([&] {
+ CollectionWriter collWriter(collection);
+ indexer.abortIndexBuild(opCtx, collWriter, MultiIndexBlock::kNoopOnCleanUpFn);
+ });
const auto indexCatalog = collection->getIndexCatalog();
const auto idIndexSpec = indexCatalog->getDefaultIdIndexSpec();
- auto swSpecs = indexer.init(opCtx, collection, idIndexSpec, MultiIndexBlock::kNoopOnInitFn);
+ CollectionWriter collWriter(collection);
+ auto swSpecs = indexer.init(opCtx, collWriter, idIndexSpec, MultiIndexBlock::kNoopOnInitFn);
if (!swSpecs.isOK()) {
return swSpecs.getStatus();
}
@@ -213,7 +216,7 @@ Status ensureCollectionProperties(OperationContext* opCtx,
Database* db,
EnsureIndexPolicy ensureIndexPolicy) {
for (auto collIt = db->begin(opCtx); collIt != db->end(opCtx); ++collIt) {
- auto coll = *collIt;
+ auto coll = collIt.getWritableCollection(opCtx, CollectionCatalog::LifetimeMode::kInplace);
if (!coll) {
break;
}
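
Finally, the startup_recovery.cpp loop (like the one in repair.cpp above) gets its writable Collection straight from the database iterator with LifetimeMode::kInplace, since these startup passes open no WriteUnitOfWork around the loop. A sketch of that iteration pattern, with the wrapper function assumed:

    // Illustrative sketch, not part of the patch.
    void exampleStartupPassOverCollections(OperationContext* opCtx, Database* db) {
        for (auto collIt = db->begin(opCtx); collIt != db->end(opCtx); ++collIt) {
            auto coll =
                collIt.getWritableCollection(opCtx, CollectionCatalog::LifetimeMode::kInplace);
            if (!coll) {
                break;
            }
            // ... per-collection startup checks and fixups directly on 'coll' ...
        }
    }
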