author    Henrik Edin <henrik.edin@mongodb.com>  2020-09-14 09:16:11 -0400
committer Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-09-15 18:42:42 +0000
commit    a13aa67e179d3400bb8a61a8a995f601d97336be (patch)
tree      faf9babb1384c0584201f4cf6db865ffe5998798 /src
parent    46a7656f3c4805dc6ffff32573886b00a4e63888 (diff)
download  mongo-a13aa67e179d3400bb8a61a8a995f601d97336be.tar.gz
SERVER-50928 Remove IndexCatalog* stored in IndexBuildBlock
The cached pointer can go stale when we perform copy-on-write on the Collection. Pass in the current Collection where it is needed.
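
The shape of the change, as a minimal caller-side sketch (buildPhase is a hypothetical function invented here for illustration; OperationContext, Collection, IndexCatalogEntry and IndexBuildBlock::getEntry are the ones appearing in the diff below): instead of an IndexCatalog*/IndexCatalogEntry* captured at construction time, each IndexBuildBlock method now receives the current Collection and re-resolves the catalog entry through it, so a copy-on-write clone of the Collection can no longer leave the block holding a stale pointer.

    #include "mongo/db/catalog/collection.h"
    #include "mongo/db/catalog/index_build_block.h"
    #include "mongo/util/assert_util.h"

    namespace mongo {
    // Hypothetical build phase: the caller hands in whatever Collection instance is
    // current at this point in the build, and the entry is looked up fresh from it
    // via IndexBuildBlock::getEntry() rather than read from a cached member.
    void buildPhase(OperationContext* opCtx, Collection* collection, IndexBuildBlock& block) {
        IndexCatalogEntry* entry = block.getEntry(opCtx, collection);  // re-resolved, never cached
        invariant(entry);
        // ... use entry->descriptor(), entry->indexBuildInterceptor(), etc. as the phase requires.
    }
    }  // namespace mongo
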
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/catalog/database_test.cpp | 2
-rw-r--r--  src/mongo/db/catalog/index_build_block.cpp | 68
-rw-r--r--  src/mongo/db/catalog/index_build_block.h | 11
-rw-r--r--  src/mongo/db/catalog/index_builds_manager.cpp | 15
-rw-r--r--  src/mongo/db/catalog/index_builds_manager.h | 8
-rw-r--r--  src/mongo/db/catalog/index_catalog_entry.h | 4
-rw-r--r--  src/mongo/db/catalog/index_catalog_entry_impl.h | 6
-rw-r--r--  src/mongo/db/catalog/index_catalog_impl.cpp | 4
-rw-r--r--  src/mongo/db/catalog/multi_index_block.cpp | 76
-rw-r--r--  src/mongo/db/catalog/multi_index_block.h | 15
-rw-r--r--  src/mongo/db/catalog/multi_index_block_test.cpp | 10
-rw-r--r--  src/mongo/db/commands/drop_indexes.cpp | 2
-rw-r--r--  src/mongo/db/index/index_build_interceptor.cpp | 2
-rw-r--r--  src/mongo/db/index/index_build_interceptor.h | 2
-rw-r--r--  src/mongo/db/index_builds_coordinator.cpp | 36
-rw-r--r--  src/mongo/db/index_builds_coordinator.h | 2
-rw-r--r--  src/mongo/db/repl/collection_bulk_loader_impl.cpp | 12
-rw-r--r--  src/mongo/db/startup_recovery.cpp | 2
-rw-r--r--  src/mongo/dbtests/dbtests.cpp | 2
-rw-r--r--  src/mongo/dbtests/indexupdatetests.cpp | 6
-rw-r--r--  src/mongo/dbtests/querytests.cpp | 2
-rw-r--r--  src/mongo/dbtests/storage_timestamp_tests.cpp | 8
-rw-r--r--  src/mongo/dbtests/wildcard_multikey_persistence_test.cpp | 2
23 files changed, 158 insertions, 139 deletions
diff --git a/src/mongo/db/catalog/database_test.cpp b/src/mongo/db/catalog/database_test.cpp
index 0d871c358bb..45a688b5caa 100644
--- a/src/mongo/db/catalog/database_test.cpp
+++ b/src/mongo/db/catalog/database_test.cpp
@@ -309,7 +309,7 @@ void _testDropCollectionThrowsExceptionIfThereAreIndexesInProgress(OperationCont
<< "a_1");
auto indexBuildBlock = std::make_unique<IndexBuildBlock>(
- indexCatalog, collection->ns(), indexInfoObj, IndexBuildMethod::kHybrid, UUID::gen());
+ collection->ns(), indexInfoObj, IndexBuildMethod::kHybrid, UUID::gen());
{
WriteUnitOfWork wuow(opCtx);
ASSERT_OK(indexBuildBlock->init(opCtx, collection));
diff --git a/src/mongo/db/catalog/index_build_block.cpp b/src/mongo/db/catalog/index_build_block.cpp
index 381d8d09ecb..20ea86bd9d2 100644
--- a/src/mongo/db/catalog/index_build_block.cpp
+++ b/src/mongo/db/catalog/index_build_block.cpp
@@ -52,17 +52,11 @@ namespace mongo {
class IndexCatalog;
-IndexBuildBlock::IndexBuildBlock(IndexCatalog* indexCatalog,
- const NamespaceString& nss,
+IndexBuildBlock::IndexBuildBlock(const NamespaceString& nss,
const BSONObj& spec,
IndexBuildMethod method,
boost::optional<UUID> indexBuildUUID)
- : _indexCatalog(indexCatalog),
- _nss(nss),
- _spec(spec.getOwned()),
- _method(method),
- _buildUUID(indexBuildUUID),
- _indexCatalogEntry(nullptr) {}
+ : _nss(nss), _spec(spec.getOwned()), _method(method), _buildUUID(indexBuildUUID) {}
void IndexBuildBlock::finalizeTemporaryTables(OperationContext* opCtx,
TemporaryRecordStore::FinalizationAction action) {
@@ -75,8 +69,9 @@ void IndexBuildBlock::_completeInit(OperationContext* opCtx, Collection* collect
// Register this index with the CollectionQueryInfo to regenerate the cache. This way, updates
// occurring while an index is being build in the background will be aware of whether or not
// they need to modify any indexes.
+ auto indexCatalogEntry = getEntry(opCtx, collection);
CollectionQueryInfo::get(collection)
- .addedIndex(opCtx, collection, _indexCatalogEntry->descriptor());
+ .addedIndex(opCtx, collection, indexCatalogEntry->descriptor());
}
Status IndexBuildBlock::initForResume(OperationContext* opCtx,
@@ -85,14 +80,14 @@ Status IndexBuildBlock::initForResume(OperationContext* opCtx,
IndexBuildPhaseEnum phase) {
_indexName = _spec.getStringField("name");
- auto descriptor =
- _indexCatalog->findIndexByName(opCtx, _indexName, true /* includeUnfinishedIndexes */);
+ auto descriptor = collection->getIndexCatalog()->findIndexByName(
+ opCtx, _indexName, true /* includeUnfinishedIndexes */);
- _indexCatalogEntry = descriptor->getEntry();
+ auto indexCatalogEntry = descriptor->getEntry();
uassert(4945000,
"Index catalog entry not found while attempting to resume index build",
- _indexCatalogEntry);
+ indexCatalogEntry);
uassert(
4945001, "Cannot resume a non-hybrid index build", _method == IndexBuildMethod::kHybrid);
@@ -103,19 +98,19 @@ Status IndexBuildBlock::initForResume(OperationContext* opCtx,
opCtx,
collection->getCatalogId(),
descriptor,
- _indexCatalogEntry->getIdent(),
- _indexCatalogEntry->getPrefix());
+ indexCatalogEntry->getIdent(),
+ indexCatalogEntry->getPrefix());
if (!status.isOK())
return status;
}
_indexBuildInterceptor =
std::make_unique<IndexBuildInterceptor>(opCtx,
- _indexCatalogEntry,
+ indexCatalogEntry,
sorterInfo.getSideWritesTable(),
sorterInfo.getDuplicateKeyTrackerTable(),
sorterInfo.getSkippedRecordTrackerTable());
- _indexCatalogEntry->setIndexBuildInterceptor(_indexBuildInterceptor.get());
+ indexCatalogEntry->setIndexBuildInterceptor(_indexBuildInterceptor.get());
_completeInit(opCtx, collection);
@@ -150,17 +145,17 @@ Status IndexBuildBlock::init(OperationContext* opCtx, Collection* collection) {
if (!status.isOK())
return status;
- _indexCatalogEntry =
- _indexCatalog->createIndexEntry(opCtx, std::move(descriptor), CreateIndexEntryFlags::kNone);
+ auto indexCatalogEntry = collection->getIndexCatalog()->createIndexEntry(
+ opCtx, std::move(descriptor), CreateIndexEntryFlags::kNone);
if (_method == IndexBuildMethod::kHybrid) {
- _indexBuildInterceptor = std::make_unique<IndexBuildInterceptor>(opCtx, _indexCatalogEntry);
- _indexCatalogEntry->setIndexBuildInterceptor(_indexBuildInterceptor.get());
+ _indexBuildInterceptor = std::make_unique<IndexBuildInterceptor>(opCtx, indexCatalogEntry);
+ indexCatalogEntry->setIndexBuildInterceptor(_indexBuildInterceptor.get());
}
if (isBackgroundIndex) {
opCtx->recoveryUnit()->onCommit(
- [entry = _indexCatalogEntry, coll = collection](boost::optional<Timestamp> commitTime) {
+ [entry = indexCatalogEntry, coll = collection](boost::optional<Timestamp> commitTime) {
// This will prevent the unfinished index from being visible on index iterators.
if (commitTime) {
entry->setMinimumVisibleSnapshot(commitTime.get());
@@ -184,13 +179,14 @@ void IndexBuildBlock::fail(OperationContext* opCtx, Collection* collection) {
invariant(opCtx->lockState()->isCollectionLockedForMode(_nss, MODE_X));
- if (_indexCatalogEntry) {
- invariant(_indexCatalog->dropIndexEntry(opCtx, _indexCatalogEntry).isOK());
+ auto indexCatalogEntry = getEntry(opCtx, collection);
+ if (indexCatalogEntry) {
+ invariant(collection->getIndexCatalog()->dropIndexEntry(opCtx, indexCatalogEntry).isOK());
if (_indexBuildInterceptor) {
- _indexCatalogEntry->setIndexBuildInterceptor(nullptr);
+ indexCatalogEntry->setIndexBuildInterceptor(nullptr);
}
} else {
- _indexCatalog->deleteIndexFromDisk(opCtx, _indexName);
+ collection->getIndexCatalog()->deleteIndexFromDisk(opCtx, _indexName);
}
}
@@ -213,14 +209,15 @@ void IndexBuildBlock::success(OperationContext* opCtx, Collection* collection) {
invariant(_indexBuildInterceptor->areAllWritesApplied(opCtx));
}
- collection->indexBuildSuccess(opCtx, _indexCatalogEntry);
+ auto indexCatalogEntry = getEntry(opCtx, collection);
+ collection->indexBuildSuccess(opCtx, indexCatalogEntry);
auto svcCtx = opCtx->getClient()->getServiceContext();
opCtx->recoveryUnit()->onCommit(
[svcCtx,
indexName = _indexName,
spec = _spec,
- entry = _indexCatalogEntry,
+ entry = indexCatalogEntry,
coll = collection,
buildUUID = _buildUUID](boost::optional<Timestamp> commitTime) {
// Note: this runs after the WUOW commits but before we release our X lock on the
@@ -256,4 +253,19 @@ void IndexBuildBlock::success(OperationContext* opCtx, Collection* collection) {
});
}
+const IndexCatalogEntry* IndexBuildBlock::getEntry(OperationContext* opCtx,
+ const Collection* collection) const {
+ auto descriptor = collection->getIndexCatalog()->findIndexByName(
+ opCtx, _indexName, true /* includeUnfinishedIndexes */);
+
+ return descriptor->getEntry();
+}
+
+IndexCatalogEntry* IndexBuildBlock::getEntry(OperationContext* opCtx, Collection* collection) {
+ auto descriptor = collection->getIndexCatalog()->findIndexByName(
+ opCtx, _indexName, true /* includeUnfinishedIndexes */);
+
+ return descriptor->getEntry();
+}
+
} // namespace mongo
diff --git a/src/mongo/db/catalog/index_build_block.h b/src/mongo/db/catalog/index_build_block.h
index 22b65a4a541..3e539599be7 100644
--- a/src/mongo/db/catalog/index_build_block.h
+++ b/src/mongo/db/catalog/index_build_block.h
@@ -41,8 +41,7 @@ class IndexBuildBlock {
IndexBuildBlock& operator=(const IndexBuildBlock&) = delete;
public:
- IndexBuildBlock(IndexCatalog* indexCatalog,
- const NamespaceString& nss,
+ IndexBuildBlock(const NamespaceString& nss,
const BSONObj& spec,
IndexBuildMethod method,
// The index build UUID is only required for persisting to the catalog.
@@ -98,9 +97,8 @@ public:
*
* This entry is owned by the IndexCatalog.
*/
- IndexCatalogEntry* getEntry() {
- return _indexCatalogEntry;
- }
+ const IndexCatalogEntry* getEntry(OperationContext* opCtx, const Collection* collection) const;
+ IndexCatalogEntry* getEntry(OperationContext* opCtx, Collection* collection);
/**
* Returns the name of the index managed by this index builder.
@@ -119,7 +117,6 @@ public:
private:
void _completeInit(OperationContext* opCtx, Collection* collection);
- IndexCatalog* const _indexCatalog;
const NamespaceString _nss;
BSONObj _spec;
@@ -129,8 +126,6 @@ private:
std::string _indexName;
std::string _indexNamespace;
- IndexCatalogEntry* _indexCatalogEntry;
-
std::unique_ptr<IndexBuildInterceptor> _indexBuildInterceptor;
};
} // namespace mongo
diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp
index b6d8c11c8fc..273be3df5a6 100644
--- a/src/mongo/db/catalog/index_builds_manager.cpp
+++ b/src/mongo/db/catalog/index_builds_manager.cpp
@@ -130,8 +130,9 @@ Status IndexBuildsManager::startBuildingIndex(OperationContext* opCtx,
}
Status IndexBuildsManager::resumeBuildingIndexFromBulkLoadPhase(OperationContext* opCtx,
+ const Collection* collection,
const UUID& buildUUID) {
- return invariant(_getBuilder(buildUUID))->dumpInsertsFromBulk(opCtx);
+ return invariant(_getBuilder(buildUUID))->dumpInsertsFromBulk(opCtx, collection);
}
StatusWith<std::pair<long long, long long>> IndexBuildsManager::startBuildingIndexForRecovery(
@@ -230,7 +231,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsManager::startBuildingInd
// Delete duplicate record and insert it into local lost and found.
Status status = [&] {
if (repair == RepairData::kYes) {
- return builder->dumpInsertsFromBulk(opCtx, [&](const RecordId& rid) {
+ return builder->dumpInsertsFromBulk(opCtx, coll, [&](const RecordId& rid) {
auto moveStatus = _moveRecordToLostAndFound(opCtx, ns, lostAndFoundNss, rid);
if (moveStatus.isOK() && (moveStatus.getValue() > 0)) {
recordsRemoved++;
@@ -239,7 +240,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsManager::startBuildingInd
return moveStatus.getStatus();
});
} else {
- return builder->dumpInsertsFromBulk(opCtx);
+ return builder->dumpInsertsFromBulk(opCtx, coll);
}
}();
if (!status.isOK()) {
@@ -283,10 +284,11 @@ Status IndexBuildsManager::retrySkippedRecords(OperationContext* opCtx,
}
Status IndexBuildsManager::checkIndexConstraintViolations(OperationContext* opCtx,
+ const Collection* collection,
const UUID& buildUUID) {
auto builder = invariant(_getBuilder(buildUUID));
- return builder->checkConstraints(opCtx);
+ return builder->checkConstraints(opCtx, collection);
}
Status IndexBuildsManager::commitIndexBuild(OperationContext* opCtx,
@@ -345,7 +347,8 @@ bool IndexBuildsManager::abortIndexBuildWithoutCleanupForRollback(OperationConte
"Index build aborted without cleanup for rollback",
"buildUUID"_attr = buildUUID);
- if (auto resumeInfo = builder.getValue()->abortWithoutCleanupForRollback(opCtx, isResumable)) {
+ if (auto resumeInfo =
+ builder.getValue()->abortWithoutCleanupForRollback(opCtx, collection, isResumable)) {
_resumeInfos.push_back(std::move(*resumeInfo));
}
@@ -364,7 +367,7 @@ bool IndexBuildsManager::abortIndexBuildWithoutCleanupForShutdown(OperationConte
LOGV2(
4841500, "Index build aborted without cleanup for shutdown", "buildUUID"_attr = buildUUID);
- builder.getValue()->abortWithoutCleanupForShutdown(opCtx, isResumable);
+ builder.getValue()->abortWithoutCleanupForShutdown(opCtx, collection, isResumable);
return true;
}
diff --git a/src/mongo/db/catalog/index_builds_manager.h b/src/mongo/db/catalog/index_builds_manager.h
index 7ce8b7dd63b..5928e088afb 100644
--- a/src/mongo/db/catalog/index_builds_manager.h
+++ b/src/mongo/db/catalog/index_builds_manager.h
@@ -101,7 +101,9 @@ public:
const UUID& buildUUID,
boost::optional<RecordId> resumeAfterRecordId = boost::none);
- Status resumeBuildingIndexFromBulkLoadPhase(OperationContext* opCtx, const UUID& buildUUID);
+ Status resumeBuildingIndexFromBulkLoadPhase(OperationContext* opCtx,
+ const Collection* collection,
+ const UUID& buildUUID);
/**
* Iterates through every record in the collection to index it. May also remove documents
@@ -132,7 +134,9 @@ public:
/**
* Runs the index constraint violation checking phase of the index build..
*/
- Status checkIndexConstraintViolations(OperationContext* opCtx, const UUID& buildUUID);
+ Status checkIndexConstraintViolations(OperationContext* opCtx,
+ const Collection* collection,
+ const UUID& buildUUID);
/**
* Persists information in the index catalog entry that the index is ready for use, as well as
diff --git a/src/mongo/db/catalog/index_catalog_entry.h b/src/mongo/db/catalog/index_catalog_entry.h
index a3a24f68abf..f0078b662af 100644
--- a/src/mongo/db/catalog/index_catalog_entry.h
+++ b/src/mongo/db/catalog/index_catalog_entry.h
@@ -76,9 +76,7 @@ public:
virtual bool isHybridBuilding() const = 0;
- virtual IndexBuildInterceptor* indexBuildInterceptor() = 0;
-
- virtual const IndexBuildInterceptor* indexBuildInterceptor() const = 0;
+ virtual IndexBuildInterceptor* indexBuildInterceptor() const = 0;
virtual void setIndexBuildInterceptor(IndexBuildInterceptor* interceptor) = 0;
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.h b/src/mongo/db/catalog/index_catalog_entry_impl.h
index 869b90c9c44..3fbe37fba10 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.h
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.h
@@ -89,11 +89,7 @@ public:
return _indexBuildInterceptor != nullptr;
}
- IndexBuildInterceptor* indexBuildInterceptor() final {
- return _indexBuildInterceptor;
- }
-
- const IndexBuildInterceptor* indexBuildInterceptor() const final {
+ IndexBuildInterceptor* indexBuildInterceptor() const final {
return _indexBuildInterceptor;
}
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 91268e56427..224b1defffc 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -490,13 +490,13 @@ StatusWith<BSONObj> IndexCatalogImpl::createIndexOnEmptyCollection(OperationCont
// now going to touch disk
boost::optional<UUID> buildUUID = boost::none;
IndexBuildBlock indexBuildBlock(
- this, _collection->ns(), spec, IndexBuildMethod::kForeground, buildUUID);
+ _collection->ns(), spec, IndexBuildMethod::kForeground, buildUUID);
status = indexBuildBlock.init(opCtx, _collection);
if (!status.isOK())
return status;
// sanity checks, etc...
- IndexCatalogEntry* entry = indexBuildBlock.getEntry();
+ IndexCatalogEntry* entry = indexBuildBlock.getEntry(opCtx, _collection);
invariant(entry);
IndexDescriptor* descriptor = entry->descriptor();
invariant(descriptor);
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index ad75878d59f..b9c84e4a1d1 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -276,12 +276,8 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(
boost::optional<IndexSorterInfo> sorterInfo;
IndexToBuild index;
- index.block = std::make_unique<IndexBuildBlock>(
- collection.getWritableCollection()->getIndexCatalog(),
- collection->ns(),
- info,
- _method,
- _buildUUID);
+ index.block =
+ std::make_unique<IndexBuildBlock>(collection->ns(), info, _method, _buildUUID);
if (resumeInfo) {
auto resumeInfoIndexes = resumeInfo->getIndexes();
// Find the resume information that corresponds to this spec.
@@ -312,7 +308,9 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(
opCtx, TemporaryRecordStore::FinalizationAction::kDelete);
});
- index.real = index.block->getEntry()->accessMethod();
+ auto indexCatalogEntry =
+ index.block->getEntry(opCtx, collection.getWritableCollection());
+ index.real = indexCatalogEntry->accessMethod();
status = index.real->initializeAsEmpty(opCtx);
if (!status.isOK())
return status;
@@ -325,7 +323,7 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(
? sorterInfo
: boost::none);
- const IndexDescriptor* descriptor = index.block->getEntry()->descriptor();
+ const IndexDescriptor* descriptor = indexCatalogEntry->descriptor();
collection->getIndexCatalog()->prepareInsertDeleteOptions(
opCtx, collection->ns(), descriptor, &index.options);
@@ -346,7 +344,7 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(
"maxTemporaryMemoryUsageMB"_attr =
eachIndexBuildMaxMemoryUsageBytes / 1024 / 1024);
- index.filterExpression = index.block->getEntry()->getFilterExpression();
+ index.filterExpression = indexCatalogEntry->getFilterExpression();
if (!resumeInfo) {
// TODO SERVER-14888 Suppress this in cases we don't want to audit.
@@ -571,7 +569,7 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(
RecoveryUnit::toString(opCtx->recoveryUnit()->getTimestampReadSource()),
"duration"_attr = duration_cast<Milliseconds>(Seconds(t.seconds())));
- Status ret = dumpInsertsFromBulk(opCtx);
+ Status ret = dumpInsertsFromBulk(opCtx, collection);
if (!ret.isOK())
return ret;
@@ -606,12 +604,14 @@ Status MultiIndexBlock::insertSingleDocumentForInitialSyncOrRecovery(OperationCo
return Status::OK();
}
-Status MultiIndexBlock::dumpInsertsFromBulk(OperationContext* opCtx) {
- return dumpInsertsFromBulk(opCtx, nullptr);
+Status MultiIndexBlock::dumpInsertsFromBulk(OperationContext* opCtx, const Collection* collection) {
+ return dumpInsertsFromBulk(opCtx, collection, nullptr);
}
Status MultiIndexBlock::dumpInsertsFromBulk(
- OperationContext* opCtx, const IndexAccessMethod::RecordIdHandlerFn& onDuplicateRecord) {
+ OperationContext* opCtx,
+ const Collection* collection,
+ const IndexAccessMethod::RecordIdHandlerFn& onDuplicateRecord) {
invariant(!_buildIsCleanedUp);
invariant(opCtx->lockState()->isNoop() || !opCtx->lockState()->inAWriteUnitOfWork());
@@ -631,9 +631,9 @@ Status MultiIndexBlock::dumpInsertsFromBulk(
// When onDuplicateRecord is passed, 'dupsAllowed' should be passed to reflect whether or
// not the index is unique.
bool dupsAllowed = (onDuplicateRecord)
- ? !_indexes[i].block->getEntry()->descriptor()->unique()
+ ? !_indexes[i].block->getEntry(opCtx, collection)->descriptor()->unique()
: _indexes[i].options.dupsAllowed;
- IndexCatalogEntry* entry = _indexes[i].block->getEntry();
+ const IndexCatalogEntry* entry = _indexes[i].block->getEntry(opCtx, collection);
LOGV2_DEBUG(20392,
1,
"Index build: inserting from external sorter into index",
@@ -701,7 +701,7 @@ Status MultiIndexBlock::drainBackgroundWrites(
// Callers are responsible for stopping writes by holding an S or X lock while draining before
// completing the index build.
for (size_t i = 0; i < _indexes.size(); i++) {
- auto interceptor = _indexes[i].block->getEntry()->indexBuildInterceptor();
+ auto interceptor = _indexes[i].block->getEntry(opCtx, coll)->indexBuildInterceptor();
if (!interceptor)
continue;
@@ -721,7 +721,7 @@ Status MultiIndexBlock::drainBackgroundWrites(
Status MultiIndexBlock::retrySkippedRecords(OperationContext* opCtx, const Collection* collection) {
invariant(!_buildIsCleanedUp);
for (auto&& index : _indexes) {
- auto interceptor = index.block->getEntry()->indexBuildInterceptor();
+ auto interceptor = index.block->getEntry(opCtx, collection)->indexBuildInterceptor();
if (!interceptor)
continue;
@@ -733,14 +733,14 @@ Status MultiIndexBlock::retrySkippedRecords(OperationContext* opCtx, const Colle
return Status::OK();
}
-Status MultiIndexBlock::checkConstraints(OperationContext* opCtx) {
+Status MultiIndexBlock::checkConstraints(OperationContext* opCtx, const Collection* collection) {
invariant(!_buildIsCleanedUp);
// For each index that may be unique, check that no recorded duplicates still exist. This can
// only check what is visible on the index. Callers are responsible for ensuring all writes to
// the collection are visible.
for (size_t i = 0; i < _indexes.size(); i++) {
- auto interceptor = _indexes[i].block->getEntry()->indexBuildInterceptor();
+ auto interceptor = _indexes[i].block->getEntry(opCtx, collection)->indexBuildInterceptor();
if (!interceptor)
continue;
@@ -753,12 +753,14 @@ Status MultiIndexBlock::checkConstraints(OperationContext* opCtx) {
}
boost::optional<ResumeIndexInfo> MultiIndexBlock::abortWithoutCleanupForRollback(
- OperationContext* opCtx, bool isResumable) {
- return _abortWithoutCleanup(opCtx, false /* shutdown */, isResumable);
+ OperationContext* opCtx, const Collection* collection, bool isResumable) {
+ return _abortWithoutCleanup(opCtx, collection, false /* shutdown */, isResumable);
}
-void MultiIndexBlock::abortWithoutCleanupForShutdown(OperationContext* opCtx, bool isResumable) {
- _abortWithoutCleanup(opCtx, true /* shutdown */, isResumable);
+void MultiIndexBlock::abortWithoutCleanupForShutdown(OperationContext* opCtx,
+ const Collection* collection,
+ bool isResumable) {
+ _abortWithoutCleanup(opCtx, collection, true /* shutdown */, isResumable);
}
MultiIndexBlock::OnCreateEachFn MultiIndexBlock::kNoopOnCreateEachFn = [](const BSONObj& spec) {};
@@ -793,12 +795,12 @@ Status MultiIndexBlock::commit(OperationContext* opCtx,
// catalog entry. The interceptor will write multikey metadata keys into the index during
// IndexBuildInterceptor::sideWrite, so we only need to pass the cached MultikeyPaths into
// IndexCatalogEntry::setMultikey here.
- auto interceptor = _indexes[i].block->getEntry()->indexBuildInterceptor();
+ auto indexCatalogEntry = _indexes[i].block->getEntry(opCtx, collection);
+ auto interceptor = indexCatalogEntry->indexBuildInterceptor();
if (interceptor) {
auto multikeyPaths = interceptor->getMultikeyPaths();
if (multikeyPaths) {
- _indexes[i].block->getEntry()->setMultikey(
- opCtx, collection, {}, multikeyPaths.get());
+ indexCatalogEntry->setMultikey(opCtx, collection, {}, multikeyPaths.get());
}
}
@@ -809,8 +811,7 @@ Status MultiIndexBlock::commit(OperationContext* opCtx,
// MultikeyPaths into IndexCatalogEntry::setMultikey here.
const auto& bulkBuilder = _indexes[i].bulk;
if (bulkBuilder->isMultikey()) {
- _indexes[i].block->getEntry()->setMultikey(
- opCtx, collection, {}, bulkBuilder->getMultikeyPaths());
+ indexCatalogEntry->setMultikey(opCtx, collection, {}, bulkBuilder->getMultikeyPaths());
}
// The commit() function can be called multiple times on write conflict errors. Dropping the
@@ -840,6 +841,7 @@ void MultiIndexBlock::setIndexBuildMethod(IndexBuildMethod indexBuildMethod) {
}
boost::optional<ResumeIndexInfo> MultiIndexBlock::_abortWithoutCleanup(OperationContext* opCtx,
+ const Collection* collection,
bool shutdown,
bool isResumable) {
invariant(!_buildIsCleanedUp);
@@ -859,14 +861,15 @@ boost::optional<ResumeIndexInfo> MultiIndexBlock::_abortWithoutCleanup(Operation
invariant(_method == IndexBuildMethod::kHybrid);
if (shutdown) {
- _writeStateToDisk(opCtx);
+ _writeStateToDisk(opCtx, collection);
// TODO (SERVER-48419): Keep the temporary tables unconditionally of shutdown once
// rollback uses the resume information.
action = TemporaryRecordStore::FinalizationAction::kKeep;
} else {
- resumeInfo = ResumeIndexInfo::parse(
- IDLParserErrorContext("MultiIndexBlock::getResumeInfo"), _constructStateObject());
+ resumeInfo =
+ ResumeIndexInfo::parse(IDLParserErrorContext("MultiIndexBlock::getResumeInfo"),
+ _constructStateObject(opCtx, collection));
}
}
@@ -879,8 +882,9 @@ boost::optional<ResumeIndexInfo> MultiIndexBlock::_abortWithoutCleanup(Operation
return resumeInfo;
}
-void MultiIndexBlock::_writeStateToDisk(OperationContext* opCtx) const {
- auto obj = _constructStateObject();
+void MultiIndexBlock::_writeStateToDisk(OperationContext* opCtx,
+ const Collection* collection) const {
+ auto obj = _constructStateObject(opCtx, collection);
auto rs = opCtx->getServiceContext()
->getStorageEngine()
->makeTemporaryRecordStoreForResumableIndexBuild(opCtx);
@@ -908,7 +912,8 @@ void MultiIndexBlock::_writeStateToDisk(OperationContext* opCtx) const {
rs->finalizeTemporaryTable(opCtx, TemporaryRecordStore::FinalizationAction::kKeep);
}
-BSONObj MultiIndexBlock::_constructStateObject() const {
+BSONObj MultiIndexBlock::_constructStateObject(OperationContext* opCtx,
+ const Collection* collection) const {
BSONObjBuilder builder;
_buildUUID->appendToBuilder(&builder, "_id");
builder.append("phase", IndexBuildPhase_serializer(_phase));
@@ -949,7 +954,8 @@ BSONObj MultiIndexBlock::_constructStateObject() const {
ranges.done();
}
- auto indexBuildInterceptor = index.block->getEntry()->indexBuildInterceptor();
+ auto indexBuildInterceptor =
+ index.block->getEntry(opCtx, collection)->indexBuildInterceptor();
indexInfo.append("sideWritesTable", indexBuildInterceptor->getSideWritesTableIdent());
if (auto duplicateKeyTrackerTableIdent =
diff --git a/src/mongo/db/catalog/multi_index_block.h b/src/mongo/db/catalog/multi_index_block.h
index 7aa68a65776..938f05156f4 100644
--- a/src/mongo/db/catalog/multi_index_block.h
+++ b/src/mongo/db/catalog/multi_index_block.h
@@ -177,8 +177,9 @@ public:
*
* Should not be called inside of a WriteUnitOfWork.
*/
- Status dumpInsertsFromBulk(OperationContext* opCtx);
+ Status dumpInsertsFromBulk(OperationContext* opCtx, const Collection* collection);
Status dumpInsertsFromBulk(OperationContext* opCtx,
+ const Collection* collection,
const IndexAccessMethod::RecordIdHandlerFn& onDuplicateRecord);
/**
* For background indexes using an IndexBuildInterceptor to capture inserts during a build,
@@ -216,7 +217,7 @@ public:
*
* Must not be in a WriteUnitOfWork.
*/
- Status checkConstraints(OperationContext* opCtx);
+ Status checkConstraints(OperationContext* opCtx, const Collection* collection);
/**
* Marks the index ready for use. Should only be called as the last method after
@@ -278,6 +279,7 @@ public:
* This should only be used during rollback.
*/
boost::optional<ResumeIndexInfo> abortWithoutCleanupForRollback(OperationContext* opCtx,
+ const Collection* collection,
bool isResumable);
/**
@@ -289,7 +291,9 @@ public:
*
* This should only be used during shutdown.
*/
- void abortWithoutCleanupForShutdown(OperationContext* opCtx, bool isResumable);
+ void abortWithoutCleanupForShutdown(OperationContext* opCtx,
+ const Collection* collection,
+ bool isResumable);
/**
* Returns true if this build block supports background writes while building an index. This is
@@ -317,12 +321,13 @@ private:
* supported.
*/
boost::optional<ResumeIndexInfo> _abortWithoutCleanup(OperationContext* opCtx,
+ const Collection* collection,
bool shutdown,
bool isResumable);
- void _writeStateToDisk(OperationContext* opCtx) const;
+ void _writeStateToDisk(OperationContext* opCtx, const Collection* collection) const;
- BSONObj _constructStateObject() const;
+ BSONObj _constructStateObject(OperationContext* opCtx, const Collection* collection) const;
// Is set during init() and ensures subsequent function calls act on the same Collection.
diff --git a/src/mongo/db/catalog/multi_index_block_test.cpp b/src/mongo/db/catalog/multi_index_block_test.cpp
index 5b258e4174e..a62ae34a574 100644
--- a/src/mongo/db/catalog/multi_index_block_test.cpp
+++ b/src/mongo/db/catalog/multi_index_block_test.cpp
@@ -97,8 +97,8 @@ TEST_F(MultiIndexBlockTest, CommitWithoutInsertingDocuments) {
operationContext(), coll, std::vector<BSONObj>(), MultiIndexBlock::kNoopOnInitFn));
ASSERT_EQUALS(0U, specs.size());
- ASSERT_OK(indexer->dumpInsertsFromBulk(operationContext()));
- ASSERT_OK(indexer->checkConstraints(operationContext()));
+ ASSERT_OK(indexer->dumpInsertsFromBulk(operationContext(), coll.get()));
+ ASSERT_OK(indexer->checkConstraints(operationContext(), coll.get()));
{
WriteUnitOfWork wunit(operationContext());
@@ -121,8 +121,8 @@ TEST_F(MultiIndexBlockTest, CommitAfterInsertingSingleDocument) {
ASSERT_EQUALS(0U, specs.size());
ASSERT_OK(indexer->insertSingleDocumentForInitialSyncOrRecovery(operationContext(), {}, {}));
- ASSERT_OK(indexer->dumpInsertsFromBulk(operationContext()));
- ASSERT_OK(indexer->checkConstraints(operationContext()));
+ ASSERT_OK(indexer->dumpInsertsFromBulk(operationContext(), coll.get()));
+ ASSERT_OK(indexer->checkConstraints(operationContext(), coll.get()));
{
WriteUnitOfWork wunit(operationContext());
@@ -148,7 +148,7 @@ TEST_F(MultiIndexBlockTest, AbortWithoutCleanupAfterInsertingSingleDocument) {
ASSERT_EQUALS(0U, specs.size());
ASSERT_OK(indexer->insertSingleDocumentForInitialSyncOrRecovery(operationContext(), {}, {}));
auto isResumable = false;
- indexer->abortWithoutCleanupForRollback(operationContext(), isResumable);
+ indexer->abortWithoutCleanupForRollback(operationContext(), coll.get(), isResumable);
}
TEST_F(MultiIndexBlockTest, InitWriteConflictException) {
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 04f31e0d99f..e8bd2670229 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -241,7 +241,7 @@ public:
// writeConflictRetry loop.
uassertStatusOK(indexer->insertAllDocumentsInCollection(opCtx, collection.get()));
- uassertStatusOK(indexer->checkConstraints(opCtx));
+ uassertStatusOK(indexer->checkConstraints(opCtx, collection.get()));
writeConflictRetry(opCtx, "commitReIndex", toReIndexNss.ns(), [&] {
WriteUnitOfWork wunit(opCtx);
diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp
index c0760c93e21..250270b03a9 100644
--- a/src/mongo/db/index/index_build_interceptor.cpp
+++ b/src/mongo/db/index/index_build_interceptor.cpp
@@ -152,7 +152,7 @@ void IndexBuildInterceptor::finalizeTemporaryTables(
}
Status IndexBuildInterceptor::recordDuplicateKey(OperationContext* opCtx,
- const KeyString::Value& key) {
+ const KeyString::Value& key) const {
invariant(_indexCatalogEntry->descriptor()->unique());
return _duplicateKeyTracker->recordKey(opCtx, key);
}
diff --git a/src/mongo/db/index/index_build_interceptor.h b/src/mongo/db/index/index_build_interceptor.h
index a9215367feb..c981180db6b 100644
--- a/src/mongo/db/index/index_build_interceptor.h
+++ b/src/mongo/db/index/index_build_interceptor.h
@@ -113,7 +113,7 @@ public:
* Given a duplicate key, record the key for later verification by a call to
* checkDuplicateKeyConstraints();
*/
- Status recordDuplicateKey(OperationContext* opCtx, const KeyString::Value& key);
+ Status recordDuplicateKey(OperationContext* opCtx, const KeyString::Value& key) const;
/**
* Returns Status::OK if all previously recorded duplicate key constraint violations have been
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index 8bee222b62d..80db3b04cd7 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -1799,7 +1799,8 @@ void IndexBuildsCoordinator::createIndex(OperationContext* opCtx,
uassertStatusOK(
_indexBuildsManager.retrySkippedRecords(opCtx, buildUUID, collection.get()));
}
- uassertStatusOK(_indexBuildsManager.checkIndexConstraintViolations(opCtx, buildUUID));
+ uassertStatusOK(
+ _indexBuildsManager.checkIndexConstraintViolations(opCtx, collection.get(), buildUUID));
auto opObserver = opCtx->getServiceContext()->getOpObserver();
auto onCreateEachFn = [&](const BSONObj& spec) {
@@ -2597,12 +2598,7 @@ void IndexBuildsCoordinator::_scanCollectionAndInsertSortedKeysIntoIndex(
const NamespaceStringOrUUID dbAndUUID(replState->dbName, replState->collectionUUID);
Lock::CollectionLock collLock(opCtx, dbAndUUID, MODE_IX);
- _setUpForScanCollectionAndInsertSortedKeysIntoIndex(opCtx, replState);
-
- // The collection object should always exist while an index build is registered.
- auto collection =
- CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, replState->collectionUUID);
- invariant(collection);
+ auto collection = _setUpForScanCollectionAndInsertSortedKeysIntoIndex(opCtx, replState);
uassertStatusOK(_indexBuildsManager.startBuildingIndex(
opCtx, collection, replState->buildUUID, resumeAfterRecordId));
@@ -2621,10 +2617,9 @@ void IndexBuildsCoordinator::_insertSortedKeysIntoIndexForResume(
const NamespaceStringOrUUID dbAndUUID(replState->dbName, replState->collectionUUID);
Lock::CollectionLock collLock(opCtx, dbAndUUID, MODE_IX);
- _setUpForScanCollectionAndInsertSortedKeysIntoIndex(opCtx, replState);
-
- uassertStatusOK(
- _indexBuildsManager.resumeBuildingIndexFromBulkLoadPhase(opCtx, replState->buildUUID));
+ auto collection = _setUpForScanCollectionAndInsertSortedKeysIntoIndex(opCtx, replState);
+ uassertStatusOK(_indexBuildsManager.resumeBuildingIndexFromBulkLoadPhase(
+ opCtx, collection, replState->buildUUID));
}
if (MONGO_unlikely(hangAfterIndexBuildDumpsInsertsFromBulk.shouldFail())) {
@@ -2633,22 +2628,25 @@ void IndexBuildsCoordinator::_insertSortedKeysIntoIndexForResume(
}
}
-void IndexBuildsCoordinator::_setUpForScanCollectionAndInsertSortedKeysIntoIndex(
+const Collection* IndexBuildsCoordinator::_setUpForScanCollectionAndInsertSortedKeysIntoIndex(
OperationContext* opCtx, std::shared_ptr<ReplIndexBuildState> replState) {
// Rebuilding system indexes during startup using the IndexBuildsCoordinator is done by all
// storage engines if they're missing.
invariant(_indexBuildsManager.isBackgroundBuilding(replState->buildUUID));
- auto nss = CollectionCatalog::get(opCtx).lookupNSSByUUID(opCtx, replState->collectionUUID);
- invariant(nss);
+ auto collection =
+ CollectionCatalog::get(opCtx).lookupCollectionByUUID(opCtx, replState->collectionUUID);
+ invariant(collection);
// Set up the thread's currentOp information to display createIndexes cmd information.
- updateCurOpOpDescription(opCtx, *nss, replState->indexSpecs);
+ updateCurOpOpDescription(opCtx, collection->ns(), replState->indexSpecs);
// Index builds can safely ignore prepare conflicts and perform writes. On secondaries,
// prepare operations wait for index builds to complete.
opCtx->recoveryUnit()->setPrepareConflictBehavior(
PrepareConflictBehavior::kIgnoreConflictsAllowWrites);
+
+ return collection;
}
/*
@@ -2817,8 +2815,8 @@ IndexBuildsCoordinator::CommitResult IndexBuildsCoordinator::_insertKeysFromSide
!replCoord->getMemberState().startup2();
if (IndexBuildProtocol::kSinglePhase == replState->protocol ||
twoPhaseAndNotInitialSyncing) {
- uassertStatusOK(
- _indexBuildsManager.checkIndexConstraintViolations(opCtx, replState->buildUUID));
+ uassertStatusOK(_indexBuildsManager.checkIndexConstraintViolations(
+ opCtx, collection.get(), replState->buildUUID));
}
} catch (const ExceptionForCat<ErrorCategory::ShutdownError>& e) {
logFailure(e.toStatus(), collection->ns(), replState);
@@ -2928,8 +2926,8 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::_runIndexReb
RecoveryUnit::ReadSource::kNoTimestamp,
IndexBuildInterceptor::DrainYieldPolicy::kNoYield));
- uassertStatusOK(
- _indexBuildsManager.checkIndexConstraintViolations(opCtx, replState->buildUUID));
+ uassertStatusOK(_indexBuildsManager.checkIndexConstraintViolations(
+ opCtx, collection.get(), replState->buildUUID));
// Commit the index build.
uassertStatusOK(_indexBuildsManager.commitIndexBuild(opCtx,
diff --git a/src/mongo/db/index_builds_coordinator.h b/src/mongo/db/index_builds_coordinator.h
index a6bd7847534..49388122054 100644
--- a/src/mongo/db/index_builds_coordinator.h
+++ b/src/mongo/db/index_builds_coordinator.h
@@ -661,7 +661,7 @@ protected:
*/
void _insertSortedKeysIntoIndexForResume(OperationContext* opCtx,
std::shared_ptr<ReplIndexBuildState> replState);
- void _setUpForScanCollectionAndInsertSortedKeysIntoIndex(
+ const Collection* _setUpForScanCollectionAndInsertSortedKeysIntoIndex(
OperationContext* opCtx, std::shared_ptr<ReplIndexBuildState> replState);
/**
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
index 13dd6dae785..43a834a86c8 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
@@ -222,14 +222,16 @@ Status CollectionBulkLoaderImpl::commit() {
// Commit before deleting dups, so the dups will be removed from secondary indexes when
// deleted.
if (_secondaryIndexesBlock) {
- auto status = _secondaryIndexesBlock->dumpInsertsFromBulk(_opCtx.get());
+ auto status = _secondaryIndexesBlock->dumpInsertsFromBulk(_opCtx.get(),
+ _collection->getCollection());
if (!status.isOK()) {
return status;
}
// This should always return Status::OK() as secondary index builds ignore duplicate key
// constraints causing them to not be recorded.
- invariant(_secondaryIndexesBlock->checkConstraints(_opCtx.get()));
+ invariant(_secondaryIndexesBlock->checkConstraints(_opCtx.get(),
+ _collection->getCollection()));
status = writeConflictRetry(
_opCtx.get(), "CollectionBulkLoaderImpl::commit", _nss.ns(), [this] {
@@ -252,8 +254,8 @@ Status CollectionBulkLoaderImpl::commit() {
if (_idIndexBlock) {
// Do not do inside a WriteUnitOfWork (required by dumpInsertsFromBulk).
- auto status =
- _idIndexBlock->dumpInsertsFromBulk(_opCtx.get(), [&](const RecordId& rid) {
+ auto status = _idIndexBlock->dumpInsertsFromBulk(
+ _opCtx.get(), _collection->getCollection(), [&](const RecordId& rid) {
return writeConflictRetry(
_opCtx.get(), "CollectionBulkLoaderImpl::commit", _nss.ns(), [this, &rid] {
WriteUnitOfWork wunit(_opCtx.get());
@@ -287,7 +289,7 @@ Status CollectionBulkLoaderImpl::commit() {
return status;
}
- status = _idIndexBlock->checkConstraints(_opCtx.get());
+ status = _idIndexBlock->checkConstraints(_opCtx.get(), _collection->getCollection());
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/startup_recovery.cpp b/src/mongo/db/startup_recovery.cpp
index 5817d07bd2c..7c96c3919f3 100644
--- a/src/mongo/db/startup_recovery.cpp
+++ b/src/mongo/db/startup_recovery.cpp
@@ -183,7 +183,7 @@ Status buildMissingIdIndex(OperationContext* opCtx, Collection* collection) {
return status;
}
- status = indexer.checkConstraints(opCtx);
+ status = indexer.checkConstraints(opCtx, collection);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/dbtests/dbtests.cpp b/src/mongo/dbtests/dbtests.cpp
index c6568bfc4bd..0c84bdac851 100644
--- a/src/mongo/dbtests/dbtests.cpp
+++ b/src/mongo/dbtests/dbtests.cpp
@@ -142,7 +142,7 @@ Status createIndexFromSpec(OperationContext* opCtx, StringData ns, const BSONObj
if (!status.isOK()) {
return status;
}
- status = indexer.checkConstraints(opCtx);
+ status = indexer.checkConstraints(opCtx, coll);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 007b491d6b1..83ba1ca2541 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -165,7 +165,7 @@ public:
ASSERT_OK(indexer.init(_opCtx, coll, spec, MultiIndexBlock::kNoopOnInitFn).getStatus());
ASSERT_OK(indexer.insertAllDocumentsInCollection(_opCtx, coll.get()));
- ASSERT_OK(indexer.checkConstraints(_opCtx));
+ ASSERT_OK(indexer.checkConstraints(_opCtx, coll.get()));
WriteUnitOfWork wunit(_opCtx);
ASSERT_OK(indexer.commit(_opCtx,
@@ -226,7 +226,7 @@ public:
// Hybrid index builds check duplicates explicitly.
ASSERT_OK(indexer.insertAllDocumentsInCollection(_opCtx, coll.get()));
- auto status = indexer.checkConstraints(_opCtx);
+ auto status = indexer.checkConstraints(_opCtx, coll.get());
ASSERT_EQUALS(status.code(), ErrorCodes::DuplicateKey);
}
};
@@ -343,7 +343,7 @@ Status IndexBuildBase::createIndex(const BSONObj& indexSpec) {
if (!status.isOK()) {
return status;
}
- status = indexer.checkConstraints(_opCtx);
+ status = indexer.checkConstraints(_opCtx, collection().get());
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index eff028a5e1e..04bc9867fa0 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -120,7 +120,7 @@ protected:
indexer.drainBackgroundWrites(&_opCtx,
RecoveryUnit::ReadSource::kNoTimestamp,
IndexBuildInterceptor::DrainYieldPolicy::kNoYield));
- uassertStatusOK(indexer.checkConstraints(&_opCtx));
+ uassertStatusOK(indexer.checkConstraints(&_opCtx, collection.get()));
{
WriteUnitOfWork wunit(&_opCtx);
uassertStatusOK(indexer.commit(&_opCtx,
diff --git a/src/mongo/dbtests/storage_timestamp_tests.cpp b/src/mongo/dbtests/storage_timestamp_tests.cpp
index ec63b5ed2b2..e7c007b5835 100644
--- a/src/mongo/dbtests/storage_timestamp_tests.cpp
+++ b/src/mongo/dbtests/storage_timestamp_tests.cpp
@@ -286,7 +286,7 @@ public:
}
ASSERT_OK(indexer.insertAllDocumentsInCollection(_opCtx, coll.get()));
- ASSERT_OK(indexer.checkConstraints(_opCtx));
+ ASSERT_OK(indexer.checkConstraints(_opCtx, coll.get()));
{
WriteUnitOfWork wuow(_opCtx);
@@ -2120,7 +2120,7 @@ public:
// Inserting all the documents has the side-effect of setting internal state on the index
// builder that the index is multikey.
ASSERT_OK(indexer.insertAllDocumentsInCollection(_opCtx, autoColl.getCollection()));
- ASSERT_OK(indexer.checkConstraints(_opCtx));
+ ASSERT_OK(indexer.checkConstraints(_opCtx, autoColl.getCollection()));
{
WriteUnitOfWork wuow(_opCtx);
@@ -2312,7 +2312,7 @@ public:
ASSERT_TRUE(buildingIndex->indexBuildInterceptor()->areAllWritesApplied(_opCtx));
}
- ASSERT_OK(indexer.checkConstraints(_opCtx));
+ ASSERT_OK(indexer.checkConstraints(_opCtx, autoColl.getCollection()));
{
WriteUnitOfWork wuow(_opCtx);
@@ -3051,7 +3051,7 @@ public:
buildingIndex->indexBuildInterceptor()->getSkippedRecordTracker()->areAllRecordsApplied(
_opCtx));
ASSERT_TRUE(buildingIndex->indexBuildInterceptor()->areAllWritesApplied(_opCtx));
- ASSERT_OK(indexer.checkConstraints(_opCtx));
+ ASSERT_OK(indexer.checkConstraints(_opCtx, collection.get()));
{
WriteUnitOfWork wuow(_opCtx);
diff --git a/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp b/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp
index 774c3c63091..5fd52a6ebf8 100644
--- a/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp
+++ b/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp
@@ -211,7 +211,7 @@ protected:
ASSERT_OK(
indexer.init(opCtx(), coll, indexSpec, MultiIndexBlock::kNoopOnInitFn).getStatus());
ASSERT_OK(indexer.insertAllDocumentsInCollection(opCtx(), coll.get()));
- ASSERT_OK(indexer.checkConstraints(opCtx()));
+ ASSERT_OK(indexer.checkConstraints(opCtx(), coll.get()));
WriteUnitOfWork wunit(opCtx());
ASSERT_OK(indexer.commit(opCtx(),