summaryrefslogtreecommitdiff
path: root/src/mongo/db
diff options
context:
space:
mode:
authorHenrik Edin <henrik.edin@mongodb.com>2021-05-11 14:30:26 -0400
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2021-05-20 19:18:53 +0000
commit11de948b0c50df7d12de09ae0f01e791fc5d70d7 (patch)
tree5a5a89cce0dc94f21778725184f8da3d5f76a13b /src/mongo/db
parentb2802257c7cd2cf253847d67da5ddcc780a5b85f (diff)
downloadmongo-11de948b0c50df7d12de09ae0f01e791fc5d70d7.tar.gz
SERVER-56002 SERVER-56023 Store Collection metadata in the Collection and rely on the copy-on-write machinery to keep it in sync with the durable catalog.
All updates to the metadata need to happen through the Collection; moved interfaces from the DurableCatalog to the Collection. Removed back pointer to Collection in IndexCatalogEntryImpl; interfaces now correctly take a const or non-const Collection. This should make its interface const-correct to avoid bugs where the copy-on-write system for Collections is bypassed. Multikey handling is special as it needs to happen without exclusive access to the Collection. Implemented isolation for the Collection metadata when multikey is changed. It handles multi-doc transactions and is only committed to the Collection instance after the write to the durable catalog successfully commits. listCollections and listIndexes can now safely read the metadata cache without needing to read from the durable catalog, making them safe to do without Collection-level locks.
Diffstat (limited to 'src/mongo/db')
-rw-r--r--src/mongo/db/catalog/SConscript2
-rw-r--r--src/mongo/db/catalog/capped_utils.cpp4
-rw-r--r--src/mongo/db/catalog/capped_utils_test.cpp3
-rw-r--r--src/mongo/db/catalog/catalog_control.cpp9
-rw-r--r--src/mongo/db/catalog/coll_mod.cpp31
-rw-r--r--src/mongo/db/catalog/collection.h148
-rw-r--r--src/mongo/db/catalog/collection_catalog.cpp24
-rw-r--r--src/mongo/db/catalog/collection_catalog.h15
-rw-r--r--src/mongo/db/catalog/collection_catalog_test.cpp11
-rw-r--r--src/mongo/db/catalog/collection_impl.cpp536
-rw-r--r--src/mongo/db/catalog/collection_impl.h88
-rw-r--r--src/mongo/db/catalog/collection_mock.h97
-rw-r--r--src/mongo/db/catalog/collection_options.cpp8
-rw-r--r--src/mongo/db/catalog/collection_options.h8
-rw-r--r--src/mongo/db/catalog/collection_validation.cpp9
-rw-r--r--src/mongo/db/catalog/create_collection.cpp7
-rw-r--r--src/mongo/db/catalog/create_collection_test.cpp3
-rw-r--r--src/mongo/db/catalog/database_impl.cpp26
-rw-r--r--src/mongo/db/catalog/database_test.cpp4
-rw-r--r--src/mongo/db/catalog/drop_indexes.cpp16
-rw-r--r--src/mongo/db/catalog/index_build_block.cpp19
-rw-r--r--src/mongo/db/catalog/index_catalog.h40
-rw-r--r--src/mongo/db/catalog/index_catalog_entry.h10
-rw-r--r--src/mongo/db/catalog/index_catalog_entry_impl.cpp39
-rw-r--r--src/mongo/db/catalog/index_catalog_entry_impl.h37
-rw-r--r--src/mongo/db/catalog/index_catalog_impl.cpp305
-rw-r--r--src/mongo/db/catalog/index_catalog_impl.h84
-rw-r--r--src/mongo/db/catalog/index_signature_test.cpp3
-rw-r--r--src/mongo/db/catalog/list_indexes.cpp21
-rw-r--r--src/mongo/db/catalog/multi_index_block.cpp3
-rw-r--r--src/mongo/db/catalog/rename_collection.cpp9
-rw-r--r--src/mongo/db/catalog/rename_collection_test.cpp8
-rw-r--r--src/mongo/db/catalog/uncommitted_collections.cpp1
-rw-r--r--src/mongo/db/catalog/uncommitted_multikey.cpp45
-rw-r--r--src/mongo/db/catalog/uncommitted_multikey.h64
-rw-r--r--src/mongo/db/catalog/validate_state.cpp1
-rw-r--r--src/mongo/db/catalog/validate_state_test.cpp5
-rw-r--r--src/mongo/db/cloner.cpp6
-rw-r--r--src/mongo/db/commands/create_indexes.cpp2
-rw-r--r--src/mongo/db/commands/drop_indexes.cpp10
-rw-r--r--src/mongo/db/commands/list_collections.cpp15
-rw-r--r--src/mongo/db/commands/mr_test.cpp2
-rw-r--r--src/mongo/db/commands/resize_oplog.cpp6
-rw-r--r--src/mongo/db/commands/validate.cpp1
-rw-r--r--src/mongo/db/commands/write_commands.cpp1
-rw-r--r--src/mongo/db/index/index_access_method.cpp15
-rw-r--r--src/mongo/db/index/index_access_method.h8
-rw-r--r--src/mongo/db/index_builds_coordinator.cpp38
-rw-r--r--src/mongo/db/index_builds_coordinator.h1
-rw-r--r--src/mongo/db/op_observer_impl.cpp2
-rw-r--r--src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp5
-rw-r--r--src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.cpp2
-rw-r--r--src/mongo/db/rebuild_indexes.cpp9
-rw-r--r--src/mongo/db/rebuild_indexes.h3
-rw-r--r--src/mongo/db/repair.cpp17
-rw-r--r--src/mongo/db/repl/collection_bulk_loader_impl.cpp4
-rw-r--r--src/mongo/db/repl/dbcheck.cpp10
-rw-r--r--src/mongo/db/repl/idempotency_test_fixture.cpp10
-rw-r--r--src/mongo/db/repl/oplog.cpp7
-rw-r--r--src/mongo/db/repl/rollback_test_fixture.cpp5
-rw-r--r--src/mongo/db/repl/rs_rollback.cpp19
-rw-r--r--src/mongo/db/repl/rs_rollback_test.cpp15
-rw-r--r--src/mongo/db/repl/storage_interface_impl.cpp37
-rw-r--r--src/mongo/db/repl/storage_interface_impl_test.cpp13
-rw-r--r--src/mongo/db/s/migration_destination_manager.cpp2
-rw-r--r--src/mongo/db/s/shard_local.cpp1
-rw-r--r--src/mongo/db/startup_recovery.cpp23
-rw-r--r--src/mongo/db/storage/SConscript1
-rw-r--r--src/mongo/db/storage/bson_collection_catalog_entry.cpp24
-rw-r--r--src/mongo/db/storage/bson_collection_catalog_entry.h37
-rw-r--r--src/mongo/db/storage/durable_catalog.h184
-rw-r--r--src/mongo/db/storage/durable_catalog_impl.cpp380
-rw-r--r--src/mongo/db/storage/durable_catalog_impl.h94
-rw-r--r--src/mongo/db/storage/kv/durable_catalog_test.cpp271
-rw-r--r--src/mongo/db/storage/kv/storage_engine_test.cpp1
-rw-r--r--src/mongo/db/storage/storage_engine_impl.cpp43
-rw-r--r--src/mongo/db/storage/storage_engine_test_fixture.h29
-rw-r--r--src/mongo/db/storage/storage_util.cpp14
-rw-r--r--src/mongo/db/storage/storage_util.h5
-rw-r--r--src/mongo/db/transaction_participant.cpp7
-rw-r--r--src/mongo/db/transaction_participant.h2
-rw-r--r--src/mongo/db/ttl.cpp13
82 files changed, 1746 insertions, 1391 deletions
diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript
index 0725840ba80..97d52070ac0 100644
--- a/src/mongo/db/catalog/SConscript
+++ b/src/mongo/db/catalog/SConscript
@@ -291,12 +291,14 @@ env.Library(
source=[
'collection_catalog.cpp',
'uncommitted_collections.cpp',
+ 'uncommitted_multikey.cpp',
],
LIBDEPS=[
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/db/namespace_string',
'$BUILD_DIR/mongo/db/profile_filter',
'$BUILD_DIR/mongo/db/service_context',
+ '$BUILD_DIR/mongo/db/storage/bson_collection_catalog_entry',
'$BUILD_DIR/mongo/db/storage/snapshot_helper',
'collection',
],
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index ba86a56e539..a12247d6b69 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -51,7 +51,6 @@
#include "mongo/db/query/plan_yield_policy.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/service_context.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/views/view_catalog.h"
#include "mongo/util/scopeguard.h"
@@ -145,8 +144,7 @@ void cloneCollectionAsCapped(OperationContext* opCtx,
// create new collection
{
- auto options =
- DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, fromCollection->getCatalogId());
+ auto options = fromCollection->getCollectionOptions();
// The capped collection will get its own new unique id, as the conversion isn't reversible,
// so it can't be rolled back.
options.uuid.reset();
diff --git a/src/mongo/db/catalog/capped_utils_test.cpp b/src/mongo/db/catalog/capped_utils_test.cpp
index 50c8b634197..0ada277fc4f 100644
--- a/src/mongo/db/catalog/capped_utils_test.cpp
+++ b/src/mongo/db/catalog/capped_utils_test.cpp
@@ -38,7 +38,6 @@
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/repl/storage_interface_impl.h"
#include "mongo/db/service_context_d_test_fixture.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/unittest/unittest.h"
namespace {
@@ -99,7 +98,7 @@ CollectionOptions getCollectionOptions(OperationContext* opCtx, const NamespaceS
AutoGetCollectionForRead collection(opCtx, nss);
ASSERT_TRUE(collection) << "Unable to get collections options for " << nss
<< " because collection does not exist.";
- return DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, collection->getCatalogId());
+ return collection->getCollectionOptions();
}
// Size of capped collection to be passed to convertToCapped() which accepts a double.
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index 04200591d50..c5c1bb95f63 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -127,12 +127,12 @@ void openCatalog(OperationContext* opCtx,
// Determine which indexes need to be rebuilt. rebuildIndexesOnCollection() requires that all
// indexes on that collection are done at once, so we use a map to group them together.
StringMap<IndexNameObjs> nsToIndexNameObjMap;
+ auto catalog = CollectionCatalog::get(opCtx);
for (StorageEngine::IndexIdentifier indexIdentifier : reconcileResult.indexesToRebuild) {
auto indexName = indexIdentifier.indexName;
- auto indexSpecs =
- getIndexNameObjs(opCtx,
- indexIdentifier.catalogId,
- [&indexName](const std::string& name) { return name == indexName; });
+ auto coll = catalog->lookupCollectionByNamespace(opCtx, indexIdentifier.nss);
+ auto indexSpecs = getIndexNameObjs(
+ coll, [&indexName](const std::string& name) { return name == indexName; });
if (!indexSpecs.isOK() || indexSpecs.getValue().first.empty()) {
fassert(40689,
{ErrorCodes::InternalError,
@@ -154,7 +154,6 @@ void openCatalog(OperationContext* opCtx,
ino.second.emplace_back(std::move(indexesToRebuild.second.back()));
}
- auto catalog = CollectionCatalog::get(opCtx);
for (const auto& entry : nsToIndexNameObjMap) {
NamespaceString collNss(entry.first);
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index a50aaf22edb..f0504822d73 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -55,7 +55,6 @@
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/server_options.h"
#include "mongo/db/service_context.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/recovery_unit.h"
#include "mongo/db/ttl_collection_cache.h"
#include "mongo/db/views/view_catalog.h"
@@ -385,7 +384,7 @@ private:
void _setClusteredExpireAfterSeconds(OperationContext* opCtx,
const CollectionOptions& oldCollOptions,
- const CollectionPtr& coll,
+ Collection* coll,
const BSONElement& clusteredIndexExpireAfterSeconds) {
invariant(oldCollOptions.clusteredIndex.has_value());
@@ -400,8 +399,7 @@ void _setClusteredExpireAfterSeconds(OperationContext* opCtx,
return;
}
- DurableCatalog::get(opCtx)->updateClusteredIndexTTLSetting(
- opCtx, coll->getCatalogId(), boost::none);
+ coll->updateClusteredIndexTTLSetting(opCtx, boost::none);
return;
}
@@ -421,8 +419,7 @@ void _setClusteredExpireAfterSeconds(OperationContext* opCtx,
}
invariant(newExpireAfterSeconds >= 0);
- DurableCatalog::get(opCtx)->updateClusteredIndexTTLSetting(
- opCtx, coll->getCatalogId(), newExpireAfterSeconds);
+ coll->updateClusteredIndexTTLSetting(opCtx, newExpireAfterSeconds);
}
Status _collModInternal(OperationContext* opCtx,
@@ -526,15 +523,16 @@ Status _collModInternal(OperationContext* opCtx,
// options to provide to the OpObserver. TTL index updates aren't a part of collection
// options so we save the relevant TTL index data in a separate object.
- CollectionOptions oldCollOptions =
- DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, coll->getCatalogId());
+ const CollectionOptions& oldCollOptions = coll->getCollectionOptions();
boost::optional<IndexCollModInfo> indexCollModInfo;
// Handle collMod operation type appropriately.
if (clusteredIndexExpireAfterSeconds) {
- _setClusteredExpireAfterSeconds(
- opCtx, oldCollOptions, coll.getCollection(), clusteredIndexExpireAfterSeconds);
+ _setClusteredExpireAfterSeconds(opCtx,
+ oldCollOptions,
+ coll.getWritableCollection(),
+ clusteredIndexExpireAfterSeconds);
}
if (indexExpireAfterSeconds || indexHidden) {
@@ -550,10 +548,8 @@ Status _collModInternal(OperationContext* opCtx,
if (SimpleBSONElementComparator::kInstance.evaluate(oldExpireSecs !=
newExpireSecs)) {
// Change the value of "expireAfterSeconds" on disk.
- DurableCatalog::get(opCtx)->updateTTLSetting(opCtx,
- coll->getCatalogId(),
- idx->indexName(),
- newExpireSecs.safeNumberLong());
+ coll.getWritableCollection()->updateTTLSetting(
+ opCtx, idx->indexName(), newExpireSecs.safeNumberLong());
}
}
@@ -564,8 +560,8 @@ Status _collModInternal(OperationContext* opCtx,
// Make sure when we set 'hidden' to false, we can remove the hidden field from
// catalog.
if (SimpleBSONElementComparator::kInstance.evaluate(oldHidden != newHidden)) {
- DurableCatalog::get(opCtx)->updateHiddenSetting(
- opCtx, coll->getCatalogId(), idx->indexName(), newHidden.booleanSafe());
+ coll.getWritableCollection()->updateHiddenSetting(
+ opCtx, idx->indexName(), newHidden.booleanSafe());
}
}
@@ -581,7 +577,8 @@ Status _collModInternal(OperationContext* opCtx,
// Notify the index catalog that the definition of this index changed. This will
// invalidate the local idx pointer. On rollback of this WUOW, the idx pointer in
// cmrNew will be invalidated and the local var idx pointer will be valid again.
- cmrNew.idx = coll.getWritableCollection()->getIndexCatalog()->refreshEntry(opCtx, idx);
+ cmrNew.idx = coll.getWritableCollection()->getIndexCatalog()->refreshEntry(
+ opCtx, coll.getWritableCollection(), idx);
opCtx->recoveryUnit()->registerChange(std::make_unique<CollModResultChange>(
oldExpireSecs, newExpireSecs, oldHidden, newHidden, result));
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index e95215b3832..9fdf42db69d 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -49,6 +49,7 @@
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/record_id.h"
#include "mongo/db/repl/oplog.h"
+#include "mongo/db/storage/bson_collection_catalog_entry.h"
#include "mongo/db/storage/capped_callback.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/snapshot.h"
@@ -194,6 +195,17 @@ public:
RecordId catalogId,
const CollectionOptions& options,
std::unique_ptr<RecordStore> rs) const = 0;
+
+ /**
+ * Constructs a Collection object. This does not persist any state to the storage engine,
+ * only constructs an in-memory representation of what already exists on disk.
+ */
+ virtual std::shared_ptr<Collection> make(
+ OperationContext* opCtx,
+ const NamespaceString& nss,
+ RecordId catalogId,
+ std::shared_ptr<BSONCollectionCatalogEntry::MetaData> metadata,
+ std::unique_ptr<RecordStore> rs) const = 0;
};
/**
@@ -288,11 +300,11 @@ public:
/**
* Sets a new namespace on this Collection, in the case that the Collection is being renamed.
* In general, reads and writes to Collection objects are synchronized using locks from the lock
- * manager. However, there is special synchronization for ns() and setNs() so that the
+ * manager. However, there is special synchronization for ns() and rename() so that the
* CollectionCatalog can perform UUID to namespace lookup without holding a Collection lock. See
- * CollectionCatalog::setCollectionNamespace().
+ * CollectionCatalog::onCollectionRename().
*/
- virtual void setNs(NamespaceString nss) = 0;
+ virtual Status rename(OperationContext* opCtx, const NamespaceString& nss, bool stayTemp) = 0;
virtual RecordId getCatalogId() const = 0;
@@ -499,11 +511,8 @@ public:
/**
* Returns true if this is a temporary collection.
- *
- * Calling this function is somewhat costly because it requires accessing the storage engine's
- * cache of collection information.
*/
- virtual bool isTemporary(OperationContext* opCtx) const = 0;
+ virtual bool isTemporary() const = 0;
/**
* Returns true if this collection is clustered on _id values. That is, its RecordIds are _id
@@ -511,9 +520,128 @@ public:
*/
virtual bool isClustered() const = 0;
+ /**
+ * Updates the expireAfterSeconds setting for a clustered TTL index in this Collection and the
+ * durable catalog.
+ */
+ virtual void updateClusteredIndexTTLSetting(OperationContext* opCtx,
+ boost::optional<int64_t> expireAfterSeconds) = 0;
+
virtual Status updateCappedSize(OperationContext* opCtx, long long newCappedSize) = 0;
//
+ // Index
+ //
+
+ /**
+ * Checks that the metadata for the index exists and matches the given spec.
+ */
+ virtual Status checkMetaDataForIndex(const std::string& indexName,
+ const BSONObj& spec) const = 0;
+
+ /*
+ * Updates the expireAfterSeconds field of the given index to the value in newExpireSecs.
+ * The specified index must already contain an expireAfterSeconds field, and the value in
+ * that field and newExpireSecs must both be numeric.
+ */
+ virtual void updateTTLSetting(OperationContext* opCtx,
+ StringData idxName,
+ long long newExpireSeconds) = 0;
+
+ /*
+ * Hide or unhide the given index. A hidden index will not be considered for use by the
+ * query planner.
+ */
+ virtual void updateHiddenSetting(OperationContext* opCtx, StringData idxName, bool hidden) = 0;
+
+ /**
+ * Updates the 'temp' setting for this collection.
+ */
+ virtual void setIsTemp(OperationContext* opCtx, bool isTemp) = 0;
+
+ /**
+ * Removes the index 'indexName' from the persisted collection catalog entry identified by
+ * 'catalogId'.
+ */
+ virtual void removeIndex(OperationContext* opCtx, StringData indexName) = 0;
+
+ /**
+ * Updates the persisted catalog entry for 'ns' with the new index and creates the index on
+ * disk.
+ *
+ * A passed 'buildUUID' implies that the index is part of a two-phase index build.
+ */
+ virtual Status prepareForIndexBuild(OperationContext* opCtx,
+ const IndexDescriptor* spec,
+ boost::optional<UUID> buildUUID,
+ bool isBackgroundSecondaryBuild) = 0;
+
+ /**
+ * Returns a UUID if the index is being built with the two-phase index build procedure.
+ */
+ virtual boost::optional<UUID> getIndexBuildUUID(StringData indexName) const = 0;
+
+ /**
+ * Returns true if the index identified by 'indexName' is multikey, and returns false otherwise.
+ *
+ * If the 'multikeyPaths' pointer is non-null, then it must point to an empty vector. If this
+ * index type supports tracking path-level multikey information in the catalog, then this
+ * function sets 'multikeyPaths' as the path components that cause this index to be multikey.
+ *
+ * In particular, if this function returns false and the index supports tracking path-level
+ * multikey information, then 'multikeyPaths' is initialized as a vector with size equal to the
+ * number of elements in the index key pattern of empty sets.
+ */
+ virtual bool isIndexMultikey(OperationContext* opCtx,
+ StringData indexName,
+ MultikeyPaths* multikeyPaths) const = 0;
+
+ /**
+ * Sets the index identified by 'indexName' to be multikey.
+ *
+ * If 'multikeyPaths' is non-empty, then it must be a vector with size equal to the number of
+ * elements in the index key pattern. Additionally, at least one path component of the indexed
+ * fields must cause this index to be multikey.
+ *
+ * This function returns true if the index metadata has changed, and returns false otherwise.
+ */
+ virtual bool setIndexIsMultikey(OperationContext* opCtx,
+ StringData indexName,
+ const MultikeyPaths& multikeyPaths) const = 0;
+
+ /**
+ * Sets the index to be multikey with the provided paths. This performs minimal validation of
+ * the inputs and is intended to be used internally to "correct" multikey metadata that drifts
+ * from the underlying collection data.
+ *
+ * When isMultikey is false, ignores multikeyPaths and resets the metadata appropriately based
+ * on the index descriptor. Otherwise, overwrites the existing multikeyPaths with the ones
+ * provided. This only writes multikey paths if the index type supports path-level tracking, and
+ * only sets the multikey boolean flag otherwise.
+ */
+ virtual void forceSetIndexIsMultikey(OperationContext* opCtx,
+ const IndexDescriptor* desc,
+ bool isMultikey,
+ const MultikeyPaths& multikeyPaths) const = 0;
+
+ virtual int getTotalIndexCount() const = 0;
+
+ virtual int getCompletedIndexCount() const = 0;
+
+ virtual BSONObj getIndexSpec(StringData indexName) const = 0;
+
+ virtual void getAllIndexes(std::vector<std::string>* names) const = 0;
+
+ virtual void getReadyIndexes(std::vector<std::string>* names) const = 0;
+
+ virtual bool isIndexPresent(StringData indexName) const = 0;
+
+ virtual bool isIndexReady(StringData indexName) const = 0;
+
+ virtual void replaceMetadata(OperationContext* opCtx,
+ std::shared_ptr<BSONCollectionCatalogEntry::MetaData> md) = 0;
+
+ //
// Stats
//
@@ -578,6 +706,12 @@ public:
virtual const CollatorInterface* getDefaultCollator() const = 0;
/**
+ * Returns a cached version of the Collection MetaData that matches the version of this
+ * Collection instance.
+ */
+ virtual const CollectionOptions& getCollectionOptions() const = 0;
+
+ /**
* Fills in each index specification with collation information from this collection and returns
* the new index specifications.
*
diff --git a/src/mongo/db/catalog/collection_catalog.cpp b/src/mongo/db/catalog/collection_catalog.cpp
index 397bef7086f..6b99d3d9afb 100644
--- a/src/mongo/db/catalog/collection_catalog.cpp
+++ b/src/mongo/db/catalog/collection_catalog.cpp
@@ -536,16 +536,10 @@ void CollectionCatalog::write(OperationContext* opCtx,
}
-void CollectionCatalog::setCollectionNamespace(OperationContext* opCtx,
- Collection* coll,
- const NamespaceString& fromCollection,
- const NamespaceString& toCollection) const {
- // Rather than maintain, in addition to the UUID -> Collection* mapping, an auxiliary
- // data structure with the UUID -> namespace mapping, the CollectionCatalog relies on
- // Collection::ns() to provide UUID to namespace lookup. In addition, the CollectionCatalog
- // does not require callers to hold locks.
+void CollectionCatalog::onCollectionRename(OperationContext* opCtx,
+ Collection* coll,
+ const NamespaceString& fromCollection) const {
invariant(coll);
- coll->setNs(toCollection);
auto& uncommittedCatalogUpdates = getUncommittedCatalogUpdates(opCtx);
uncommittedCatalogUpdates.rename(coll, fromCollection);
@@ -563,18 +557,6 @@ void CollectionCatalog::dropCollection(OperationContext* opCtx, Collection* coll
PublishCatalogUpdates::ensureRegisteredWithRecoveryUnit(opCtx, uncommittedCatalogUpdates);
}
-void CollectionCatalog::dropCollection(OperationContext* opCtx, const CollectionPtr& coll) const {
- invariant(coll);
- invariant(opCtx->lockState()->isCollectionLockedForMode(coll->ns(), MODE_X));
-
- auto& uncommittedCatalogUpdates = getUncommittedCatalogUpdates(opCtx);
- uncommittedCatalogUpdates.drop(coll.get());
-
- // Ensure we have registered publish change if this collection haven't been made writable
- // previously
- PublishCatalogUpdates::ensureRegisteredWithRecoveryUnit(opCtx, uncommittedCatalogUpdates);
-}
-
void CollectionCatalog::onCloseDatabase(OperationContext* opCtx, std::string dbName) {
invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_X));
auto rid = ResourceId(RESOURCE_DATABASE, dbName);
diff --git a/src/mongo/db/catalog/collection_catalog.h b/src/mongo/db/catalog/collection_catalog.h
index 00a80372821..ff5fda03fdf 100644
--- a/src/mongo/db/catalog/collection_catalog.h
+++ b/src/mongo/db/catalog/collection_catalog.h
@@ -150,16 +150,14 @@ public:
static void write(OperationContext* opCtx, CatalogWriteFn job);
/**
- * This function is responsible for safely setting the namespace string inside 'coll' to the
- * value of 'toCollection'. The caller need not hold locks on the collection.
+ * This function is responsible for safely tracking a Collection rename within a
+ * WriteUnitOfWork.
*
- * Must be called within a WriteUnitOfWork. The Collection namespace will be set back to
- * 'fromCollection' if the WriteUnitOfWork aborts.
+ * Must be called within a WriteUnitOfWork.
*/
- void setCollectionNamespace(OperationContext* opCtx,
- Collection* coll,
- const NamespaceString& fromCollection,
- const NamespaceString& toCollection) const;
+ void onCollectionRename(OperationContext* opCtx,
+ Collection* coll,
+ const NamespaceString& fromCollection) const;
/**
* Marks a collection as dropped for this OperationContext. Will cause the collection
@@ -169,7 +167,6 @@ public:
* Must be called within a WriteUnitOfWork.
*/
void dropCollection(OperationContext* opCtx, Collection* coll) const;
- void dropCollection(OperationContext* opCtx, const CollectionPtr& coll) const;
void onCloseDatabase(OperationContext* opCtx, std::string dbName);
diff --git a/src/mongo/db/catalog/collection_catalog_test.cpp b/src/mongo/db/catalog/collection_catalog_test.cpp
index 07a7a0e4602..9bee62814aa 100644
--- a/src/mongo/db/catalog/collection_catalog_test.cpp
+++ b/src/mongo/db/catalog/collection_catalog_test.cpp
@@ -37,7 +37,6 @@
#include "mongo/db/concurrency/lock_manager_defs.h"
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/service_context_d_test_fixture.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/unittest/death_test.h"
#include "mongo/unittest/unittest.h"
@@ -444,7 +443,7 @@ TEST_F(CollectionCatalogTest, RenameCollection) {
ASSERT_EQUALS(catalog.lookupCollectionByUUID(&opCtx, uuid), collection);
NamespaceString newNss(nss.db(), "newcol");
- collection->setNs(newNss);
+ ASSERT_OK(collection->rename(&opCtx, newNss, false));
ASSERT_EQ(collection->ns(), newNss);
ASSERT_EQUALS(catalog.lookupCollectionByUUID(&opCtx, uuid), collection);
}
@@ -704,9 +703,7 @@ TEST_F(ForEachCollectionFromDbTest, ForEachCollectionFromDbWithPredicate) {
[&](const CollectionPtr& collection) {
ASSERT_TRUE(
opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_NONE));
- return DurableCatalog::get(opCtx)
- ->getCollectionOptions(opCtx, collection->getCatalogId())
- .temp;
+ return collection->getCollectionOptions().temp;
});
ASSERT_EQUALS(numCollectionsTraversed, 2);
@@ -728,9 +725,7 @@ TEST_F(ForEachCollectionFromDbTest, ForEachCollectionFromDbWithPredicate) {
[&](const CollectionPtr& collection) {
ASSERT_TRUE(
opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_NONE));
- return !DurableCatalog::get(opCtx)
- ->getCollectionOptions(opCtx, collection->getCatalogId())
- .temp;
+ return !collection->getCollectionOptions().temp;
});
ASSERT_EQUALS(numCollectionsTraversed, 1);
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index 7232e02c390..a7f84f1007c 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -45,6 +45,7 @@
#include "mongo/db/catalog/index_catalog_impl.h"
#include "mongo/db/catalog/index_consistency.h"
#include "mongo/db/catalog/index_key_validate.h"
+#include "mongo/db/catalog/uncommitted_multikey.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/commands/server_status_metric.h"
#include "mongo/db/concurrency/d_concurrency.h"
@@ -237,6 +238,10 @@ private:
WriteUnitOfWork::RecoveryUnitState _originalRecoveryUnitState;
};
+bool indexTypeSupportsPathLevelMultikeyTracking(StringData accessMethod) {
+ return accessMethod == IndexNames::BTREE || accessMethod == IndexNames::GEO_2DSPHERE;
+}
+
} // namespace
CollectionImpl::SharedState::SharedState(CollectionImpl* collection,
@@ -248,8 +253,7 @@ CollectionImpl::SharedState::SharedState(CollectionImpl* collection,
: nullptr),
_needCappedLock(options.capped && collection->ns().db() != "local"),
_isCapped(options.capped),
- _cappedMaxDocs(options.cappedMaxDocs),
- _cappedMaxSize(options.cappedSize) {
+ _cappedMaxDocs(options.cappedMaxDocs) {
if (_cappedNotifier) {
_recordStore->setCappedCallback(this);
}
@@ -292,7 +296,16 @@ CollectionImpl::CollectionImpl(OperationContext* opCtx,
_catalogId(catalogId),
_uuid(options.uuid.get()),
_shared(std::make_shared<SharedState>(this, std::move(recordStore), options)),
- _indexCatalog(std::make_unique<IndexCatalogImpl>(this)) {}
+ _indexCatalog(std::make_unique<IndexCatalogImpl>()) {}
+
+CollectionImpl::CollectionImpl(OperationContext* opCtx,
+ const NamespaceString& nss,
+ RecordId catalogId,
+ std::shared_ptr<BSONCollectionCatalogEntry::MetaData> metadata,
+ std::unique_ptr<RecordStore> recordStore)
+ : CollectionImpl(opCtx, nss, catalogId, metadata->options, std::move(recordStore)) {
+ _metadata = std::move(metadata);
+}
CollectionImpl::~CollectionImpl() {
_shared->instanceDeleted(this);
@@ -313,9 +326,18 @@ std::shared_ptr<Collection> CollectionImpl::FactoryImpl::make(
return std::make_shared<CollectionImpl>(opCtx, nss, catalogId, options, std::move(rs));
}
+std::shared_ptr<Collection> CollectionImpl::FactoryImpl::make(
+ OperationContext* opCtx,
+ const NamespaceString& nss,
+ RecordId catalogId,
+ std::shared_ptr<BSONCollectionCatalogEntry::MetaData> metadata,
+ std::unique_ptr<RecordStore> rs) const {
+ return std::make_shared<CollectionImpl>(
+ opCtx, nss, catalogId, std::move(metadata), std::move(rs));
+}
+
std::shared_ptr<Collection> CollectionImpl::clone() const {
auto cloned = std::make_shared<CollectionImpl>(*this);
- checked_cast<IndexCatalogImpl*>(cloned->_indexCatalog.get())->setCollection(cloned.get());
cloned->_shared->instanceCreated(cloned.get());
// We are per definition committed if we get cloned
cloned->_cachedCommitted = true;
@@ -327,8 +349,9 @@ SharedCollectionDecorations* CollectionImpl::getSharedDecorations() const {
}
void CollectionImpl::init(OperationContext* opCtx) {
- auto collectionOptions =
- DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, getCatalogId());
+ _metadata = DurableCatalog::get(opCtx)->getMetaData(opCtx, getCatalogId());
+ const auto& collectionOptions = _metadata->options;
+
_shared->_collator = parseCollation(opCtx, _ns, collectionOptions.collation);
auto validatorDoc = collectionOptions.validator.getOwned();
@@ -337,11 +360,8 @@ void CollectionImpl::init(OperationContext* opCtx) {
// Make sure to copy the action and level before parsing MatchExpression, since certain features
// are not supported with certain combinations of action and level.
- _validationAction = collectionOptions.validationAction;
- _validationLevel = collectionOptions.validationLevel;
if (collectionOptions.recordPreImages) {
uassertStatusOK(validatePreImageRecording(opCtx, _ns));
- _recordPreImages = true;
}
// Store the result (OK / error) of parsing the validator, but do not enforce that the result is
@@ -383,7 +403,7 @@ void CollectionImpl::init(OperationContext* opCtx) {
}
}
- getIndexCatalog()->init(opCtx).transitional_ignore();
+ getIndexCatalog()->init(opCtx, this).transitional_ignore();
_initialized = true;
}
@@ -472,7 +492,7 @@ Status CollectionImpl::checkValidation(OperationContext* opCtx, const BSONObj& d
if (!validatorMatchExpr)
return Status::OK();
- if (validationLevelOrDefault(_validationLevel) == ValidationLevelEnum::off)
+ if (validationLevelOrDefault(_metadata->options.validationLevel) == ValidationLevelEnum::off)
return Status::OK();
if (DocumentValidationSettings::get(opCtx).isSchemaValidationDisabled())
@@ -505,7 +525,8 @@ Status CollectionImpl::checkValidation(OperationContext* opCtx, const BSONObj& d
// writes which result in the validator throwing an exception are accepted when we're in
// warn mode.
if (!isFCVAtLeast47 &&
- validationActionOrDefault(_validationAction) == ValidationActionEnum::error) {
+ validationActionOrDefault(_metadata->options.validationAction) ==
+ ValidationActionEnum::error) {
e.addContext("Document validation failed");
throw;
}
@@ -516,7 +537,8 @@ Status CollectionImpl::checkValidation(OperationContext* opCtx, const BSONObj& d
generatedError = doc_validation_error::generateError(*validatorMatchExpr, document);
}
- if (validationActionOrDefault(_validationAction) == ValidationActionEnum::warn) {
+ if (validationActionOrDefault(_metadata->options.validationAction) ==
+ ValidationActionEnum::warn) {
LOGV2_WARNING(20294,
"Document would fail validation",
"namespace"_attr = ns(),
@@ -568,8 +590,10 @@ Collection::Validator CollectionImpl::parseValidator(
// If the validation action is "warn" or the level is "moderate", then disallow any encryption
// keywords. This is to prevent any plaintext data from showing up in the logs.
- if (validationActionOrDefault(_validationAction) == ValidationActionEnum::warn ||
- validationLevelOrDefault(_validationLevel) == ValidationLevelEnum::moderate)
+ if (validationActionOrDefault(_metadata->options.validationAction) ==
+ ValidationActionEnum::warn ||
+ validationLevelOrDefault(_metadata->options.validationLevel) ==
+ ValidationLevelEnum::moderate)
allowedFeatures &= ~MatchExpressionParser::AllowedFeatures::kEncryptKeywords;
auto statusWithMatcher =
@@ -846,7 +870,7 @@ bool CollectionImpl::_cappedAndNeedDelete(OperationContext* opCtx) const {
return false;
}
- if (dataSize(opCtx) > _shared->_cappedMaxSize) {
+ if (dataSize(opCtx) > _shared->_collectionLatest->getCollectionOptions().cappedSize) {
return true;
}
@@ -921,8 +945,9 @@ void CollectionImpl::_cappedDeleteAsNeeded(OperationContext* opCtx,
const long long currentDataSize = dataSize(opCtx);
const long long currentNumRecords = numRecords(opCtx);
+ const auto cappedMaxSize = _shared->_collectionLatest->getCollectionOptions().cappedSize;
const long long sizeOverCap =
- (currentDataSize > _shared->_cappedMaxSize) ? currentDataSize - _shared->_cappedMaxSize : 0;
+ (currentDataSize > cappedMaxSize) ? currentDataSize - cappedMaxSize : 0;
const long long docsOverCap =
(_shared->_cappedMaxDocs != 0 && currentNumRecords > _shared->_cappedMaxDocs)
? currentNumRecords - _shared->_cappedMaxDocs
@@ -978,8 +1003,12 @@ void CollectionImpl::_cappedDeleteAsNeeded(OperationContext* opCtx,
}
int64_t unusedKeysDeleted = 0;
- _indexCatalog->unindexRecord(
- opCtx, doc, record->id, /*logIfError=*/false, &unusedKeysDeleted);
+ _indexCatalog->unindexRecord(opCtx,
+ CollectionPtr(this, CollectionPtr::NoYieldTag{}),
+ doc,
+ record->id,
+ /*logIfError=*/false,
+ &unusedKeysDeleted);
// We're about to delete the record our cursor is positioned on, so advance the cursor.
RecordId toDelete = record->id;
@@ -1040,7 +1069,8 @@ Status CollectionImpl::SharedState::aboutToDeleteCapped(OperationContext* opCtx,
RecordData data) {
BSONObj doc = data.releaseToBson();
int64_t* const nullKeysDeleted = nullptr;
- _collectionLatest->getIndexCatalog()->unindexRecord(opCtx, doc, loc, false, nullKeysDeleted);
+ _collectionLatest->getIndexCatalog()->unindexRecord(
+ opCtx, _collectionLatest, doc, loc, false, nullKeysDeleted);
// We are not capturing and reporting to OpDebug the 'keysDeleted' by unindexRecord(). It is
// questionable whether reporting will add diagnostic value to users and may instead be
@@ -1086,7 +1116,12 @@ void CollectionImpl::deleteDocument(OperationContext* opCtx,
}
int64_t keysDeleted;
- _indexCatalog->unindexRecord(opCtx, doc.value(), loc, noWarn, &keysDeleted);
+ _indexCatalog->unindexRecord(opCtx,
+ CollectionPtr(this, CollectionPtr::NoYieldTag{}),
+ doc.value(),
+ loc,
+ noWarn,
+ &keysDeleted);
_shared->_recordStore->deleteRecord(opCtx, loc);
OpObserver::OplogDeleteEntryArgs deleteArgs{nullptr, fromMigrate, getRecordPreImages()};
@@ -1113,7 +1148,8 @@ RecordId CollectionImpl::updateDocument(OperationContext* opCtx,
{
auto status = checkValidation(opCtx, newDoc);
if (!status.isOK()) {
- if (validationLevelOrDefault(_validationLevel) == ValidationLevelEnum::strict) {
+ if (validationLevelOrDefault(_metadata->options.validationLevel) ==
+ ValidationLevelEnum::strict) {
uassertStatusOK(status);
}
// moderate means we have to check the old doc
@@ -1232,14 +1268,25 @@ StatusWith<RecordData> CollectionImpl::updateDocumentWithDamages(
return newRecStatus;
}
-bool CollectionImpl::isTemporary(OperationContext* opCtx) const {
- return DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, getCatalogId()).temp;
+bool CollectionImpl::isTemporary() const {
+ return _metadata->options.temp;
}
bool CollectionImpl::isClustered() const {
return _clustered;
}
+void CollectionImpl::updateClusteredIndexTTLSetting(OperationContext* opCtx,
+ boost::optional<int64_t> expireAfterSeconds) {
+ uassert(5401000,
+ "The collection doesn't have a clustered index",
+ _metadata->options.clusteredIndex);
+
+ _writeMetadata(opCtx, [&](BSONCollectionCatalogEntry::MetaData& md) {
+ md.options.clusteredIndex->setExpireAfterSeconds(expireAfterSeconds);
+ });
+}
+
Status CollectionImpl::updateCappedSize(OperationContext* opCtx, long long newCappedSize) {
invariant(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
@@ -1255,20 +1302,23 @@ Status CollectionImpl::updateCappedSize(OperationContext* opCtx, long long newCa
}
}
- _shared->_cappedMaxSize = newCappedSize;
+ _writeMetadata(opCtx, [&](BSONCollectionCatalogEntry::MetaData& md) {
+ md.options.cappedSize = newCappedSize;
+ });
return Status::OK();
}
bool CollectionImpl::getRecordPreImages() const {
- return _recordPreImages;
+ return _metadata->options.recordPreImages;
}
void CollectionImpl::setRecordPreImages(OperationContext* opCtx, bool val) {
if (val) {
uassertStatusOK(validatePreImageRecording(opCtx, _ns));
}
- DurableCatalog::get(opCtx)->setRecordPreImages(opCtx, getCatalogId(), val);
- _recordPreImages = val;
+
+ _writeMetadata(
+ opCtx, [&](BSONCollectionCatalogEntry::MetaData& md) { md.options.recordPreImages = val; });
}
bool CollectionImpl::isCapped() const {
@@ -1280,7 +1330,7 @@ long long CollectionImpl::getCappedMaxDocs() const {
}
long long CollectionImpl::getCappedMaxSize() const {
- return _shared->_cappedMaxSize;
+ return _metadata->options.cappedSize;
}
CappedCallback* CollectionImpl::getCappedCallback() {
@@ -1390,7 +1440,7 @@ Status CollectionImpl::truncate(OperationContext* opCtx) {
}
// 2) drop indexes
- _indexCatalog->dropAllIndexes(opCtx, true);
+ _indexCatalog->dropAllIndexes(opCtx, this, true);
// 3) truncate record store
auto status = _shared->_recordStore->truncate(opCtx);
@@ -1399,7 +1449,8 @@ Status CollectionImpl::truncate(OperationContext* opCtx) {
// 4) re-create indexes
for (size_t i = 0; i < indexSpecs.size(); i++) {
- status = _indexCatalog->createIndexOnEmptyCollection(opCtx, indexSpecs[i]).getStatus();
+ status =
+ _indexCatalog->createIndexOnEmptyCollection(opCtx, this, indexSpecs[i]).getStatus();
if (!status.isOK())
return status;
}
@@ -1420,32 +1471,36 @@ void CollectionImpl::cappedTruncateAfter(OperationContext* opCtx,
void CollectionImpl::setValidator(OperationContext* opCtx, Validator validator) {
invariant(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
- DurableCatalog::get(opCtx)->updateValidator(opCtx,
- getCatalogId(),
- validator.validatorDoc.getOwned(),
- validationLevelOrDefault(_validationLevel),
- validationActionOrDefault(_validationAction));
+ auto validatorDoc = validator.validatorDoc.getOwned();
+ auto validationLevel = validationLevelOrDefault(_metadata->options.validationLevel);
+ auto validationAction = validationActionOrDefault(_metadata->options.validationAction);
+
+ _writeMetadata(opCtx, [&](BSONCollectionCatalogEntry::MetaData& md) {
+ md.options.validator = validatorDoc;
+ md.options.validationLevel = validationLevel;
+ md.options.validationAction = validationAction;
+ });
_validator = std::move(validator);
}
boost::optional<ValidationLevelEnum> CollectionImpl::getValidationLevel() const {
- return _validationLevel;
+ return _metadata->options.validationLevel;
}
boost::optional<ValidationActionEnum> CollectionImpl::getValidationAction() const {
- return _validationAction;
+ return _metadata->options.validationAction;
}
Status CollectionImpl::setValidationLevel(OperationContext* opCtx, ValidationLevelEnum newLevel) {
invariant(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
- _validationLevel = newLevel;
+ auto storedValidationLevel = validationLevelOrDefault(newLevel);
// Reparse the validator as there are some features which are only supported with certain
// validation levels.
auto allowedFeatures = MatchExpressionParser::kAllowAllSpecialFeatures;
- if (validationLevelOrDefault(_validationLevel) == ValidationLevelEnum::moderate)
+ if (storedValidationLevel == ValidationLevelEnum::moderate)
allowedFeatures &= ~MatchExpressionParser::AllowedFeatures::kEncryptKeywords;
_validator = parseValidator(opCtx, _validator.validatorDoc, allowedFeatures);
@@ -1453,11 +1508,9 @@ Status CollectionImpl::setValidationLevel(OperationContext* opCtx, ValidationLev
return _validator.getStatus();
}
- DurableCatalog::get(opCtx)->updateValidator(opCtx,
- getCatalogId(),
- _validator.validatorDoc,
- validationLevelOrDefault(_validationLevel),
- validationActionOrDefault(_validationAction));
+ _writeMetadata(opCtx, [&](BSONCollectionCatalogEntry::MetaData& md) {
+ md.options.validationLevel = storedValidationLevel;
+ });
return Status::OK();
}
@@ -1466,12 +1519,12 @@ Status CollectionImpl::setValidationAction(OperationContext* opCtx,
ValidationActionEnum newAction) {
invariant(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
- _validationAction = newAction;
+ auto storedValidationAction = validationActionOrDefault(newAction);
// Reparse the validator as there are some features which are only supported with certain
// validation actions.
auto allowedFeatures = MatchExpressionParser::kAllowAllSpecialFeatures;
- if (validationActionOrDefault(_validationAction) == ValidationActionEnum::warn)
+ if (storedValidationAction == ValidationActionEnum::warn)
allowedFeatures &= ~MatchExpressionParser::AllowedFeatures::kEncryptKeywords;
_validator = parseValidator(opCtx, _validator.validatorDoc, allowedFeatures);
@@ -1479,11 +1532,9 @@ Status CollectionImpl::setValidationAction(OperationContext* opCtx,
return _validator.getStatus();
}
- DurableCatalog::get(opCtx)->updateValidator(opCtx,
- getCatalogId(),
- _validator.validatorDoc,
- validationLevelOrDefault(_validationLevel),
- validationActionOrDefault(_validationAction));
+ _writeMetadata(opCtx, [&](BSONCollectionCatalogEntry::MetaData& md) {
+ md.options.validationAction = storedValidationAction;
+ });
return Status::OK();
}
@@ -1494,17 +1545,19 @@ Status CollectionImpl::updateValidator(OperationContext* opCtx,
boost::optional<ValidationActionEnum> newAction) {
invariant(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
- DurableCatalog::get(opCtx)->updateValidator(
- opCtx, getCatalogId(), newValidator, newLevel, newAction);
-
auto validator =
parseValidator(opCtx, newValidator, MatchExpressionParser::kAllowAllSpecialFeatures);
if (!validator.isOK()) {
return validator.getStatus();
}
+
+ _writeMetadata(opCtx, [&](BSONCollectionCatalogEntry::MetaData& md) {
+ md.options.validator = newValidator;
+ md.options.validationLevel = newLevel;
+ md.options.validationAction = newAction;
+ });
+
_validator = std::move(validator);
- _validationLevel = newLevel;
- _validationAction = newAction;
return Status::OK();
}
@@ -1516,6 +1569,10 @@ const CollatorInterface* CollectionImpl::getDefaultCollator() const {
return _shared->_collator.get();
}
+const CollectionOptions& CollectionImpl::getCollectionOptions() const {
+ return _metadata->options;
+}
+
StatusWith<std::vector<BSONObj>> CollectionImpl::addCollationDefaultsToIndexSpecsForCreate(
OperationContext* opCtx, const std::vector<BSONObj>& originalIndexSpecs) const {
std::vector<BSONObj> newIndexSpecs;
@@ -1575,14 +1632,35 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> CollectionImpl::makePlanExe
opCtx, &yieldableCollection, yieldPolicy, direction, resumeAfterRecordId);
}
-void CollectionImpl::setNs(NamespaceString nss) {
+Status CollectionImpl::rename(OperationContext* opCtx, const NamespaceString& nss, bool stayTemp) {
+ auto metadata = std::make_shared<BSONCollectionCatalogEntry::MetaData>(*_metadata);
+ metadata->ns = nss.ns();
+ if (!stayTemp)
+ metadata->options.temp = false;
+ Status status =
+ DurableCatalog::get(opCtx)->renameCollection(opCtx, getCatalogId(), nss, *metadata);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ _metadata = std::move(metadata);
_ns = std::move(nss);
_shared->_recordStore.get()->setNs(_ns);
+ return status;
}
void CollectionImpl::indexBuildSuccess(OperationContext* opCtx, IndexCatalogEntry* index) {
- DurableCatalog::get(opCtx)->indexBuildSuccess(
- opCtx, getCatalogId(), index->descriptor()->indexName());
+ const auto& indexName = index->descriptor()->indexName();
+ int offset = _metadata->findIndexOffset(indexName);
+ invariant(offset >= 0,
+ str::stream() << "cannot mark index " << indexName << " as ready @ " << getCatalogId()
+ << " : " << _metadata->toBSON());
+
+ _writeMetadata(opCtx, [&](BSONCollectionCatalogEntry::MetaData& md) {
+ md.indexes[offset].ready = true;
+ md.indexes[offset].buildUUID = boost::none;
+ });
+
_indexCatalog->indexBuildSuccess(opCtx, this, index);
}
@@ -1590,4 +1668,342 @@ void CollectionImpl::establishOplogCollectionForLogging(OperationContext* opCtx)
repl::establishOplogCollectionForLogging(opCtx, this);
}
+Status CollectionImpl::checkMetaDataForIndex(const std::string& indexName,
+ const BSONObj& spec) const {
+ int offset = _metadata->findIndexOffset(indexName);
+ if (offset < 0) {
+ return {ErrorCodes::IndexNotFound,
+ str::stream() << "Index [" << indexName
+ << "] not found in metadata for recordId: " << getCatalogId()};
+ }
+
+ if (spec.woCompare(_metadata->indexes[offset].spec)) {
+ return {ErrorCodes::BadValue,
+ str::stream() << "Spec for index [" << indexName
+ << "] does not match spec in the metadata for recordId: "
+ << getCatalogId() << ". Spec: " << spec
+ << " metadata's spec: " << _metadata->indexes[offset].spec};
+ }
+
+ return Status::OK();
+}
+
+void CollectionImpl::updateTTLSetting(OperationContext* opCtx,
+ StringData idxName,
+ long long newExpireSeconds) {
+ int offset = _metadata->findIndexOffset(idxName);
+ invariant(offset >= 0,
+ str::stream() << "cannot update TTL setting for index " << idxName << " @ "
+ << getCatalogId() << " : " << _metadata->toBSON());
+ _writeMetadata(opCtx, [&](BSONCollectionCatalogEntry::MetaData& md) {
+ md.indexes[offset].updateTTLSetting(newExpireSeconds);
+ });
+}
+
+void CollectionImpl::updateHiddenSetting(OperationContext* opCtx, StringData idxName, bool hidden) {
+ int offset = _metadata->findIndexOffset(idxName);
+ invariant(offset >= 0);
+
+ _writeMetadata(opCtx, [&](BSONCollectionCatalogEntry::MetaData& md) {
+ md.indexes[offset].updateHiddenSetting(hidden);
+ });
+}
+
+void CollectionImpl::setIsTemp(OperationContext* opCtx, bool isTemp) {
+ _writeMetadata(opCtx,
+ [&](BSONCollectionCatalogEntry::MetaData& md) { md.options.temp = isTemp; });
+}
+
+void CollectionImpl::removeIndex(OperationContext* opCtx, StringData indexName) {
+ if (_metadata->findIndexOffset(indexName) < 0)
+ return; // never had the index so nothing to do.
+
+ _writeMetadata(opCtx,
+ [&](BSONCollectionCatalogEntry::MetaData& md) { md.eraseIndex(indexName); });
+}
+
+Status CollectionImpl::prepareForIndexBuild(OperationContext* opCtx,
+ const IndexDescriptor* spec,
+ boost::optional<UUID> buildUUID,
+ bool isBackgroundSecondaryBuild) {
+
+ auto durableCatalog = DurableCatalog::get(opCtx);
+ auto imd = durableCatalog->prepareIndexMetaDataForIndexBuild(
+ opCtx, spec, buildUUID, isBackgroundSecondaryBuild);
+
+ // Confirm that our index is not already in the current metadata.
+ invariant(-1 == _metadata->findIndexOffset(imd.name()));
+
+ _writeMetadata(opCtx,
+ [&](BSONCollectionCatalogEntry::MetaData& md) { md.indexes.push_back(imd); });
+
+ return durableCatalog->createIndex(opCtx, getCatalogId(), getCollectionOptions(), spec);
+}
+
+boost::optional<UUID> CollectionImpl::getIndexBuildUUID(StringData indexName) const {
+ int offset = _metadata->findIndexOffset(indexName);
+ invariant(offset >= 0,
+ str::stream() << "cannot get build UUID for index " << indexName << " @ "
+ << getCatalogId() << " : " << _metadata->toBSON());
+ return _metadata->indexes[offset].buildUUID;
+}
+
+bool CollectionImpl::isIndexMultikey(OperationContext* opCtx,
+ StringData indexName,
+ MultikeyPaths* multikeyPaths) const {
+ auto isMultikey =
+ [this, multikeyPaths, indexName](const BSONCollectionCatalogEntry::MetaData& metadata) {
+ int offset = metadata.findIndexOffset(indexName);
+ invariant(offset >= 0,
+ str::stream() << "cannot get multikey for index " << indexName << " @ "
+ << getCatalogId() << " : " << metadata.toBSON());
+
+ const auto& index = metadata.indexes[offset];
+ stdx::lock_guard lock(index.multikeyMutex);
+ if (multikeyPaths && !index.multikeyPaths.empty()) {
+ *multikeyPaths = index.multikeyPaths;
+ }
+
+ return index.multikey;
+ };
+
+ const auto& uncommittedMultikeys = UncommittedMultikey::get(opCtx).resources();
+ if (uncommittedMultikeys) {
+ if (auto it = uncommittedMultikeys->find(this); it != uncommittedMultikeys->end()) {
+ return isMultikey(it->second);
+ }
+ }
+
+ return isMultikey(*_metadata);
+}
+
+bool CollectionImpl::setIndexIsMultikey(OperationContext* opCtx,
+ StringData indexName,
+ const MultikeyPaths& multikeyPaths) const {
+
+ auto setMultikey = [this, name = indexName.toString(), multikeyPaths](
+ const BSONCollectionCatalogEntry::MetaData& metadata) {
+ int offset = metadata.findIndexOffset(name);
+ invariant(offset >= 0,
+ str::stream() << "cannot set index " << name << " as multikey @ "
+ << getCatalogId() << " : " << metadata.toBSON());
+
+ {
+ const auto& index = metadata.indexes[offset];
+ stdx::lock_guard lock(index.multikeyMutex);
+
+ const bool tracksPathLevelMultikeyInfo =
+ !metadata.indexes[offset].multikeyPaths.empty();
+ if (tracksPathLevelMultikeyInfo) {
+ invariant(!multikeyPaths.empty());
+ invariant(multikeyPaths.size() == metadata.indexes[offset].multikeyPaths.size());
+ } else {
+ invariant(multikeyPaths.empty());
+
+ if (metadata.indexes[offset].multikey) {
+ // The index is already set as multikey and we aren't tracking path-level
+ // multikey information for it. We return false to indicate that the index
+ // metadata is unchanged.
+ return false;
+ }
+ }
+
+ index.multikey = true;
+
+ if (tracksPathLevelMultikeyInfo) {
+ bool newPathIsMultikey = false;
+ bool somePathIsMultikey = false;
+
+ // Store new path components that cause this index to be multikey in catalog's
+ // index metadata.
+ for (size_t i = 0; i < multikeyPaths.size(); ++i) {
+ MultikeyComponents& indexMultikeyComponents = index.multikeyPaths[i];
+ for (const auto multikeyComponent : multikeyPaths[i]) {
+ auto result = indexMultikeyComponents.insert(multikeyComponent);
+ newPathIsMultikey = newPathIsMultikey || result.second;
+ somePathIsMultikey = true;
+ }
+ }
+
+ // If all of the sets in the multikey paths vector were empty, then no component
+ // of any indexed field caused the index to be multikey. setIndexIsMultikey()
+ // therefore shouldn't have been called.
+ invariant(somePathIsMultikey);
+
+ if (!newPathIsMultikey) {
+ // We return false to indicate that the index metadata is unchanged.
+ return false;
+ }
+ }
+ }
+ return true;
+ };
+
+ // Make a copy that is safe to read without locks that we insert in the durable catalog, we only
+ // update the stored metadata on successful commit. The pending update is stored as a decoration
+ // on the OperationContext to allow us to read our own writes.
+ auto& uncommittedMultikeys = UncommittedMultikey::get(opCtx).resources();
+ if (!uncommittedMultikeys) {
+ uncommittedMultikeys = std::make_shared<UncommittedMultikey::MultikeyMap>();
+ }
+ BSONCollectionCatalogEntry::MetaData* metadata = nullptr;
+ if (auto it = uncommittedMultikeys->find(this); it != uncommittedMultikeys->end()) {
+ metadata = &it->second;
+ } else {
+ metadata = &uncommittedMultikeys->emplace(this, *_metadata).first->second;
+ }
+
+ if (!setMultikey(*metadata))
+ return false;
+ DurableCatalog::get(opCtx)->putMetaData(opCtx, getCatalogId(), *metadata);
+
+ opCtx->recoveryUnit()->onCommit(
+ [this, uncommittedMultikeys, setMultikey = std::move(setMultikey)](auto ts) {
+ // Merge in changes to this index, other indexes may have been updated since we made our
+ // copy. Don't check for result as another thread could be setting multikey at the same
+ // time
+ setMultikey(*_metadata);
+ uncommittedMultikeys->erase(this);
+ });
+ opCtx->recoveryUnit()->onRollback(
+ [this, uncommittedMultikeys]() { uncommittedMultikeys->erase(this); });
+ return true;
+}
+
+void CollectionImpl::forceSetIndexIsMultikey(OperationContext* opCtx,
+ const IndexDescriptor* desc,
+ bool isMultikey,
+ const MultikeyPaths& multikeyPaths) const {
+ auto forceSetMultikey = [this,
+ isMultikey,
+ indexName = desc->indexName(),
+ accessMethod = desc->getAccessMethodName(),
+ numKeyPatternFields = desc->keyPattern().nFields(),
+ multikeyPaths](const BSONCollectionCatalogEntry::MetaData& metadata) {
+ int offset = metadata.findIndexOffset(indexName);
+ invariant(offset >= 0,
+ str::stream() << "cannot set index " << indexName << " multikey state @ "
+ << getCatalogId() << " : " << metadata.toBSON());
+
+ const auto& index = metadata.indexes[offset];
+ stdx::lock_guard lock(index.multikeyMutex);
+ index.multikey = isMultikey;
+ if (indexTypeSupportsPathLevelMultikeyTracking(accessMethod)) {
+ if (isMultikey) {
+ index.multikeyPaths = multikeyPaths;
+ } else {
+ index.multikeyPaths = MultikeyPaths{static_cast<size_t>(numKeyPatternFields)};
+ }
+ }
+ };
+
+ // Make a copy that is safe to read without locks that we insert in the durable catalog, we only
+ // update the stored metadata on successful commit. The pending update is stored as a decoration
+ // on the OperationContext to allow us to read our own writes.
+ auto& uncommittedMultikeys = UncommittedMultikey::get(opCtx).resources();
+ if (!uncommittedMultikeys) {
+ uncommittedMultikeys = std::make_shared<UncommittedMultikey::MultikeyMap>();
+ }
+ BSONCollectionCatalogEntry::MetaData* metadata = nullptr;
+ if (auto it = uncommittedMultikeys->find(this); it != uncommittedMultikeys->end()) {
+ metadata = &it->second;
+ } else {
+ metadata = &uncommittedMultikeys->emplace(this, *_metadata).first->second;
+ }
+ forceSetMultikey(*metadata);
+ DurableCatalog::get(opCtx)->putMetaData(opCtx, getCatalogId(), *metadata);
+ opCtx->recoveryUnit()->onCommit(
+ [this, uncommittedMultikeys, forceSetMultikey = std::move(forceSetMultikey)](auto ts) {
+ // Merge in changes to this index, other indexes may have been updated since we made our
+ // copy.
+ forceSetMultikey(*_metadata);
+ uncommittedMultikeys->erase(this);
+ });
+ opCtx->recoveryUnit()->onRollback(
+ [this, uncommittedMultikeys]() { uncommittedMultikeys->erase(this); });
+}
+
+int CollectionImpl::getTotalIndexCount() const {
+ return static_cast<int>(_metadata->indexes.size());
+}
+
+int CollectionImpl::getCompletedIndexCount() const {
+ int num = 0;
+ for (unsigned i = 0; i < _metadata->indexes.size(); i++) {
+ if (_metadata->indexes[i].ready)
+ num++;
+ }
+ return num;
+}
+
+BSONObj CollectionImpl::getIndexSpec(StringData indexName) const {
+ int offset = _metadata->findIndexOffset(indexName);
+ invariant(offset >= 0,
+ str::stream() << "cannot get index spec for " << indexName << " @ " << getCatalogId()
+ << " : " << _metadata->toBSON());
+
+ return _metadata->indexes[offset].spec;
+}
+
+void CollectionImpl::getAllIndexes(std::vector<std::string>* names) const {
+ for (unsigned i = 0; i < _metadata->indexes.size(); i++) {
+ names->push_back(_metadata->indexes[i].spec["name"].String());
+ }
+}
+
+void CollectionImpl::getReadyIndexes(std::vector<std::string>* names) const {
+ for (unsigned i = 0; i < _metadata->indexes.size(); i++) {
+ if (_metadata->indexes[i].ready)
+ names->push_back(_metadata->indexes[i].spec["name"].String());
+ }
+}
+
+bool CollectionImpl::isIndexPresent(StringData indexName) const {
+ int offset = _metadata->findIndexOffset(indexName);
+ return offset >= 0;
+}
+
+bool CollectionImpl::isIndexReady(StringData indexName) const {
+ int offset = _metadata->findIndexOffset(indexName);
+ invariant(offset >= 0,
+ str::stream() << "cannot get ready status for index " << indexName << " @ "
+ << getCatalogId() << " : " << _metadata->toBSON());
+ return _metadata->indexes[offset].ready;
+}
+
+void CollectionImpl::replaceMetadata(OperationContext* opCtx,
+ std::shared_ptr<BSONCollectionCatalogEntry::MetaData> md) {
+ DurableCatalog::get(opCtx)->putMetaData(opCtx, getCatalogId(), *md);
+ _metadata = std::move(md);
+}
+
+template <typename Func>
+void CollectionImpl::_writeMetadata(OperationContext* opCtx, Func func) {
+ // Even though we are holding an exclusive lock on the Collection there may be an ongoing
+ // multikey change on this OperationContext. Make sure we include that update when we copy the
+ // metadata for this operation.
+ const BSONCollectionCatalogEntry::MetaData* sourceMetadata = _metadata.get();
+ auto& uncommittedMultikeys = UncommittedMultikey::get(opCtx).resources();
+ if (uncommittedMultikeys) {
+ if (auto it = uncommittedMultikeys->find(this); it != uncommittedMultikeys->end()) {
+ sourceMetadata = &it->second;
+ }
+ }
+
+ // Copy metadata and apply provided function to make change.
+ auto metadata = std::make_shared<BSONCollectionCatalogEntry::MetaData>(*sourceMetadata);
+ func(*metadata);
+
+ // Remove the cached multikey change, it is now included in the copied metadata. If we left it
+ // here we could read stale data.
+ if (uncommittedMultikeys) {
+ uncommittedMultikeys->erase(this);
+ }
+
+ // Store in durable catalog and replace pointer with our copied instance.
+ DurableCatalog::get(opCtx)->putMetaData(opCtx, getCatalogId(), *metadata);
+ _metadata = std::move(metadata);
+}
+
+
} // namespace mongo
diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h
index 9571407a98b..b6a17849603 100644
--- a/src/mongo/db/catalog/collection_impl.h
+++ b/src/mongo/db/catalog/collection_impl.h
@@ -41,23 +41,38 @@ class CollectionCatalog;
class CollectionImpl final : public Collection {
public:
+ // TODO SERVER-56999: We should just need one API to create Collections
explicit CollectionImpl(OperationContext* opCtx,
const NamespaceString& nss,
RecordId catalogId,
const CollectionOptions& options,
std::unique_ptr<RecordStore> recordStore);
+ explicit CollectionImpl(OperationContext* opCtx,
+ const NamespaceString& nss,
+ RecordId catalogId,
+ std::shared_ptr<BSONCollectionCatalogEntry::MetaData> metadata,
+ std::unique_ptr<RecordStore> recordStore);
+
~CollectionImpl();
std::shared_ptr<Collection> clone() const final;
class FactoryImpl : public Factory {
public:
+ // TODO SERVER-56999: We should just need one API to create Collections
std::shared_ptr<Collection> make(OperationContext* opCtx,
const NamespaceString& nss,
RecordId catalogId,
const CollectionOptions& options,
std::unique_ptr<RecordStore> rs) const final;
+
+ std::shared_ptr<Collection> make(
+ OperationContext* opCtx,
+ const NamespaceString& nss,
+ RecordId catalogId,
+ std::shared_ptr<BSONCollectionCatalogEntry::MetaData> metadata,
+ std::unique_ptr<RecordStore> rs) const final;
};
SharedCollectionDecorations* getSharedDecorations() const final;
@@ -71,7 +86,7 @@ public:
return _ns;
}
- void setNs(NamespaceString nss) final;
+ Status rename(OperationContext* opCtx, const NamespaceString& nss, bool stayTemp) final;
RecordId getCatalogId() const {
return _catalogId;
@@ -298,9 +313,11 @@ public:
bool getRecordPreImages() const final;
void setRecordPreImages(OperationContext* opCtx, bool val) final;
- bool isTemporary(OperationContext* opCtx) const final;
+ bool isTemporary() const final;
bool isClustered() const final;
+ void updateClusteredIndexTTLSetting(OperationContext* opCtx,
+ boost::optional<int64_t> expireAfterSeconds) final;
Status updateCappedSize(OperationContext* opCtx, long long newCappedSize) final;
@@ -373,6 +390,8 @@ public:
*/
const CollatorInterface* getDefaultCollator() const final;
+ const CollectionOptions& getCollectionOptions() const final;
+
StatusWith<std::vector<BSONObj>> addCollationDefaultsToIndexSpecsForCreate(
OperationContext* opCtx, const std::vector<BSONObj>& indexSpecs) const final;
@@ -388,6 +407,55 @@ public:
void establishOplogCollectionForLogging(OperationContext* opCtx) final;
void onDeregisterFromCatalog(OperationContext* opCtx) final;
+ Status checkMetaDataForIndex(const std::string& indexName, const BSONObj& spec) const final;
+
+ void updateTTLSetting(OperationContext* opCtx,
+ StringData idxName,
+ long long newExpireSeconds) final;
+
+ void updateHiddenSetting(OperationContext* opCtx, StringData idxName, bool hidden) final;
+
+ void setIsTemp(OperationContext* opCtx, bool isTemp) final;
+
+ void removeIndex(OperationContext* opCtx, StringData indexName) final;
+
+ Status prepareForIndexBuild(OperationContext* opCtx,
+ const IndexDescriptor* spec,
+ boost::optional<UUID> buildUUID,
+ bool isBackgroundSecondaryBuild) final;
+
+ boost::optional<UUID> getIndexBuildUUID(StringData indexName) const final;
+
+ bool isIndexMultikey(OperationContext* opCtx,
+ StringData indexName,
+ MultikeyPaths* multikeyPaths) const final;
+
+ bool setIndexIsMultikey(OperationContext* opCtx,
+ StringData indexName,
+ const MultikeyPaths& multikeyPaths) const final;
+
+ void forceSetIndexIsMultikey(OperationContext* opCtx,
+ const IndexDescriptor* desc,
+ bool isMultikey,
+ const MultikeyPaths& multikeyPaths) const final;
+
+ int getTotalIndexCount() const final;
+
+ int getCompletedIndexCount() const final;
+
+ BSONObj getIndexSpec(StringData indexName) const final;
+
+ void getAllIndexes(std::vector<std::string>* names) const final;
+
+ void getReadyIndexes(std::vector<std::string>* names) const final;
+
+ bool isIndexPresent(StringData indexName) const final;
+
+ bool isIndexReady(StringData indexName) const final;
+
+ void replaceMetadata(OperationContext* opCtx,
+ std::shared_ptr<BSONCollectionCatalogEntry::MetaData> md) final;
+
private:
/**
* Returns a non-ok Status if document does not pass this collection's validator.
@@ -414,6 +482,13 @@ private:
void _cappedDeleteAsNeeded(OperationContext* opCtx, const RecordId& justInserted) const;
/**
+ * Writes metadata to the DurableCatalog. Func should have the function signature
+ * 'void(BSONCollectionCatalogEntry::MetaData&)'
+ */
+ template <typename Func>
+ void _writeMetadata(OperationContext* opCtx, Func func);
+
+ /**
* Holder of shared state between CollectionImpl clones. Also implements CappedCallback, a
* pointer to which is given to the RecordStore, so that the CappedCallback logic can always be
* performed on the latest CollectionImpl instance without needing to know about copy-on-write
@@ -475,7 +550,6 @@ private:
// Capped information.
const bool _isCapped;
const long long _cappedMaxDocs;
- long long _cappedMaxSize;
// For capped deletes performed on collections where '_needCappedLock' is false, the mutex
// below protects '_cappedFirstRecord'. Otherwise, when '_needCappedLock' is true, the
@@ -491,13 +565,15 @@ private:
bool _cachedCommitted = true;
std::shared_ptr<SharedState> _shared;
+ // Collection metadata cached from the DurableCatalog. Is kept separate from the SharedState
+ // because it may be updated.
+ std::shared_ptr<const BSONCollectionCatalogEntry::MetaData> _metadata;
+
clonable_ptr<IndexCatalog> _indexCatalog;
// The validator is using shared state internally. Collections share validator until a new
// validator is set in setValidator which sets a new instance.
Validator _validator;
- boost::optional<ValidationActionEnum> _validationAction;
- boost::optional<ValidationLevelEnum> _validationLevel;
// Whether or not this collection is clustered on _id values.
bool _clustered = false;
@@ -505,8 +581,6 @@ private:
// If this is a time-series buckets collection, the metadata for this collection.
boost::optional<TimeseriesOptions> _timeseriesOptions;
- bool _recordPreImages = false;
-
// The earliest snapshot that is allowed to use this collection.
boost::optional<Timestamp> _minVisibleSnapshot;
diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h
index 244ecd571a9..7dfb7424494 100644
--- a/src/mongo/db/catalog/collection_mock.h
+++ b/src/mongo/db/catalog/collection_mock.h
@@ -73,8 +73,9 @@ public:
return _ns;
}
- void setNs(NamespaceString nss) final {
+ Status rename(OperationContext* opCtx, const NamespaceString& nss, bool stayTemp) final {
_ns = std::move(nss);
+ return Status::OK();
}
const IndexCatalog* getIndexCatalog() const {
@@ -228,7 +229,7 @@ public:
std::abort();
}
- bool isTemporary(OperationContext* opCtx) const {
+ bool isTemporary() const {
std::abort();
}
@@ -236,6 +237,11 @@ public:
std::abort();
}
+ void updateClusteredIndexTTLSetting(OperationContext* opCtx,
+ boost::optional<int64_t> expireAfterSeconds) {
+ std::abort();
+ }
+
Status updateCappedSize(OperationContext* opCtx, long long newCappedSize) {
std::abort();
}
@@ -311,6 +317,10 @@ public:
std::abort();
}
+ const CollectionOptions& getCollectionOptions() const {
+ std::abort();
+ }
+
StatusWith<std::vector<BSONObj>> addCollationDefaultsToIndexSpecsForCreate(
OperationContext* opCtx, const std::vector<BSONObj>& indexSpecs) const {
std::abort();
@@ -347,6 +357,89 @@ public:
std::abort();
}
+ Status checkMetaDataForIndex(const std::string& indexName, const BSONObj& spec) const {
+ std::abort();
+ }
+
+ void updateTTLSetting(OperationContext* opCtx, StringData idxName, long long newExpireSeconds) {
+ std::abort();
+ }
+
+ void updateHiddenSetting(OperationContext* opCtx, StringData idxName, bool hidden) {
+ std::abort();
+ }
+
+ void setIsTemp(OperationContext* opCtx, bool isTemp) {
+ std::abort();
+ }
+
+ void removeIndex(OperationContext* opCtx, StringData indexName) {
+ std::abort();
+ }
+
+ Status prepareForIndexBuild(OperationContext* opCtx,
+ const IndexDescriptor* spec,
+ boost::optional<UUID> buildUUID,
+ bool isBackgroundSecondaryBuild) {
+ std::abort();
+ }
+
+ boost::optional<UUID> getIndexBuildUUID(StringData indexName) const {
+ std::abort();
+ }
+
+ bool isIndexMultikey(OperationContext* opCtx,
+ StringData indexName,
+ MultikeyPaths* multikeyPaths) const {
+ std::abort();
+ }
+
+ bool setIndexIsMultikey(OperationContext* opCtx,
+ StringData indexName,
+ const MultikeyPaths& multikeyPaths) const {
+ std::abort();
+ }
+
+ void forceSetIndexIsMultikey(OperationContext* opCtx,
+ const IndexDescriptor* desc,
+ bool isMultikey,
+ const MultikeyPaths& multikeyPaths) const final {
+ std::abort();
+ }
+
+ int getTotalIndexCount() const {
+ std::abort();
+ }
+
+ int getCompletedIndexCount() const {
+ std::abort();
+ }
+
+ BSONObj getIndexSpec(StringData indexName) const {
+ std::abort();
+ }
+
+ void getAllIndexes(std::vector<std::string>* names) const {
+ std::abort();
+ }
+
+ void getReadyIndexes(std::vector<std::string>* names) const {
+ std::abort();
+ }
+
+ bool isIndexPresent(StringData indexName) const {
+ std::abort();
+ }
+
+ bool isIndexReady(StringData indexName) const {
+ std::abort();
+ }
+
+ void replaceMetadata(OperationContext* opCtx,
+ std::shared_ptr<BSONCollectionCatalogEntry::MetaData> md) {
+ std::abort();
+ }
+
private:
UUID _uuid = UUID::gen();
NamespaceString _ns;
diff --git a/src/mongo/db/catalog/collection_options.cpp b/src/mongo/db/catalog/collection_options.cpp
index bd63a6af67d..a2149f9a0ba 100644
--- a/src/mongo/db/catalog/collection_options.cpp
+++ b/src/mongo/db/catalog/collection_options.cpp
@@ -312,14 +312,14 @@ CollectionOptions CollectionOptions::fromCreateCommand(const CreateCommand& cmd)
return options;
}
-BSONObj CollectionOptions::toBSON() const {
+BSONObj CollectionOptions::toBSON(bool includeUUID) const {
BSONObjBuilder b;
- appendBSON(&b);
+ appendBSON(&b, includeUUID);
return b.obj();
}
-void CollectionOptions::appendBSON(BSONObjBuilder* builder) const {
- if (uuid) {
+void CollectionOptions::appendBSON(BSONObjBuilder* builder, bool includeUUID) const {
+ if (uuid && includeUUID) {
builder->appendElements(uuid->toBSON());
}
diff --git a/src/mongo/db/catalog/collection_options.h b/src/mongo/db/catalog/collection_options.h
index 1a4bf368ea8..a8dcf0d28df 100644
--- a/src/mongo/db/catalog/collection_options.h
+++ b/src/mongo/db/catalog/collection_options.h
@@ -91,8 +91,12 @@ struct CollectionOptions {
*/
static CollectionOptions fromCreateCommand(const CreateCommand& cmd);
- void appendBSON(BSONObjBuilder* builder) const;
- BSONObj toBSON() const;
+ /**
+ * Serialize to BSON. The 'includeUUID' parameter is used for the listCollections command to do
+ * special formatting for the uuid.
+ */
+ void appendBSON(BSONObjBuilder* builder, bool includeUUID) const;
+ BSONObj toBSON(bool includeUUID = true) const;
/**
* Returns true if given options matches to this.
diff --git a/src/mongo/db/catalog/collection_validation.cpp b/src/mongo/db/catalog/collection_validation.cpp
index 241f9fe4f05..1d06c548670 100644
--- a/src/mongo/db/catalog/collection_validation.cpp
+++ b/src/mongo/db/catalog/collection_validation.cpp
@@ -42,7 +42,6 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/views/view_catalog.h"
#include "mongo/logv2/log.h"
#include "mongo/util/fail_point.h"
@@ -371,8 +370,7 @@ void _validateCatalogEntry(OperationContext* opCtx,
ValidateState* validateState,
ValidateResults* results) {
const auto& collection = validateState->getCollection();
- CollectionOptions options =
- DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, collection->getCatalogId());
+ const auto& options = collection->getCollectionOptions();
if (options.uuid) {
addErrorIfUnequal(*(options.uuid), validateState->uuid(), "UUID", results);
} else {
@@ -408,11 +406,10 @@ void _validateCatalogEntry(OperationContext* opCtx,
}
std::vector<std::string> indexes;
- DurableCatalog::get(opCtx)->getReadyIndexes(opCtx, collection->getCatalogId(), &indexes);
+ collection->getReadyIndexes(&indexes);
for (auto& index : indexes) {
MultikeyPaths multikeyPaths;
- const bool isMultikey = DurableCatalog::get(opCtx)->isIndexMultikey(
- opCtx, collection->getCatalogId(), index, &multikeyPaths);
+ const bool isMultikey = collection->isIndexMultikey(opCtx, index, &multikeyPaths);
const bool hasMultiKeyPaths = std::any_of(multikeyPaths.begin(),
multikeyPaths.end(),
[](auto& pathSet) { return pathSet.size() > 0; });
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index 5239a29876a..b9c532386bd 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -277,11 +277,8 @@ Status _createTimeseries(OperationContext* opCtx,
// Compare CollectionOptions and eventual TTL index to see if this bucket collection
// may be reused for this request.
existingBucketCollectionIsCompatible =
- DurableCatalog::get(opCtx)
- ->getCollectionOptions(opCtx, coll->getCatalogId())
- .matchesStorageOptions(
- bucketsOptions,
- CollatorFactoryInterface::get(opCtx->getServiceContext()));
+ coll->getCollectionOptions().matchesStorageOptions(
+ bucketsOptions, CollatorFactoryInterface::get(opCtx->getServiceContext()));
if (expireAfterSeconds && !bucketsOptions.clusteredIndex) {
auto indexDescriptor =
coll->getIndexCatalog()->findIndexByName(opCtx, indexName, true);
diff --git a/src/mongo/db/catalog/create_collection_test.cpp b/src/mongo/db/catalog/create_collection_test.cpp
index e56a2fa58b0..1a12247c55d 100644
--- a/src/mongo/db/catalog/create_collection_test.cpp
+++ b/src/mongo/db/catalog/create_collection_test.cpp
@@ -41,7 +41,6 @@
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/repl/storage_interface_impl.h"
#include "mongo/db/service_context_d_test_fixture.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/uuid.h"
@@ -128,7 +127,7 @@ CollectionOptions getCollectionOptions(OperationContext* opCtx, const NamespaceS
AutoGetCollectionForRead collection(opCtx, nss);
ASSERT_TRUE(collection) << "Unable to get collections options for " << nss
<< " because collection does not exist.";
- return DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, collection->getCatalogId());
+ return collection->getCollectionOptions();
}
/**
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index 5c1d10e527d..d9f59b98ae5 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -230,9 +230,7 @@ void DatabaseImpl::clearTmpCollections(OperationContext* opCtx) const {
};
CollectionCatalog::CollectionInfoFn predicate = [&](const CollectionPtr& collection) {
- return DurableCatalog::get(opCtx)
- ->getCollectionOptions(opCtx, collection->getCatalogId())
- .temp;
+ return collection->getCollectionOptions().temp;
};
catalog::forEachCollectionFromDb(opCtx, name(), MODE_X, callback, predicate);
@@ -515,10 +513,9 @@ void DatabaseImpl::_dropCollectionIndexes(OperationContext* opCtx,
invariant(_name == nss.db());
LOGV2_DEBUG(
20316, 1, "dropCollection: {namespace} - dropAllIndexes start", "namespace"_attr = nss);
- collection->getIndexCatalog()->dropAllIndexes(opCtx, true);
+ collection->getIndexCatalog()->dropAllIndexes(opCtx, collection, true);
- invariant(DurableCatalog::get(opCtx)->getTotalIndexCount(opCtx, collection->getCatalogId()) ==
- 0);
+ invariant(collection->getTotalIndexCount() == 0);
LOGV2_DEBUG(
20317, 1, "dropCollection: {namespace} - dropAllIndexes done", "namespace"_attr = nss);
}
@@ -577,16 +574,15 @@ Status DatabaseImpl::renameCollection(OperationContext* opCtx,
Top::get(opCtx->getServiceContext()).collectionDropped(fromNss);
- Status status = DurableCatalog::get(opCtx)->renameCollection(
- opCtx, collToRename->getCatalogId(), toNss, stayTemp);
-
// Set the namespace of 'collToRename' from within the CollectionCatalog. This is necessary
- // because the CollectionCatalog mutex synchronizes concurrent access to the collection's
- // namespace for callers that may not hold a collection lock.
+ // because the CollectionCatalog manages the necessary isolation for this Collection until the
+ // WUOW commits.
auto writableCollection = collToRename.getWritableCollection();
+ Status status = writableCollection->rename(opCtx, toNss, stayTemp);
+ if (!status.isOK())
+ return status;
- CollectionCatalog::get(opCtx)->setCollectionNamespace(
- opCtx, writableCollection, fromNss, toNss);
+ CollectionCatalog::get(opCtx)->onCollectionRename(opCtx, writableCollection, fromNss);
opCtx->recoveryUnit()->onCommit([writableCollection](boost::optional<Timestamp> commitTime) {
// Ban reading from this collection on committed reads on snapshots before now.
@@ -746,7 +742,9 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
// initialized, so use the unsafe fCV getter here.
IndexCatalog* ic = collection->getIndexCatalog();
fullIdIndexSpec = uassertStatusOK(ic->createIndexOnEmptyCollection(
- opCtx, !idIndex.isEmpty() ? idIndex : ic->getDefaultIdIndexSpec()));
+ opCtx,
+ collection,
+ !idIndex.isEmpty() ? idIndex : ic->getDefaultIdIndexSpec(collection)));
} else {
// autoIndexId: false is only allowed on unreplicated collections.
uassert(50001,
diff --git a/src/mongo/db/catalog/database_test.cpp b/src/mongo/db/catalog/database_test.cpp
index dde410d6f24..e2d826fa05a 100644
--- a/src/mongo/db/catalog/database_test.cpp
+++ b/src/mongo/db/catalog/database_test.cpp
@@ -57,7 +57,6 @@
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/repl/storage_interface_mock.h"
#include "mongo/db/service_context_d_test_fixture.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/scopeguard.h"
@@ -377,8 +376,7 @@ TEST_F(DatabaseTest, RenameCollectionPreservesUuidOfSourceCollectionAndUpdatesUu
auto toCollection = catalog->lookupCollectionByNamespace(opCtx, toNss);
ASSERT_TRUE(toCollection);
- auto toCollectionOptions =
- DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, toCollection->getCatalogId());
+ const auto& toCollectionOptions = toCollection->getCollectionOptions();
auto toUuid = toCollectionOptions.uuid;
ASSERT_TRUE(toUuid);
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index b23c3120f2d..4af7afc812e 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -184,7 +184,7 @@ std::vector<UUID> abortIndexBuildByIndexNames(OperationContext* opCtx,
* Drops single index given a descriptor.
*/
Status dropIndexByDescriptor(OperationContext* opCtx,
- const CollectionPtr& collection,
+ Collection* collection,
IndexCatalog* indexCatalog,
const IndexDescriptor* desc) {
if (desc->isIdIndex()) {
@@ -195,14 +195,14 @@ Status dropIndexByDescriptor(OperationContext* opCtx,
// exist in standalone mode.
auto entry = indexCatalog->getEntry(desc);
if (entry->isFrozen()) {
- invariant(!entry->isReady(opCtx));
+ invariant(!entry->isReady(opCtx, collection));
invariant(getReplSetMemberInStandaloneMode(opCtx->getServiceContext()));
// Return here. No need to fall through to op observer on standalone.
- return indexCatalog->dropUnfinishedIndex(opCtx, desc);
+ return indexCatalog->dropUnfinishedIndex(opCtx, collection, desc);
}
// Do not allow dropping unfinished indexes that are not frozen.
- if (!entry->isReady(opCtx)) {
+ if (!entry->isReady(opCtx, collection)) {
return Status(ErrorCodes::IndexNotFound,
str::stream()
<< "can't drop unfinished index with name: " << desc->indexName());
@@ -214,7 +214,7 @@ Status dropIndexByDescriptor(OperationContext* opCtx,
opCtx->getServiceContext()->getOpObserver()->onDropIndex(
opCtx, collection->ns(), collection->uuid(), desc->indexName(), desc->infoObj());
- auto s = indexCatalog->dropIndex(opCtx, desc);
+ auto s = indexCatalog->dropIndex(opCtx, collection, desc);
if (!s.isOK()) {
return s;
}
@@ -255,7 +255,7 @@ void dropReadyIndexes(OperationContext* opCtx,
IndexCatalog* indexCatalog = collection->getIndexCatalog();
if (indexNames.front() == "*") {
indexCatalog->dropAllIndexes(
- opCtx, false, [opCtx, collection](const IndexDescriptor* desc) {
+ opCtx, collection, false, [opCtx, collection](const IndexDescriptor* desc) {
opCtx->getServiceContext()->getOpObserver()->onDropIndex(opCtx,
collection->ns(),
collection->uuid(),
@@ -427,8 +427,8 @@ DropIndexesReply dropIndexes(OperationContext* opCtx,
continue;
}
- uassertStatusOK(
- dropIndexByDescriptor(opCtx, collection->getCollection(), indexCatalog, desc));
+ uassertStatusOK(dropIndexByDescriptor(
+ opCtx, collection->getWritableCollection(), indexCatalog, desc));
}
wuow.commit();
diff --git a/src/mongo/db/catalog/index_build_block.cpp b/src/mongo/db/catalog/index_build_block.cpp
index e34c309340f..48a95c05af1 100644
--- a/src/mongo/db/catalog/index_build_block.cpp
+++ b/src/mongo/db/catalog/index_build_block.cpp
@@ -98,7 +98,7 @@ Status IndexBuildBlock::initForResume(OperationContext* opCtx,
// A bulk cursor can only be opened on a fresh table, so we drop the table that was created
// before shutdown and recreate it.
auto status = DurableCatalog::get(opCtx)->dropAndRecreateIndexIdentForResume(
- opCtx, collection->getCatalogId(), descriptor, indexCatalogEntry->getIdent());
+ opCtx, collection->getCollectionOptions(), descriptor, indexCatalogEntry->getIdent());
if (!status.isOK())
return status;
}
@@ -145,16 +145,13 @@ Status IndexBuildBlock::init(OperationContext* opCtx, Collection* collection) {
}
// Setup on-disk structures.
- Status status = DurableCatalog::get(opCtx)->prepareForIndexBuild(opCtx,
- collection->getCatalogId(),
- descriptor.get(),
- _buildUUID,
- isBackgroundSecondaryBuild);
+ Status status = collection->prepareForIndexBuild(
+ opCtx, descriptor.get(), _buildUUID, isBackgroundSecondaryBuild);
if (!status.isOK())
return status;
auto indexCatalogEntry = collection->getIndexCatalog()->createIndexEntry(
- opCtx, std::move(descriptor), CreateIndexEntryFlags::kNone);
+ opCtx, collection, std::move(descriptor), CreateIndexEntryFlags::kNone);
if (_method == IndexBuildMethod::kHybrid) {
_indexBuildInterceptor = std::make_unique<IndexBuildInterceptor>(opCtx, indexCatalogEntry);
@@ -184,8 +181,6 @@ void IndexBuildBlock::fail(OperationContext* opCtx, Collection* collection) {
// Being in a WUOW means all timestamping responsibility can be pushed up to the caller.
invariant(opCtx->lockState()->inAWriteUnitOfWork());
- invariant(opCtx->lockState()->isCollectionLockedForMode(_nss, MODE_X));
-
// Audit that the index build is being aborted.
audit::logCreateIndex(opCtx->getClient(),
&_spec,
@@ -196,12 +191,14 @@ void IndexBuildBlock::fail(OperationContext* opCtx, Collection* collection) {
auto indexCatalogEntry = getEntry(opCtx, collection);
if (indexCatalogEntry) {
- invariant(collection->getIndexCatalog()->dropIndexEntry(opCtx, indexCatalogEntry).isOK());
+ invariant(collection->getIndexCatalog()
+ ->dropIndexEntry(opCtx, collection, indexCatalogEntry)
+ .isOK());
if (_indexBuildInterceptor) {
indexCatalogEntry->setIndexBuildInterceptor(nullptr);
}
} else {
- collection->getIndexCatalog()->deleteIndexFromDisk(opCtx, _indexName);
+ collection->getIndexCatalog()->deleteIndexFromDisk(opCtx, collection, _indexName);
}
}
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index ae0edad4c25..c07d386fec9 100644
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -187,7 +187,7 @@ public:
virtual std::unique_ptr<IndexCatalog> clone() const = 0;
// Must be called before used.
- virtual Status init(OperationContext* const opCtx) = 0;
+ virtual Status init(OperationContext* const opCtx, Collection* collection) = 0;
// ---- accessors -----
@@ -206,7 +206,7 @@ public:
/**
* Returns the spec for the id index to create by default for this collection.
*/
- virtual BSONObj getDefaultIdIndexSpec() const = 0;
+ virtual BSONObj getDefaultIdIndexSpec(const CollectionPtr& collection) const = 0;
virtual const IndexDescriptor* findIdIndex(OperationContext* const opCtx) const = 0;
@@ -279,6 +279,7 @@ public:
* on the collection.
*/
virtual const IndexDescriptor* refreshEntry(OperationContext* const opCtx,
+ Collection* collection,
const IndexDescriptor* const oldDesc) = 0;
/**
@@ -314,6 +315,7 @@ public:
*/
virtual IndexCatalogEntry* createIndexEntry(OperationContext* opCtx,
+ Collection* collection,
std::unique_ptr<IndexDescriptor> descriptor,
CreateIndexEntryFlags flags) = 0;
@@ -323,6 +325,7 @@ public:
* of the created index, as it is stored in this index catalog.
*/
virtual StatusWith<BSONObj> createIndexOnEmptyCollection(OperationContext* const opCtx,
+ Collection* collection,
const BSONObj spec) = 0;
/**
@@ -335,6 +338,7 @@ public:
*/
virtual StatusWith<BSONObj> prepareSpecForCreate(
OperationContext* const opCtx,
+ const CollectionPtr& collection,
const BSONObj& original,
const boost::optional<ResumeIndexInfo>& resumeInfo) const = 0;
@@ -354,6 +358,7 @@ public:
*/
virtual std::vector<BSONObj> removeExistingIndexes(
OperationContext* const opCtx,
+ const CollectionPtr& collection,
const std::vector<BSONObj>& indexSpecsToBuild,
const bool removeIndexBuildsToo) const = 0;
@@ -368,7 +373,9 @@ public:
* via replica set cloning or chunk migrations.
*/
virtual std::vector<BSONObj> removeExistingIndexesNoChecks(
- OperationContext* const opCtx, const std::vector<BSONObj>& indexSpecsToBuild) const = 0;
+ OperationContext* const opCtx,
+ const CollectionPtr& collection,
+ const std::vector<BSONObj>& indexSpecsToBuild) const = 0;
/**
* Drops all indexes in the index catalog, optionally dropping the id index depending on the
@@ -376,9 +383,12 @@ public:
* index is dropped to allow timestamping each individual drop.
*/
virtual void dropAllIndexes(OperationContext* opCtx,
+ Collection* collection,
bool includingIdIndex,
std::function<void(const IndexDescriptor*)> onDropFn) = 0;
- virtual void dropAllIndexes(OperationContext* opCtx, bool includingIdIndex) = 0;
+ virtual void dropAllIndexes(OperationContext* opCtx,
+ Collection* collection,
+ bool includingIdIndex) = 0;
/**
* Drops the index given its descriptor.
@@ -386,7 +396,9 @@ public:
* The caller must hold the collection X lock and ensure no index builds are in progress on the
* collection.
*/
- virtual Status dropIndex(OperationContext* const opCtx, const IndexDescriptor* const desc) = 0;
+ virtual Status dropIndex(OperationContext* const opCtx,
+ Collection* collection,
+ const IndexDescriptor* const desc) = 0;
/**
* Drops an unfinished index given its descriptor.
@@ -394,6 +406,7 @@ public:
* The caller must hold the collection X lock.
*/
virtual Status dropUnfinishedIndex(OperationContext* const opCtx,
+ Collection* collection,
const IndexDescriptor* const desc) = 0;
/**
@@ -401,12 +414,16 @@ public:
*
* The caller must hold the collection X lock.
*/
- virtual Status dropIndexEntry(OperationContext* opCtx, IndexCatalogEntry* entry) = 0;
+ virtual Status dropIndexEntry(OperationContext* opCtx,
+ Collection* collection,
+ IndexCatalogEntry* entry) = 0;
/**
* Deletes the index from the durable catalog on disk.
*/
- virtual void deleteIndexFromDisk(OperationContext* opCtx, const std::string& indexName) = 0;
+ virtual void deleteIndexFromDisk(OperationContext* opCtx,
+ Collection* collection,
+ const std::string& indexName) = 0;
// ---- modify single index
@@ -432,7 +449,7 @@ public:
virtual Status indexRecords(OperationContext* const opCtx,
const CollectionPtr& collection,
const std::vector<BsonRecord>& bsonRecords,
- int64_t* const keysInsertedOut) = 0;
+ int64_t* const keysInsertedOut) const = 0;
/**
* Both 'keysInsertedOut' and 'keysDeletedOut' are required and will be set to the number of
@@ -446,17 +463,18 @@ public:
const BSONObj& newDoc,
const RecordId& recordId,
int64_t* const keysInsertedOut,
- int64_t* const keysDeletedOut) = 0;
+ int64_t* const keysDeletedOut) const = 0;
/**
* When 'keysDeletedOut' is not null, it will be set to the number of index keys removed by
* this operation.
*/
virtual void unindexRecord(OperationContext* const opCtx,
+ const CollectionPtr& collection,
const BSONObj& obj,
const RecordId& loc,
const bool noWarn,
- int64_t* const keysDeletedOut) = 0;
+ int64_t* const keysDeletedOut) const = 0;
/*
* Attempt compaction on all ready indexes to regain disk space, if the storage engine's index
@@ -489,7 +507,7 @@ public:
InsertDeleteOptions* options) const = 0;
virtual void indexBuildSuccess(OperationContext* opCtx,
- const CollectionPtr& coll,
+ Collection* coll,
IndexCatalogEntry* index) = 0;
};
} // namespace mongo
diff --git a/src/mongo/db/catalog/index_catalog_entry.h b/src/mongo/db/catalog/index_catalog_entry.h
index 59658f6db32..5dd6acb6bd8 100644
--- a/src/mongo/db/catalog/index_catalog_entry.h
+++ b/src/mongo/db/catalog/index_catalog_entry.h
@@ -73,9 +73,7 @@ public:
virtual const IndexDescriptor* descriptor() const = 0;
- virtual IndexAccessMethod* accessMethod() = 0;
-
- virtual const IndexAccessMethod* accessMethod() const = 0;
+ virtual IndexAccessMethod* accessMethod() const = 0;
virtual bool isHybridBuilding() const = 0;
@@ -137,7 +135,7 @@ public:
virtual void setMultikey(OperationContext* const opCtx,
const CollectionPtr& coll,
const KeyStringSet& multikeyMetadataKeys,
- const MultikeyPaths& multikeyPaths) = 0;
+ const MultikeyPaths& multikeyPaths) const = 0;
/**
* Sets the index to be multikey with the provided paths. This performs minimal validation of
@@ -150,14 +148,14 @@ public:
virtual void forceSetMultikey(OperationContext* const opCtx,
const CollectionPtr& coll,
bool isMultikey,
- const MultikeyPaths& multikeyPaths) = 0;
+ const MultikeyPaths& multikeyPaths) const = 0;
/**
* Returns whether this index is ready for queries. This is potentially unsafe in that it does
* not consider whether the index is visible or ready in the current storage snapshot. For
* that, use isReadyInMySnapshot() or isPresentInMySnapshot().
*/
- virtual bool isReady(OperationContext* const opCtx) const = 0;
+ virtual bool isReady(OperationContext* opCtx, const CollectionPtr& collection) const = 0;
/**
* Safely check whether this index is visible in the durable catalog in the current storage
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
index 79c6f6f5466..c20bc831c88 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
@@ -61,24 +61,24 @@ namespace mongo {
using std::string;
IndexCatalogEntryImpl::IndexCatalogEntryImpl(OperationContext* const opCtx,
- RecordId catalogId,
+ const CollectionPtr& collection,
const std::string& ident,
std::unique_ptr<IndexDescriptor> descriptor,
bool isFrozen)
: _ident(ident),
_descriptor(std::move(descriptor)),
- _catalogId(catalogId),
+ _catalogId(collection->getCatalogId()),
_ordering(Ordering::make(_descriptor->keyPattern())),
_isReady(false),
_isFrozen(isFrozen),
_isDropped(false) {
_descriptor->_entry = this;
- _isReady = isReadyInMySnapshot(opCtx);
+ _isReady = collection->isIndexReady(_descriptor->indexName());
{
stdx::lock_guard<Latch> lk(_indexMultikeyPathsMutex);
- const bool isMultikey = _catalogIsMultikey(opCtx, &_indexMultikeyPathsForWrite);
+ const bool isMultikey = _catalogIsMultikey(opCtx, collection, &_indexMultikeyPathsForWrite);
_isMultikeyForRead.store(isMultikey);
_isMultikeyForWrite.store(isMultikey);
_indexMultikeyPathsForRead = _indexMultikeyPathsForWrite;
@@ -124,7 +124,8 @@ void IndexCatalogEntryImpl::init(std::unique_ptr<IndexAccessMethod> accessMethod
_accessMethod = std::move(accessMethod);
}
-bool IndexCatalogEntryImpl::isReady(OperationContext* opCtx) const {
+bool IndexCatalogEntryImpl::isReady(OperationContext* opCtx,
+ const CollectionPtr& collection) const {
// For multi-document transactions, we can open a snapshot prior to checking the
// minimumSnapshotVersion on a collection. This means we are unprotected from reading
// out-of-sync index catalog entries. To fix this, we uassert if we detect that the
@@ -171,7 +172,7 @@ void IndexCatalogEntryImpl::setIsReady(bool newIsReady) {
void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx,
const CollectionPtr& collection,
const KeyStringSet& multikeyMetadataKeys,
- const MultikeyPaths& multikeyPaths) {
+ const MultikeyPaths& multikeyPaths) const {
// An index can either track path-level multikey information in the catalog or as metadata keys
// in the index itself, but not both.
invariant(!(_indexTracksMultikeyPathsInCatalog && multikeyMetadataKeys.size() > 0));
@@ -257,7 +258,7 @@ void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx,
void IndexCatalogEntryImpl::forceSetMultikey(OperationContext* const opCtx,
const CollectionPtr& coll,
bool isMultikey,
- const MultikeyPaths& multikeyPaths) {
+ const MultikeyPaths& multikeyPaths) const {
invariant(opCtx->lockState()->isCollectionLockedForMode(coll->ns(), MODE_X));
// Don't check _indexTracksMultikeyPathsInCatalog because the caller may be intentionally trying
@@ -266,17 +267,17 @@ void IndexCatalogEntryImpl::forceSetMultikey(OperationContext* const opCtx,
// caller wants to upgrade this index because it knows exactly which paths are multikey. We rely
// on the following function to make sure this upgrade only takes place on index types that
// currently support path-level multikey path tracking.
- DurableCatalog::get(opCtx)->forceSetIndexIsMultikey(
- opCtx, _catalogId, _descriptor.get(), isMultikey, multikeyPaths);
+ coll->forceSetIndexIsMultikey(opCtx, _descriptor.get(), isMultikey, multikeyPaths);
// The prior call to set the multikey metadata in the catalog does some validation and clean up
// based on the inputs, so reset the multikey variables based on what is actually in the durable
// catalog entry.
{
stdx::lock_guard<Latch> lk(_indexMultikeyPathsMutex);
- const bool isMultikey = _catalogIsMultikey(opCtx, &_indexMultikeyPathsForWrite);
- _isMultikeyForRead.store(isMultikey);
- _isMultikeyForWrite.store(isMultikey);
+ const bool isMultikeyInCatalog =
+ _catalogIsMultikey(opCtx, coll, &_indexMultikeyPathsForWrite);
+ _isMultikeyForRead.store(isMultikeyInCatalog);
+ _isMultikeyForWrite.store(isMultikeyInCatalog);
_indexMultikeyPathsForRead = _indexMultikeyPathsForWrite;
_indexTracksMultikeyPathsInCatalog = !_indexMultikeyPathsForWrite.empty();
}
@@ -286,7 +287,9 @@ void IndexCatalogEntryImpl::forceSetMultikey(OperationContext* const opCtx,
}
Status IndexCatalogEntryImpl::_setMultikeyInMultiDocumentTransaction(
- OperationContext* opCtx, const CollectionPtr& collection, const MultikeyPaths& multikeyPaths) {
+ OperationContext* opCtx,
+ const CollectionPtr& collection,
+ const MultikeyPaths& multikeyPaths) const {
// If we are inside a multi-document transaction, we write the on-disk multikey update in a
// separate transaction so that it will not generate prepare conflicts with other operations
// that try to set the multikey flag. In general, it should always be safe to update the
@@ -367,21 +370,21 @@ bool IndexCatalogEntryImpl::isPresentInMySnapshot(OperationContext* opCtx) const
}
bool IndexCatalogEntryImpl::_catalogIsMultikey(OperationContext* opCtx,
+ const CollectionPtr& collection,
MultikeyPaths* multikeyPaths) const {
- return DurableCatalog::get(opCtx)->isIndexMultikey(
- opCtx, _catalogId, _descriptor->indexName(), multikeyPaths);
+ return collection->isIndexMultikey(opCtx, _descriptor->indexName(), multikeyPaths);
}
void IndexCatalogEntryImpl::_catalogSetMultikey(OperationContext* opCtx,
const CollectionPtr& collection,
- const MultikeyPaths& multikeyPaths) {
+ const MultikeyPaths& multikeyPaths) const {
// It's possible that the index type (e.g. ascending/descending index) supports tracking
// path-level multikey information, but this particular index doesn't.
// CollectionCatalogEntry::setIndexIsMultikey() requires that we discard the path-level
// multikey information in order to avoid unintentionally setting path-level multikey
// information on an index created before 3.4.
- auto indexMetadataHasChanged = DurableCatalog::get(opCtx)->setIndexIsMultikey(
- opCtx, _catalogId, _descriptor->indexName(), multikeyPaths);
+ auto indexMetadataHasChanged =
+ collection->setIndexIsMultikey(opCtx, _descriptor->indexName(), multikeyPaths);
// In the absence of using the storage engine to read from the catalog, we must set multikey
// prior to the storage engine transaction committing.
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.h b/src/mongo/db/catalog/index_catalog_entry_impl.h
index e2024294c7c..13bd86209fe 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.h
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.h
@@ -57,7 +57,7 @@ class IndexCatalogEntryImpl : public IndexCatalogEntry {
public:
IndexCatalogEntryImpl(OperationContext* opCtx,
- RecordId catalogId,
+ const CollectionPtr& collection,
const std::string& ident,
std::unique_ptr<IndexDescriptor> descriptor, // ownership passes to me
bool isFrozen);
@@ -77,10 +77,7 @@ public:
return _descriptor.get();
}
- IndexAccessMethod* accessMethod() final {
- return _accessMethod.get();
- }
- const IndexAccessMethod* accessMethod() const final {
+ IndexAccessMethod* accessMethod() const final {
return _accessMethod.get();
}
@@ -158,14 +155,14 @@ public:
void setMultikey(OperationContext* opCtx,
const CollectionPtr& coll,
const KeyStringSet& multikeyMetadataKeys,
- const MultikeyPaths& multikeyPaths) final;
+ const MultikeyPaths& multikeyPaths) const final;
void forceSetMultikey(OperationContext* const opCtx,
const CollectionPtr& coll,
bool isMultikey,
- const MultikeyPaths& multikeyPaths) final;
+ const MultikeyPaths& multikeyPaths) const final;
- bool isReady(OperationContext* opCtx) const final;
+ bool isReady(OperationContext* opCtx, const CollectionPtr& collection) const final;
bool isFrozen() const final;
@@ -194,21 +191,23 @@ private:
*/
Status _setMultikeyInMultiDocumentTransaction(OperationContext* opCtx,
const CollectionPtr& collection,
- const MultikeyPaths& multikeyPaths);
+ const MultikeyPaths& multikeyPaths) const;
/**
* Retrieves the multikey information associated with this index from '_collection',
*
* See CollectionCatalogEntry::isIndexMultikey() for more details.
*/
- bool _catalogIsMultikey(OperationContext* opCtx, MultikeyPaths* multikeyPaths) const;
+ bool _catalogIsMultikey(OperationContext* opCtx,
+ const CollectionPtr& collection,
+ MultikeyPaths* multikeyPaths) const;
/**
* Sets on-disk multikey flag for this index.
*/
void _catalogSetMultikey(OperationContext* opCtx,
const CollectionPtr& collection,
- const MultikeyPaths& multikeyPaths);
+ const MultikeyPaths& multikeyPaths) const;
// -----
@@ -234,16 +233,20 @@ private:
bool _isFrozen;
AtomicWord<bool> _isDropped; // Whether the index drop is committed.
+ // Members for multikey are mutable so they can be changed in const functions. They are
+ // synchronized with the '_indexMultikeyPathsMutex' mutex or are atomic. We don't have the ABA
+ // problem as multikey may only go from disabled to enabled. When multikey it stays multikey.
+
// Set to true if this index can track path-level multikey information in the catalog. This
// member is effectively const after IndexCatalogEntry::init() is called.
- bool _indexTracksMultikeyPathsInCatalog = false;
+ mutable bool _indexTracksMultikeyPathsInCatalog = false;
// Set to true if this index may contain multikey data.
- AtomicWord<bool> _isMultikeyForRead;
+ mutable AtomicWord<bool> _isMultikeyForRead;
// Set to true after a transaction commit successfully updates multikey on the catalog data. At
// this point, future writers do not need to update the catalog.
- AtomicWord<bool> _isMultikeyForWrite;
+ mutable AtomicWord<bool> _isMultikeyForWrite;
// Controls concurrent access to '_indexMultikeyPathsForRead' and '_indexMultikeyPathsForWrite'.
// We acquire this mutex rather than the RESOURCE_METADATA lock as a performance optimization
@@ -258,8 +261,10 @@ private:
// in the index key pattern. Each element in the vector is an ordered set of positions (starting
// at 0) into the corresponding indexed field that represent what prefixes of the indexed field
// causes the index to be multikey.
- MultikeyPaths _indexMultikeyPathsForRead; // May include paths not committed to catalog.
- MultikeyPaths _indexMultikeyPathsForWrite; // Paths in catalog updated by a transaction commit.
+ mutable MultikeyPaths
+ _indexMultikeyPathsForRead; // May include paths not committed to catalog.
+ mutable MultikeyPaths
+ _indexMultikeyPathsForWrite; // Paths in catalog updated by a transaction commit.
// The earliest snapshot that is allowed to read this index.
boost::optional<Timestamp> _minVisibleSnapshot;
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 815cafcf3c2..8ca5416cd37 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -97,20 +97,13 @@ const BSONObj IndexCatalogImpl::_idObj = BSON("_id" << 1);
// -------------
-IndexCatalogImpl::IndexCatalogImpl(Collection* collection) : _collection(collection) {}
-
std::unique_ptr<IndexCatalog> IndexCatalogImpl::clone() const {
return std::make_unique<IndexCatalogImpl>(*this);
}
-void IndexCatalogImpl::setCollection(Collection* collection) {
- _collection = collection;
-}
-
-Status IndexCatalogImpl::init(OperationContext* opCtx) {
+Status IndexCatalogImpl::init(OperationContext* opCtx, Collection* collection) {
vector<string> indexNames;
- auto durableCatalog = DurableCatalog::get(opCtx);
- durableCatalog->getAllIndexes(opCtx, _collection->getCatalogId(), &indexNames);
+ collection->getAllIndexes(&indexNames);
const bool replSetMemberInStandaloneMode =
getReplSetMemberInStandaloneMode(opCtx->getServiceContext());
@@ -122,8 +115,7 @@ Status IndexCatalogImpl::init(OperationContext* opCtx) {
for (size_t i = 0; i < indexNames.size(); i++) {
const string& indexName = indexNames[i];
- BSONObj spec =
- durableCatalog->getIndexSpec(opCtx, _collection->getCatalogId(), indexName).getOwned();
+ BSONObj spec = collection->getIndexSpec(indexName).getOwned();
BSONObj keyPattern = spec.getObjectField("key");
// TODO SERVER-51871: Delete this block once 5.0 becomes last-lts.
@@ -138,18 +130,16 @@ Status IndexCatalogImpl::init(OperationContext* opCtx) {
// TTL indexes are not compatible with capped collections.
if (spec.hasField(IndexDescriptor::kExpireAfterSecondsFieldName) &&
- !_collection->isCapped()) {
+ !collection->isCapped()) {
TTLCollectionCache::get(opCtx->getServiceContext())
- .registerTTLInfo(_collection->uuid(), indexName);
+ .registerTTLInfo(collection->uuid(), indexName);
}
- bool ready = durableCatalog->isIndexReady(opCtx, _collection->getCatalogId(), indexName);
+ bool ready = collection->isIndexReady(indexName);
if (!ready) {
- auto buildUUID =
- durableCatalog->getIndexBuildUUID(opCtx, _collection->getCatalogId(), indexName);
+ auto buildUUID = collection->getIndexBuildUUID(indexName);
invariant(buildUUID,
- str::stream()
- << "collection: " << _collection->ns() << "index:" << indexName);
+ str::stream() << "collection: " << collection->ns() << "index:" << indexName);
// We intentionally do not drop or rebuild unfinished two-phase index builds before
// initializing the IndexCatalog when starting a replica set member in standalone mode.
// This is because the index build cannot complete until it receives a replicated commit
@@ -158,18 +148,21 @@ Status IndexCatalogImpl::init(OperationContext* opCtx) {
// Indicate that this index is "frozen". It is not ready but is not currently in
// progress either. These indexes may be dropped.
auto flags = CreateIndexEntryFlags::kInitFromDisk | CreateIndexEntryFlags::kFrozen;
- IndexCatalogEntry* entry = createIndexEntry(opCtx, std::move(descriptor), flags);
- fassert(31433, !entry->isReady(opCtx));
+ IndexCatalogEntry* entry =
+ createIndexEntry(opCtx, collection, std::move(descriptor), flags);
+ fassert(31433, !entry->isReady(opCtx, collection));
} else {
// Initializing with unfinished indexes may occur during rollback or startup.
auto flags = CreateIndexEntryFlags::kInitFromDisk;
- IndexCatalogEntry* entry = createIndexEntry(opCtx, std::move(descriptor), flags);
- fassert(4505500, !entry->isReady(opCtx));
+ IndexCatalogEntry* entry =
+ createIndexEntry(opCtx, collection, std::move(descriptor), flags);
+ fassert(4505500, !entry->isReady(opCtx, collection));
}
} else {
auto flags = CreateIndexEntryFlags::kInitFromDisk | CreateIndexEntryFlags::kIsReady;
- IndexCatalogEntry* entry = createIndexEntry(opCtx, std::move(descriptor), flags);
- fassert(17340, entry->isReady(opCtx));
+ IndexCatalogEntry* entry =
+ createIndexEntry(opCtx, collection, std::move(descriptor), flags);
+ fassert(17340, entry->isReady(opCtx, collection));
// When initializing indexes from disk, we conservatively set the minimumVisibleSnapshot
// to non _id indexes to the recovery timestamp. The _id index is left visible. It's
@@ -180,7 +173,7 @@ Status IndexCatalogImpl::init(OperationContext* opCtx) {
}
}
- CollectionQueryInfo::get(_collection).init(opCtx, _collection);
+ CollectionQueryInfo::get(collection).init(opCtx, collection);
return Status::OK();
}
@@ -227,19 +220,20 @@ string IndexCatalogImpl::_getAccessMethodName(const BSONObj& keyPattern) const {
// ---------------------------
StatusWith<BSONObj> IndexCatalogImpl::_validateAndFixIndexSpec(OperationContext* opCtx,
+ const CollectionPtr& collection,
const BSONObj& original) const {
- Status status = _isSpecOk(opCtx, original);
+ Status status = _isSpecOk(opCtx, collection, original);
if (!status.isOK()) {
return status;
}
- auto swFixed = _fixIndexSpec(opCtx, _collection, original);
+ auto swFixed = _fixIndexSpec(opCtx, collection, original);
if (!swFixed.isOK()) {
return swFixed;
}
// we double check with new index spec
- status = _isSpecOk(opCtx, swFixed.getValue());
+ status = _isSpecOk(opCtx, collection, swFixed.getValue());
if (!status.isOK()) {
return status;
}
@@ -266,10 +260,11 @@ Status IndexCatalogImpl::_isNonIDIndexAndNotAllowedToBuild(OperationContext* opC
}
void IndexCatalogImpl::_logInternalState(OperationContext* opCtx,
+ const CollectionPtr& collection,
long long numIndexesInCollectionCatalogEntry,
const std::vector<std::string>& indexNamesToDrop,
bool haveIdIndex) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_X));
LOGV2_ERROR(20365,
"Internal Index Catalog state",
@@ -302,40 +297,37 @@ void IndexCatalogImpl::_logInternalState(OperationContext* opCtx,
std::vector<std::string> allIndexes;
std::vector<std::string> readyIndexes;
- auto durableCatalog = DurableCatalog::get(opCtx);
- durableCatalog->getAllIndexes(opCtx, _collection->getCatalogId(), &allIndexes);
- durableCatalog->getReadyIndexes(opCtx, _collection->getCatalogId(), &readyIndexes);
+ collection->getAllIndexes(&allIndexes);
+ collection->getReadyIndexes(&readyIndexes);
for (const auto& index : allIndexes) {
LOGV2_ERROR(20372,
"allIndexes",
"index"_attr = index,
- "spec"_attr = redact(
- durableCatalog->getIndexSpec(opCtx, _collection->getCatalogId(), index)));
+ "spec"_attr = redact(collection->getIndexSpec(index)));
}
for (const auto& index : readyIndexes) {
LOGV2_ERROR(20374,
"readyIndexes",
"index"_attr = index,
- "spec"_attr = redact(
- durableCatalog->getIndexSpec(opCtx, _collection->getCatalogId(), index)));
+ "spec"_attr = redact(collection->getIndexSpec(index)));
}
for (const auto& indexNameToDrop : indexNamesToDrop) {
LOGV2_ERROR(20376,
"indexNamesToDrop",
"index"_attr = indexNameToDrop,
- "spec"_attr = redact(durableCatalog->getIndexSpec(
- opCtx, _collection->getCatalogId(), indexNameToDrop)));
+ "spec"_attr = redact(collection->getIndexSpec(indexNameToDrop)));
}
}
StatusWith<BSONObj> IndexCatalogImpl::prepareSpecForCreate(
OperationContext* opCtx,
+ const CollectionPtr& collection,
const BSONObj& original,
const boost::optional<ResumeIndexInfo>& resumeInfo) const {
- auto swValidatedAndFixed = _validateAndFixIndexSpec(opCtx, original);
+ auto swValidatedAndFixed = _validateAndFixIndexSpec(opCtx, collection, original);
if (!swValidatedAndFixed.isOK()) {
return swValidatedAndFixed.getStatus().withContext(
str::stream() << "Error in specification " << original.toString());
@@ -360,7 +352,7 @@ StatusWith<BSONObj> IndexCatalogImpl::prepareSpecForCreate(
}
// First check against only the ready indexes for conflicts.
- status = _doesSpecConflictWithExisting(opCtx, validatedSpec, false);
+ status = _doesSpecConflictWithExisting(opCtx, collection, validatedSpec, false);
if (!status.isOK()) {
return status;
}
@@ -376,7 +368,7 @@ StatusWith<BSONObj> IndexCatalogImpl::prepareSpecForCreate(
// The index catalog cannot currently iterate over only in-progress indexes. So by previously
// checking against only ready indexes without error, we know that any errors encountered
// checking against all indexes occurred due to an in-progress index.
- status = _doesSpecConflictWithExisting(opCtx, validatedSpec, true);
+ status = _doesSpecConflictWithExisting(opCtx, collection, validatedSpec, true);
if (!status.isOK()) {
if (ErrorCodes::IndexAlreadyExists == status.code()) {
// Callers need to be able to distinguish conflicts against ready indexes versus
@@ -390,7 +382,9 @@ StatusWith<BSONObj> IndexCatalogImpl::prepareSpecForCreate(
}
std::vector<BSONObj> IndexCatalogImpl::removeExistingIndexesNoChecks(
- OperationContext* const opCtx, const std::vector<BSONObj>& indexSpecsToBuild) const {
+ OperationContext* const opCtx,
+ const CollectionPtr& collection,
+ const std::vector<BSONObj>& indexSpecsToBuild) const {
std::vector<BSONObj> result;
// Filter out ready and in-progress index builds, and any non-_id indexes if 'buildIndexes' is
// set to false in the replica set's config.
@@ -403,7 +397,8 @@ std::vector<BSONObj> IndexCatalogImpl::removeExistingIndexesNoChecks(
// _doesSpecConflictWithExisting currently does more work than we require here: we are only
// interested in the index already exists error.
if (ErrorCodes::IndexAlreadyExists ==
- _doesSpecConflictWithExisting(opCtx, spec, true /*includeUnfinishedIndexes*/)) {
+ _doesSpecConflictWithExisting(
+ opCtx, collection, spec, true /*includeUnfinishedIndexes*/)) {
continue;
}
@@ -414,11 +409,12 @@ std::vector<BSONObj> IndexCatalogImpl::removeExistingIndexesNoChecks(
std::vector<BSONObj> IndexCatalogImpl::removeExistingIndexes(
OperationContext* const opCtx,
+ const CollectionPtr& collection,
const std::vector<BSONObj>& indexSpecsToBuild,
const bool removeIndexBuildsToo) const {
std::vector<BSONObj> result;
for (const auto& spec : indexSpecsToBuild) {
- auto prepareResult = prepareSpecForCreate(opCtx, spec);
+ auto prepareResult = prepareSpecForCreate(opCtx, collection, spec);
if (prepareResult == ErrorCodes::IndexAlreadyExists ||
(removeIndexBuildsToo && prepareResult == ErrorCodes::IndexBuildAlreadyInProgress)) {
continue;
@@ -430,20 +426,21 @@ std::vector<BSONObj> IndexCatalogImpl::removeExistingIndexes(
}
IndexCatalogEntry* IndexCatalogImpl::createIndexEntry(OperationContext* opCtx,
+ Collection* collection,
std::unique_ptr<IndexDescriptor> descriptor,
CreateIndexEntryFlags flags) {
- Status status = _isSpecOk(opCtx, descriptor->infoObj());
+ Status status = _isSpecOk(opCtx, collection, descriptor->infoObj());
if (!status.isOK()) {
LOGV2_FATAL_NOTRACE(28782,
"Found an invalid index",
"descriptor"_attr = descriptor->infoObj(),
- "namespace"_attr = _collection->ns(),
+ "namespace"_attr = collection->ns(),
"error"_attr = redact(status));
}
auto engine = opCtx->getServiceContext()->getStorageEngine();
std::string ident = engine->getCatalog()->getIndexIdent(
- opCtx, _collection->getCatalogId(), descriptor->indexName());
+ opCtx, collection->getCatalogId(), descriptor->indexName());
bool isReadyIndex = CreateIndexEntryFlags::kIsReady & flags;
bool frozen = CreateIndexEntryFlags::kFrozen & flags;
@@ -451,11 +448,10 @@ IndexCatalogEntry* IndexCatalogImpl::createIndexEntry(OperationContext* opCtx,
auto* const descriptorPtr = descriptor.get();
auto entry = std::make_shared<IndexCatalogEntryImpl>(
- opCtx, _collection->getCatalogId(), ident, std::move(descriptor), frozen);
+ opCtx, collection, ident, std::move(descriptor), frozen);
IndexDescriptor* desc = entry->descriptor();
- auto collOptions =
- DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, _collection->getCatalogId());
+ const auto& collOptions = collection->getCollectionOptions();
std::unique_ptr<SortedDataInterface> sdi =
engine->getEngine()->getSortedDataInterface(opCtx, collOptions, ident, desc);
@@ -473,10 +469,10 @@ IndexCatalogEntry* IndexCatalogImpl::createIndexEntry(OperationContext* opCtx,
}
bool initFromDisk = CreateIndexEntryFlags::kInitFromDisk & flags;
- if (!initFromDisk && UncommittedCollections::getForTxn(opCtx, _collection->ns()) == nullptr) {
+ if (!initFromDisk && UncommittedCollections::getForTxn(opCtx, collection->ns()) == nullptr) {
const std::string indexName = descriptorPtr->indexName();
opCtx->recoveryUnit()->onRollback(
- [collectionDecorations = _collection->getSharedDecorations(),
+ [collectionDecorations = collection->getSharedDecorations(),
indexName = std::move(indexName)] {
CollectionIndexUsageTrackerDecoration::get(collectionDecorations)
.unregisterIndex(indexName);
@@ -487,15 +483,17 @@ IndexCatalogEntry* IndexCatalogImpl::createIndexEntry(OperationContext* opCtx,
}
StatusWith<BSONObj> IndexCatalogImpl::createIndexOnEmptyCollection(OperationContext* opCtx,
+ Collection* collection,
BSONObj spec) {
+ invariant(collection->uuid() == collection->uuid());
UncommittedCollections::get(opCtx).invariantHasExclusiveAccessToCollection(opCtx,
- _collection->ns());
- invariant(_collection->isEmpty(opCtx),
- str::stream() << "Collection must be empty. Collection: " << _collection->ns()
- << " UUID: " << _collection->uuid()
- << " Count (from size storer): " << _collection->numRecords(opCtx));
+ collection->ns());
+ invariant(collection->isEmpty(opCtx),
+ str::stream() << "Collection must be empty. Collection: " << collection->ns()
+ << " UUID: " << collection->uuid()
+ << " Count (from size storer): " << collection->numRecords(opCtx));
- StatusWith<BSONObj> statusWithSpec = prepareSpecForCreate(opCtx, spec);
+ StatusWith<BSONObj> statusWithSpec = prepareSpecForCreate(opCtx, collection, spec);
Status status = statusWithSpec.getStatus();
if (!status.isOK())
return status;
@@ -504,13 +502,13 @@ StatusWith<BSONObj> IndexCatalogImpl::createIndexOnEmptyCollection(OperationCont
// now going to touch disk
boost::optional<UUID> buildUUID = boost::none;
IndexBuildBlock indexBuildBlock(
- _collection->ns(), spec, IndexBuildMethod::kForeground, buildUUID);
- status = indexBuildBlock.init(opCtx, _collection);
+ collection->ns(), spec, IndexBuildMethod::kForeground, buildUUID);
+ status = indexBuildBlock.init(opCtx, collection);
if (!status.isOK())
return status;
// sanity checks, etc...
- IndexCatalogEntry* entry = indexBuildBlock.getEntry(opCtx, _collection);
+ IndexCatalogEntry* entry = indexBuildBlock.getEntry(opCtx, collection);
invariant(entry);
IndexDescriptor* descriptor = entry->descriptor();
invariant(descriptor);
@@ -518,11 +516,10 @@ StatusWith<BSONObj> IndexCatalogImpl::createIndexOnEmptyCollection(OperationCont
status = entry->accessMethod()->initializeAsEmpty(opCtx);
if (!status.isOK())
return status;
- indexBuildBlock.success(opCtx, _collection);
+ indexBuildBlock.success(opCtx, collection);
// sanity check
- invariant(DurableCatalog::get(opCtx)->isIndexReady(
- opCtx, _collection->getCatalogId(), descriptor->indexName()));
+ invariant(collection->isIndexReady(descriptor->indexName()));
return spec;
@@ -586,8 +583,10 @@ StatusWith<BSONObj> adjustIndexSpecObject(const BSONObj& obj) {
} // namespace
-Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec) const {
- const NamespaceString& nss = _collection->ns();
+Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx,
+ const CollectionPtr& collection,
+ const BSONObj& spec) const {
+ const NamespaceString& nss = collection->ns();
BSONElement vElt = spec["v"];
if (!vElt) {
@@ -747,22 +746,22 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
uassert(ErrorCodes::InvalidOptions,
"Partial indexes are not supported on collections clustered by _id",
- !_collection->isClustered() || !spec[IndexDescriptor::kPartialFilterExprFieldName]);
+ !collection->isClustered() || !spec[IndexDescriptor::kPartialFilterExprFieldName]);
uassert(ErrorCodes::InvalidOptions,
"Unique indexes are not supported on collections clustered by _id",
- !_collection->isClustered() || !spec[IndexDescriptor::kUniqueFieldName].trueValue());
+ !collection->isClustered() || !spec[IndexDescriptor::kUniqueFieldName].trueValue());
uassert(ErrorCodes::InvalidOptions,
"TTL indexes are not supported on collections clustered by _id",
- !_collection->isClustered() || !spec[IndexDescriptor::kExpireAfterSecondsFieldName]);
+ !collection->isClustered() || !spec[IndexDescriptor::kExpireAfterSecondsFieldName]);
uassert(ErrorCodes::InvalidOptions,
"Text indexes are not supported on collections clustered by _id",
- !_collection->isClustered() || pluginName != IndexNames::TEXT);
+ !collection->isClustered() || pluginName != IndexNames::TEXT);
if (IndexDescriptor::isIdIndexPattern(key)) {
- if (_collection->isClustered()) {
+ if (collection->isClustered()) {
return Status(ErrorCodes::CannotCreateIndex,
"cannot create an _id index on a collection already clustered by _id");
}
@@ -782,7 +781,7 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
if (collationElement &&
!CollatorInterface::collatorsMatch(expCtx->getCollator(),
- _collection->getDefaultCollator())) {
+ collection->getDefaultCollator())) {
return Status(ErrorCodes::CannotCreateIndex,
"_id index must have the collection default collation");
}
@@ -816,6 +815,7 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
}
Status IndexCatalogImpl::_doesSpecConflictWithExisting(OperationContext* opCtx,
+ const CollectionPtr& collection,
const BSONObj& spec,
const bool includeUnfinishedIndexes) const {
const char* name = spec.getStringField(IndexDescriptor::kIndexNameFieldName);
@@ -831,7 +831,7 @@ Status IndexCatalogImpl::_doesSpecConflictWithExisting(OperationContext* opCtx,
// Index already exists with same name. Check whether the options are the same as well.
IndexDescriptor candidate(_getAccessMethodName(key), spec);
auto indexComparison =
- candidate.compareIndexOptions(opCtx, _collection->ns(), getEntry(desc));
+ candidate.compareIndexOptions(opCtx, collection->ns(), getEntry(desc));
// Key pattern or another uniquely-identifying option differs. We can build this index,
// but not with the specified (duplicate) name. User must specify another index name.
@@ -893,7 +893,7 @@ Status IndexCatalogImpl::_doesSpecConflictWithExisting(OperationContext* opCtx,
// informative error message.
IndexDescriptor candidate(_getAccessMethodName(key), spec);
auto indexComparison =
- candidate.compareIndexOptions(opCtx, _collection->ns(), getEntry(desc));
+ candidate.compareIndexOptions(opCtx, collection->ns(), getEntry(desc));
// The candidate's key and uniquely-identifying options are equivalent to an existing
// index, but some other options are not identical. Return a message to that effect.
@@ -914,11 +914,11 @@ Status IndexCatalogImpl::_doesSpecConflictWithExisting(OperationContext* opCtx,
}
if (numIndexesTotal(opCtx) >= kMaxNumIndexesAllowed) {
- string s = str::stream() << "add index fails, too many indexes for " << _collection->ns()
+ string s = str::stream() << "add index fails, too many indexes for " << collection->ns()
<< " key:" << key;
LOGV2(20354,
"Exceeded maximum number of indexes",
- "namespace"_attr = _collection->ns(),
+ "namespace"_attr = collection->ns(),
"key"_attr = key,
"maxNumIndexes"_attr = kMaxNumIndexesAllowed);
return Status(ErrorCodes::CannotCreateIndex, s);
@@ -940,7 +940,7 @@ Status IndexCatalogImpl::_doesSpecConflictWithExisting(OperationContext* opCtx,
return Status::OK();
}
-BSONObj IndexCatalogImpl::getDefaultIdIndexSpec() const {
+BSONObj IndexCatalogImpl::getDefaultIdIndexSpec(const CollectionPtr& collection) const {
dassert(_idObj["_id"].type() == NumberInt);
const auto indexVersion = IndexDescriptor::getDefaultIndexVersion();
@@ -949,18 +949,17 @@ BSONObj IndexCatalogImpl::getDefaultIdIndexSpec() const {
b.append("v", static_cast<int>(indexVersion));
b.append("name", "_id_");
b.append("key", _idObj);
- if (_collection->getDefaultCollator() && indexVersion >= IndexVersion::kV2) {
+ if (collection->getDefaultCollator() && indexVersion >= IndexVersion::kV2) {
// Creating an index with the "collation" option requires a v=2 index.
- b.append("collation", _collection->getDefaultCollator()->getSpec().toBSON());
+ b.append("collation", collection->getDefaultCollator()->getSpec().toBSON());
}
return b.obj();
}
void IndexCatalogImpl::dropAllIndexes(OperationContext* opCtx,
+ Collection* collection,
bool includingIdIndex,
std::function<void(const IndexDescriptor*)> onDropFn) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
-
uassert(ErrorCodes::BackgroundOperationInProgressForNamespace,
str::stream() << "cannot perform operation: an index build is currently running",
!haveAnyIndexesInProgress());
@@ -997,13 +996,12 @@ void IndexCatalogImpl::dropAllIndexes(OperationContext* opCtx,
if (onDropFn) {
onDropFn(desc);
}
- invariant(dropIndexEntry(opCtx, entry).isOK());
+ invariant(dropIndexEntry(opCtx, collection, entry).isOK());
}
// verify state is sane post cleaning
- long long numIndexesInCollectionCatalogEntry =
- DurableCatalog::get(opCtx)->getTotalIndexCount(opCtx, _collection->getCatalogId());
+ long long numIndexesInCollectionCatalogEntry = collection->getTotalIndexCount();
if (haveIdIndex) {
fassert(17324, numIndexesTotal(opCtx) == 1);
@@ -1012,8 +1010,11 @@ void IndexCatalogImpl::dropAllIndexes(OperationContext* opCtx,
fassert(17336, _readyIndexes.size() == 1);
} else {
if (numIndexesTotal(opCtx) || numIndexesInCollectionCatalogEntry || _readyIndexes.size()) {
- _logInternalState(
- opCtx, numIndexesInCollectionCatalogEntry, indexNamesToDrop, haveIdIndex);
+ _logInternalState(opCtx,
+ collection,
+ numIndexesInCollectionCatalogEntry,
+ indexNamesToDrop,
+ haveIdIndex);
}
fassert(17327, numIndexesTotal(opCtx) == 0);
fassert(17328, numIndexesInCollectionCatalogEntry == 0);
@@ -1021,36 +1022,38 @@ void IndexCatalogImpl::dropAllIndexes(OperationContext* opCtx,
}
}
-void IndexCatalogImpl::dropAllIndexes(OperationContext* opCtx, bool includingIdIndex) {
- dropAllIndexes(opCtx, includingIdIndex, {});
+void IndexCatalogImpl::dropAllIndexes(OperationContext* opCtx,
+ Collection* collection,
+ bool includingIdIndex) {
+ dropAllIndexes(opCtx, collection, includingIdIndex, {});
}
-Status IndexCatalogImpl::dropIndex(OperationContext* opCtx, const IndexDescriptor* desc) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
-
+Status IndexCatalogImpl::dropIndex(OperationContext* opCtx,
+ Collection* collection,
+ const IndexDescriptor* desc) {
IndexCatalogEntry* entry = desc->getEntry();
if (!entry)
return Status(ErrorCodes::InternalError, "cannot find index to delete");
- if (!entry->isReady(opCtx))
+ if (!entry->isReady(opCtx, collection))
return Status(ErrorCodes::InternalError, "cannot delete not ready index");
- return dropIndexEntry(opCtx, entry);
+ return dropIndexEntry(opCtx, collection, entry);
}
-Status IndexCatalogImpl::dropUnfinishedIndex(OperationContext* opCtx, const IndexDescriptor* desc) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
-
+Status IndexCatalogImpl::dropUnfinishedIndex(OperationContext* opCtx,
+ Collection* collection,
+ const IndexDescriptor* desc) {
IndexCatalogEntry* entry = desc->getEntry();
if (!entry)
return Status(ErrorCodes::InternalError, "cannot find index to delete");
- if (entry->isReady(opCtx))
+ if (entry->isReady(opCtx, collection))
return Status(ErrorCodes::InternalError, "expected unfinished index, but it is ready");
- return dropIndexEntry(opCtx, entry);
+ return dropIndexEntry(opCtx, collection, entry);
}
namespace {
@@ -1083,50 +1086,48 @@ private:
};
} // namespace
-Status IndexCatalogImpl::dropIndexEntry(OperationContext* opCtx, IndexCatalogEntry* entry) {
+Status IndexCatalogImpl::dropIndexEntry(OperationContext* opCtx,
+ Collection* collection,
+ IndexCatalogEntry* entry) {
invariant(entry);
- invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
// Pulling indexName out as it is needed post descriptor release.
string indexName = entry->descriptor()->indexName();
- audit::logDropIndex(opCtx->getClient(), indexName, _collection->ns());
+ audit::logDropIndex(opCtx->getClient(), indexName, collection->ns());
auto released = _readyIndexes.release(entry->descriptor());
if (released) {
invariant(released.get() == entry);
opCtx->recoveryUnit()->registerChange(std::make_unique<IndexRemoveChange>(
- &_readyIndexes, std::move(released), _collection->getSharedDecorations()));
+ &_readyIndexes, std::move(released), collection->getSharedDecorations()));
} else {
released = _buildingIndexes.release(entry->descriptor());
invariant(released.get() == entry);
opCtx->recoveryUnit()->registerChange(std::make_unique<IndexRemoveChange>(
- &_buildingIndexes, std::move(released), _collection->getSharedDecorations()));
+ &_buildingIndexes, std::move(released), collection->getSharedDecorations()));
}
- CollectionQueryInfo::get(_collection).rebuildIndexData(opCtx, _collection);
- CollectionIndexUsageTrackerDecoration::get(_collection->getSharedDecorations())
+ CollectionQueryInfo::get(collection).rebuildIndexData(opCtx, collection);
+ CollectionIndexUsageTrackerDecoration::get(collection->getSharedDecorations())
.unregisterIndex(indexName);
- _deleteIndexFromDisk(opCtx, indexName, entry->getSharedIdent());
+ _deleteIndexFromDisk(opCtx, collection, indexName, entry->getSharedIdent());
return Status::OK();
}
-void IndexCatalogImpl::deleteIndexFromDisk(OperationContext* opCtx, const string& indexName) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
- _deleteIndexFromDisk(opCtx, indexName, nullptr);
+void IndexCatalogImpl::deleteIndexFromDisk(OperationContext* opCtx,
+ Collection* collection,
+ const string& indexName) {
+ _deleteIndexFromDisk(opCtx, collection, indexName, nullptr);
}
void IndexCatalogImpl::_deleteIndexFromDisk(OperationContext* opCtx,
+ Collection* collection,
const string& indexName,
std::shared_ptr<Ident> ident) {
invariant(!findIndexByName(opCtx, indexName, true /* includeUnfinishedIndexes*/));
- catalog::removeIndex(opCtx,
- indexName,
- _collection->getCatalogId(),
- _collection->uuid(),
- _collection->ns(),
- std::move(ident));
+ catalog::removeIndex(opCtx, indexName, collection, std::move(ident));
}
void IndexCatalogImpl::setMultikeyPaths(OperationContext* const opCtx,
@@ -1276,13 +1277,12 @@ std::vector<std::shared_ptr<const IndexCatalogEntry>> IndexCatalogImpl::getAllRe
}
const IndexDescriptor* IndexCatalogImpl::refreshEntry(OperationContext* opCtx,
+ Collection* collection,
const IndexDescriptor* oldDesc) {
- invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
invariant(_buildingIndexes.size() == 0);
const std::string indexName = oldDesc->indexName();
- auto durableCatalog = DurableCatalog::get(opCtx);
- invariant(durableCatalog->isIndexReady(opCtx, _collection->getCatalogId(), indexName));
+ invariant(collection->isIndexReady(indexName));
// Delete the IndexCatalogEntry that owns this descriptor. After deletion, 'oldDesc' is
// invalid and should not be dereferenced. Also, invalidate the index from the
@@ -1290,26 +1290,26 @@ const IndexDescriptor* IndexCatalogImpl::refreshEntry(OperationContext* opCtx,
auto oldEntry = _readyIndexes.release(oldDesc);
invariant(oldEntry);
opCtx->recoveryUnit()->registerChange(std::make_unique<IndexRemoveChange>(
- &_readyIndexes, std::move(oldEntry), _collection->getSharedDecorations()));
- CollectionIndexUsageTrackerDecoration::get(_collection->getSharedDecorations())
+ &_readyIndexes, std::move(oldEntry), collection->getSharedDecorations()));
+ CollectionIndexUsageTrackerDecoration::get(collection->getSharedDecorations())
.unregisterIndex(indexName);
// Ask the CollectionCatalogEntry for the new index spec.
- BSONObj spec =
- durableCatalog->getIndexSpec(opCtx, _collection->getCatalogId(), indexName).getOwned();
+ BSONObj spec = collection->getIndexSpec(indexName).getOwned();
BSONObj keyPattern = spec.getObjectField("key");
// Re-register this index in the index catalog with the new spec. Also, add the new index
// to the CollectionIndexUsageTrackerDecoration (shared state among Collection instances).
auto newDesc = std::make_unique<IndexDescriptor>(_getAccessMethodName(keyPattern), spec);
- auto newEntry = createIndexEntry(opCtx, std::move(newDesc), CreateIndexEntryFlags::kIsReady);
- invariant(newEntry->isReady(opCtx));
+ auto newEntry =
+ createIndexEntry(opCtx, collection, std::move(newDesc), CreateIndexEntryFlags::kIsReady);
+ invariant(newEntry->isReady(opCtx, collection));
auto desc = newEntry->descriptor();
- CollectionIndexUsageTrackerDecoration::get(_collection->getSharedDecorations())
+ CollectionIndexUsageTrackerDecoration::get(collection->getSharedDecorations())
.registerIndex(desc->indexName(), desc->keyPattern());
// Last rebuild index data for CollectionQueryInfo for this Collection.
- CollectionQueryInfo::get(_collection).rebuildIndexData(opCtx, _collection);
+ CollectionQueryInfo::get(collection).rebuildIndexData(opCtx, collection);
opCtx->recoveryUnit()->onCommit([newEntry](auto commitTime) {
if (commitTime) {
@@ -1325,14 +1325,14 @@ const IndexDescriptor* IndexCatalogImpl::refreshEntry(OperationContext* opCtx,
Status IndexCatalogImpl::_indexKeys(OperationContext* opCtx,
const CollectionPtr& coll,
- IndexCatalogEntry* index,
+ const IndexCatalogEntry* index,
const KeyStringSet& keys,
const KeyStringSet& multikeyMetadataKeys,
const MultikeyPaths& multikeyPaths,
const BSONObj& obj,
RecordId loc,
const InsertDeleteOptions& options,
- int64_t* keysInsertedOut) {
+ int64_t* keysInsertedOut) const {
Status status = Status::OK();
if (index->isHybridBuilding()) {
// The side table interface accepts only records that meet the criteria for this partial
@@ -1378,9 +1378,9 @@ Status IndexCatalogImpl::_indexKeys(OperationContext* opCtx,
Status IndexCatalogImpl::_indexFilteredRecords(OperationContext* opCtx,
const CollectionPtr& coll,
- IndexCatalogEntry* index,
+ const IndexCatalogEntry* index,
const std::vector<BsonRecord>& bsonRecords,
- int64_t* keysInsertedOut) {
+ int64_t* keysInsertedOut) const {
auto& executionCtx = StorageExecutionContext::get(opCtx);
InsertDeleteOptions options;
@@ -1429,9 +1429,9 @@ Status IndexCatalogImpl::_indexFilteredRecords(OperationContext* opCtx,
Status IndexCatalogImpl::_indexRecords(OperationContext* opCtx,
const CollectionPtr& coll,
- IndexCatalogEntry* index,
+ const IndexCatalogEntry* index,
const std::vector<BsonRecord>& bsonRecords,
- int64_t* keysInsertedOut) {
+ int64_t* keysInsertedOut) const {
if (MONGO_unlikely(skipIndexNewRecords.shouldFail())) {
return Status::OK();
}
@@ -1451,12 +1451,12 @@ Status IndexCatalogImpl::_indexRecords(OperationContext* opCtx,
Status IndexCatalogImpl::_updateRecord(OperationContext* const opCtx,
const CollectionPtr& coll,
- IndexCatalogEntry* index,
+ const IndexCatalogEntry* index,
const BSONObj& oldDoc,
const BSONObj& newDoc,
const RecordId& recordId,
int64_t* const keysInsertedOut,
- int64_t* const keysDeletedOut) {
+ int64_t* const keysDeletedOut) const {
IndexAccessMethod* iam = index->accessMethod();
InsertDeleteOptions options;
@@ -1470,10 +1470,10 @@ Status IndexCatalogImpl::_updateRecord(OperationContext* const opCtx,
int64_t keysDeleted = 0;
auto status = Status::OK();
- if (index->isHybridBuilding() || !index->isReady(opCtx)) {
+ if (index->isHybridBuilding() || !index->isReady(opCtx, coll)) {
bool logIfError = false;
_unindexKeys(
- opCtx, index, updateTicket.removed, oldDoc, recordId, logIfError, &keysDeleted);
+ opCtx, coll, index, updateTicket.removed, oldDoc, recordId, logIfError, &keysDeleted);
status = _indexKeys(opCtx,
coll,
index,
@@ -1498,14 +1498,15 @@ Status IndexCatalogImpl::_updateRecord(OperationContext* const opCtx,
}
void IndexCatalogImpl::_unindexKeys(OperationContext* opCtx,
- IndexCatalogEntry* index,
+ const CollectionPtr& collection,
+ const IndexCatalogEntry* index,
const KeyStringSet& keys,
const BSONObj& obj,
RecordId loc,
bool logIfError,
- int64_t* const keysDeletedOut) {
+ int64_t* const keysDeletedOut) const {
InsertDeleteOptions options;
- prepareInsertDeleteOptions(opCtx, _collection->ns(), index->descriptor(), &options);
+ prepareInsertDeleteOptions(opCtx, collection->ns(), index->descriptor(), &options);
options.logIfError = logIfError;
if (index->isHybridBuilding()) {
@@ -1537,7 +1538,7 @@ void IndexCatalogImpl::_unindexKeys(OperationContext* opCtx,
// We need to disable blind-deletes for in-progress indexes, in order to force recordid-matching
// for unindex operations, since initial sync can build an index over a collection with
// duplicates. See SERVER-17487 for more details.
- options.dupsAllowed = options.dupsAllowed || !index->isReady(opCtx);
+ options.dupsAllowed = options.dupsAllowed || !index->isReady(opCtx, collection);
int64_t removed;
Status status = index->accessMethod()->removeKeys(opCtx, keys, loc, options, &removed);
@@ -1547,7 +1548,7 @@ void IndexCatalogImpl::_unindexKeys(OperationContext* opCtx,
"Couldn't unindex record {obj} from collection {namespace}: {error}",
"Couldn't unindex record",
"record"_attr = redact(obj),
- "namespace"_attr = _collection->ns(),
+ "namespace"_attr = collection->ns(),
"error"_attr = redact(status));
}
@@ -1557,11 +1558,12 @@ void IndexCatalogImpl::_unindexKeys(OperationContext* opCtx,
}
void IndexCatalogImpl::_unindexRecord(OperationContext* opCtx,
- IndexCatalogEntry* entry,
+ const CollectionPtr& collection,
+ const IndexCatalogEntry* entry,
const BSONObj& obj,
const RecordId& loc,
bool logIfError,
- int64_t* keysDeletedOut) {
+ int64_t* keysDeletedOut) const {
auto& executionCtx = StorageExecutionContext::get(opCtx);
// There's no need to compute the prefixes of the indexed fields that cause the index to be
@@ -1587,13 +1589,13 @@ void IndexCatalogImpl::_unindexRecord(OperationContext* opCtx,
return;
}
}
- _unindexKeys(opCtx, entry, *keys, obj, loc, logIfError, keysDeletedOut);
+ _unindexKeys(opCtx, collection, entry, *keys, obj, loc, logIfError, keysDeletedOut);
}
Status IndexCatalogImpl::indexRecords(OperationContext* opCtx,
const CollectionPtr& coll,
const std::vector<BsonRecord>& bsonRecords,
- int64_t* keysInsertedOut) {
+ int64_t* keysInsertedOut) const {
if (keysInsertedOut) {
*keysInsertedOut = 0;
}
@@ -1619,7 +1621,7 @@ Status IndexCatalogImpl::updateRecord(OperationContext* const opCtx,
const BSONObj& newDoc,
const RecordId& recordId,
int64_t* const keysInsertedOut,
- int64_t* const keysDeletedOut) {
+ int64_t* const keysDeletedOut) const {
*keysInsertedOut = 0;
*keysDeletedOut = 0;
@@ -1648,10 +1650,11 @@ Status IndexCatalogImpl::updateRecord(OperationContext* const opCtx,
}
void IndexCatalogImpl::unindexRecord(OperationContext* opCtx,
+ const CollectionPtr& collection,
const BSONObj& obj,
const RecordId& loc,
bool noWarn,
- int64_t* keysDeletedOut) {
+ int64_t* keysDeletedOut) const {
if (keysDeletedOut) {
*keysDeletedOut = 0;
}
@@ -1662,7 +1665,7 @@ void IndexCatalogImpl::unindexRecord(OperationContext* opCtx,
IndexCatalogEntry* entry = it->get();
bool logIfError = !noWarn;
- _unindexRecord(opCtx, entry, obj, loc, logIfError, keysDeletedOut);
+ _unindexRecord(opCtx, collection, entry, obj, loc, logIfError, keysDeletedOut);
}
for (IndexCatalogEntryContainer::const_iterator it = _buildingIndexes.begin();
@@ -1671,8 +1674,8 @@ void IndexCatalogImpl::unindexRecord(OperationContext* opCtx,
IndexCatalogEntry* entry = it->get();
// If it's a background index, we DO NOT want to log anything.
- bool logIfError = entry->isReady(opCtx) ? !noWarn : false;
- _unindexRecord(opCtx, entry, obj, loc, logIfError, keysDeletedOut);
+ bool logIfError = entry->isReady(opCtx, collection) ? !noWarn : false;
+ _unindexRecord(opCtx, collection, entry, obj, loc, logIfError, keysDeletedOut);
}
}
@@ -1740,7 +1743,7 @@ void IndexCatalogImpl::prepareInsertDeleteOptions(OperationContext* opCtx,
}
void IndexCatalogImpl::indexBuildSuccess(OperationContext* opCtx,
- const CollectionPtr& coll,
+ Collection* coll,
IndexCatalogEntry* index) {
auto releasedEntry = _buildingIndexes.release(index->descriptor());
invariant(releasedEntry.get() == index);
diff --git a/src/mongo/db/catalog/index_catalog_impl.h b/src/mongo/db/catalog/index_catalog_impl.h
index 83711ee9d92..1e8799889e3 100644
--- a/src/mongo/db/catalog/index_catalog_impl.h
+++ b/src/mongo/db/catalog/index_catalog_impl.h
@@ -58,25 +58,14 @@ struct InsertDeleteOptions;
*/
class IndexCatalogImpl : public IndexCatalog {
public:
- explicit IndexCatalogImpl(Collection* collection);
- IndexCatalogImpl(const IndexCatalogImpl& other) = default;
-
/**
* Creates a cloned IndexCatalogImpl. Will make shallow copies of IndexCatalogEntryContainers so
* the IndexCatalogEntry will be shared across IndexCatalogImpl instances'
- *
- * Must call setCollection() after cloning to set the correct Collection backpointer
*/
std::unique_ptr<IndexCatalog> clone() const override;
- /**
- * Must be called after clone() to set the backpointer to the correct Collection instance.
- * This is required due to limitations in cloned_ptr.
- */
- void setCollection(Collection* collection);
-
// must be called before used
- Status init(OperationContext* opCtx) override;
+ Status init(OperationContext* opCtx, Collection* collection) override;
// ---- accessors -----
@@ -98,7 +87,7 @@ public:
/**
* Returns the spec for the id index to create by default for this collection.
*/
- BSONObj getDefaultIdIndexSpec() const override;
+ BSONObj getDefaultIdIndexSpec(const CollectionPtr& collection) const override;
const IndexDescriptor* findIdIndex(OperationContext* opCtx) const override;
@@ -170,6 +159,7 @@ public:
* on the collection.
*/
const IndexDescriptor* refreshEntry(OperationContext* opCtx,
+ Collection* collection,
const IndexDescriptor* oldDesc) override;
const IndexCatalogEntry* getEntry(const IndexDescriptor* desc) const override;
@@ -185,6 +175,7 @@ public:
// ---- index set modifiers ------
IndexCatalogEntry* createIndexEntry(OperationContext* opCtx,
+ Collection* collection,
std::unique_ptr<IndexDescriptor> descriptor,
CreateIndexEntryFlags flags) override;
@@ -194,19 +185,23 @@ public:
* of the created index, as it is stored in this index catalog.
*/
StatusWith<BSONObj> createIndexOnEmptyCollection(OperationContext* opCtx,
+ Collection* collection,
BSONObj spec) override;
StatusWith<BSONObj> prepareSpecForCreate(
OperationContext* opCtx,
+ const CollectionPtr& collection,
const BSONObj& original,
const boost::optional<ResumeIndexInfo>& resumeInfo = boost::none) const override;
std::vector<BSONObj> removeExistingIndexes(OperationContext* const opCtx,
+ const CollectionPtr& collection,
const std::vector<BSONObj>& indexSpecsToBuild,
const bool removeIndexBuildsToo) const override;
std::vector<BSONObj> removeExistingIndexesNoChecks(
OperationContext* const opCtx,
+ const CollectionPtr& collection,
const std::vector<BSONObj>& indexSpecsToBuild) const override;
/**
@@ -215,19 +210,29 @@ public:
* it is filled with the names and index info of the dropped indexes.
*/
void dropAllIndexes(OperationContext* opCtx,
+ Collection* collection,
bool includingIdIndex,
std::function<void(const IndexDescriptor*)> onDropFn) override;
- void dropAllIndexes(OperationContext* opCtx, bool includingIdIndex) override;
+ void dropAllIndexes(OperationContext* opCtx,
+ Collection* collection,
+ bool includingIdIndex) override;
- Status dropIndex(OperationContext* opCtx, const IndexDescriptor* desc) override;
+ Status dropIndex(OperationContext* opCtx,
+ Collection* collection,
+ const IndexDescriptor* desc) override;
Status dropUnfinishedIndex(OperationContext* const opCtx,
+ Collection* collection,
const IndexDescriptor* const desc) override;
- Status dropIndexEntry(OperationContext* opCtx, IndexCatalogEntry* entry) override;
+ Status dropIndexEntry(OperationContext* opCtx,
+ Collection* collection,
+ IndexCatalogEntry* entry) override;
- void deleteIndexFromDisk(OperationContext* opCtx, const std::string& indexName) override;
+ void deleteIndexFromDisk(OperationContext* opCtx,
+ Collection* collection,
+ const std::string& indexName) override;
struct IndexKillCriteria {
std::string ns;
@@ -254,7 +259,7 @@ public:
Status indexRecords(OperationContext* opCtx,
const CollectionPtr& coll,
const std::vector<BsonRecord>& bsonRecords,
- int64_t* keysInsertedOut) override;
+ int64_t* keysInsertedOut) const override;
/**
* See IndexCatalog::updateRecord
@@ -265,16 +270,17 @@ public:
const BSONObj& newDoc,
const RecordId& recordId,
int64_t* const keysInsertedOut,
- int64_t* const keysDeletedOut) override;
+ int64_t* const keysDeletedOut) const override;
/**
* When 'keysDeletedOut' is not null, it will be set to the number of index keys removed by
* this operation.
*/
void unindexRecord(OperationContext* opCtx,
+ const CollectionPtr& collection,
const BSONObj& obj,
const RecordId& loc,
bool noWarn,
- int64_t* keysDeletedOut) override;
+ int64_t* keysDeletedOut) const override;
Status compactIndexes(OperationContext* opCtx) const override;
@@ -298,7 +304,7 @@ public:
InsertDeleteOptions* options) const override;
void indexBuildSuccess(OperationContext* opCtx,
- const CollectionPtr& collection,
+ Collection* coll,
IndexCatalogEntry* index) override;
private:
@@ -314,56 +320,59 @@ private:
Status _indexKeys(OperationContext* opCtx,
const CollectionPtr& coll,
- IndexCatalogEntry* index,
+ const IndexCatalogEntry* index,
const KeyStringSet& keys,
const KeyStringSet& multikeyMetadataKeys,
const MultikeyPaths& multikeyPaths,
const BSONObj& obj,
RecordId loc,
const InsertDeleteOptions& options,
- int64_t* keysInsertedOut);
+ int64_t* keysInsertedOut) const;
Status _indexFilteredRecords(OperationContext* opCtx,
const CollectionPtr& coll,
- IndexCatalogEntry* index,
+ const IndexCatalogEntry* index,
const std::vector<BsonRecord>& bsonRecords,
- int64_t* keysInsertedOut);
+ int64_t* keysInsertedOut) const;
Status _indexRecords(OperationContext* opCtx,
const CollectionPtr& coll,
- IndexCatalogEntry* index,
+ const IndexCatalogEntry* index,
const std::vector<BsonRecord>& bsonRecords,
- int64_t* keysInsertedOut);
+ int64_t* keysInsertedOut) const;
Status _updateRecord(OperationContext* const opCtx,
const CollectionPtr& coll,
- IndexCatalogEntry* index,
+ const IndexCatalogEntry* index,
const BSONObj& oldDoc,
const BSONObj& newDoc,
const RecordId& recordId,
int64_t* const keysInsertedOut,
- int64_t* const keysDeletedOut);
+ int64_t* const keysDeletedOut) const;
void _unindexKeys(OperationContext* opCtx,
- IndexCatalogEntry* index,
+ const CollectionPtr& collection,
+ const IndexCatalogEntry* index,
const KeyStringSet& keys,
const BSONObj& obj,
RecordId loc,
bool logIfError,
- int64_t* const keysDeletedOut);
+ int64_t* const keysDeletedOut) const;
void _unindexRecord(OperationContext* opCtx,
- IndexCatalogEntry* entry,
+ const CollectionPtr& collection,
+ const IndexCatalogEntry* entry,
const BSONObj& obj,
const RecordId& loc,
bool logIfError,
- int64_t* keysDeletedOut);
+ int64_t* keysDeletedOut) const;
/**
* Helper to remove the index from disk.
* The index should be removed from the in-memory catalog beforehand.
*/
void _deleteIndexFromDisk(OperationContext* opCtx,
+ Collection* collection,
const std::string& indexName,
std::shared_ptr<Ident> ident);
@@ -376,7 +385,9 @@ private:
const CollectionPtr& collection,
const BSONObj& spec) const;
- Status _isSpecOk(OperationContext* opCtx, const BSONObj& spec) const;
+ Status _isSpecOk(OperationContext* opCtx,
+ const CollectionPtr& collection,
+ const BSONObj& spec) const;
/**
* Validates the 'original' index specification, alters any legacy fields and does plugin-level
@@ -384,6 +395,7 @@ private:
* error.
*/
StatusWith<BSONObj> _validateAndFixIndexSpec(OperationContext* opCtx,
+ const CollectionPtr& collection,
const BSONObj& original) const;
/**
@@ -396,6 +408,7 @@ private:
* errors.
*/
Status _doesSpecConflictWithExisting(OperationContext* opCtx,
+ const CollectionPtr& collection,
const BSONObj& spec,
const bool includeUnfinishedIndexes) const;
@@ -407,12 +420,11 @@ private:
Status _isNonIDIndexAndNotAllowedToBuild(OperationContext* opCtx, const BSONObj& spec) const;
void _logInternalState(OperationContext* opCtx,
+ const CollectionPtr& collection,
long long numIndexesInCollectionCatalogEntry,
const std::vector<std::string>& indexNamesToDrop,
bool haveIdIndex);
- Collection* _collection;
-
IndexCatalogEntryContainer _readyIndexes;
IndexCatalogEntryContainer _buildingIndexes;
};
diff --git a/src/mongo/db/catalog/index_signature_test.cpp b/src/mongo/db/catalog/index_signature_test.cpp
index e6bd673f1bb..fcf1d3d0b21 100644
--- a/src/mongo/db/catalog/index_signature_test.cpp
+++ b/src/mongo/db/catalog/index_signature_test.cpp
@@ -49,7 +49,8 @@ public:
WriteUnitOfWork wuow(opCtx());
// Get the index catalog associated with the test collection.
auto* indexCatalog = _coll->getWritableCollection()->getIndexCatalog();
- auto status = indexCatalog->createIndexOnEmptyCollection(opCtx(), spec);
+ auto status = indexCatalog->createIndexOnEmptyCollection(
+ opCtx(), _coll->getWritableCollection(), spec);
if (!status.isOK()) {
return status.getStatus();
}
diff --git a/src/mongo/db/catalog/list_indexes.cpp b/src/mongo/db/catalog/list_indexes.cpp
index 376d90ae9e4..c53f384bc0e 100644
--- a/src/mongo/db/catalog/list_indexes.cpp
+++ b/src/mongo/db/catalog/list_indexes.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/util/uuid.h"
@@ -69,37 +68,29 @@ std::list<BSONObj> listIndexesInLock(OperationContext* opCtx,
boost::optional<bool> includeBuildUUIDs) {
invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IS));
- auto durableCatalog = DurableCatalog::get(opCtx);
-
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&hangBeforeListIndexes, opCtx, "hangBeforeListIndexes", []() {}, nss);
return writeConflictRetry(opCtx, "listIndexes", nss.ns(), [&] {
std::vector<std::string> indexNames;
std::list<BSONObj> indexSpecs;
- durableCatalog->getAllIndexes(opCtx, collection->getCatalogId(), &indexNames);
+ collection->getAllIndexes(&indexNames);
for (size_t i = 0; i < indexNames.size(); i++) {
- if (!includeBuildUUIDs.value_or(false) ||
- durableCatalog->isIndexReady(opCtx, collection->getCatalogId(), indexNames[i])) {
- indexSpecs.push_back(
- durableCatalog->getIndexSpec(opCtx, collection->getCatalogId(), indexNames[i]));
+ if (!includeBuildUUIDs.value_or(false) || collection->isIndexReady(indexNames[i])) {
+ indexSpecs.push_back(collection->getIndexSpec(indexNames[i]));
continue;
}
// The durable catalog will not have a build UUID for the given index name if it was
// not being built with two-phase.
- const auto durableBuildUUID = DurableCatalog::get(opCtx)->getIndexBuildUUID(
- opCtx, collection->getCatalogId(), indexNames[i]);
+ const auto durableBuildUUID = collection->getIndexBuildUUID(indexNames[i]);
if (!durableBuildUUID) {
- indexSpecs.push_back(
- durableCatalog->getIndexSpec(opCtx, collection->getCatalogId(), indexNames[i]));
+ indexSpecs.push_back(collection->getIndexSpec(indexNames[i]));
continue;
}
BSONObjBuilder builder;
- builder.append(
- "spec"_sd,
- durableCatalog->getIndexSpec(opCtx, collection->getCatalogId(), indexNames[i]));
+ builder.append("spec"_sd, collection->getIndexSpec(indexNames[i]));
durableBuildUUID->appendToBuilder(&builder, "buildUUID"_sd);
indexSpecs.push_back(builder.obj());
}
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index fb51a69d4e7..af7e7868243 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -231,7 +231,8 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(
for (size_t i = 0; i < indexSpecs.size(); i++) {
BSONObj info = indexSpecs[i];
StatusWith<BSONObj> statusWithInfo =
- collection->getIndexCatalog()->prepareSpecForCreate(opCtx, info, resumeInfo);
+ collection->getIndexCatalog()->prepareSpecForCreate(
+ opCtx, collection.get(), info, resumeInfo);
Status status = statusWithInfo.getStatus();
if (!status.isOK()) {
// If we were given two identical indexes to build, we will run into an error trying
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 66892ad359e..429f8100455 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -57,7 +57,6 @@
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/server_options.h"
#include "mongo/db/service_context.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/views/view_catalog.h"
#include "mongo/logv2/log.h"
#include "mongo/util/fail_point.h"
@@ -561,8 +560,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
Collection* tmpColl = nullptr;
{
- auto collectionOptions =
- DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, sourceColl->getCatalogId());
+ auto collectionOptions = sourceColl->getCollectionOptions();
// Renaming across databases will result in a new UUID.
collectionOptions.uuid = UUID::gen();
@@ -760,10 +758,7 @@ void doLocalRenameIfOptionsAndIndexesHaveNotChanged(OperationContext* opCtx,
// collection was dropped and recreated, as long as the new target collection has the same
// options and indexes as the original one did. This is mainly to support concurrent $out
// to the same collection.
- collectionOptions = DurableCatalog::get(opCtx)
- ->getCollectionOptions(opCtx, collection->getCatalogId())
- .toBSON()
- .removeField("uuid");
+ collectionOptions = collection->getCollectionOptions().toBSON().removeField("uuid");
}
uassert(ErrorCodes::CommandFailed,
diff --git a/src/mongo/db/catalog/rename_collection_test.cpp b/src/mongo/db/catalog/rename_collection_test.cpp
index 35eacf51294..39bb83b448e 100644
--- a/src/mongo/db/catalog/rename_collection_test.cpp
+++ b/src/mongo/db/catalog/rename_collection_test.cpp
@@ -57,7 +57,6 @@
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/repl/storage_interface_mock.h"
#include "mongo/db/service_context_d_test_fixture.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/unittest/death_test.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
@@ -424,7 +423,7 @@ CollectionOptions _getCollectionOptions(OperationContext* opCtx, const Namespace
AutoGetCollectionForRead collection(opCtx, nss);
ASSERT_TRUE(collection) << "Unable to get collections options for " << nss
<< " because collection does not exist.";
- return DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, collection->getCatalogId());
+ return collection->getCollectionOptions();
}
/**
@@ -472,7 +471,10 @@ void _createIndexOnEmptyCollection(OperationContext* opCtx,
WriteUnitOfWork wuow(opCtx);
auto indexCatalog = collection.getWritableCollection()->getIndexCatalog();
- ASSERT_OK(indexCatalog->createIndexOnEmptyCollection(opCtx, indexInfoObj).getStatus());
+ ASSERT_OK(indexCatalog
+ ->createIndexOnEmptyCollection(
+ opCtx, collection.getWritableCollection(), indexInfoObj)
+ .getStatus());
wuow.commit();
});
diff --git a/src/mongo/db/catalog/uncommitted_collections.cpp b/src/mongo/db/catalog/uncommitted_collections.cpp
index ad87bce352b..31ace2c192e 100644
--- a/src/mongo/db/catalog/uncommitted_collections.cpp
+++ b/src/mongo/db/catalog/uncommitted_collections.cpp
@@ -33,7 +33,6 @@
#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/catalog/uncommitted_collections.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/util/assert_util.h"
namespace mongo {
diff --git a/src/mongo/db/catalog/uncommitted_multikey.cpp b/src/mongo/db/catalog/uncommitted_multikey.cpp
new file mode 100644
index 00000000000..b7a48769912
--- /dev/null
+++ b/src/mongo/db/catalog/uncommitted_multikey.cpp
@@ -0,0 +1,45 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/catalog/uncommitted_multikey.h"
+#include "mongo/db/operation_context.h"
+
+namespace mongo {
+namespace {
+
+const auto getUncommittedMultikey = OperationContext::declareDecoration<UncommittedMultikey>();
+} // namespace
+
+UncommittedMultikey& UncommittedMultikey::get(OperationContext* opCtx) {
+ return getUncommittedMultikey(opCtx);
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/catalog/uncommitted_multikey.h b/src/mongo/db/catalog/uncommitted_multikey.h
new file mode 100644
index 00000000000..ff10f3f599e
--- /dev/null
+++ b/src/mongo/db/catalog/uncommitted_multikey.h
@@ -0,0 +1,64 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/storage/bson_collection_catalog_entry.h"
+#include <map>
+
+namespace mongo {
+class Collection;
+class UncommittedMultikey {
+public:
+ /**
+     * Map of uncommitted multikey metadata updates, keyed by Collection.
+     * Uncommitted multikey updates are stored as a decoration on the OperationContext. The raw
+     * Collection pointer is safe to use as a key because no concurrent MODE_X writer can clone
+     * the Collection into a new instance while these updates are pending.
+ */
+ using MultikeyMap = std::map<const Collection*, BSONCollectionCatalogEntry::MetaData>;
+
+ std::shared_ptr<MultikeyMap> releaseResources() {
+ return std::move(_resourcesPtr);
+ }
+
+ void receiveResources(std::shared_ptr<MultikeyMap> resources) {
+ _resourcesPtr = std::move(resources);
+ }
+
+ static UncommittedMultikey& get(OperationContext* opCtx);
+
+ std::shared_ptr<MultikeyMap>& resources() {
+ return _resourcesPtr;
+ }
+
+private:
+ std::shared_ptr<MultikeyMap> _resourcesPtr;
+};
+} // namespace mongo
diff --git a/src/mongo/db/catalog/validate_state.cpp b/src/mongo/db/catalog/validate_state.cpp
index 19dc5dbfb3a..3a3a3f1b4cc 100644
--- a/src/mongo/db/catalog/validate_state.cpp
+++ b/src/mongo/db/catalog/validate_state.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/views/view_catalog.h"
#include "mongo/logv2/log.h"
#include "mongo/util/fail_point.h"
diff --git a/src/mongo/db/catalog/validate_state_test.cpp b/src/mongo/db/catalog/validate_state_test.cpp
index ac023d807dd..bedbec3b99e 100644
--- a/src/mongo/db/catalog/validate_state_test.cpp
+++ b/src/mongo/db/catalog/validate_state_test.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/op_observer_impl.h"
#include "mongo/db/op_observer_registry.h"
#include "mongo/db/repl/replication_coordinator.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/snapshot_manager.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h"
#include "mongo/util/fail_point.h"
@@ -143,8 +142,8 @@ void dropIndex(OperationContext* opCtx, const NamespaceString& nss, const std::s
auto indexDescriptor = collection->getIndexCatalog()->findIndexByName(opCtx, indexName);
ASSERT(indexDescriptor);
- ASSERT_OK(
- collection.getWritableCollection()->getIndexCatalog()->dropIndex(opCtx, indexDescriptor));
+ ASSERT_OK(collection.getWritableCollection()->getIndexCatalog()->dropIndex(
+ opCtx, collection.getWritableCollection(), indexDescriptor));
wuow.commit();
}
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 199dcf331ea..e0ff1859e51 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -61,7 +61,6 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/service_context.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/logv2/log.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/fail_point.h"
@@ -294,7 +293,7 @@ void Cloner::_copyIndexes(OperationContext* opCtx,
auto indexCatalog = collection->getIndexCatalog();
auto indexesToBuild = indexCatalog->removeExistingIndexesNoChecks(
- opCtx, {std::begin(from_indexes), std::end(from_indexes)});
+ opCtx, collection.get(), {std::begin(from_indexes), std::end(from_indexes)});
if (indexesToBuild.empty()) {
return;
}
@@ -382,8 +381,7 @@ Status Cloner::_createCollectionsForDb(
// exists on the target, we check if the existing collection's UUID matches
// that of the one we're trying to create. If it does, we treat the create
// as a no-op; if it doesn't match, we return an error.
- auto existingOpts = DurableCatalog::get(opCtx)->getCollectionOptions(
- opCtx, collection->getCatalogId());
+ const auto& existingOpts = collection->getCollectionOptions();
const UUID clonedUUID =
uassertStatusOK(UUID::parse(params.collectionInfo["info"]["uuid"]));
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 74997b4ad5e..25563a57492 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -220,7 +220,7 @@ std::vector<BSONObj> resolveDefaultsAndRemoveExistingIndexes(OperationContext* o
auto normalSpecs = IndexBuildsCoordinator::normalizeIndexSpecs(opCtx, collection, indexSpecs);
return collection->getIndexCatalog()->removeExistingIndexes(
- opCtx, normalSpecs, false /*removeIndexBuildsToo*/);
+ opCtx, collection, normalSpecs, false /*removeIndexBuildsToo*/);
}
/**
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 30b2b1612e7..544709fec9d 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -51,7 +51,6 @@
#include "mongo/db/index_builds_coordinator.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/service_context.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h"
#include "mongo/db/timeseries/timeseries_lookup.h"
#include "mongo/db/vector_clock.h"
@@ -210,8 +209,7 @@ public:
std::vector<std::string> indexNames;
writeConflictRetry(opCtx, "listIndexes", toReIndexNss.ns(), [&] {
indexNames.clear();
- DurableCatalog::get(opCtx)->getAllIndexes(
- opCtx, collection->getCatalogId(), &indexNames);
+ collection->getAllIndexes(&indexNames);
});
all.reserve(indexNames.size());
@@ -219,8 +217,7 @@ public:
for (size_t i = 0; i < indexNames.size(); i++) {
const std::string& name = indexNames[i];
BSONObj spec = writeConflictRetry(opCtx, "getIndexSpec", toReIndexNss.ns(), [&] {
- return DurableCatalog::get(opCtx)->getIndexSpec(
- opCtx, collection->getCatalogId(), name);
+ return collection->getIndexSpec(name);
});
{
@@ -261,7 +258,8 @@ public:
"Uninitialized");
writeConflictRetry(opCtx, "dropAllIndexes", toReIndexNss.ns(), [&] {
WriteUnitOfWork wunit(opCtx);
- collection.getWritableCollection()->getIndexCatalog()->dropAllIndexes(opCtx, true);
+ collection.getWritableCollection()->getIndexCatalog()->dropAllIndexes(
+ opCtx, collection.getWritableCollection(), true);
swIndexesToRebuild =
indexer->init(opCtx, collection, all, MultiIndexBlock::kNoopOnInitFn);
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index c8fa5630521..03c012f06fa 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -61,7 +61,6 @@
#include "mongo/db/query/find_common.h"
#include "mongo/db/query/plan_executor_factory.h"
#include "mongo/db/service_context.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/db/views/view_catalog.h"
@@ -192,19 +191,17 @@ BSONObj buildCollectionBson(OperationContext* opCtx,
return b.obj();
}
- CollectionOptions options =
- DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, collection->getCatalogId());
+ const auto& options = collection->getCollectionOptions();
// While the UUID is stored as a collection option, from the user's perspective it is an
- // unsettable read-only property, so put it in the 'info' section.
- auto uuid = options.uuid;
- options.uuid.reset();
- b.append("options", options.toBSON());
+ // unsettable read-only property, so put it in the 'info' section. Pass 'false' to toBSON so it
+ // doesn't include 'uuid' here.
+ b.append("options", options.toBSON(false));
BSONObjBuilder infoBuilder;
infoBuilder.append("readOnly", storageGlobalParams.readOnly);
- if (uuid)
- infoBuilder.appendElements(uuid->toBSON());
+ if (options.uuid)
+ infoBuilder.appendElements(options.uuid->toBSON());
b.append("info", infoBuilder.obj());
auto idIndex = collection->getIndexCatalog()->findIdIndex(opCtx);
diff --git a/src/mongo/db/commands/mr_test.cpp b/src/mongo/db/commands/mr_test.cpp
index eaada81ab68..1dd86a06946 100644
--- a/src/mongo/db/commands/mr_test.cpp
+++ b/src/mongo/db/commands/mr_test.cpp
@@ -537,7 +537,7 @@ TEST_F(MapReduceCommandTest, ReplacingExistingOutputCollectionPreservesIndexes)
WriteUnitOfWork wuow(_opCtx.get());
ASSERT_OK(
coll.getWritableCollection()->getIndexCatalog()->createIndexOnEmptyCollection(
- _opCtx.get(), indexSpec));
+ _opCtx.get(), coll.getWritableCollection(), indexSpec));
wuow.commit();
});
}
diff --git a/src/mongo/db/commands/resize_oplog.cpp b/src/mongo/db/commands/resize_oplog.cpp
index e8a54b818e8..5476c4ea249 100644
--- a/src/mongo/db/commands/resize_oplog.cpp
+++ b/src/mongo/db/commands/resize_oplog.cpp
@@ -98,8 +98,6 @@ public:
if (auto sizeMB = params.getSize()) {
const long long sizeBytes = *sizeMB * 1024 * 1024;
uassertStatusOK(coll.getWritableCollection()->updateCappedSize(opCtx, sizeBytes));
- DurableCatalog::get(opCtx)->updateCappedSize(
- opCtx, coll->getCatalogId(), sizeBytes);
}
if (auto minRetentionHoursOpt = params.getMinRetentionHours()) {
@@ -109,9 +107,7 @@ public:
LOGV2(20497,
"replSetResizeOplog success",
- "size"_attr = DurableCatalog::get(opCtx)
- ->getCollectionOptions(opCtx, coll->getCatalogId())
- .cappedSize,
+ "size"_attr = coll->getCollectionOptions().cappedSize,
"minRetentionHours"_attr = storageGlobalParams.oplogMinRetentionHours.load());
return true;
});
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index bdde2c3156a..6e2889eb741 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -36,7 +36,6 @@
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/query/internal_plans.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/logv2/log.h"
#include "mongo/util/fail_point.h"
diff --git a/src/mongo/db/commands/write_commands.cpp b/src/mongo/db/commands/write_commands.cpp
index 04efea03dc5..2e3caeb78dd 100644
--- a/src/mongo/db/commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands.cpp
@@ -63,7 +63,6 @@
#include "mongo/db/retryable_writes_stats.h"
#include "mongo/db/stats/counters.h"
#include "mongo/db/storage/duplicate_key_error_info.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/timeseries/bucket_catalog.h"
#include "mongo/db/transaction_participant.h"
#include "mongo/db/views/view_catalog.h"
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index 57a609f1afd..b906b593150 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -50,7 +50,6 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/repl/timestamp_block.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/execution_context.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/logv2/log.h"
@@ -108,7 +107,7 @@ struct BtreeExternalSortComparison {
}
};
-AbstractIndexAccessMethod::AbstractIndexAccessMethod(IndexCatalogEntry* btreeState,
+AbstractIndexAccessMethod::AbstractIndexAccessMethod(const IndexCatalogEntry* btreeState,
std::unique_ptr<SortedDataInterface> btree)
: _indexCatalogEntry(btreeState),
_descriptor(btreeState->descriptor()),
@@ -377,7 +376,7 @@ pair<KeyStringSet, KeyStringSet> AbstractIndexAccessMethod::setDifference(
}
void AbstractIndexAccessMethod::prepareUpdate(OperationContext* opCtx,
- IndexCatalogEntry* index,
+ const IndexCatalogEntry* index,
const BSONObj& from,
const BSONObj& to,
const RecordId& record,
@@ -476,11 +475,11 @@ Status AbstractIndexAccessMethod::compact(OperationContext* opCtx) {
class AbstractIndexAccessMethod::BulkBuilderImpl : public IndexAccessMethod::BulkBuilder {
public:
- BulkBuilderImpl(IndexCatalogEntry* indexCatalogEntry,
+ BulkBuilderImpl(const IndexCatalogEntry* indexCatalogEntry,
size_t maxMemoryUsageBytes,
StringData dbName);
- BulkBuilderImpl(IndexCatalogEntry* index,
+ BulkBuilderImpl(const IndexCatalogEntry* index,
size_t maxMemoryUsageBytes,
const IndexStateInfo& stateInfo,
StringData dbName);
@@ -515,7 +514,7 @@ private:
Sorter::Settings _makeSorterSettings() const;
- IndexCatalogEntry* _indexCatalogEntry;
+ const IndexCatalogEntry* _indexCatalogEntry;
std::unique_ptr<Sorter> _sorter;
int64_t _keysInserted = 0;
@@ -542,12 +541,12 @@ std::unique_ptr<IndexAccessMethod::BulkBuilder> AbstractIndexAccessMethod::initi
: std::make_unique<BulkBuilderImpl>(_indexCatalogEntry, maxMemoryUsageBytes, dbName);
}
-AbstractIndexAccessMethod::BulkBuilderImpl::BulkBuilderImpl(IndexCatalogEntry* index,
+AbstractIndexAccessMethod::BulkBuilderImpl::BulkBuilderImpl(const IndexCatalogEntry* index,
size_t maxMemoryUsageBytes,
StringData dbName)
: _indexCatalogEntry(index), _sorter(_makeSorter(maxMemoryUsageBytes, dbName)) {}
-AbstractIndexAccessMethod::BulkBuilderImpl::BulkBuilderImpl(IndexCatalogEntry* index,
+AbstractIndexAccessMethod::BulkBuilderImpl::BulkBuilderImpl(const IndexCatalogEntry* index,
size_t maxMemoryUsageBytes,
const IndexStateInfo& stateInfo,
StringData dbName)
diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h
index 5e3c9376ea7..417af5798b0 100644
--- a/src/mongo/db/index/index_access_method.h
+++ b/src/mongo/db/index/index_access_method.h
@@ -138,7 +138,7 @@ public:
* Provides a ticket for actually performing the update.
*/
virtual void prepareUpdate(OperationContext* opCtx,
- IndexCatalogEntry* index,
+ const IndexCatalogEntry* index,
const BSONObj& from,
const BSONObj& to,
const RecordId& loc,
@@ -454,7 +454,7 @@ public:
static std::pair<KeyStringSet, KeyStringSet> setDifference(const KeyStringSet& left,
const KeyStringSet& right);
- AbstractIndexAccessMethod(IndexCatalogEntry* btreeState,
+ AbstractIndexAccessMethod(const IndexCatalogEntry* btreeState,
std::unique_ptr<SortedDataInterface> btree);
Status insert(OperationContext* opCtx,
@@ -490,7 +490,7 @@ public:
int64_t* numDeleted) final;
void prepareUpdate(OperationContext* opCtx,
- IndexCatalogEntry* index,
+ const IndexCatalogEntry* index,
const BSONObj& from,
const BSONObj& to,
const RecordId& loc,
@@ -579,7 +579,7 @@ protected:
MultikeyPaths* multikeyPaths,
boost::optional<RecordId> id) const = 0;
- IndexCatalogEntry* const _indexCatalogEntry; // owned by IndexCatalog
+ const IndexCatalogEntry* const _indexCatalogEntry; // owned by IndexCatalog
const IndexDescriptor* const _descriptor;
private:
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index ba9d2d46a56..c3cd97d8853 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -546,7 +546,8 @@ Status IndexBuildsCoordinator::_startIndexBuildForRecovery(OperationContext* opC
auto descriptor =
indexCatalog->findIndexByName(opCtx, indexNames[i], includeUnfinished);
if (descriptor) {
- Status s = indexCatalog->dropIndex(opCtx, descriptor);
+ Status s =
+ indexCatalog->dropIndex(opCtx, collection.getWritableCollection(), descriptor);
if (!s.isOK()) {
return s;
}
@@ -556,8 +557,7 @@ Status IndexBuildsCoordinator::_startIndexBuildForRecovery(OperationContext* opC
// If the index is not present in the catalog, then we are trying to drop an already
// aborted index. This may happen when rollback-via-refetch restarts an index build
// after an abort has been rolled back.
- if (!DurableCatalog::get(opCtx)->isIndexPresent(
- opCtx, collection->getCatalogId(), indexNames[i])) {
+ if (!collection->isIndexPresent(indexNames[i])) {
LOGV2(20652,
"An index was not found in the catalog while trying to drop the index during "
"recovery",
@@ -566,8 +566,7 @@ Status IndexBuildsCoordinator::_startIndexBuildForRecovery(OperationContext* opC
continue;
}
- const auto durableBuildUUID = DurableCatalog::get(opCtx)->getIndexBuildUUID(
- opCtx, collection->getCatalogId(), indexNames[i]);
+ const auto durableBuildUUID = collection->getIndexBuildUUID(indexNames[i]);
// A build UUID is present if and only if we are rebuilding a two-phase build.
invariant((protocol == IndexBuildProtocol::kTwoPhase) ==
@@ -584,7 +583,8 @@ Status IndexBuildsCoordinator::_startIndexBuildForRecovery(OperationContext* opC
includeUnfinished = true;
descriptor = indexCatalog->findIndexByName(opCtx, indexNames[i], includeUnfinished);
if (descriptor) {
- Status s = indexCatalog->dropUnfinishedIndex(opCtx, descriptor);
+ Status s = indexCatalog->dropUnfinishedIndex(
+ opCtx, collection.getWritableCollection(), descriptor);
if (!s.isOK()) {
return s;
}
@@ -592,12 +592,8 @@ Status IndexBuildsCoordinator::_startIndexBuildForRecovery(OperationContext* opC
// There are no concurrent users of the index during startup recovery, so it is OK
// to pass in a nullptr for the index 'ident', promising that the index is not in
// use.
- catalog::removeIndex(opCtx,
- indexNames[i],
- collection->getCatalogId(),
- collection->uuid(),
- collection->ns(),
- nullptr /* ident */);
+ catalog::removeIndex(
+ opCtx, indexNames[i], collection.getWritableCollection(), nullptr /* ident */);
}
}
@@ -664,10 +660,9 @@ Status IndexBuildsCoordinator::_setUpResumeIndexBuild(OperationContext* opCtx,
// Check that the information in the durable catalog matches the resume info.
uassert(4841702,
"Index not found in durable catalog while attempting to resume index build",
- durableCatalog->isIndexPresent(opCtx, collection->getCatalogId(), indexName));
+ collection->isIndexPresent(indexName));
- const auto durableBuildUUID =
- durableCatalog->getIndexBuildUUID(opCtx, collection->getCatalogId(), indexName);
+ const auto durableBuildUUID = collection->getIndexBuildUUID(indexName);
uassert(ErrorCodes::IndexNotFound,
str::stream() << "Cannot resume index build with a buildUUID: " << buildUUID
<< " that did not match the buildUUID in the durable catalog: "
@@ -682,8 +677,7 @@ Status IndexBuildsCoordinator::_setUpResumeIndexBuild(OperationContext* opCtx,
<< indexName,
indexIdent.size() > 0);
- uassertStatusOK(durableCatalog->checkMetaDataForIndex(
- opCtx, collection->getCatalogId(), indexName, spec));
+ uassertStatusOK(collection->checkMetaDataForIndex(indexName, spec));
}
if (!collection->isInitialized()) {
@@ -859,7 +853,8 @@ void IndexBuildsCoordinator::applyStartIndexBuild(OperationContext* opCtx,
!name.empty());
if (auto desc = indexCatalog->findIndexByName(opCtx, name, includeUnfinished)) {
- uassertStatusOK(indexCatalog->dropIndex(opCtx, desc));
+ uassertStatusOK(
+ indexCatalog->dropIndex(opCtx, coll.getWritableCollection(), desc));
}
}
@@ -1629,7 +1624,8 @@ void IndexBuildsCoordinator::createIndexesOnEmptyCollection(OperationContext* op
// Each index will be added to the mdb catalog using the preceding createIndexes
// timestamp.
opObserver->onCreateIndex(opCtx, nss, collectionUUID, spec, fromMigrate);
- uassertStatusOK(indexCatalog->createIndexOnEmptyCollection(opCtx, spec));
+ uassertStatusOK(indexCatalog->createIndexOnEmptyCollection(
+ opCtx, collection.getWritableCollection(), spec));
}
}
@@ -2756,8 +2752,8 @@ std::vector<BSONObj> IndexBuildsCoordinator::prepareSpecListForCreate(
// Remove any index specifications which already exist in the catalog.
auto indexCatalog = collection->getIndexCatalog();
- auto resultSpecs =
- indexCatalog->removeExistingIndexes(opCtx, normalSpecs, true /*removeIndexBuildsToo*/);
+ auto resultSpecs = indexCatalog->removeExistingIndexes(
+ opCtx, collection, normalSpecs, true /*removeIndexBuildsToo*/);
// Verify that each spec is compatible with the collection's sharding state.
for (const BSONObj& spec : resultSpecs) {
diff --git a/src/mongo/db/index_builds_coordinator.h b/src/mongo/db/index_builds_coordinator.h
index cf73da60d78..a899477ca17 100644
--- a/src/mongo/db/index_builds_coordinator.h
+++ b/src/mongo/db/index_builds_coordinator.h
@@ -48,7 +48,6 @@
#include "mongo/db/repl/oplog_entry.h"
#include "mongo/db/repl_index_build_state.h"
#include "mongo/db/resumable_index_builds_gen.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/executor/task_executor.h"
#include "mongo/executor/thread_pool_task_executor.h"
#include "mongo/platform/mutex.h"
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index 22fa6f66d03..d1267aa5768 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -63,7 +63,6 @@
#include "mongo/db/s/resharding_util.h"
#include "mongo/db/server_options.h"
#include "mongo/db/session_catalog_mongod.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/timeseries/bucket_catalog.h"
#include "mongo/db/transaction_participant.h"
#include "mongo/db/transaction_participant_gen.h"
@@ -896,7 +895,6 @@ void OpObserverImpl::onCollMod(OperationContext* opCtx,
CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss);
invariant(coll->uuid() == uuid);
- invariant(DurableCatalog::get(opCtx)->isEqualToMetadataUUID(opCtx, coll->getCatalogId(), uuid));
}
void OpObserverImpl::onDropDatabase(OperationContext* opCtx, const std::string& dbName) {
diff --git a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
index 719d242dd1a..95f4e91e925 100644
--- a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
@@ -65,7 +65,6 @@
#include "mongo/db/stats/fill_locker_info.h"
#include "mongo/db/stats/storage_stats.h"
#include "mongo/db/storage/backup_cursor_hooks.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/transaction_history_iterator.h"
#include "mongo/db/transaction_participant.h"
#include "mongo/logv2/log.h"
@@ -285,9 +284,7 @@ BSONObj CommonMongodProcessInterface::getCollectionOptions(OperationContext* opC
return collectionOptions;
}
- collectionOptions = DurableCatalog::get(opCtx)
- ->getCollectionOptions(opCtx, collection->getCatalogId())
- .toBSON();
+ collectionOptions = collection->getCollectionOptions().toBSON();
return collectionOptions;
}
diff --git a/src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.cpp b/src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.cpp
index 26ae6cda451..3dc53e3e900 100644
--- a/src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.cpp
@@ -132,7 +132,7 @@ void NonShardServerProcessInterface::createIndexesOnEmptyCollection(
// primary.
auto removeIndexBuildsToo = false;
auto filteredIndexes = collection->getIndexCatalog()->removeExistingIndexes(
- opCtx, indexSpecs, removeIndexBuildsToo);
+ opCtx, collection.get(), indexSpecs, removeIndexBuildsToo);
if (filteredIndexes.empty()) {
return;
}
diff --git a/src/mongo/db/rebuild_indexes.cpp b/src/mongo/db/rebuild_indexes.cpp
index 52962679162..c2986396d6a 100644
--- a/src/mongo/db/rebuild_indexes.cpp
+++ b/src/mongo/db/rebuild_indexes.cpp
@@ -40,20 +40,17 @@
#include "mongo/db/catalog/index_key_validate.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index_builds_coordinator.h"
-#include "mongo/db/storage/durable_catalog.h"
namespace mongo {
-StatusWith<IndexNameObjs> getIndexNameObjs(OperationContext* opCtx,
- RecordId catalogId,
+StatusWith<IndexNameObjs> getIndexNameObjs(const CollectionPtr& collection,
std::function<bool(const std::string&)> filter) {
IndexNameObjs ret;
std::vector<std::string>& indexNames = ret.first;
std::vector<BSONObj>& indexSpecs = ret.second;
- auto durableCatalog = DurableCatalog::get(opCtx);
{
// Fetch all indexes
- durableCatalog->getAllIndexes(opCtx, catalogId, &indexNames);
+ collection->getAllIndexes(&indexNames);
auto newEnd =
std::remove_if(indexNames.begin(),
indexNames.end(),
@@ -64,7 +61,7 @@ StatusWith<IndexNameObjs> getIndexNameObjs(OperationContext* opCtx,
for (const auto& name : indexNames) {
- BSONObj spec = durableCatalog->getIndexSpec(opCtx, catalogId, name);
+ BSONObj spec = collection->getIndexSpec(name);
using IndexVersion = IndexDescriptor::IndexVersion;
IndexVersion indexVersion = IndexVersion::kV1;
if (auto indexVersionElem = spec[IndexDescriptor::kIndexVersionFieldName]) {
diff --git a/src/mongo/db/rebuild_indexes.h b/src/mongo/db/rebuild_indexes.h
index ea6c0cc2a94..239781a4817 100644
--- a/src/mongo/db/rebuild_indexes.h
+++ b/src/mongo/db/rebuild_indexes.h
@@ -49,8 +49,7 @@ typedef std::pair<std::vector<std::string>, std::vector<BSONObj>> IndexNameObjs;
* @param filter is a predicate that is passed in an index name, returning true if the index
* should be included in the result.
*/
-StatusWith<IndexNameObjs> getIndexNameObjs(OperationContext* opCtx,
- RecordId catalogId,
+StatusWith<IndexNameObjs> getIndexNameObjs(const CollectionPtr& collection,
std::function<bool(const std::string&)> filter =
[](const std::string& indexName) { return true; });
diff --git a/src/mongo/db/repair.cpp b/src/mongo/db/repair.cpp
index c58c300a234..700346f8eac 100644
--- a/src/mongo/db/repair.cpp
+++ b/src/mongo/db/repair.cpp
@@ -56,7 +56,6 @@
#include "mongo/db/query/query_knobs_gen.h"
#include "mongo/db/rebuild_indexes.h"
#include "mongo/db/repl_set_member_in_standalone_mode.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/storage/storage_repair_observer.h"
#include "mongo/db/storage/storage_util.h"
@@ -73,7 +72,7 @@ Status rebuildIndexesForNamespace(OperationContext* opCtx,
StorageEngine* engine) {
opCtx->checkForInterrupt();
auto collection = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss);
- auto swIndexNameObjs = getIndexNameObjs(opCtx, collection->getCatalogId());
+ auto swIndexNameObjs = getIndexNameObjs(collection);
if (!swIndexNameObjs.isOK())
return swIndexNameObjs.getStatus();
@@ -87,12 +86,11 @@ Status rebuildIndexesForNamespace(OperationContext* opCtx,
}
namespace {
-Status dropUnfinishedIndexes(OperationContext* opCtx, const CollectionPtr& collection) {
+Status dropUnfinishedIndexes(OperationContext* opCtx, Collection* collection) {
std::vector<std::string> indexNames;
- auto durableCatalog = DurableCatalog::get(opCtx);
- durableCatalog->getAllIndexes(opCtx, collection->getCatalogId(), &indexNames);
+ collection->getAllIndexes(&indexNames);
for (const auto& indexName : indexNames) {
- if (!durableCatalog->isIndexReady(opCtx, collection->getCatalogId(), indexName)) {
+ if (!collection->isIndexReady(indexName)) {
LOGV2(3871400,
"Dropping unfinished index '{name}' after collection was modified by "
"repair",
@@ -102,12 +100,7 @@ Status dropUnfinishedIndexes(OperationContext* opCtx, const CollectionPtr& colle
WriteUnitOfWork wuow(opCtx);
// There are no concurrent users of the index while --repair is running, so it is OK to
// pass in a nullptr for the index 'ident', promising that the index is not in use.
- catalog::removeIndex(opCtx,
- indexName,
- collection->getCatalogId(),
- collection->uuid(),
- collection->ns(),
- nullptr /*ident */);
+ catalog::removeIndex(opCtx, indexName, collection, nullptr /*ident */);
wuow.commit();
StorageRepairObserver::get(opCtx->getServiceContext())
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
index 903f2d3e692..ed2c1aa4233 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
@@ -87,8 +87,8 @@ Status CollectionBulkLoaderImpl::init(const std::vector<BSONObj>& secondaryIndex
// This enforces the buildIndexes setting in the replica set configuration.
CollectionWriter collWriter(*_collection);
auto indexCatalog = collWriter.getWritableCollection()->getIndexCatalog();
- auto specs =
- indexCatalog->removeExistingIndexesNoChecks(_opCtx.get(), secondaryIndexSpecs);
+ auto specs = indexCatalog->removeExistingIndexesNoChecks(
+ _opCtx.get(), collWriter.get(), secondaryIndexSpecs);
if (specs.size()) {
_secondaryIndexesBlock->ignoreUniqueConstraint();
auto status =
diff --git a/src/mongo/db/repl/dbcheck.cpp b/src/mongo/db/repl/dbcheck.cpp
index cd46bf05173..3a2f7d0db1f 100644
--- a/src/mongo/db/repl/dbcheck.cpp
+++ b/src/mongo/db/repl/dbcheck.cpp
@@ -43,7 +43,6 @@
#include "mongo/db/repl/dbcheck_gen.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/optime.h"
-#include "mongo/db/storage/durable_catalog.h"
namespace mongo {
@@ -355,12 +354,11 @@ std::vector<BSONObj> collectionIndexInfo(OperationContext* opCtx, const Collecti
std::vector<std::string> names;
// List the indices,
- auto durableCatalog = DurableCatalog::get(opCtx);
- durableCatalog->getAllIndexes(opCtx, collection->getCatalogId(), &names);
+ collection->getAllIndexes(&names);
// and get the info for each one.
for (const auto& name : names) {
- result.push_back(durableCatalog->getIndexSpec(opCtx, collection->getCatalogId(), name));
+ result.push_back(collection->getIndexSpec(name));
}
auto comp = std::make_unique<SimpleBSONObjComparator>();
@@ -371,9 +369,7 @@ std::vector<BSONObj> collectionIndexInfo(OperationContext* opCtx, const Collecti
}
BSONObj collectionOptions(OperationContext* opCtx, const CollectionPtr& collection) {
- return DurableCatalog::get(opCtx)
- ->getCollectionOptions(opCtx, collection->getCatalogId())
- .toBSON();
+ return collection->getCollectionOptions().toBSON();
}
AutoGetDbForDbCheck::AutoGetDbForDbCheck(OperationContext* opCtx, const NamespaceString& nss)
diff --git a/src/mongo/db/repl/idempotency_test_fixture.cpp b/src/mongo/db/repl/idempotency_test_fixture.cpp
index f265fecf08f..4a0891546f6 100644
--- a/src/mongo/db/repl/idempotency_test_fixture.cpp
+++ b/src/mongo/db/repl/idempotency_test_fixture.cpp
@@ -60,7 +60,6 @@
#include "mongo/db/repl/replication_consistency_markers_mock.h"
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/repl/storage_interface.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/util/md5.hpp"
namespace mongo {
@@ -415,15 +414,12 @@ CollectionState IdempotencyTest::validate(const NamespaceString& nss) {
std::string dataHash = computeDataHash(collection.getCollection());
- auto durableCatalog = DurableCatalog::get(_opCtx.get());
- auto collectionOptions =
- durableCatalog->getCollectionOptions(_opCtx.get(), collection->getCatalogId());
+ auto collectionOptions = collection->getCollectionOptions();
std::vector<std::string> allIndexes;
BSONObjSet indexSpecs = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
- durableCatalog->getAllIndexes(_opCtx.get(), collection->getCatalogId(), &allIndexes);
+ collection->getAllIndexes(&allIndexes);
for (auto const& index : allIndexes) {
- indexSpecs.insert(
- durableCatalog->getIndexSpec(_opCtx.get(), collection->getCatalogId(), index));
+ indexSpecs.insert(collection->getIndexSpec(index));
}
ASSERT_EQUALS(indexSpecs.size(), allIndexes.size());
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 36f859ce77d..00c24dcc725 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -89,7 +89,6 @@
#include "mongo/db/service_context.h"
#include "mongo/db/stats/counters.h"
#include "mongo/db/stats/server_write_concern_metrics.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/db/transaction_participant.h"
@@ -208,7 +207,8 @@ void createIndexForApplyOps(OperationContext* opCtx,
<< "; normalized index specs: "
<< BSON("normalSpecs" << normalSpecs));
auto indexCatalog = indexCollection->getIndexCatalog();
- auto prepareSpecResult = indexCatalog->prepareSpecForCreate(opCtx, normalSpecs[0], {});
+ auto prepareSpecResult =
+ indexCatalog->prepareSpecForCreate(opCtx, indexCollection, normalSpecs[0], {});
if (ErrorCodes::IndexBuildAlreadyInProgress == prepareSpecResult) {
LOGV2(4924900,
"Index build: already in progress during initial sync",
@@ -661,8 +661,7 @@ void createOplog(OperationContext* opCtx,
if (collection) {
if (replSettings.getOplogSizeBytes() != 0) {
- const CollectionOptions oplogOpts =
- DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, collection->getCatalogId());
+ const CollectionOptions& oplogOpts = collection->getCollectionOptions();
int o = (int)(oplogOpts.cappedSize / (1024 * 1024));
int n = (int)(replSettings.getOplogSizeBytes() / (1024 * 1024));
diff --git a/src/mongo/db/repl/rollback_test_fixture.cpp b/src/mongo/db/repl/rollback_test_fixture.cpp
index 4ae77c557ef..7b90135c649 100644
--- a/src/mongo/db/repl/rollback_test_fixture.cpp
+++ b/src/mongo/db/repl/rollback_test_fixture.cpp
@@ -47,7 +47,6 @@
#include "mongo/db/repl/replication_process.h"
#include "mongo/db/repl/replication_recovery.h"
#include "mongo/db/repl/rs_rollback.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/unittest/log_test.h"
#include "mongo/util/str.h"
@@ -356,9 +355,7 @@ void RollbackResyncsCollectionOptionsTest::resyncCollectionOptionsTest(
// Make sure the collection options are correct.
AutoGetCollectionForReadCommand autoColl(_opCtx.get(), NamespaceString(nss.toString()));
- auto collAfterRollbackOptions =
- DurableCatalog::get(_opCtx.get())
- ->getCollectionOptions(_opCtx.get(), autoColl.getCollection()->getCatalogId());
+ auto collAfterRollbackOptions = autoColl->getCollectionOptions();
BSONObjBuilder expectedOptionsBob;
if (localCollOptions.uuid) {
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 87db2d9fe9f..a7099c5850f 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -74,7 +74,6 @@
#include "mongo/db/s/shard_identity_rollback_notifier.h"
#include "mongo/db/session_catalog_mongod.h"
#include "mongo/db/storage/control/journal_flusher.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/remove_saver.h"
#include "mongo/db/transaction_participant.h"
#include "mongo/logv2/log.h"
@@ -856,9 +855,10 @@ void checkRbidAndUpdateMinValid(OperationContext* opCtx,
* collection.
*/
void dropIndex(OperationContext* opCtx,
- IndexCatalog* indexCatalog,
+ Collection* collection,
const string& indexName,
NamespaceString& nss) {
+ IndexCatalog* indexCatalog = collection->getIndexCatalog();
bool includeUnfinishedIndexes = true;
auto indexDescriptor =
indexCatalog->findIndexByName(opCtx, indexName, includeUnfinishedIndexes);
@@ -872,8 +872,8 @@ void dropIndex(OperationContext* opCtx,
}
auto entry = indexCatalog->getEntry(indexDescriptor);
- if (entry->isReady(opCtx)) {
- auto status = indexCatalog->dropIndex(opCtx, indexDescriptor);
+ if (entry->isReady(opCtx, collection)) {
+ auto status = indexCatalog->dropIndex(opCtx, collection, indexDescriptor);
if (!status.isOK()) {
LOGV2_ERROR(21738,
"Rollback failed to drop index {indexName} in {namespace}: {error}",
@@ -883,7 +883,7 @@ void dropIndex(OperationContext* opCtx,
"error"_attr = redact(status));
}
} else {
- auto status = indexCatalog->dropUnfinishedIndex(opCtx, indexDescriptor);
+ auto status = indexCatalog->dropUnfinishedIndex(opCtx, collection, indexDescriptor);
if (!status.isOK()) {
LOGV2_ERROR(
21739,
@@ -945,7 +945,7 @@ void rollbackCreateIndexes(OperationContext* opCtx, UUID uuid, std::set<std::str
"indexName"_attr = indexName);
WriteUnitOfWork wuow(opCtx);
- dropIndex(opCtx, collection.getWritableCollection()->getIndexCatalog(), indexName, *nss);
+ dropIndex(opCtx, collection.getWritableCollection(), indexName, *nss);
wuow.commit();
LOGV2_DEBUG(21673,
@@ -1620,7 +1620,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
WriteUnitOfWork wuow(opCtx);
// Set collection to whatever temp status is on the sync source.
- DurableCatalog::get(opCtx)->setIsTemp(opCtx, collection->getCatalogId(), options.temp);
+ collection.getWritableCollection()->setIsTemp(opCtx, options.temp);
// Set any document validation options. We update the validator fields without
// parsing/validation, since we fetched the options object directly from the sync
@@ -1645,10 +1645,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
"namespace"_attr = *nss,
"uuid"_attr = uuid,
"info"_attr = redact(info),
- "catalogId"_attr =
- redact(DurableCatalog::get(opCtx)
- ->getCollectionOptions(opCtx, collection->getCatalogId())
- .toBSON()));
+ "catalogId"_attr = redact(collection->getCollectionOptions().toBSON()));
}
// Since we read from the sync source to retrieve the metadata of the
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index b1ed5a19d95..ab900376345 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -58,7 +58,6 @@
#include "mongo/db/repl/rollback_test_fixture.h"
#include "mongo/db/repl/rs_rollback.h"
#include "mongo/db/s/shard_identity_rollback_notifier.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/unittest/death_test.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/net/hostandport.h"
@@ -213,7 +212,7 @@ int _createIndexOnEmptyCollection(OperationContext* opCtx,
Lock::DBLock dbLock(opCtx, nss.db(), MODE_X);
auto indexCatalog = coll->getIndexCatalog();
WriteUnitOfWork wunit(opCtx);
- ASSERT_OK(indexCatalog->createIndexOnEmptyCollection(opCtx, indexSpec).getStatus());
+ ASSERT_OK(indexCatalog->createIndexOnEmptyCollection(opCtx, coll, indexSpec).getStatus());
wunit.commit();
return indexCatalog->numIndexesReady(opCtx);
}
@@ -1358,9 +1357,7 @@ TEST_F(RSRollbackTest, RollbackRenameCollectionInSameDatabaseCommand) {
ASSERT_TRUE(oldCollName.getCollection());
// Remote collection options should have been empty.
- auto collAfterRollbackOptions =
- DurableCatalog::get(_opCtx.get())
- ->getCollectionOptions(_opCtx.get(), oldCollName.getCollection()->getCatalogId());
+ auto collAfterRollbackOptions = oldCollName->getCollectionOptions();
ASSERT_BSONOBJ_EQ(BSON("uuid" << *options.uuid), collAfterRollbackOptions.toBSON());
}
}
@@ -1416,9 +1413,7 @@ TEST_F(RSRollbackTest,
ASSERT_TRUE(rollbackSource.getCollectionInfoCalled);
AutoGetCollectionForReadCommand autoColl(_opCtx.get(), NamespaceString(renameFromNss));
- auto collAfterRollbackOptions =
- DurableCatalog::get(_opCtx.get())
- ->getCollectionOptions(_opCtx.get(), autoColl.getCollection()->getCatalogId());
+ auto collAfterRollbackOptions = autoColl->getCollectionOptions();
ASSERT_TRUE(collAfterRollbackOptions.temp);
ASSERT_BSONOBJ_EQ(BSON("uuid" << *options.uuid << "temp" << true),
collAfterRollbackOptions.toBSON());
@@ -2020,9 +2015,7 @@ TEST_F(RSRollbackTest, RollbackCollectionModificationCommand) {
// Make sure the collection options are correct.
AutoGetCollectionForReadCommand autoColl(_opCtx.get(), NamespaceString("test.t"));
- auto collAfterRollbackOptions =
- DurableCatalog::get(_opCtx.get())
- ->getCollectionOptions(_opCtx.get(), autoColl.getCollection()->getCatalogId());
+ auto collAfterRollbackOptions = autoColl->getCollectionOptions();
ASSERT_BSONOBJ_EQ(BSON("uuid" << *options.uuid), collAfterRollbackOptions.toBSON());
}
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 67e991b944c..352f2799ecf 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -78,7 +78,6 @@
#include "mongo/db/storage/checkpointer.h"
#include "mongo/db/storage/control/journal_flusher.h"
#include "mongo/db/storage/control/storage_control.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/oplog_cap_maintainer_thread.h"
#include "mongo/logv2/log.h"
#include "mongo/util/assert_util.h"
@@ -261,7 +260,8 @@ StorageInterfaceImpl::createCollectionForBulkLoading(
if (!idIndexSpec.isEmpty()) {
auto status = autoColl->getWritableCollection()
->getIndexCatalog()
- ->createIndexOnEmptyCollection(opCtx.get(), idIndexSpec);
+ ->createIndexOnEmptyCollection(
+ opCtx.get(), autoColl->getWritableCollection(), idIndexSpec);
if (!status.getStatus().isOK()) {
return status.getStatus();
}
@@ -269,7 +269,8 @@ StorageInterfaceImpl::createCollectionForBulkLoading(
for (auto&& spec : secondaryIndexSpecs) {
auto status = autoColl->getWritableCollection()
->getIndexCatalog()
- ->createIndexOnEmptyCollection(opCtx.get(), spec);
+ ->createIndexOnEmptyCollection(
+ opCtx.get(), autoColl->getWritableCollection(), spec);
if (!status.getStatus().isOK()) {
return status.getStatus();
}
@@ -454,26 +455,16 @@ Status StorageInterfaceImpl::createOplog(OperationContext* opCtx, const Namespac
}
StatusWith<size_t> StorageInterfaceImpl::getOplogMaxSize(OperationContext* opCtx) {
- // This writeConflictRetry loop protects callers from WriteConflictExceptions thrown by the
- // storage engine running out of cache space, despite this operation not performing any writes.
- return writeConflictRetry(
- opCtx,
- "StorageInterfaceImpl::getOplogMaxSize",
- NamespaceString::kRsOplogNamespace.ns(),
- [&]() -> StatusWith<size_t> {
- AutoGetOplog oplogRead(opCtx, OplogAccessMode::kRead);
- const auto& oplog = oplogRead.getCollection();
- if (!oplog) {
- return {ErrorCodes::NamespaceNotFound, "Your oplog doesn't exist."};
- }
- const auto options =
- DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, oplog->getCatalogId());
- if (!options.capped)
- return {ErrorCodes::BadValue,
- str::stream()
- << NamespaceString::kRsOplogNamespace.ns() << " isn't capped"};
- return options.cappedSize;
- });
+ AutoGetOplog oplogRead(opCtx, OplogAccessMode::kRead);
+ const auto& oplog = oplogRead.getCollection();
+ if (!oplog) {
+ return {ErrorCodes::NamespaceNotFound, "Your oplog doesn't exist."};
+ }
+ const auto options = oplog->getCollectionOptions();
+ if (!options.capped)
+ return {ErrorCodes::BadValue,
+ str::stream() << NamespaceString::kRsOplogNamespace.ns() << " isn't capped"};
+ return options.cappedSize;
}
Status StorageInterfaceImpl::createCollection(OperationContext* opCtx,
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index 5c12b016b12..7b854f41c72 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -52,7 +52,6 @@
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/repl/storage_interface_impl.h"
#include "mongo/db/service_context_d_test_fixture.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/stdx/thread.h"
#include "mongo/transport/transport_layer_mock.h"
#include "mongo/unittest/unittest.h"
@@ -137,7 +136,9 @@ int _createIndexOnEmptyCollection(OperationContext* opCtx, NamespaceString nss,
auto indexCatalog = coll.getWritableCollection()->getIndexCatalog();
ASSERT(indexCatalog);
- ASSERT_OK(indexCatalog->createIndexOnEmptyCollection(opCtx, indexSpec).getStatus());
+ ASSERT_OK(
+ indexCatalog->createIndexOnEmptyCollection(opCtx, coll.getWritableCollection(), indexSpec)
+ .getStatus());
wunit.commit();
return indexCatalog->numIndexesReady(opCtx);
@@ -793,9 +794,7 @@ TEST_F(StorageInterfaceImplTest, RenameCollectionWithStayTempFalseMakesItNotTemp
AutoGetCollectionForReadCommand autoColl2(opCtx, toNss);
ASSERT_TRUE(autoColl2.getCollection());
- ASSERT_FALSE(DurableCatalog::get(opCtx)
- ->getCollectionOptions(opCtx, autoColl2.getCollection()->getCatalogId())
- .temp);
+ ASSERT_FALSE(autoColl2->getCollectionOptions().temp);
}
TEST_F(StorageInterfaceImplTest, RenameCollectionWithStayTempTrueMakesItTemp) {
@@ -814,9 +813,7 @@ TEST_F(StorageInterfaceImplTest, RenameCollectionWithStayTempTrueMakesItTemp) {
AutoGetCollectionForReadCommand autoColl2(opCtx, toNss);
ASSERT_TRUE(autoColl2.getCollection());
- ASSERT_TRUE(DurableCatalog::get(opCtx)
- ->getCollectionOptions(opCtx, autoColl2.getCollection()->getCatalogId())
- .temp);
+ ASSERT_TRUE(autoColl2->getCollectionOptions().temp);
}
TEST_F(StorageInterfaceImplTest, RenameCollectionFailsBetweenDatabases) {
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 584d3791bf2..33b1d94590b 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -808,7 +808,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(
auto checkEmptyOrGetMissingIndexesFromDonor = [&](const CollectionPtr& collection) {
auto indexCatalog = collection->getIndexCatalog();
auto indexSpecs = indexCatalog->removeExistingIndexesNoChecks(
- opCtx, collectionOptionsAndIndexes.indexSpecs);
+ opCtx, collection, collectionOptionsAndIndexes.indexSpecs);
if (!indexSpecs.empty()) {
// Only allow indexes to be copied if the collection does not have any documents.
uassert(ErrorCodes::CannotCreateCollection,
diff --git a/src/mongo/db/s/shard_local.cpp b/src/mongo/db/s/shard_local.cpp
index 873b628ad65..506e21ffbf4 100644
--- a/src/mongo/db/s/shard_local.cpp
+++ b/src/mongo/db/s/shard_local.cpp
@@ -167,6 +167,7 @@ Status ShardLocal::createIndexOnConfig(OperationContext* opCtx,
auto removeIndexBuildsToo = false;
auto indexSpecs = indexCatalog->removeExistingIndexes(
opCtx,
+ CollectionPtr(collection, CollectionPtr::NoYieldTag{}),
uassertStatusOK(
collection->addCollationDefaultsToIndexSpecsForCreate(opCtx, {index.toBSON()})),
removeIndexBuildsToo);
diff --git a/src/mongo/db/startup_recovery.cpp b/src/mongo/db/startup_recovery.cpp
index 74904339b81..d91d484d319 100644
--- a/src/mongo/db/startup_recovery.cpp
+++ b/src/mongo/db/startup_recovery.cpp
@@ -53,7 +53,6 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/repl_set_member_in_standalone_mode.h"
#include "mongo/db/server_options.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/storage_repair_observer.h"
#include "mongo/logv2/log.h"
#include "mongo/util/exit.h"
@@ -148,11 +147,10 @@ Status restoreMissingFeatureCompatibilityVersionDocument(OperationContext* opCtx
* Returns true if the collection associated with the given CollectionCatalogEntry has an index on
* the _id field
*/
-bool checkIdIndexExists(OperationContext* opCtx, RecordId catalogId) {
- auto durableCatalog = DurableCatalog::get(opCtx);
- auto indexCount = durableCatalog->getTotalIndexCount(opCtx, catalogId);
+bool checkIdIndexExists(OperationContext* opCtx, const CollectionPtr& coll) {
+ auto indexCount = coll->getTotalIndexCount();
auto indexNames = std::vector<std::string>(indexCount);
- durableCatalog->getAllIndexes(opCtx, catalogId, &indexNames);
+ coll->getAllIndexes(&indexNames);
for (auto name : indexNames) {
if (name == "_id_") {
@@ -171,7 +169,7 @@ Status buildMissingIdIndex(OperationContext* opCtx, Collection* collection) {
});
const auto indexCatalog = collection->getIndexCatalog();
- const auto idIndexSpec = indexCatalog->getDefaultIdIndexSpec();
+ const auto idIndexSpec = indexCatalog->getDefaultIdIndexSpec(collection);
CollectionWriter collWriter(collection);
auto swSpecs = indexer.init(opCtx, collWriter, idIndexSpec, MultiIndexBlock::kNoopOnInitFn);
@@ -225,15 +223,13 @@ Status ensureCollectionProperties(OperationContext* opCtx,
// All user-created replicated collections created since MongoDB 4.0 have _id indexes.
auto requiresIndex = coll->requiresIdIndex() && coll->ns().isReplicated();
- auto collOptions =
- DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, coll->getCatalogId());
+ const auto& collOptions = coll->getCollectionOptions();
auto hasAutoIndexIdField = collOptions.autoIndexId == CollectionOptions::YES;
// Even if the autoIndexId field is not YES, the collection may still have an _id index
// that was created manually by the user. Check the list of indexes to confirm index
// does not exist before attempting to build it or returning an error.
- if (requiresIndex && !hasAutoIndexIdField &&
- !checkIdIndexExists(opCtx, coll->getCatalogId())) {
+ if (requiresIndex && !hasAutoIndexIdField && !checkIdIndexExists(opCtx, coll)) {
LOGV2(21001,
"collection {coll_ns} is missing an _id index",
"Collection is missing an _id index",
@@ -397,13 +393,13 @@ void reconcileCatalogAndRebuildUnfinishedIndexes(
// Determine which indexes need to be rebuilt. rebuildIndexesOnCollection() requires that all
// indexes on that collection are done at once, so we use a map to group them together.
StringMap<IndexNameObjs> nsToIndexNameObjMap;
+ auto catalog = CollectionCatalog::get(opCtx);
for (auto&& idxIdentifier : reconcileResult.indexesToRebuild) {
NamespaceString collNss = idxIdentifier.nss;
const std::string& indexName = idxIdentifier.indexName;
auto swIndexSpecs =
- getIndexNameObjs(opCtx, idxIdentifier.catalogId, [&indexName](const std::string& name) {
- return name == indexName;
- });
+ getIndexNameObjs(catalog->lookupCollectionByNamespace(opCtx, collNss),
+ [&indexName](const std::string& name) { return name == indexName; });
if (!swIndexSpecs.isOK() || swIndexSpecs.getValue().first.empty()) {
fassert(40590,
{ErrorCodes::InternalError,
@@ -420,7 +416,6 @@ void reconcileCatalogAndRebuildUnfinishedIndexes(
ino.second.emplace_back(std::move(indexesToRebuild.second.back()));
}
- auto catalog = CollectionCatalog::get(opCtx);
for (const auto& entry : nsToIndexNameObjMap) {
NamespaceString collNss(entry.first);
diff --git a/src/mongo/db/storage/SConscript b/src/mongo/db/storage/SConscript
index 710b48e82a6..4a2297efea8 100644
--- a/src/mongo/db/storage/SConscript
+++ b/src/mongo/db/storage/SConscript
@@ -466,6 +466,7 @@ env.CppUnitTest(
LIBDEPS=[
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/db/auth/authmocks',
+ '$BUILD_DIR/mongo/db/catalog/catalog_impl',
'$BUILD_DIR/mongo/db/catalog/catalog_test_fixture',
'$BUILD_DIR/mongo/db/catalog/collection_options',
'$BUILD_DIR/mongo/db/catalog_raii',
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.cpp b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
index e63344740fd..5b949f23fff 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
@@ -160,7 +160,7 @@ bool BSONCollectionCatalogEntry::MetaData::eraseIndex(StringData name) {
return true;
}
-BSONObj BSONCollectionCatalogEntry::MetaData::toBSON() const {
+BSONObj BSONCollectionCatalogEntry::MetaData::toBSON(bool hasExclusiveAccess) const {
BSONObjBuilder b;
b.append("ns", ns);
b.append("options", options.toBSON());
@@ -170,14 +170,20 @@ BSONObj BSONCollectionCatalogEntry::MetaData::toBSON() const {
BSONObjBuilder sub(arr.subobjStart());
sub.append("spec", indexes[i].spec);
sub.appendBool("ready", indexes[i].ready);
- sub.appendBool("multikey", indexes[i].multikey);
-
- if (!indexes[i].multikeyPaths.empty()) {
- BSONObjBuilder subMultikeyPaths(sub.subobjStart("multikeyPaths"));
- appendMultikeyPathsAsBytes(indexes[i].spec.getObjectField("key"),
- indexes[i].multikeyPaths,
- &subMultikeyPaths);
- subMultikeyPaths.doneFast();
+ {
+ stdx::unique_lock lock(indexes[i].multikeyMutex, stdx::defer_lock_t{});
+ if (!hasExclusiveAccess) {
+ lock.lock();
+ }
+ sub.appendBool("multikey", indexes[i].multikey);
+
+ if (!indexes[i].multikeyPaths.empty()) {
+ BSONObjBuilder subMultikeyPaths(sub.subobjStart("multikeyPaths"));
+ appendMultikeyPathsAsBytes(indexes[i].spec.getObjectField("key"),
+ indexes[i].multikeyPaths,
+ &subMultikeyPaths);
+ subMultikeyPaths.doneFast();
+ }
}
sub.append("head", 0ll); // For backward compatibility with 4.0
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.h b/src/mongo/db/storage/bson_collection_catalog_entry.h
index c29e2146074..e8cee07e2f2 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.h
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.h
@@ -61,6 +61,30 @@ public:
struct IndexMetaData {
IndexMetaData() {}
+ IndexMetaData(const IndexMetaData& other)
+ : spec(other.spec),
+ ready(other.ready),
+ isBackgroundSecondaryBuild(other.isBackgroundSecondaryBuild),
+ buildUUID(other.buildUUID) {
+ // We need to hold the multikey mutex when copying; another thread might be modifying this
+ stdx::lock_guard lock(other.multikeyMutex);
+ multikey = other.multikey;
+ multikeyPaths = other.multikeyPaths;
+ }
+
+ IndexMetaData& operator=(IndexMetaData&& rhs) {
+ spec = std::move(rhs.spec);
+ ready = std::move(rhs.ready);
+ isBackgroundSecondaryBuild = std::move(rhs.isBackgroundSecondaryBuild);
+ buildUUID = std::move(rhs.buildUUID);
+
+ // No need to hold the mutex on move; there are no concurrent readers while we're moving
+ // the instance.
+ multikey = std::move(rhs.multikey);
+ multikeyPaths = std::move(rhs.multikeyPaths);
+ return *this;
+ }
+
void updateTTLSetting(long long newExpireSeconds);
void updateHiddenSetting(bool hidden);
@@ -71,7 +95,6 @@ public:
BSONObj spec;
bool ready = false;
- bool multikey = false;
bool isBackgroundSecondaryBuild = false;
// If initialized, a two-phase index build is in progress.
@@ -81,12 +104,20 @@ public:
// the index key pattern. Each element in the vector is an ordered set of positions
// (starting at 0) into the corresponding indexed field that represent what prefixes of the
// indexed field cause the index to be multikey.
- MultikeyPaths multikeyPaths;
+ // multikeyMutex must be held when accessing multikey or multikeyPaths
+ mutable Mutex multikeyMutex;
+ mutable bool multikey = false;
+ mutable MultikeyPaths multikeyPaths;
};
struct MetaData {
void parse(const BSONObj& obj);
- BSONObj toBSON() const;
+
+ /**
+ * If we have exclusive access to this MetaData (i.e. we hold a unique copy), we don't
+ * need to hold mutexes when reading internal data.
+ */
+ BSONObj toBSON(bool hasExclusiveAccess = false) const;
int findIndexOffset(StringData name) const;
diff --git a/src/mongo/db/storage/durable_catalog.h b/src/mongo/db/storage/durable_catalog.h
index f1935e66fad..5d6d24848b7 100644
--- a/src/mongo/db/storage/durable_catalog.h
+++ b/src/mongo/db/storage/durable_catalog.h
@@ -38,7 +38,6 @@
#include "mongo/db/storage/storage_engine.h"
namespace mongo {
-
/**
* An interface to modify the on-disk catalog metadata.
*/
@@ -82,8 +81,8 @@ public:
virtual BSONObj getCatalogEntry(OperationContext* opCtx, RecordId catalogId) const = 0;
- virtual BSONCollectionCatalogEntry::MetaData getMetaData(OperationContext* opCtx,
- RecordId id) const = 0;
+ virtual std::shared_ptr<BSONCollectionCatalogEntry::MetaData> getMetaData(
+ OperationContext* opCtx, RecordId id) const = 0;
/**
* Updates the catalog entry for the collection 'nss' with the fields specified in 'md'. If
@@ -94,14 +93,6 @@ public:
RecordId id,
BSONCollectionCatalogEntry::MetaData& md) = 0;
- /**
- * Checks that the metadata for the index exists and matches the given spec.
- */
- virtual Status checkMetaDataForIndex(OperationContext* opCtx,
- RecordId catalogId,
- const std::string& indexName,
- const BSONObj& spec) = 0;
-
virtual std::vector<std::string> getAllIdents(OperationContext* opCtx) const = 0;
virtual bool isUserDataIdent(StringData ident) const = 0;
@@ -110,6 +101,7 @@ public:
virtual bool isCollectionIdent(StringData ident) const = 0;
+
virtual RecordStore* getRecordStore() = 0;
/**
@@ -143,6 +135,17 @@ public:
const CollectionOptions& options,
bool allocateDefaultSpace) = 0;
+ virtual Status createIndex(OperationContext* opCtx,
+ RecordId catalogId,
+ const CollectionOptions& collOptions,
+ const IndexDescriptor* spec) = 0;
+
+ virtual BSONCollectionCatalogEntry::IndexMetaData prepareIndexMetaDataForIndexBuild(
+ OperationContext* opCtx,
+ const IndexDescriptor* spec,
+ boost::optional<UUID> buildUUID,
+ bool isBackgroundSecondaryBuild) = 0;
+
/**
* Import a collection by inserting the given metadata into the durable catalog and instructing
* the storage engine to import the corresponding idents. The metadata object should be a valid
@@ -175,7 +178,7 @@ public:
virtual Status renameCollection(OperationContext* opCtx,
RecordId catalogId,
const NamespaceString& toNss,
- bool stayTemp) = 0;
+ BSONCollectionCatalogEntry::MetaData& md) = 0;
/**
* Deletes the persisted collection catalog entry identified by 'catalogId'.
@@ -186,105 +189,22 @@ public:
virtual Status dropCollection(OperationContext* opCtx, RecordId catalogId) = 0;
/**
- * Updates size of a capped Collection.
- */
- virtual void updateCappedSize(OperationContext* opCtx, RecordId catalogId, long long size) = 0;
-
- /**
- * Updates the expireAfterSeconds option on the clustered index. If no expireAfterSeconds value
- * is passed in then TTL deletions will be stopped on the clustered index.
- */
- virtual void updateClusteredIndexTTLSetting(OperationContext* opCtx,
- RecordId catalogId,
- boost::optional<int64_t> expireAfterSeconds) = 0;
-
- /*
- * Updates the expireAfterSeconds field of the given index to the value in newExpireSecs.
- * The specified index must already contain an expireAfterSeconds field, and the value in
- * that field and newExpireSecs must both be numeric.
- */
- virtual void updateTTLSetting(OperationContext* opCtx,
- RecordId catalogId,
- StringData idxName,
- long long newExpireSeconds) = 0;
-
- /*
- * Hide or unhide the given index. A hidden index will not be considered for use by the
- * query planner.
- */
- virtual void updateHiddenSetting(OperationContext* opCtx,
- RecordId catalogId,
- StringData idxName,
- bool hidden) = 0;
-
- /**
- * Compares the UUID argument to the UUID obtained from the metadata. Returns true if they are
- * equal, false otherwise.
- */
- virtual bool isEqualToMetadataUUID(OperationContext* opCtx,
- RecordId catalogId,
- const UUID& uuid) = 0;
-
- /**
- * Updates the 'temp' setting for this collection.
- */
- virtual void setIsTemp(OperationContext* opCtx, RecordId catalogId, bool isTemp) = 0;
-
- /**
- * Updates whether updates/deletes should store their pre-images in the opLog.
- */
- virtual void setRecordPreImages(OperationContext* opCtx, RecordId catalogId, bool val) = 0;
-
- /**
- * Updates the validator for this collection.
- *
- * An empty validator removes all validation.
- */
- virtual void updateValidator(OperationContext* opCtx,
- RecordId catalogId,
- const BSONObj& validator,
- boost::optional<ValidationLevelEnum> newLevel,
- boost::optional<ValidationActionEnum> newAction) = 0;
-
- /**
- * Removes the index 'indexName' from the persisted collection catalog entry identified by
- * 'catalogId'.
- */
- virtual void removeIndex(OperationContext* opCtx, RecordId catalogId, StringData indexName) = 0;
-
- /**
- * Updates the persisted catalog entry for 'ns' with the new index and creates the index on
- * disk.
- *
- * A passed 'buildUUID' implies that the index is part of a two-phase index build.
- */
- virtual Status prepareForIndexBuild(OperationContext* opCtx,
- RecordId catalogId,
- const IndexDescriptor* spec,
- boost::optional<UUID> buildUUID,
- bool isBackgroundSecondaryBuild) = 0;
-
- /**
* Drops the provided ident and recreates it as empty for use in resuming an index build.
*/
virtual Status dropAndRecreateIndexIdentForResume(OperationContext* opCtx,
- RecordId catalogId,
+ const CollectionOptions& collOptions,
const IndexDescriptor* spec,
StringData ident) = 0;
- /**
- * Returns a UUID if the index is being built with the two-phase index build procedure.
- */
- virtual boost::optional<UUID> getIndexBuildUUID(OperationContext* opCtx,
- RecordId catalogId,
- StringData indexName) const = 0;
+ virtual int getTotalIndexCount(OperationContext* opCtx, RecordId catalogId) const = 0;
- /**
- * Indicate that an index build is completed and the index is ready to use.
- */
- virtual void indexBuildSuccess(OperationContext* opCtx,
- RecordId catalogId,
- StringData indexName) = 0;
+ virtual bool isIndexPresent(OperationContext* opCtx,
+ RecordId catalogId,
+ StringData indexName) const = 0;
+
+ virtual bool isIndexReady(OperationContext* opCtx,
+ RecordId catalogId,
+ StringData indexName) const = 0;
/**
* Returns true if the index identified by 'indexName' is multikey, and returns false otherwise.
@@ -302,62 +222,6 @@ public:
StringData indexName,
MultikeyPaths* multikeyPaths) const = 0;
- /**
- * Sets the index identified by 'indexName' to be multikey.
- *
- * If 'multikeyPaths' is non-empty, then it must be a vector with size equal to the number of
- * elements in the index key pattern. Additionally, at least one path component of the indexed
- * fields must cause this index to be multikey.
- *
- * This function returns true if the index metadata has changed, and returns false otherwise.
- */
- virtual bool setIndexIsMultikey(OperationContext* opCtx,
- RecordId catalogId,
- StringData indexName,
- const MultikeyPaths& multikeyPaths) = 0;
-
- /**
- * Sets the index to be multikey with the provided paths. This performs minimal validation of
- * the inputs and is intended to be used internally to "correct" multikey metadata that drifts
- * from the underlying collection data.
- *
- * When isMultikey is false, ignores multikeyPaths and resets the metadata appropriately based
- * on the index descriptor. Otherwise, overwrites the existing multikeyPaths with the ones
- * provided. This only writes multikey paths if the index type supports path-level tracking, and
- * only sets the multikey boolean flag otherwise.
- */
- virtual void forceSetIndexIsMultikey(OperationContext* opCtx,
- RecordId catalogId,
- const IndexDescriptor* desc,
- bool isMultikey,
- const MultikeyPaths& multikeyPaths) = 0;
-
- virtual CollectionOptions getCollectionOptions(OperationContext* opCtx,
- RecordId catalogId) const = 0;
-
- virtual int getTotalIndexCount(OperationContext* opCtx, RecordId catalogId) const = 0;
-
- virtual int getCompletedIndexCount(OperationContext* opCtx, RecordId catalogId) const = 0;
-
- virtual BSONObj getIndexSpec(OperationContext* opCtx,
- RecordId catalogId,
- StringData indexName) const = 0;
-
- virtual void getAllIndexes(OperationContext* opCtx,
- RecordId catalogId,
- std::vector<std::string>* names) const = 0;
-
- virtual void getReadyIndexes(OperationContext* opCtx,
- RecordId catalogId,
- std::vector<std::string>* names) const = 0;
-
- virtual bool isIndexPresent(OperationContext* opCtx,
- RecordId catalogId,
- StringData indexName) const = 0;
-
- virtual bool isIndexReady(OperationContext* opCtx,
- RecordId catalogId,
- StringData indexName) const = 0;
virtual void setRand_forTest(const std::string& rand) = 0;
diff --git a/src/mongo/db/storage/durable_catalog_impl.cpp b/src/mongo/db/storage/durable_catalog_impl.cpp
index cbc354ee6e8..86ed33795fa 100644
--- a/src/mongo/db/storage/durable_catalog_impl.cpp
+++ b/src/mongo/db/storage/durable_catalog_impl.cpp
@@ -604,15 +604,16 @@ BSONObj DurableCatalogImpl::_findEntry(OperationContext* opCtx, RecordId catalog
return data.releaseToBson().getOwned();
}
-BSONCollectionCatalogEntry::MetaData DurableCatalogImpl::getMetaData(OperationContext* opCtx,
- RecordId catalogId) const {
+std::shared_ptr<BSONCollectionCatalogEntry::MetaData> DurableCatalogImpl::getMetaData(
+ OperationContext* opCtx, RecordId catalogId) const {
BSONObj obj = _findEntry(opCtx, catalogId);
LOGV2_DEBUG(22209, 3, " fetched CCE metadata: {obj}", "obj"_attr = obj);
- BSONCollectionCatalogEntry::MetaData md;
+ std::shared_ptr<BSONCollectionCatalogEntry::MetaData> md;
const BSONElement mdElement = obj["md"];
if (mdElement.isABSONObj()) {
LOGV2_DEBUG(22210, 3, "returning metadata: {mdElement}", "mdElement"_attr = mdElement);
- md.parse(mdElement.Obj());
+ md = std::make_shared<BSONCollectionCatalogEntry::MetaData>();
+ md->parse(mdElement.Obj());
}
return md;
}
@@ -638,7 +639,7 @@ void DurableCatalogImpl::putMetaData(OperationContext* opCtx,
string name = index.name();
// All indexes with buildUUIDs must be ready:false.
- invariant(!(index.buildUUID && index.ready), str::stream() << md.toBSON());
+ invariant(!(index.buildUUID && index.ready), str::stream() << md.toBSON(true));
// fix ident map
BSONElement e = oldIdentMap[name];
@@ -665,44 +666,15 @@ void DurableCatalogImpl::putMetaData(OperationContext* opCtx,
fassert(28521, status);
}
-Status DurableCatalogImpl::checkMetaDataForIndex(OperationContext* opCtx,
- RecordId catalogId,
- const std::string& indexName,
- const BSONObj& spec) {
- auto md = getMetaData(opCtx, catalogId);
- int offset = md.findIndexOffset(indexName);
- if (offset < 0) {
- return {ErrorCodes::IndexNotFound,
- str::stream() << "Index [" << indexName
- << "] not found in metadata for recordId: " << catalogId};
- }
-
- if (spec.woCompare(md.indexes[offset].spec)) {
- return {ErrorCodes::BadValue,
- str::stream() << "Spec for index [" << indexName
- << "] does not match spec in the metadata for recordId: " << catalogId
- << ". Spec: " << spec
- << " metadata's spec: " << md.indexes[offset].spec};
- }
-
- return Status::OK();
-}
-
Status DurableCatalogImpl::_replaceEntry(OperationContext* opCtx,
RecordId catalogId,
const NamespaceString& toNss,
- bool stayTemp) {
+ BSONCollectionCatalogEntry::MetaData& md) {
BSONObj old = _findEntry(opCtx, catalogId).getOwned();
{
BSONObjBuilder b;
b.append("ns", toNss.ns());
-
- BSONCollectionCatalogEntry::MetaData md;
- md.parse(old["md"].Obj());
- md.ns = toNss.ns();
- if (!stayTemp)
- md.options.temp = false;
b.append("md", md.toBSON());
b.appendElementsUnique(old);
@@ -887,6 +859,21 @@ StatusWith<std::pair<RecordId, std::unique_ptr<RecordStore>>> DurableCatalogImpl
return std::pair<RecordId, std::unique_ptr<RecordStore>>(entry.catalogId, std::move(rs));
}
+Status DurableCatalogImpl::createIndex(OperationContext* opCtx,
+ RecordId catalogId,
+ const CollectionOptions& collOptions,
+ const IndexDescriptor* spec) {
+ std::string ident = getIndexIdent(opCtx, catalogId, spec->indexName());
+
+ auto kvEngine = _engine->getEngine();
+ const Status status = kvEngine->createSortedDataInterface(opCtx, collOptions, ident, spec);
+ if (status.isOK()) {
+ opCtx->recoveryUnit()->registerChange(
+ std::make_unique<AddIndexChange>(opCtx->recoveryUnit(), _engine, ident));
+ }
+ return status;
+}
+
StatusWith<DurableCatalog::ImportResult> DurableCatalogImpl::importCollection(
OperationContext* opCtx,
const NamespaceString& nss,
@@ -996,8 +983,8 @@ StatusWith<DurableCatalog::ImportResult> DurableCatalogImpl::importCollection(
Status DurableCatalogImpl::renameCollection(OperationContext* opCtx,
RecordId catalogId,
const NamespaceString& toNss,
- bool stayTemp) {
- return _replaceEntry(opCtx, catalogId, toNss, stayTemp);
+ BSONCollectionCatalogEntry::MetaData& md) {
+ return _replaceEntry(opCtx, catalogId, toNss, md);
}
Status DurableCatalogImpl::dropCollection(OperationContext* opCtx, RecordId catalogId) {
@@ -1019,102 +1006,11 @@ Status DurableCatalogImpl::dropCollection(OperationContext* opCtx, RecordId cata
return Status::OK();
}
-void DurableCatalogImpl::updateCappedSize(OperationContext* opCtx,
- RecordId catalogId,
- long long size) {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
- md.options.cappedSize = size;
- putMetaData(opCtx, catalogId, md);
-}
-
-void DurableCatalogImpl::updateClusteredIndexTTLSetting(
- OperationContext* opCtx, RecordId catalogId, boost::optional<int64_t> expireAfterSeconds) {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
- uassert(5401000, "The collection doesn't have a clustered index", md.options.clusteredIndex);
-
- md.options.clusteredIndex->setExpireAfterSeconds(expireAfterSeconds);
- putMetaData(opCtx, catalogId, md);
-}
-
-void DurableCatalogImpl::updateTTLSetting(OperationContext* opCtx,
- RecordId catalogId,
- StringData idxName,
- long long newExpireSeconds) {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
- int offset = md.findIndexOffset(idxName);
- invariant(offset >= 0,
- str::stream() << "cannot update TTL setting for index " << idxName << " @ "
- << catalogId << " : " << md.toBSON());
- md.indexes[offset].updateTTLSetting(newExpireSeconds);
- putMetaData(opCtx, catalogId, md);
-}
-
-void DurableCatalogImpl::updateHiddenSetting(OperationContext* opCtx,
- RecordId catalogId,
- StringData idxName,
- bool hidden) {
-
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
- int offset = md.findIndexOffset(idxName);
- invariant(offset >= 0);
- md.indexes[offset].updateHiddenSetting(hidden);
- putMetaData(opCtx, catalogId, md);
-}
-
-
-bool DurableCatalogImpl::isEqualToMetadataUUID(OperationContext* opCtx,
- RecordId catalogId,
- const UUID& uuid) {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
- invariant(md.options.uuid,
- str::stream() << "UUID missing for catalog entry " << catalogId << " : "
- << md.toBSON());
- return *md.options.uuid == uuid;
-}
-
-void DurableCatalogImpl::setIsTemp(OperationContext* opCtx, RecordId catalogId, bool isTemp) {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
- md.options.temp = isTemp;
- putMetaData(opCtx, catalogId, md);
-}
-
-void DurableCatalogImpl::setRecordPreImages(OperationContext* opCtx, RecordId catalogId, bool val) {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
- md.options.recordPreImages = val;
- putMetaData(opCtx, catalogId, md);
-}
-
-void DurableCatalogImpl::updateValidator(OperationContext* opCtx,
- RecordId catalogId,
- const BSONObj& validator,
- boost::optional<ValidationLevelEnum> newLevel,
- boost::optional<ValidationActionEnum> newAction) {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
- md.options.validator = validator;
- md.options.validationLevel = newLevel;
- md.options.validationAction = newAction;
- putMetaData(opCtx, catalogId, md);
-}
-
-void DurableCatalogImpl::removeIndex(OperationContext* opCtx,
- RecordId catalogId,
- StringData indexName) {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
-
- if (md.findIndexOffset(indexName) < 0)
- return; // never had the index so nothing to do.
-
- md.eraseIndex(indexName);
- putMetaData(opCtx, catalogId, md);
-}
-
-Status DurableCatalogImpl::prepareForIndexBuild(OperationContext* opCtx,
- RecordId catalogId,
- const IndexDescriptor* spec,
- boost::optional<UUID> buildUUID,
- bool isBackgroundSecondaryBuild) {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
-
+BSONCollectionCatalogEntry::IndexMetaData DurableCatalogImpl::prepareIndexMetaDataForIndexBuild(
+ OperationContext* opCtx,
+ const IndexDescriptor* spec,
+ boost::optional<UUID> buildUUID,
+ bool isBackgroundSecondaryBuild) {
BSONCollectionCatalogEntry::IndexMetaData imd;
imd.spec = spec->infoObj();
imd.ready = false;
@@ -1132,245 +1028,77 @@ Status DurableCatalogImpl::prepareForIndexBuild(OperationContext* opCtx,
// Mark collation feature as in use if the index has a non-simple collation.
if (imd.spec["collation"]) {
- const auto feature = DurableCatalogImpl::FeatureTracker::NonRepairableFeature::kCollation;
+ const auto feature = FeatureTracker::NonRepairableFeature::kCollation;
if (!getFeatureTracker()->isNonRepairableFeatureInUse(opCtx, feature)) {
getFeatureTracker()->markNonRepairableFeatureAsInUse(opCtx, feature);
}
}
- // Confirm that our index is not already in the current metadata.
- invariant(-1 == md.findIndexOffset(imd.name()));
-
- md.indexes.push_back(imd);
- putMetaData(opCtx, catalogId, md);
-
- string ident = getIndexIdent(opCtx, catalogId, spec->indexName());
-
- auto kvEngine = _engine->getEngine();
- const Status status = kvEngine->createSortedDataInterface(
- opCtx, getCollectionOptions(opCtx, catalogId), ident, spec);
- if (status.isOK()) {
- opCtx->recoveryUnit()->registerChange(
- std::make_unique<AddIndexChange>(opCtx->recoveryUnit(), _engine, ident));
- }
-
- return status;
+ return imd;
}
Status DurableCatalogImpl::dropAndRecreateIndexIdentForResume(OperationContext* opCtx,
- RecordId catalogId,
+ const CollectionOptions& collOptions,
const IndexDescriptor* spec,
StringData ident) {
auto status = _engine->getEngine()->dropSortedDataInterface(opCtx, ident);
if (!status.isOK())
return status;
- status = _engine->getEngine()->createSortedDataInterface(
- opCtx, getCollectionOptions(opCtx, catalogId), ident, spec);
+ status = _engine->getEngine()->createSortedDataInterface(opCtx, collOptions, ident, spec);
return status;
}
-boost::optional<UUID> DurableCatalogImpl::getIndexBuildUUID(OperationContext* opCtx,
- RecordId catalogId,
- StringData indexName) const {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
- int offset = md.findIndexOffset(indexName);
- invariant(offset >= 0,
- str::stream() << "cannot get build UUID for index " << indexName << " @ " << catalogId
- << " : " << md.toBSON());
- return md.indexes[offset].buildUUID;
-}
-
-void DurableCatalogImpl::indexBuildSuccess(OperationContext* opCtx,
- RecordId catalogId,
- StringData indexName) {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
- int offset = md.findIndexOffset(indexName);
- invariant(offset >= 0,
- str::stream() << "cannot mark index " << indexName << " as ready @ " << catalogId
- << " : " << md.toBSON());
- md.indexes[offset].ready = true;
- md.indexes[offset].buildUUID = boost::none;
- putMetaData(opCtx, catalogId, md);
-}
-
bool DurableCatalogImpl::isIndexMultikey(OperationContext* opCtx,
RecordId catalogId,
StringData indexName,
MultikeyPaths* multikeyPaths) const {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
+ auto md = getMetaData(opCtx, catalogId);
- int offset = md.findIndexOffset(indexName);
+ int offset = md->findIndexOffset(indexName);
invariant(offset >= 0,
str::stream() << "cannot get multikey for index " << indexName << " @ " << catalogId
- << " : " << md.toBSON());
-
- if (multikeyPaths && !md.indexes[offset].multikeyPaths.empty()) {
- *multikeyPaths = md.indexes[offset].multikeyPaths;
- }
-
- return md.indexes[offset].multikey;
-}
-
-bool DurableCatalogImpl::setIndexIsMultikey(OperationContext* opCtx,
- RecordId catalogId,
- StringData indexName,
- const MultikeyPaths& multikeyPaths) {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
-
- int offset = md.findIndexOffset(indexName);
- invariant(offset >= 0,
- str::stream() << "cannot set index " << indexName << " as multikey @ " << catalogId
- << " : " << md.toBSON());
-
- const bool tracksPathLevelMultikeyInfo = !md.indexes[offset].multikeyPaths.empty();
- if (tracksPathLevelMultikeyInfo) {
- invariant(!multikeyPaths.empty());
- invariant(multikeyPaths.size() == md.indexes[offset].multikeyPaths.size());
- } else {
- invariant(multikeyPaths.empty());
-
- if (md.indexes[offset].multikey) {
- // The index is already set as multikey and we aren't tracking path-level multikey
- // information for it. We return false to indicate that the index metadata is unchanged.
- return false;
- }
- }
-
- md.indexes[offset].multikey = true;
-
- if (tracksPathLevelMultikeyInfo) {
- bool newPathIsMultikey = false;
- bool somePathIsMultikey = false;
-
- // Store new path components that cause this index to be multikey in catalog's index
- // metadata.
- for (size_t i = 0; i < multikeyPaths.size(); ++i) {
- MultikeyComponents& indexMultikeyComponents = md.indexes[offset].multikeyPaths[i];
- for (const auto multikeyComponent : multikeyPaths[i]) {
- auto result = indexMultikeyComponents.insert(multikeyComponent);
- newPathIsMultikey = newPathIsMultikey || result.second;
- somePathIsMultikey = true;
- }
- }
-
- // If all of the sets in the multikey paths vector were empty, then no component of any
- // indexed field caused the index to be multikey. setIndexIsMultikey() therefore shouldn't
- // have been called.
- invariant(somePathIsMultikey);
-
- if (!newPathIsMultikey) {
- // We return false to indicate that the index metadata is unchanged.
- return false;
- }
- }
-
- putMetaData(opCtx, catalogId, md);
- return true;
-}
+ << " : " << md->toBSON());
-void DurableCatalogImpl::forceSetIndexIsMultikey(OperationContext* opCtx,
- RecordId catalogId,
- const IndexDescriptor* desc,
- bool isMultikey,
- const MultikeyPaths& multikeyPaths) {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
-
- int offset = md.findIndexOffset(desc->indexName());
- invariant(offset >= 0,
- str::stream() << "cannot set index " << desc->indexName() << " multikey state @ "
- << catalogId << " : " << md.toBSON());
-
- md.indexes[offset].multikey = isMultikey;
- if (indexTypeSupportsPathLevelMultikeyTracking(desc->getAccessMethodName())) {
- if (isMultikey) {
- md.indexes[offset].multikeyPaths = multikeyPaths;
- } else {
- md.indexes[offset].multikeyPaths =
- MultikeyPaths{static_cast<size_t>(desc->keyPattern().nFields())};
- }
+ if (multikeyPaths && !md->indexes[offset].multikeyPaths.empty()) {
+ *multikeyPaths = md->indexes[offset].multikeyPaths;
}
- putMetaData(opCtx, catalogId, md);
-}
-
-CollectionOptions DurableCatalogImpl::getCollectionOptions(OperationContext* opCtx,
- RecordId catalogId) const {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
- return md.options;
+ return md->indexes[offset].multikey;
}
int DurableCatalogImpl::getTotalIndexCount(OperationContext* opCtx, RecordId catalogId) const {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
-
- return static_cast<int>(md.indexes.size());
-}
-
-int DurableCatalogImpl::getCompletedIndexCount(OperationContext* opCtx, RecordId catalogId) const {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
-
- int num = 0;
- for (unsigned i = 0; i < md.indexes.size(); i++) {
- if (md.indexes[i].ready)
- num++;
- }
- return num;
-}
-
-BSONObj DurableCatalogImpl::getIndexSpec(OperationContext* opCtx,
- RecordId catalogId,
- StringData indexName) const {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
-
- int offset = md.findIndexOffset(indexName);
- invariant(offset >= 0,
- str::stream() << "cannot get index spec for " << indexName << " @ " << catalogId
- << " : " << md.toBSON());
-
- BSONObj spec = md.indexes[offset].spec.getOwned();
- return spec;
-}
-
-void DurableCatalogImpl::getAllIndexes(OperationContext* opCtx,
- RecordId catalogId,
- std::vector<std::string>* names) const {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
-
- for (unsigned i = 0; i < md.indexes.size(); i++) {
- names->push_back(md.indexes[i].spec["name"].String());
- }
-}
-
-void DurableCatalogImpl::getReadyIndexes(OperationContext* opCtx,
- RecordId catalogId,
- std::vector<std::string>* names) const {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
+ auto md = getMetaData(opCtx, catalogId);
+ if (!md)
+ return 0;
- for (unsigned i = 0; i < md.indexes.size(); i++) {
- if (md.indexes[i].ready)
- names->push_back(md.indexes[i].spec["name"].String());
- }
+ return static_cast<int>(md->indexes.size());
}
bool DurableCatalogImpl::isIndexPresent(OperationContext* opCtx,
RecordId catalogId,
StringData indexName) const {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
- int offset = md.findIndexOffset(indexName);
+ auto md = getMetaData(opCtx, catalogId);
+ if (!md)
+ return false;
+
+ int offset = md->findIndexOffset(indexName);
return offset >= 0;
}
bool DurableCatalogImpl::isIndexReady(OperationContext* opCtx,
RecordId catalogId,
StringData indexName) const {
- BSONCollectionCatalogEntry::MetaData md = getMetaData(opCtx, catalogId);
+ auto md = getMetaData(opCtx, catalogId);
+ if (!md)
+ return false;
- int offset = md.findIndexOffset(indexName);
+ int offset = md->findIndexOffset(indexName);
invariant(offset >= 0,
str::stream() << "cannot get ready status for index " << indexName << " @ "
- << catalogId << " : " << md.toBSON());
- return md.indexes[offset].ready;
+ << catalogId << " : " << md->toBSON());
+ return md->indexes[offset].ready;
}
void DurableCatalogImpl::setRand_forTest(const std::string& rand) {
diff --git a/src/mongo/db/storage/durable_catalog_impl.h b/src/mongo/db/storage/durable_catalog_impl.h
index e28424e4ea3..63a8c9ec062 100644
--- a/src/mongo/db/storage/durable_catalog_impl.h
+++ b/src/mongo/db/storage/durable_catalog_impl.h
@@ -50,7 +50,6 @@ class StorageEngineInterface;
class DurableCatalogImpl : public DurableCatalog {
public:
class FeatureTracker;
-
/**
* The RecordStore must be thread-safe, in particular with concurrent calls to
* RecordStore::find, updateRecord, insertRecord, deleteRecord and dataFor. The
@@ -78,17 +77,12 @@ public:
return _findEntry(opCtx, catalogId);
}
- BSONCollectionCatalogEntry::MetaData getMetaData(OperationContext* opCtx,
- RecordId catalogId) const;
+ std::shared_ptr<BSONCollectionCatalogEntry::MetaData> getMetaData(OperationContext* opCtx,
+ RecordId catalogId) const;
void putMetaData(OperationContext* opCtx,
RecordId catalogId,
BSONCollectionCatalogEntry::MetaData& md);
- Status checkMetaDataForIndex(OperationContext* opCtx,
- RecordId catalogId,
- const std::string& indexName,
- const BSONObj& spec);
-
std::vector<std::string> getAllIdents(OperationContext* opCtx) const;
bool isUserDataIdent(StringData ident) const;
@@ -121,6 +115,17 @@ public:
const CollectionOptions& options,
bool allocateDefaultSpace);
+ Status createIndex(OperationContext* opCtx,
+ RecordId catalogId,
+ const CollectionOptions& collOptions,
+ const IndexDescriptor* spec);
+
+ BSONCollectionCatalogEntry::IndexMetaData prepareIndexMetaDataForIndexBuild(
+ OperationContext* opCtx,
+ const IndexDescriptor* spec,
+ boost::optional<UUID> buildUUID,
+ bool isBackgroundSecondaryBuild);
+
StatusWith<ImportResult> importCollection(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& metadata,
@@ -130,89 +135,22 @@ public:
Status renameCollection(OperationContext* opCtx,
RecordId catalogId,
const NamespaceString& toNss,
- bool stayTemp);
+ BSONCollectionCatalogEntry::MetaData& md);
Status dropCollection(OperationContext* opCtx, RecordId catalogId);
- void updateCappedSize(OperationContext* opCtx, RecordId catalogId, long long size);
-
- void updateClusteredIndexTTLSetting(OperationContext* opCtx,
- RecordId catalogId,
- boost::optional<int64_t> expireAfterSeconds);
-
- void updateTTLSetting(OperationContext* opCtx,
- RecordId catalogId,
- StringData idxName,
- long long newExpireSeconds);
-
- void updateHiddenSetting(OperationContext* opCtx,
- RecordId catalogId,
- StringData idxName,
- bool hidden);
-
- bool isEqualToMetadataUUID(OperationContext* opCtx, RecordId catalogId, const UUID& uuid);
-
- void setIsTemp(OperationContext* opCtx, RecordId catalogId, bool isTemp);
-
- void setRecordPreImages(OperationContext* opCtx, RecordId catalogId, bool val) override;
-
- void updateValidator(OperationContext* opCtx,
- RecordId catalogId,
- const BSONObj& validator,
- boost::optional<ValidationLevelEnum> newLevel,
- boost::optional<ValidationActionEnum> newAction);
-
- void removeIndex(OperationContext* opCtx, RecordId catalogId, StringData indexName);
-
- Status prepareForIndexBuild(OperationContext* opCtx,
- RecordId catalogId,
- const IndexDescriptor* spec,
- boost::optional<UUID> buildUUID,
- bool isBackgroundSecondaryBuild);
-
Status dropAndRecreateIndexIdentForResume(OperationContext* opCtx,
- RecordId catalogId,
+ const CollectionOptions& collOptions,
const IndexDescriptor* spec,
StringData ident);
- boost::optional<UUID> getIndexBuildUUID(OperationContext* opCtx,
- RecordId catalogId,
- StringData indexName) const;
-
- void indexBuildSuccess(OperationContext* opCtx, RecordId catalogId, StringData indexName);
-
bool isIndexMultikey(OperationContext* opCtx,
RecordId catalogId,
StringData indexName,
MultikeyPaths* multikeyPaths) const;
- bool setIndexIsMultikey(OperationContext* opCtx,
- RecordId catalogId,
- StringData indexName,
- const MultikeyPaths& multikeyPaths);
-
- void forceSetIndexIsMultikey(OperationContext* opCtx,
- RecordId catalogId,
- const IndexDescriptor* desc,
- bool isMultikey,
- const MultikeyPaths& multikeyPaths);
-
- CollectionOptions getCollectionOptions(OperationContext* opCtx, RecordId catalogId) const;
-
int getTotalIndexCount(OperationContext* opCtx, RecordId catalogId) const;
- int getCompletedIndexCount(OperationContext* opCtx, RecordId catalogId) const;
-
- BSONObj getIndexSpec(OperationContext* opCtx, RecordId catalogId, StringData indexName) const;
-
- void getAllIndexes(OperationContext* opCtx,
- RecordId catalogId,
- std::vector<std::string>* names) const;
-
- void getReadyIndexes(OperationContext* opCtx,
- RecordId catalogId,
- std::vector<std::string>* names) const;
-
bool isIndexPresent(OperationContext* opCtx, RecordId catalogId, StringData indexName) const;
bool isIndexReady(OperationContext* opCtx, RecordId catalogId, StringData indexName) const;
@@ -240,7 +178,7 @@ private:
Status _replaceEntry(OperationContext* opCtx,
RecordId catalogId,
const NamespaceString& toNss,
- bool stayTemp);
+ BSONCollectionCatalogEntry::MetaData& md);
Status _removeEntry(OperationContext* opCtx, RecordId catalogId);
/**
diff --git a/src/mongo/db/storage/kv/durable_catalog_test.cpp b/src/mongo/db/storage/kv/durable_catalog_test.cpp
index d352e742728..595acc34bb1 100644
--- a/src/mongo/db/storage/kv/durable_catalog_test.cpp
+++ b/src/mongo/db/storage/kv/durable_catalog_test.cpp
@@ -35,7 +35,8 @@
#include "mongo/db/catalog/catalog_test_fixture.h"
#include "mongo/db/catalog/collection_catalog.h"
-#include "mongo/db/catalog/collection_mock.h"
+#include "mongo/db/catalog/collection_impl.h"
+#include "mongo/db/catalog_raii.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index/multikey_paths.h"
#include "mongo/db/index_names.h"
@@ -63,7 +64,7 @@ public:
CatalogTestFixture::setUp();
_nss = NamespaceString("unittests.durable_catalog");
- _catalogId = createCollection(_nss);
+ _collectionUUID = createCollection(_nss);
}
NamespaceString ns() {
@@ -74,11 +75,17 @@ public:
return operationContext()->getServiceContext()->getStorageEngine()->getCatalog();
}
- RecordId getCatalogId() {
- return _catalogId;
+ CollectionPtr getCollection() {
+ return CollectionCatalog::get(operationContext())
+ ->lookupCollectionByUUID(operationContext(), *_collectionUUID);
}
- RecordId createCollection(const NamespaceString& nss) {
+ CollectionWriter getCollectionWriter() {
+ return CollectionWriter(
+ operationContext(), *_collectionUUID, CollectionCatalog::LifetimeMode::kInplace);
+ }
+
+ CollectionUUID createCollection(const NamespaceString& nss) {
Lock::DBLock dbLk(operationContext(), nss.db(), MODE_IX);
Lock::CollectionLock collLk(operationContext(), nss, MODE_IX);
@@ -95,7 +102,12 @@ public:
std::pair<RecordId, std::unique_ptr<RecordStore>> coll = std::move(swColl.getValue());
RecordId catalogId = coll.first;
- std::shared_ptr<Collection> collection = std::make_shared<CollectionMock>(nss, catalogId);
+ std::shared_ptr<Collection> collection = std::make_shared<CollectionImpl>(
+ operationContext(),
+ nss,
+ catalogId,
+ getCatalog()->getMetaData(operationContext(), catalogId),
+ std::move(coll.second));
CollectionCatalog::write(operationContext(), [&](CollectionCatalog& catalog) {
catalog.registerCollection(
operationContext(), options.uuid.get(), std::move(collection));
@@ -103,29 +115,47 @@ public:
wuow.commit();
- return catalogId;
+ return *options.uuid;
}
- std::string createIndex(BSONObj keyPattern,
- std::string indexType = IndexNames::BTREE,
- bool twoPhase = false) {
+ IndexCatalogEntry* createIndex(BSONObj keyPattern,
+ std::string indexType = IndexNames::BTREE,
+ bool twoPhase = false) {
+ Lock::DBLock dbLk(operationContext(), _nss.db(), MODE_IX);
+ Lock::CollectionLock collLk(operationContext(), _nss, MODE_X);
+
std::string indexName = "idx" + std::to_string(numIndexesCreated);
+ // Make sure we have a valid IndexSpec for the type requested
+ IndexSpec spec;
+ spec.version(1).name(indexName).addKeys(keyPattern);
+ if (indexType == IndexNames::GEO_HAYSTACK) {
+ spec.geoHaystackBucketSize(1.0);
+ } else if (indexType == IndexNames::TEXT) {
+ spec.textWeights(BSON("a" << 1));
+ spec.textIndexVersion(2);
+ spec.textDefaultLanguage("swedish");
+ }
- auto collection = std::make_unique<CollectionMock>(_nss);
- IndexDescriptor desc(indexType,
- BSON("v" << 1 << "key" << keyPattern << "name" << indexName));
+ auto desc = std::make_unique<IndexDescriptor>(indexType, spec.toBSON());
+ IndexCatalogEntry* entry = nullptr;
+ auto collWriter = getCollectionWriter();
{
WriteUnitOfWork wuow(operationContext());
const bool isSecondaryBackgroundIndexBuild = false;
boost::optional<UUID> buildUUID(twoPhase, UUID::gen());
- ASSERT_OK(getCatalog()->prepareForIndexBuild(
- operationContext(), _catalogId, &desc, buildUUID, isSecondaryBackgroundIndexBuild));
+ ASSERT_OK(collWriter.getWritableCollection()->prepareForIndexBuild(
+ operationContext(), desc.get(), buildUUID, isSecondaryBackgroundIndexBuild));
+ entry = collWriter.getWritableCollection()->getIndexCatalog()->createIndexEntry(
+ operationContext(),
+ collWriter.getWritableCollection(),
+ std::move(desc),
+ CreateIndexEntryFlags::kNone);
wuow.commit();
}
++numIndexesCreated;
- return indexName;
+ return entry;
}
void assertMultikeyPathsAreEqual(const MultikeyPaths& actual, const MultikeyPaths& expected) {
@@ -175,149 +205,156 @@ private:
NamespaceString _nss;
size_t numIndexesCreated = 0;
- RecordId _catalogId;
+    // _catalogId member replaced by _collectionUUID below; tests now look collections up by UUID.
+ OptionalCollectionUUID _collectionUUID;
};
TEST_F(DurableCatalogTest, MultikeyPathsForBtreeIndexInitializedToVectorOfEmptySets) {
- std::string indexName = createIndex(BSON("a" << 1 << "b" << 1));
- DurableCatalog* catalog = getCatalog();
+ auto indexEntry = createIndex(BSON("a" << 1 << "b" << 1));
+ auto collection = getCollection();
{
MultikeyPaths multikeyPaths;
- ASSERT(!catalog->isIndexMultikey(
- operationContext(), getCatalogId(), indexName, &multikeyPaths));
+ ASSERT(!collection->isIndexMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), &multikeyPaths));
assertMultikeyPathsAreEqual(multikeyPaths, {MultikeyComponents{}, MultikeyComponents{}});
}
}
TEST_F(DurableCatalogTest, CanSetIndividualPathComponentOfBtreeIndexAsMultikey) {
- std::string indexName = createIndex(BSON("a" << 1 << "b" << 1));
- DurableCatalog* catalog = getCatalog();
+ auto indexEntry = createIndex(BSON("a" << 1 << "b" << 1));
+ auto collection = getCollection();
{
WriteUnitOfWork wuow(operationContext());
- ASSERT(catalog->setIndexIsMultikey(
- operationContext(), getCatalogId(), indexName, {MultikeyComponents{}, {0U}}));
+ ASSERT(collection->setIndexIsMultikey(operationContext(),
+ indexEntry->descriptor()->indexName(),
+ {MultikeyComponents{}, {0U}}));
wuow.commit();
}
{
MultikeyPaths multikeyPaths;
- ASSERT(catalog->isIndexMultikey(
- operationContext(), getCatalogId(), indexName, &multikeyPaths));
+ ASSERT(collection->isIndexMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), &multikeyPaths));
assertMultikeyPathsAreEqual(multikeyPaths, {MultikeyComponents{}, {0U}});
}
}
TEST_F(DurableCatalogTest, MultikeyPathsAccumulateOnDifferentFields) {
- std::string indexName = createIndex(BSON("a" << 1 << "b" << 1));
- DurableCatalog* catalog = getCatalog();
+ auto indexEntry = createIndex(BSON("a" << 1 << "b" << 1));
+ auto collection = getCollection();
{
WriteUnitOfWork wuow(operationContext());
- ASSERT(catalog->setIndexIsMultikey(
- operationContext(), getCatalogId(), indexName, {MultikeyComponents{}, {0U}}));
+ ASSERT(collection->setIndexIsMultikey(operationContext(),
+ indexEntry->descriptor()->indexName(),
+ {MultikeyComponents{}, {0U}}));
wuow.commit();
}
{
MultikeyPaths multikeyPaths;
- ASSERT(catalog->isIndexMultikey(
- operationContext(), getCatalogId(), indexName, &multikeyPaths));
+ ASSERT(collection->isIndexMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), &multikeyPaths));
assertMultikeyPathsAreEqual(multikeyPaths, {MultikeyComponents{}, {0U}});
}
{
WriteUnitOfWork wuow(operationContext());
- ASSERT(catalog->setIndexIsMultikey(
- operationContext(), getCatalogId(), indexName, {{0U}, MultikeyComponents{}}));
+ ASSERT(collection->setIndexIsMultikey(operationContext(),
+ indexEntry->descriptor()->indexName(),
+ {{0U}, MultikeyComponents{}}));
wuow.commit();
}
{
MultikeyPaths multikeyPaths;
- ASSERT(catalog->isIndexMultikey(
- operationContext(), getCatalogId(), indexName, &multikeyPaths));
+ ASSERT(collection->isIndexMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), &multikeyPaths));
assertMultikeyPathsAreEqual(multikeyPaths, {{0U}, {0U}});
}
}
TEST_F(DurableCatalogTest, MultikeyPathsAccumulateOnDifferentComponentsOfTheSameField) {
- std::string indexName = createIndex(BSON("a.b" << 1));
- DurableCatalog* catalog = getCatalog();
+ auto indexEntry = createIndex(BSON("a.b" << 1));
+ auto collection = getCollection();
{
WriteUnitOfWork wuow(operationContext());
- ASSERT(catalog->setIndexIsMultikey(operationContext(), getCatalogId(), indexName, {{0U}}));
+ ASSERT(collection->setIndexIsMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), {{0U}}));
wuow.commit();
}
{
MultikeyPaths multikeyPaths;
- ASSERT(catalog->isIndexMultikey(
- operationContext(), getCatalogId(), indexName, &multikeyPaths));
+ ASSERT(collection->isIndexMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), &multikeyPaths));
assertMultikeyPathsAreEqual(multikeyPaths, {{0U}});
}
{
WriteUnitOfWork wuow(operationContext());
- ASSERT(catalog->setIndexIsMultikey(operationContext(), getCatalogId(), indexName, {{1U}}));
+ ASSERT(collection->setIndexIsMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), {{1U}}));
wuow.commit();
}
{
MultikeyPaths multikeyPaths;
- ASSERT(catalog->isIndexMultikey(
- operationContext(), getCatalogId(), indexName, &multikeyPaths));
+ ASSERT(collection->isIndexMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), &multikeyPaths));
assertMultikeyPathsAreEqual(multikeyPaths, {{0U, 1U}});
}
}
TEST_F(DurableCatalogTest, NoOpWhenSpecifiedPathComponentsAlreadySetAsMultikey) {
- std::string indexName = createIndex(BSON("a" << 1));
- DurableCatalog* catalog = getCatalog();
+ auto indexEntry = createIndex(BSON("a" << 1));
+ auto collection = getCollection();
{
WriteUnitOfWork wuow(operationContext());
- ASSERT(catalog->setIndexIsMultikey(operationContext(), getCatalogId(), indexName, {{0U}}));
+ ASSERT(collection->setIndexIsMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), {{0U}}));
wuow.commit();
}
{
MultikeyPaths multikeyPaths;
- ASSERT(catalog->isIndexMultikey(
- operationContext(), getCatalogId(), indexName, &multikeyPaths));
+ ASSERT(collection->isIndexMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), &multikeyPaths));
assertMultikeyPathsAreEqual(multikeyPaths, {{0U}});
}
{
WriteUnitOfWork wuow(operationContext());
- ASSERT(!catalog->setIndexIsMultikey(operationContext(), getCatalogId(), indexName, {{0U}}));
+ ASSERT(!collection->setIndexIsMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), {{0U}}));
// Rollback WUOW.
}
{
MultikeyPaths multikeyPaths;
- ASSERT(catalog->isIndexMultikey(
- operationContext(), getCatalogId(), indexName, &multikeyPaths));
+ ASSERT(collection->isIndexMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), &multikeyPaths));
assertMultikeyPathsAreEqual(multikeyPaths, {{0U}});
}
}
TEST_F(DurableCatalogTest, CanSetMultipleFieldsAndComponentsAsMultikey) {
- std::string indexName = createIndex(BSON("a.b.c" << 1 << "a.b.d" << 1));
- DurableCatalog* catalog = getCatalog();
-
+ auto indexEntry = createIndex(BSON("a.b.c" << 1 << "a.b.d" << 1));
+ auto collection = getCollection();
{
WriteUnitOfWork wuow(operationContext());
- ASSERT(catalog->setIndexIsMultikey(
- operationContext(), getCatalogId(), indexName, {{0U, 1U}, {0U, 1U}}));
+ ASSERT(collection->setIndexIsMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), {{0U, 1U}, {0U, 1U}}));
wuow.commit();
}
{
MultikeyPaths multikeyPaths;
- ASSERT(catalog->isIndexMultikey(
- operationContext(), getCatalogId(), indexName, &multikeyPaths));
+ ASSERT(collection->isIndexMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), &multikeyPaths));
assertMultikeyPathsAreEqual(multikeyPaths, {{0U, 1U}, {0U, 1U}});
}
}
@@ -325,34 +362,35 @@ TEST_F(DurableCatalogTest, CanSetMultipleFieldsAndComponentsAsMultikey) {
DEATH_TEST_REGEX_F(DurableCatalogTest,
CannotOmitPathLevelMultikeyInfoWithBtreeIndex,
R"#(Invariant failure.*!multikeyPaths.empty\(\))#") {
- std::string indexName = createIndex(BSON("a" << 1 << "b" << 1));
- DurableCatalog* catalog = getCatalog();
+ auto indexEntry = createIndex(BSON("a" << 1 << "b" << 1));
+ auto collection = getCollection();
WriteUnitOfWork wuow(operationContext());
- catalog->setIndexIsMultikey(operationContext(), getCatalogId(), indexName, MultikeyPaths{});
+ collection->setIndexIsMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), MultikeyPaths{});
}
DEATH_TEST_REGEX_F(DurableCatalogTest,
AtLeastOnePathComponentMustCauseIndexToBeMultikey,
R"#(Invariant failure.*somePathIsMultikey)#") {
- std::string indexName = createIndex(BSON("a" << 1 << "b" << 1));
- DurableCatalog* catalog = getCatalog();
+ auto indexEntry = createIndex(BSON("a" << 1 << "b" << 1));
+ auto collection = getCollection();
WriteUnitOfWork wuow(operationContext());
- catalog->setIndexIsMultikey(operationContext(),
- getCatalogId(),
- indexName,
- {MultikeyComponents{}, MultikeyComponents{}});
+ collection->setIndexIsMultikey(operationContext(),
+ indexEntry->descriptor()->indexName(),
+ {MultikeyComponents{},
+ MultikeyComponents{}});
}
TEST_F(DurableCatalogTest, PathLevelMultikeyTrackingIsSupportedBy2dsphereIndexes) {
std::string indexType = IndexNames::GEO_2DSPHERE;
- std::string indexName = createIndex(BSON("a" << indexType << "b" << 1), indexType);
- DurableCatalog* catalog = getCatalog();
+ auto indexEntry = createIndex(BSON("a" << indexType << "b" << 1), indexType);
+ auto collection = getCollection();
{
MultikeyPaths multikeyPaths;
- ASSERT(!catalog->isIndexMultikey(
- operationContext(), getCatalogId(), indexName, &multikeyPaths));
+ ASSERT(!collection->isIndexMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), &multikeyPaths));
assertMultikeyPathsAreEqual(multikeyPaths, {MultikeyComponents{}, MultikeyComponents{}});
}
}
@@ -362,12 +400,12 @@ TEST_F(DurableCatalogTest, PathLevelMultikeyTrackingIsNotSupportedByAllIndexType
IndexNames::GEO_2D, IndexNames::GEO_HAYSTACK, IndexNames::TEXT, IndexNames::HASHED};
for (auto&& indexType : indexTypes) {
- std::string indexName = createIndex(BSON("a" << indexType << "b" << 1), indexType);
- DurableCatalog* catalog = getCatalog();
+ auto indexEntry = createIndex(BSON("a" << indexType << "b" << 1), indexType);
+ auto collection = getCollection();
{
MultikeyPaths multikeyPaths;
- ASSERT(!catalog->isIndexMultikey(
- operationContext(), getCatalogId(), indexName, &multikeyPaths));
+ ASSERT(!collection->isIndexMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), &multikeyPaths));
ASSERT(multikeyPaths.empty());
}
}
@@ -375,102 +413,107 @@ TEST_F(DurableCatalogTest, PathLevelMultikeyTrackingIsNotSupportedByAllIndexType
TEST_F(DurableCatalogTest, CanSetEntireTextIndexAsMultikey) {
std::string indexType = IndexNames::TEXT;
- std::string indexName = createIndex(BSON("a" << indexType << "b" << 1), indexType);
- DurableCatalog* catalog = getCatalog();
+ auto indexEntry = createIndex(BSON("a" << indexType << "b" << 1), indexType);
+ auto collection = getCollection();
{
WriteUnitOfWork wuow(operationContext());
- ASSERT(catalog->setIndexIsMultikey(
- operationContext(), getCatalogId(), indexName, MultikeyPaths{}));
+ ASSERT(collection->setIndexIsMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), MultikeyPaths{}));
wuow.commit();
}
{
MultikeyPaths multikeyPaths;
- ASSERT(catalog->isIndexMultikey(
- operationContext(), getCatalogId(), indexName, &multikeyPaths));
+ ASSERT(collection->isIndexMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), &multikeyPaths));
ASSERT(multikeyPaths.empty());
}
}
TEST_F(DurableCatalogTest, NoOpWhenEntireIndexAlreadySetAsMultikey) {
std::string indexType = IndexNames::TEXT;
- std::string indexName = createIndex(BSON("a" << indexType << "b" << 1), indexType);
- DurableCatalog* catalog = getCatalog();
+ auto indexEntry = createIndex(BSON("a" << indexType << "b" << 1), indexType);
+ auto collection = getCollection();
{
WriteUnitOfWork wuow(operationContext());
- ASSERT(catalog->setIndexIsMultikey(
- operationContext(), getCatalogId(), indexName, MultikeyPaths{}));
+ ASSERT(collection->setIndexIsMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), MultikeyPaths{}));
wuow.commit();
}
{
MultikeyPaths multikeyPaths;
- ASSERT(catalog->isIndexMultikey(
- operationContext(), getCatalogId(), indexName, &multikeyPaths));
+ ASSERT(collection->isIndexMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), &multikeyPaths));
ASSERT(multikeyPaths.empty());
}
{
WriteUnitOfWork wuow(operationContext());
- ASSERT(!catalog->setIndexIsMultikey(
- operationContext(), getCatalogId(), indexName, MultikeyPaths{}));
+ ASSERT(!collection->setIndexIsMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), MultikeyPaths{}));
// Rollback WUOW.
}
{
MultikeyPaths multikeyPaths;
- ASSERT(catalog->isIndexMultikey(
- operationContext(), getCatalogId(), indexName, &multikeyPaths));
+ ASSERT(collection->isIndexMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), &multikeyPaths));
ASSERT(multikeyPaths.empty());
}
}
TEST_F(DurableCatalogTest, SinglePhaseIndexBuild) {
- std::string indexName = createIndex(BSON("a" << 1));
- DurableCatalog* catalog = getCatalog();
+ auto indexEntry = createIndex(BSON("a" << 1));
+ auto collection = getCollection();
- ASSERT_FALSE(catalog->isIndexReady(operationContext(), getCatalogId(), indexName));
- ASSERT_FALSE(catalog->getIndexBuildUUID(operationContext(), getCatalogId(), indexName));
+ ASSERT_FALSE(collection->isIndexReady(indexEntry->descriptor()->indexName()));
+ ASSERT_FALSE(collection->getIndexBuildUUID(indexEntry->descriptor()->indexName()));
{
WriteUnitOfWork wuow(operationContext());
- catalog->indexBuildSuccess(operationContext(), getCatalogId(), indexName);
+ getCollectionWriter().getWritableCollection()->indexBuildSuccess(operationContext(),
+ indexEntry);
wuow.commit();
}
- ASSERT_TRUE(catalog->isIndexReady(operationContext(), getCatalogId(), indexName));
- ASSERT_FALSE(catalog->getIndexBuildUUID(operationContext(), getCatalogId(), indexName));
+ collection = getCollection();
+ ASSERT_TRUE(collection->isIndexReady(indexEntry->descriptor()->indexName()));
+ ASSERT_FALSE(collection->getIndexBuildUUID(indexEntry->descriptor()->indexName()));
}
TEST_F(DurableCatalogTest, TwoPhaseIndexBuild) {
bool twoPhase = true;
- std::string indexName = createIndex(BSON("a" << 1), IndexNames::BTREE, twoPhase);
- DurableCatalog* catalog = getCatalog();
+ auto indexEntry = createIndex(BSON("a" << 1), IndexNames::BTREE, twoPhase);
+ auto collection = getCollection();
- ASSERT_FALSE(catalog->isIndexReady(operationContext(), getCatalogId(), indexName));
- ASSERT_TRUE(catalog->getIndexBuildUUID(operationContext(), getCatalogId(), indexName));
+ ASSERT_FALSE(collection->isIndexReady(indexEntry->descriptor()->indexName()));
+ ASSERT_TRUE(collection->getIndexBuildUUID(indexEntry->descriptor()->indexName()));
{
WriteUnitOfWork wuow(operationContext());
- catalog->indexBuildSuccess(operationContext(), getCatalogId(), indexName);
+ getCollectionWriter().getWritableCollection()->indexBuildSuccess(operationContext(),
+ indexEntry);
wuow.commit();
}
- ASSERT_TRUE(catalog->isIndexReady(operationContext(), getCatalogId(), indexName));
- ASSERT_FALSE(catalog->getIndexBuildUUID(operationContext(), getCatalogId(), indexName));
+ collection = getCollection();
+ ASSERT_TRUE(collection->isIndexReady(indexEntry->descriptor()->indexName()));
+ ASSERT_FALSE(collection->getIndexBuildUUID(indexEntry->descriptor()->indexName()));
}
DEATH_TEST_REGEX_F(DurableCatalogTest,
CannotSetIndividualPathComponentsOfTextIndexAsMultikey,
R"#(Invariant failure.*multikeyPaths.empty\(\))#") {
std::string indexType = IndexNames::TEXT;
- std::string indexName = createIndex(BSON("a" << indexType << "b" << 1), indexType);
- DurableCatalog* catalog = getCatalog();
+ auto indexEntry = createIndex(BSON("a" << indexType << "b" << 1), indexType);
+ auto collection = getCollection();
WriteUnitOfWork wuow(operationContext());
- catalog->setIndexIsMultikey(operationContext(), getCatalogId(), indexName, {{0U}, {0U}});
+ collection->setIndexIsMultikey(
+ operationContext(), indexEntry->descriptor()->indexName(), {{0U}, {0U}});
}
TEST_F(DurableCatalogTest, ImportCollection) {
@@ -546,7 +589,10 @@ TEST_F(DurableCatalogTest, IdentSuffixUsesRand) {
const NamespaceString nss = NamespaceString("a.b");
- RecordId catalogId = createCollection(nss);
+ auto uuid = createCollection(nss);
+ auto collection = CollectionCatalog::get(operationContext())
+ ->lookupCollectionByUUID(operationContext(), uuid);
+ RecordId catalogId = collection->getCatalogId();
ASSERT(StringData(getCatalog()->getEntry(catalogId).ident).endsWith(rand));
ASSERT_EQUALS(getCatalog()->getRand_forTest(), rand);
}
@@ -589,8 +635,9 @@ TEST_F(DurableCatalogTest, ImportCollectionRandConflict) {
{
// Check that a newly created collection doesn't use 'rand' as the suffix in the ident.
const NamespaceString nss = NamespaceString("a.b");
+ createCollection(nss);
- RecordId catalogId = createCollection(nss);
+ RecordId catalogId = getCollection()->getCatalogId();
ASSERT(!StringData(getCatalog()->getEntry(catalogId).ident).endsWith(rand));
}
diff --git a/src/mongo/db/storage/kv/storage_engine_test.cpp b/src/mongo/db/storage/kv/storage_engine_test.cpp
index 74ac5ed28d5..6d64c063ee7 100644
--- a/src/mongo/db/storage/kv/storage_engine_test.cpp
+++ b/src/mongo/db/storage/kv/storage_engine_test.cpp
@@ -43,7 +43,6 @@
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/db/storage/devnull/devnull_kv_engine.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/kv/kv_engine.h"
#include "mongo/db/storage/storage_engine_impl.h"
#include "mongo/db/storage/storage_engine_test_fixture.h"
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index 13f841b3c3b..77144ae5044 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -307,10 +307,10 @@ void StorageEngineImpl::_initCollection(OperationContext* opCtx,
const NamespaceString& nss,
bool forRepair,
Timestamp minVisibleTs) {
- BSONCollectionCatalogEntry::MetaData md = _catalog->getMetaData(opCtx, catalogId);
+ auto md = _catalog->getMetaData(opCtx, catalogId);
uassert(ErrorCodes::MustDowngrade,
str::stream() << "Collection does not have UUID in KVCatalog. Collection: " << nss,
- md.options.uuid);
+ md->options.uuid);
auto ident = _catalog->getEntry(catalogId).ident;
@@ -320,18 +320,16 @@ void StorageEngineImpl::_initCollection(OperationContext* opCtx,
// repaired. This also ensures that if we try to use it, it will blow up.
rs = nullptr;
} else {
- rs = _engine->getRecordStore(opCtx, nss.ns(), ident, md.options);
+ rs = _engine->getRecordStore(opCtx, nss.ns(), ident, md->options);
invariant(rs);
}
- auto options = _catalog->getCollectionOptions(opCtx, catalogId);
-
auto collectionFactory = Collection::Factory::get(getGlobalServiceContext());
- auto collection = collectionFactory->make(opCtx, nss, catalogId, options, std::move(rs));
+ auto collection = collectionFactory->make(opCtx, nss, catalogId, md, std::move(rs));
collection->setMinimumVisibleSnapshot(minVisibleTs);
CollectionCatalog::write(opCtx, [&](CollectionCatalog& catalog) {
- catalog.registerCollection(opCtx, options.uuid.get(), std::move(collection));
+ catalog.registerCollection(opCtx, md->options.uuid.get(), std::move(collection));
});
}
@@ -367,7 +365,7 @@ Status StorageEngineImpl::_recoverOrphanedCollection(OperationContext* opCtx,
WriteUnitOfWork wuow(opCtx);
const auto metadata = _catalog->getMetaData(opCtx, catalogId);
Status status =
- _engine->recoverOrphanedIdent(opCtx, collectionName, collectionIdent, metadata.options);
+ _engine->recoverOrphanedIdent(opCtx, collectionName, collectionIdent, metadata->options);
bool dataModified = status.code() == ErrorCodes::DataModifiedByRepair;
if (!status.isOK() && !dataModified) {
@@ -560,13 +558,13 @@ StatusWith<StorageEngine::ReconcileResult> StorageEngineImpl::reconcileCatalogAn
// Also, remove unfinished builds except those that were background index builds started on a
// secondary.
for (DurableCatalog::Entry entry : catalogEntries) {
- BSONCollectionCatalogEntry::MetaData metaData =
+ std::shared_ptr<BSONCollectionCatalogEntry::MetaData> metaData =
_catalog->getMetaData(opCtx, entry.catalogId);
- NamespaceString coll(metaData.ns);
+ NamespaceString coll(metaData->ns);
// Batch up the indexes to remove them from `metaData` outside of the iterator.
std::vector<std::string> indexesToDrop;
- for (const auto& indexMetaData : metaData.indexes) {
+ for (const auto& indexMetaData : metaData->indexes) {
const std::string& indexName = indexMetaData.name();
std::string indexIdent = _catalog->getIndexIdent(opCtx, entry.catalogId, indexName);
@@ -619,7 +617,7 @@ StatusWith<StorageEngine::ReconcileResult> StorageEngineImpl::reconcileCatalogAn
if (indexMetaData.buildUUID) {
invariant(!indexMetaData.ready);
- auto collUUID = metaData.options.uuid;
+ auto collUUID = metaData->options.uuid;
invariant(collUUID);
auto buildUUID = *indexMetaData.buildUUID;
@@ -676,13 +674,17 @@ StatusWith<StorageEngine::ReconcileResult> StorageEngineImpl::reconcileCatalogAn
}
for (auto&& indexName : indexesToDrop) {
- invariant(metaData.eraseIndex(indexName),
+ invariant(metaData->eraseIndex(indexName),
str::stream()
<< "Index is missing. Collection: " << coll << " Index: " << indexName);
}
if (indexesToDrop.size() > 0) {
WriteUnitOfWork wuow(opCtx);
- _catalog->putMetaData(opCtx, entry.catalogId, metaData);
+ auto collection =
+ CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForMetadataWrite(
+ opCtx, CollectionCatalog::LifetimeMode::kInplace, entry.nss);
+ invariant(collection->getCatalogId() == entry.catalogId);
+ collection->replaceMetadata(opCtx, std::move(metaData));
wuow.commit();
}
}
@@ -802,7 +804,8 @@ Status StorageEngineImpl::_dropCollectionsNoTimestamp(OperationContext* opCtx,
WriteUnitOfWork untimestampedDropWuow(opCtx);
auto collectionCatalog = CollectionCatalog::get(opCtx);
for (auto& uuid : toDrop) {
- auto coll = collectionCatalog->lookupCollectionByUUID(opCtx, uuid);
+ auto coll = collectionCatalog->lookupCollectionByUUIDForMetadataWrite(
+ opCtx, CollectionCatalog::LifetimeMode::kInplace, uuid);
// No need to remove the indexes from the IndexCatalog because eliminating the Collection
// will have the same effect.
@@ -813,12 +816,8 @@ Status StorageEngineImpl::_dropCollectionsNoTimestamp(OperationContext* opCtx,
audit::logDropIndex(opCtx->getClient(), ice->descriptor()->indexName(), coll->ns());
- catalog::removeIndex(opCtx,
- ice->descriptor()->indexName(),
- coll->getCatalogId(),
- coll->uuid(),
- coll->ns(),
- ice->getSharedIdent());
+ catalog::removeIndex(
+ opCtx, ice->descriptor()->indexName(), coll, ice->getSharedIdent());
}
audit::logDropCollection(opCtx->getClient(), coll->ns());
@@ -1247,7 +1246,7 @@ int64_t StorageEngineImpl::sizeOnDiskForDb(OperationContext* opCtx, StringData d
size += collection->getRecordStore()->storageSize(opCtx);
std::vector<std::string> indexNames;
- _catalog->getAllIndexes(opCtx, collection->getCatalogId(), &indexNames);
+ collection->getAllIndexes(&indexNames);
for (size_t i = 0; i < indexNames.size(); i++) {
std::string ident =
diff --git a/src/mongo/db/storage/storage_engine_test_fixture.h b/src/mongo/db/storage/storage_engine_test_fixture.h
index 9ef8d7252c1..57b50a1d32c 100644
--- a/src/mongo/db/storage/storage_engine_test_fixture.h
+++ b/src/mongo/db/storage/storage_engine_test_fixture.h
@@ -32,7 +32,7 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
#include "mongo/db/catalog/collection_catalog.h"
-#include "mongo/db/catalog/collection_mock.h"
+#include "mongo/db/catalog/collection_impl.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/service_context_d_test_fixture.h"
@@ -69,7 +69,12 @@ public:
_storageEngine->getCatalog()->createCollection(opCtx, ns, options, true));
wuow.commit();
}
- std::shared_ptr<Collection> coll = std::make_shared<CollectionMock>(ns, catalogId);
+ std::shared_ptr<Collection> coll = std::make_shared<CollectionImpl>(
+ opCtx,
+ ns,
+ catalogId,
+ _storageEngine->getCatalog()->getMetaData(opCtx, catalogId),
+ std::move(rs));
CollectionCatalog::write(opCtx, [&](CollectionCatalog& catalog) {
catalog.registerCollection(opCtx, options.uuid.get(), std::move(coll));
});
@@ -159,22 +164,22 @@ public:
}
BSONObj spec = builder.append("name", key).append("v", 2).done();
- CollectionPtr collection =
- CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, collNs);
+ Collection* collection =
+ CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForMetadataWrite(
+ opCtx, CollectionCatalog::LifetimeMode::kInplace, collNs);
auto descriptor = std::make_unique<IndexDescriptor>(IndexNames::findPluginName(spec), spec);
- auto ret = DurableCatalog::get(opCtx)->prepareForIndexBuild(opCtx,
- collection->getCatalogId(),
- descriptor.get(),
- buildUUID,
- isBackgroundSecondaryBuild);
+ auto ret = collection->prepareForIndexBuild(
+ opCtx, descriptor.get(), buildUUID, isBackgroundSecondaryBuild);
return ret;
}
void indexBuildSuccess(OperationContext* opCtx, NamespaceString collNs, std::string key) {
- CollectionPtr collection =
- CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, collNs);
- DurableCatalog::get(opCtx)->indexBuildSuccess(opCtx, collection->getCatalogId(), key);
+ Collection* collection =
+ CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForMetadataWrite(
+ opCtx, CollectionCatalog::LifetimeMode::kInplace, collNs);
+ auto descriptor = collection->getIndexCatalog()->findIndexByName(opCtx, key, true);
+ collection->indexBuildSuccess(opCtx, descriptor->getEntry());
}
Status removeEntry(OperationContext* opCtx, StringData collNs, DurableCatalog* catalog) {
diff --git a/src/mongo/db/storage/storage_util.cpp b/src/mongo/db/storage/storage_util.cpp
index f4d5b8ae8ee..81380e9eae2 100644
--- a/src/mongo/db/storage/storage_util.cpp
+++ b/src/mongo/db/storage/storage_util.cpp
@@ -83,9 +83,7 @@ auto removeEmptyDirectory =
void removeIndex(OperationContext* opCtx,
StringData indexName,
- RecordId collectionCatalogId,
- UUID collectionUUID,
- const NamespaceString& nss,
+ Collection* collection,
std::shared_ptr<Ident> ident) {
auto durableCatalog = DurableCatalog::get(opCtx);
@@ -94,11 +92,11 @@ void removeIndex(OperationContext* opCtx,
// to wait for existing users to finish.
if (!ident) {
ident = std::make_shared<Ident>(
- durableCatalog->getIndexIdent(opCtx, collectionCatalogId, indexName));
+ durableCatalog->getIndexIdent(opCtx, collection->getCatalogId(), indexName));
}
// Run the first phase of drop to remove the catalog entry.
- durableCatalog->removeIndex(opCtx, collectionCatalogId, indexName);
+ collection->removeIndex(opCtx, indexName);
// The OperationContext may not be valid when the RecoveryUnit executes the onCommit handlers.
// Therefore, anything that would normally be fetched from the opCtx must be passed in
@@ -114,8 +112,8 @@ void removeIndex(OperationContext* opCtx,
opCtx->recoveryUnit()->onCommit([svcCtx = opCtx->getServiceContext(),
recoveryUnit,
storageEngine,
- collectionUUID,
- nss,
+ uuid = collection->uuid(),
+ nss = collection->ns(),
indexNameStr = indexName.toString(),
ident](boost::optional<Timestamp> commitTimestamp) {
StorageEngine::DropIdentCallback onDrop = [svcCtx, storageEngine, nss] {
@@ -131,7 +129,7 @@ void removeIndex(OperationContext* opCtx,
"Deferring table drop for index",
"index"_attr = indexNameStr,
logAttrs(nss),
- "uuid"_attr = collectionUUID,
+ "uuid"_attr = uuid,
"ident"_attr = ident->getIdent(),
"commitTimestamp"_attr = commitTimestamp);
storageEngine->addDropPendingIdent(*commitTimestamp, ident, std::move(onDrop));
diff --git a/src/mongo/db/storage/storage_util.h b/src/mongo/db/storage/storage_util.h
index 69351f63c2b..ba5e9aedbef 100644
--- a/src/mongo/db/storage/storage_util.h
+++ b/src/mongo/db/storage/storage_util.h
@@ -34,6 +34,7 @@
namespace mongo {
+class Collection;
class Ident;
class OperationContext;
class NamespaceString;
@@ -54,9 +55,7 @@ namespace catalog {
*/
void removeIndex(OperationContext* opCtx,
StringData indexName,
- RecordId collectionCatalogId,
- UUID collectionUUID,
- const NamespaceString& nss,
+ Collection* collection,
std::shared_ptr<Ident> ident);
/**
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index f851a48e0a5..bebf1c02c7c 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -761,6 +761,7 @@ TransactionParticipant::TxnResources::TxnResources(WithLock wl,
_apiParameters = APIParameters::get(opCtx);
_readConcernArgs = repl::ReadConcernArgs::get(opCtx);
_uncommittedCollections = UncommittedCollections::get(opCtx).releaseResources();
+ _uncommittedMultikey = UncommittedMultikey::get(opCtx).releaseResources();
}
TransactionParticipant::TxnResources::~TxnResources() {
@@ -828,6 +829,10 @@ void TransactionParticipant::TxnResources::release(OperationContext* opCtx) {
UncommittedCollections::get(opCtx).receiveResources(_uncommittedCollections);
_uncommittedCollections = nullptr;
+ // Transfer ownership of UncommittedMultikey
+ UncommittedMultikey::get(opCtx).receiveResources(_uncommittedMultikey);
+ _uncommittedMultikey = nullptr;
+
auto oldState = opCtx->setRecoveryUnit(std::move(_recoveryUnit),
WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork);
invariant(oldState == WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork,
@@ -1158,7 +1163,7 @@ Timestamp TransactionParticipant::Participant::prepareTransaction(
str::stream() << "prepareTransaction failed because one of the transaction "
"operations was done against a temporary collection '"
<< collection->ns() << "'.",
- !collection->isTemporary(opCtx));
+ !collection->isTemporary());
}
boost::optional<OplogSlotReserver> oplogSlotReserver;
diff --git a/src/mongo/db/transaction_participant.h b/src/mongo/db/transaction_participant.h
index 4427ed7d85a..de02c0e3e9c 100644
--- a/src/mongo/db/transaction_participant.h
+++ b/src/mongo/db/transaction_participant.h
@@ -35,6 +35,7 @@
#include "mongo/db/api_parameters.h"
#include "mongo/db/catalog/uncommitted_collections.h"
+#include "mongo/db/catalog/uncommitted_multikey.h"
#include "mongo/db/commands/txn_cmds_gen.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/locker.h"
@@ -232,6 +233,7 @@ public:
repl::ReadConcernArgs _readConcernArgs;
WriteUnitOfWork::RecoveryUnitState _ruState;
std::shared_ptr<UncommittedCollections::UncommittedCollectionsMap> _uncommittedCollections;
+ std::shared_ptr<UncommittedMultikey::MultikeyMap> _uncommittedMultikey;
};
/**
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index 05d633f9e47..bf7bca72dc1 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -54,7 +54,6 @@
#include "mongo/db/repl/tenant_migration_access_blocker_registry.h"
#include "mongo/db/service_context.h"
#include "mongo/db/stats/resource_consumption_metrics.h"
-#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/timeseries/bucket_catalog.h"
#include "mongo/db/ttl_collection_cache.h"
#include "mongo/db/ttl_gen.h"
@@ -326,21 +325,18 @@ private:
TTLCollectionCache* ttlCollectionCache,
const CollectionPtr& collection,
std::string indexName) {
- if (!DurableCatalog::get(opCtx)->isIndexPresent(
- opCtx, collection->getCatalogId(), indexName)) {
+ if (!collection->isIndexPresent(indexName)) {
ttlCollectionCache->deregisterTTLInfo(collection->uuid(), indexName);
return;
}
- BSONObj spec =
- DurableCatalog::get(opCtx)->getIndexSpec(opCtx, collection->getCatalogId(), indexName);
+ BSONObj spec = collection->getIndexSpec(indexName);
if (!spec.hasField(IndexDescriptor::kExpireAfterSecondsFieldName)) {
ttlCollectionCache->deregisterTTLInfo(collection->uuid(), indexName);
return;
}
- if (!DurableCatalog::get(opCtx)->isIndexReady(
- opCtx, collection->getCatalogId(), indexName)) {
+ if (!collection->isIndexReady(indexName)) {
return;
}
@@ -451,8 +447,7 @@ private:
void deleteExpiredWithCollscan(OperationContext* opCtx,
TTLCollectionCache* ttlCollectionCache,
const CollectionPtr& collection) {
- auto collOptions =
- DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, collection->getCatalogId());
+ const auto& collOptions = collection->getCollectionOptions();
uassert(5400701,
"collection is not clustered by _id but is described as being TTL",
collOptions.clusteredIndex);