summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMarcos Grillo <marcos.grillo@mongodb.com>2022-06-29 11:42:34 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-06-29 12:43:24 +0000
commit490a66218e20c9829a8a614bfa3eabede35f4de9 (patch)
tree625c79dbe18473e1aa576602aad93d249c00b640
parente888bf2f9e5043162825493aebcf2b39174777ad (diff)
downloadmongo-490a66218e20c9829a8a614bfa3eabede35f4de9.tar.gz
SERVER-62805 Add global indexes local storage
-rw-r--r--jstests/sharding/global_index_sharding_catalog_collection_upgrade_downgrade.js53
-rw-r--r--src/mongo/db/commands/set_feature_compatibility_version_command.cpp28
-rw-r--r--src/mongo/db/namespace_string.cpp6
-rw-r--r--src/mongo/db/namespace_string.h8
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_impl.cpp87
-rw-r--r--src/mongo/db/s/config/index_on_config.cpp65
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager.cpp9
-rw-r--r--src/mongo/db/s/sharding_util.cpp107
-rw-r--r--src/mongo/db/s/sharding_util.h13
-rw-r--r--src/mongo/s/SConscript1
-rw-r--r--src/mongo/s/catalog/type_index_catalog.idl60
11 files changed, 340 insertions, 97 deletions
diff --git a/jstests/sharding/global_index_sharding_catalog_collection_upgrade_downgrade.js b/jstests/sharding/global_index_sharding_catalog_collection_upgrade_downgrade.js
new file mode 100644
index 00000000000..3e6a660f594
--- /dev/null
+++ b/jstests/sharding/global_index_sharding_catalog_collection_upgrade_downgrade.js
@@ -0,0 +1,53 @@
+/**
+ * Tests that the global indexes collections are dropped on FCV downgrade and recreated after
+ * upgrading.
+ *
+ * @tags: [multiversion_incompatible, featureFlagGlobalIndexesShardingCatalog]
+ */
+
+(function() {
+'use strict';
+
+const st = new ShardingTest({shards: 1});
+
+const csrsIndexesCollection = 'csrs.indexes';
+const shardIndexesCollection = 'shard.indexes';
+
+const CSRSIndexes = st.configRS.getPrimary()
+ .getDB('config')
+ .runCommand({listIndexes: csrsIndexesCollection})
+ .cursor.firstBatch;
+assert.eq(2, CSRSIndexes.length);
+
+const shardIndexes = st.rs0.getPrimary()
+ .getDB('config')
+ .runCommand({listIndexes: shardIndexesCollection})
+ .cursor.firstBatch;
+assert.eq(2, shardIndexes.length);
+
+st.s.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV});
+
+assert.commandFailedWithCode(
+ st.configRS.getPrimary().getDB('config').runCommand({listIndexes: csrsIndexesCollection}),
+ ErrorCodes.NamespaceNotFound);
+
+assert.commandFailedWithCode(
+ st.rs0.getPrimary().getDB('config').runCommand({listIndexes: shardIndexesCollection}),
+ ErrorCodes.NamespaceNotFound);
+
+st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV});
+
+const afterUpgradeCSRSIndexes = st.configRS.getPrimary()
+ .getDB('config')
+ .runCommand({listIndexes: csrsIndexesCollection})
+ .cursor.firstBatch;
+assert.eq(2, afterUpgradeCSRSIndexes.length);
+
+const afterUpgradeShardIndexes = st.rs0.getPrimary()
+ .getDB('config')
+ .runCommand({listIndexes: shardIndexesCollection})
+ .cursor.firstBatch;
+assert.eq(2, afterUpgradeShardIndexes.length);
+
+st.stop();
+})();
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index 0a996e29646..7fc9aa12e4c 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -74,6 +74,7 @@
#include "mongo/db/s/resharding/resharding_coordinator_service.h"
#include "mongo/db/s/resharding/resharding_donor_recipient_common.h"
#include "mongo/db/s/sharding_ddl_coordinator_service.h"
+#include "mongo/db/s/sharding_util.h"
#include "mongo/db/s/transaction_coordinator_service.h"
#include "mongo/db/server_feature_flags_gen.h"
#include "mongo/db/server_options.h"
@@ -530,6 +531,11 @@ private:
createChangeStreamPreImagesCollection(opCtx);
}
+ // TODO SERVER-67392: Remove once FCV 7.0 becomes last-lts.
+ if (feature_flags::gGlobalIndexesShardingCatalog.isEnabledOnVersion(requestedVersion)) {
+ uassertStatusOK(sharding_util::createGlobalIndexesIndexes(opCtx));
+ }
+
hangWhileUpgrading.pauseWhileSet(opCtx);
}
@@ -670,6 +676,28 @@ private:
opCtx, DDLCoordinatorTypeEnum::kReshardCollection);
}
+ // TODO SERVER-67392: Remove when 7.0 branches-out.
+ if (requestedVersion == GenericFCV::kLastLTS) {
+ NamespaceString indexCatalogNss;
+ if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+ indexCatalogNss = NamespaceString::kConfigsvrIndexCatalogNamespace;
+ } else {
+ indexCatalogNss = NamespaceString::kShardsIndexCatalogNamespace;
+ }
+ LOGV2(6280502, "Dropping global indexes collection", "nss"_attr = indexCatalogNss);
+ DropReply dropReply;
+ const auto deletionStatus =
+ dropCollection(opCtx,
+ indexCatalogNss,
+ &dropReply,
+ DropCollectionSystemCollectionMode::kAllowSystemCollectionDrops);
+ uassert(deletionStatus.code(),
+ str::stream() << "Failed to drop " << indexCatalogNss
+ << causedBy(deletionStatus.reason()),
+ deletionStatus.isOK() ||
+ deletionStatus.code() == ErrorCodes::NamespaceNotFound);
+ }
+
uassert(ErrorCodes::Error(549181),
"Failing downgrade due to 'failDowngrading' failpoint set",
!failDowngrading.shouldFail());
diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp
index 633ab3ce8ce..399bebab90d 100644
--- a/src/mongo/db/namespace_string.cpp
+++ b/src/mongo/db/namespace_string.cpp
@@ -170,6 +170,12 @@ const NamespaceString NamespaceString::kClusterParametersNamespace(NamespaceStri
const NamespaceString NamespaceString::kConfigsvrShardsNamespace(NamespaceString::kConfigDb,
"shards");
+const NamespaceString NamespaceString::kConfigsvrIndexCatalogNamespace(NamespaceString::kConfigDb,
+ "csrs.indexes");
+
+const NamespaceString NamespaceString::kShardsIndexCatalogNamespace(NamespaceString::kConfigDb,
+ "shard.indexes");
+
NamespaceString NamespaceString::parseFromStringExpectTenantIdInMultitenancyMode(StringData ns) {
if (!gMultitenancySupport) {
return NamespaceString(ns, boost::none);
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index 91dbcd144b8..efc7edc372c 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -227,9 +227,15 @@ public:
// Namespace used for storing cluster wide parameters.
static const NamespaceString kClusterParametersNamespace;
- // Namespace used for storing the list of shards on the CSRS
+ // Namespace used for storing the list of shards on the CSRS.
static const NamespaceString kConfigsvrShardsNamespace;
+ // Namespace used for storing the index catalog on the CSRS.
+ static const NamespaceString kConfigsvrIndexCatalogNamespace;
+
+ // Namespace used for storing the index catalog on the shards.
+ static const NamespaceString kShardsIndexCatalogNamespace;
+
/**
* Constructs an empty NamespaceString.
*/
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index de903787a74..4714f43c3c9 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -90,6 +90,7 @@
#include "mongo/db/s/shard_local.h"
#include "mongo/db/s/sharding_initialization_mongod.h"
#include "mongo/db/s/sharding_state_recovery.h"
+#include "mongo/db/s/sharding_util.h"
#include "mongo/db/s/transaction_coordinator_service.h"
#include "mongo/db/server_options.h"
#include "mongo/db/service_context.h"
@@ -933,30 +934,43 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
PeriodicShardedIndexConsistencyChecker::get(_service).onStepUp(_service);
TransactionCoordinatorService::get(_service)->onStepUp(opCtx);
- } else if (ShardingState::get(opCtx)->enabled()) {
- Status status = ShardingStateRecovery::recover(opCtx);
- VectorClockMutable::get(opCtx)->recoverDirect(opCtx);
-
- // If the node is shutting down or it lost quorum just as it was becoming primary, don't
- // run the sharding onStepUp machinery. The onStepDown counterpart to these methods is
- // already idempotent, so the machinery will remain in the stepped down state.
- if (ErrorCodes::isShutdownError(status.code()) ||
- ErrorCodes::isNotPrimaryError(status.code())) {
- return;
+ } else if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+ if (ShardingState::get(opCtx)->enabled()) {
+ Status status = ShardingStateRecovery::recover(opCtx);
+ VectorClockMutable::get(opCtx)->recoverDirect(opCtx);
+
+ // If the node is shutting down or it lost quorum just as it was becoming primary, don't
+ // run the sharding onStepUp machinery. The onStepDown counterpart to these methods is
+ // already idempotent, so the machinery will remain in the stepped down state.
+ if (ErrorCodes::isShutdownError(status.code()) ||
+ ErrorCodes::isNotPrimaryError(status.code())) {
+ return;
+ }
+ fassert(40107, status);
+
+ CatalogCacheLoader::get(_service).onStepUp();
+ ChunkSplitter::get(_service).onStepUp();
+ PeriodicBalancerConfigRefresher::get(_service).onStepUp(_service);
+ TransactionCoordinatorService::get(_service)->onStepUp(opCtx);
+
+ const auto configsvrConnStr =
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->getConnString();
+ ShardingInitializationMongoD::get(opCtx)->updateShardIdentityConfigString(
+ opCtx, configsvrConnStr);
+
+ // Note, these must be done after the configOpTime is recovered via
+ // ShardingStateRecovery::recover above, because they may trigger filtering metadata
+ // refreshes which should use the recovered configOpTime.
+ migrationutil::resubmitRangeDeletionsOnStepUp(_service);
+ migrationutil::resumeMigrationCoordinationsOnStepUp(opCtx);
+ migrationutil::resumeMigrationRecipientsOnStepUp(opCtx);
+
+ const bool scheduleAsyncRefresh = true;
+ resharding::clearFilteringMetadata(opCtx, scheduleAsyncRefresh);
}
- fassert(40107, status);
-
- const auto configsvrConnStr =
- Grid::get(opCtx)->shardRegistry()->getConfigShard()->getConnString();
- ShardingInitializationMongoD::get(opCtx)->updateShardIdentityConfigString(opCtx,
- configsvrConnStr);
-
- CatalogCacheLoader::get(_service).onStepUp();
- ChunkSplitter::get(_service).onStepUp();
- PeriodicBalancerConfigRefresher::get(_service).onStepUp(_service);
- TransactionCoordinatorService::get(_service)->onStepUp(opCtx);
-
- // Create uuid index on config.rangeDeletions if needed
+ // The code above will only be executed after a stepdown happens; however, the code
+ // below also needs to be executed on startup, and the 'enabled' check might fail on
+ // shards during startup. Create the uuid index on config.rangeDeletions if needed.
auto minKeyFieldName = RangeDeletionTask::kRangeFieldName + "." + ChunkRange::kMinKey;
auto maxKeyFieldName = RangeDeletionTask::kRangeFieldName + "." + ChunkRange::kMaxKey;
Status indexStatus = createIndexOnConfigCollection(
@@ -980,15 +994,24 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
"shard's first transition to primary"));
}
- // Note, these must be done after the configOpTime is recovered via
- // ShardingStateRecovery::recover above, because they may trigger filtering metadata
- // refreshes which should use the recovered configOpTime.
- migrationutil::resubmitRangeDeletionsOnStepUp(_service);
- migrationutil::resumeMigrationCoordinationsOnStepUp(opCtx);
- migrationutil::resumeMigrationRecipientsOnStepUp(opCtx);
-
- const bool scheduleAsyncRefresh = true;
- resharding::clearFilteringMetadata(opCtx, scheduleAsyncRefresh);
+ // Create indexes in config.shard.indexes if needed.
+ indexStatus = sharding_util::createGlobalIndexesIndexes(opCtx);
+ if (!indexStatus.isOK()) {
+ // If the node is shutting down or it lost quorum just as it was becoming primary,
+ // don't run the sharding onStepUp machinery. The onStepDown counterpart to these
+ // methods is already idempotent, so the machinery will remain in the stepped down
+ // state.
+ if (ErrorCodes::isShutdownError(indexStatus.code()) ||
+ ErrorCodes::isNotPrimaryError(indexStatus.code())) {
+ return;
+ }
+ fassertFailedWithStatus(
+ 6280501,
+ indexStatus.withContext(str::stream()
+ << "Failed to create index on "
+ << NamespaceString::kShardsIndexCatalogNamespace
+ << " on shard's first transition to primary"));
+ }
} else { // unsharded
if (auto validator = LogicalTimeValidator::get(_service)) {
validator->enableKeyGenerator(opCtx, true);
diff --git a/src/mongo/db/s/config/index_on_config.cpp b/src/mongo/db/s/config/index_on_config.cpp
index 7a3560325bd..11daeb3d6cd 100644
--- a/src/mongo/db/s/config/index_on_config.cpp
+++ b/src/mongo/db/s/config/index_on_config.cpp
@@ -33,6 +33,7 @@
#include "mongo/db/catalog_raii.h"
#include "mongo/db/concurrency/exception_util.h"
#include "mongo/db/index_builds_coordinator.h"
+#include "mongo/db/s/sharding_util.h"
#include "mongo/logv2/log.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
@@ -45,69 +46,7 @@ Status createIndexOnConfigCollection(OperationContext* opCtx,
bool unique) {
invariant(ns.db() == NamespaceString::kConfigDb || ns.db() == NamespaceString::kAdminDb);
- try {
- // TODO SERVER-50983: Create abstraction for creating collection when using
- // AutoGetCollection
- AutoGetCollection autoColl(opCtx, ns, MODE_X);
- const Collection* collection = autoColl.getCollection().get();
- if (!collection) {
- CollectionOptions options;
- options.uuid = UUID::gen();
- writeConflictRetry(opCtx, "createIndexOnConfigCollection", ns.ns(), [&] {
- WriteUnitOfWork wunit(opCtx);
- auto db = autoColl.ensureDbExists(opCtx);
- collection = db->createCollection(opCtx, ns, options);
- invariant(collection,
- str::stream() << "Failed to create collection " << ns.ns()
- << " in config database for indexes: " << keys);
- wunit.commit();
- });
- }
- auto indexCatalog = collection->getIndexCatalog();
- IndexSpec index;
- index.addKeys(keys);
- index.unique(unique);
- index.version(int(IndexDescriptor::kLatestIndexVersion));
- auto removeIndexBuildsToo = false;
- auto indexSpecs = indexCatalog->removeExistingIndexes(
- opCtx,
- CollectionPtr(collection, CollectionPtr::NoYieldTag{}),
- uassertStatusOK(
- collection->addCollationDefaultsToIndexSpecsForCreate(opCtx, {index.toBSON()})),
- removeIndexBuildsToo);
-
- if (indexSpecs.empty()) {
- return Status::OK();
- }
-
- auto fromMigrate = false;
- if (!collection->isEmpty(opCtx)) {
- // We typically create indexes on config/admin collections for sharding while setting up
- // a sharded cluster, so we do not expect to see data in the collection.
- // Therefore, it is ok to log this index build.
- const auto& indexSpec = indexSpecs[0];
- LOGV2(5173300,
- "Creating index on sharding collection with existing data",
- logAttrs(ns),
- "uuid"_attr = collection->uuid(),
- "index"_attr = indexSpec);
- auto indexConstraints = IndexBuildsManager::IndexConstraints::kEnforce;
- IndexBuildsCoordinator::get(opCtx)->createIndex(
- opCtx, collection->uuid(), indexSpec, indexConstraints, fromMigrate);
- } else {
- writeConflictRetry(opCtx, "createIndexOnConfigCollection", ns.ns(), [&] {
- WriteUnitOfWork wunit(opCtx);
- CollectionWriter collWriter(opCtx, collection->uuid());
- IndexBuildsCoordinator::get(opCtx)->createIndexesOnEmptyCollection(
- opCtx, collWriter, indexSpecs, fromMigrate);
- wunit.commit();
- });
- }
- } catch (const DBException& e) {
- return e.toStatus();
- }
-
- return Status::OK();
+ return sharding_util::createIndexOnCollection(opCtx, ns, keys, unique);
}
} // namespace mongo
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index 9c412a3581c..3739fbaf277 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -62,6 +62,7 @@
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/database_version.h"
#include "mongo/s/grid.h"
+#include "mongo/s/sharding_feature_flags_gen.h"
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/transport/service_entry_point.h"
@@ -474,6 +475,14 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) {
return result.withContext("couldn't create ns_1_tag_1 index on config db");
}
+ if (feature_flags::gGlobalIndexesShardingCatalog.isEnabled(
+ serverGlobalParams.featureCompatibility)) {
+ result = sharding_util::createGlobalIndexesIndexes(opCtx);
+ if (!result.isOK()) {
+ return result;
+ }
+ }
+
return Status::OK();
}
diff --git a/src/mongo/db/s/sharding_util.cpp b/src/mongo/db/s/sharding_util.cpp
index c082038d714..ea4e1ca7d69 100644
--- a/src/mongo/db/s/sharding_util.cpp
+++ b/src/mongo/db/s/sharding_util.cpp
@@ -32,14 +32,19 @@
#include <fmt/format.h>
+#include "mongo/db/catalog/index_builds_manager.h"
+#include "mongo/db/catalog_raii.h"
#include "mongo/db/commands.h"
+#include "mongo/db/concurrency/exception_util.h"
+#include "mongo/db/index_builds_coordinator.h"
#include "mongo/logv2/log.h"
+#include "mongo/s/catalog/type_index_catalog_gen.h"
#include "mongo/s/request_types/flush_routing_table_cache_updates_gen.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
+
namespace sharding_util {
using namespace fmt::literals;
@@ -107,5 +112,105 @@ std::vector<AsyncRequestsSender::Response> sendCommandToShards(
return responses;
}
+// TODO SERVER-67593: Investigate if DBDirectClient can be used instead.
+Status createIndexOnCollection(OperationContext* opCtx,
+ const NamespaceString& ns,
+ const BSONObj& keys,
+ bool unique) {
+ try {
+ // TODO SERVER-50983: Create abstraction for creating collection when using
+ // AutoGetCollection
+ AutoGetCollection autoColl(opCtx, ns, MODE_X);
+ const Collection* collection = autoColl.getCollection().get();
+ if (!collection) {
+ CollectionOptions options;
+ options.uuid = UUID::gen();
+ writeConflictRetry(opCtx, "createIndexOnCollection", ns.ns(), [&] {
+ WriteUnitOfWork wunit(opCtx);
+ auto db = autoColl.ensureDbExists(opCtx);
+ collection = db->createCollection(opCtx, ns, options);
+ invariant(collection,
+ str::stream() << "Failed to create collection " << ns.ns()
+ << " for indexes: " << keys);
+ wunit.commit();
+ });
+ }
+ auto indexCatalog = collection->getIndexCatalog();
+ IndexSpec index;
+ index.addKeys(keys);
+ index.unique(unique);
+ index.version(int(IndexDescriptor::kLatestIndexVersion));
+ auto removeIndexBuildsToo = false;
+ auto indexSpecs = indexCatalog->removeExistingIndexes(
+ opCtx,
+ CollectionPtr(collection, CollectionPtr::NoYieldTag{}),
+ uassertStatusOK(
+ collection->addCollationDefaultsToIndexSpecsForCreate(opCtx, {index.toBSON()})),
+ removeIndexBuildsToo);
+
+ if (indexSpecs.empty()) {
+ return Status::OK();
+ }
+
+ auto fromMigrate = false;
+ if (!collection->isEmpty(opCtx)) {
+ // We typically create indexes on config/admin collections for sharding while setting up
+ // a sharded cluster, so we do not expect to see data in the collection.
+ // Therefore, it is ok to log this index build.
+ const auto& indexSpec = indexSpecs[0];
+ LOGV2(5173300,
+ "Creating index on sharding collection with existing data",
+ logAttrs(ns),
+ "uuid"_attr = collection->uuid(),
+ "index"_attr = indexSpec);
+ auto indexConstraints = IndexBuildsManager::IndexConstraints::kEnforce;
+ IndexBuildsCoordinator::get(opCtx)->createIndex(
+ opCtx, collection->uuid(), indexSpec, indexConstraints, fromMigrate);
+ } else {
+ writeConflictRetry(opCtx, "createIndexOnConfigCollection", ns.ns(), [&] {
+ WriteUnitOfWork wunit(opCtx);
+ CollectionWriter collWriter(opCtx, collection->uuid());
+ IndexBuildsCoordinator::get(opCtx)->createIndexesOnEmptyCollection(
+ opCtx, collWriter, indexSpecs, fromMigrate);
+ wunit.commit();
+ });
+ }
+ } catch (const DBException& e) {
+ return e.toStatus();
+ }
+
+ return Status::OK();
+}
+
+Status createGlobalIndexesIndexes(OperationContext* opCtx) {
+ bool unique = true;
+ if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+ auto result =
+ createIndexOnCollection(opCtx,
+ NamespaceString::kConfigsvrIndexCatalogNamespace,
+ BSON(IndexCatalogType::kCollectionUUIDFieldName
+ << 1 << IndexCatalogType::kLastModFieldName << 1),
+ !unique);
+ if (!result.isOK()) {
+ return result.withContext(str::stream()
+ << "couldn't create collectionUUID_1_lastmod_1 index on "
+ << NamespaceString::kConfigsvrIndexCatalogNamespace);
+ }
+ } else {
+ auto result =
+ createIndexOnCollection(opCtx,
+ NamespaceString::kShardsIndexCatalogNamespace,
+ BSON(IndexCatalogType::kCollectionUUIDFieldName
+ << 1 << IndexCatalogType::kLastModFieldName << 1),
+ !unique);
+ if (!result.isOK()) {
+ return result.withContext(str::stream()
+ << "couldn't create collectionUUID_1_lastmod_1 index on "
+ << NamespaceString::kShardsIndexCatalogNamespace);
+ }
+ }
+ return Status::OK();
+}
+
} // namespace sharding_util
} // namespace mongo
diff --git a/src/mongo/db/s/sharding_util.h b/src/mongo/db/s/sharding_util.h
index 783c6703138..370f3f8ad75 100644
--- a/src/mongo/db/s/sharding_util.h
+++ b/src/mongo/db/s/sharding_util.h
@@ -61,5 +61,18 @@ std::vector<AsyncRequestsSender::Response> sendCommandToShards(
const std::shared_ptr<executor::TaskExecutor>& executor,
bool throwOnError = true);
+/**
+ * Creates the necessary indexes for the globalIndexes collections.
+ */
+Status createGlobalIndexesIndexes(OperationContext* opCtx);
+
+/**
+ * Helper function to create an index on a collection locally.
+ */
+Status createIndexOnCollection(OperationContext* opCtx,
+ const NamespaceString& ns,
+ const BSONObj& keys,
+ bool unique);
+
} // namespace sharding_util
} // namespace mongo
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 80cef8dfc81..5030281ee2d 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -174,6 +174,7 @@ env.Library(
'catalog/type_collection.idl',
'catalog/type_config_version.cpp',
'catalog/type_database.idl',
+ 'catalog/type_index_catalog.idl',
'catalog/type_mongos.cpp',
'catalog/type_shard.cpp',
'catalog/type_tags.cpp',
diff --git a/src/mongo/s/catalog/type_index_catalog.idl b/src/mongo/s/catalog/type_index_catalog.idl
new file mode 100644
index 00000000000..930c5c23548
--- /dev/null
+++ b/src/mongo/s/catalog/type_index_catalog.idl
@@ -0,0 +1,60 @@
+# Copyright(C) 2022 - present MongoDB, Inc.
+#
+# This program is free software : you can redistribute it and / or modify
+# it under the terms of the Server Side Public License, version 1,
+# as published by MongoDB, Inc.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
+# Server Side Public License for more details.
+#
+# You should have received a copy of the Server Side Public License
+# along with this program.If not, see
+# < http://www.mongodb.com/licensing/server-side-public-license>.
+#
+# As a special exception, the copyright holders give permission to link the
+# code of portions of this program with the OpenSSL library under certain
+# conditions as described in each individual source file and distribute
+# linked combinations including the program with the OpenSSL library.You
+# must comply with the Server Side Public License in all respects for
+# all of the code used other than as permitted herein.If you modify file(s)
+# with this exception, you may extend this exception to your version of the
+# file(s), but you are not obligated to do so.If you do not wish to do so,
+# delete this exception statement from your version.If you delete this
+# exception statement from all source files in the program, then also delete
+# it in the license file.
+#
+
+global:
+ cpp_namespace: "mongo"
+
+imports:
+ - "mongo/idl/basic_types.idl"
+
+structs:
+ IndexCatalogType:
+ description: "Represents the layout and contents of documents contained in the
+ config.shard.indexes and config.csrs.indexes collections"
+ strict: false
+ fields:
+ name:
+ type: string
+ description: "Index name"
+ keyPattern:
+ type: object
+ description: "Index field description"
+ options:
+ type: object
+ description: "Index options"
+ lastMod:
+ type: timestamp
+ description: "Index version"
+ collectionUUID:
+ type: uuid
+ description: "UUID of the collection"
+ indexCollectionUUID:
+ type: uuid
+ description: "UUID of the collection which backs this index. If there
+ is no indexCollectionUUID, then the index is not global."
+ optional: true \ No newline at end of file