Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/commands/set_feature_compatibility_version_command.cpp |  28
-rw-r--r--  src/mongo/db/namespace_string.cpp                                   |   6
-rw-r--r--  src/mongo/db/namespace_string.h                                     |   8
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp   |  87
-rw-r--r--  src/mongo/db/s/config/index_on_config.cpp                           |  65
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.cpp                  |   9
-rw-r--r--  src/mongo/db/s/sharding_util.cpp                                    | 107
-rw-r--r--  src/mongo/db/s/sharding_util.h                                      |  13
8 files changed, 226 insertions(+), 97 deletions(-)
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index 0a996e29646..7fc9aa12e4c 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -74,6 +74,7 @@
#include "mongo/db/s/resharding/resharding_coordinator_service.h"
#include "mongo/db/s/resharding/resharding_donor_recipient_common.h"
#include "mongo/db/s/sharding_ddl_coordinator_service.h"
+#include "mongo/db/s/sharding_util.h"
#include "mongo/db/s/transaction_coordinator_service.h"
#include "mongo/db/server_feature_flags_gen.h"
#include "mongo/db/server_options.h"
@@ -530,6 +531,11 @@ private:
createChangeStreamPreImagesCollection(opCtx);
}
+ // TODO SERVER-67392: Remove once FCV 7.0 becomes last-lts.
+ if (feature_flags::gGlobalIndexesShardingCatalog.isEnabledOnVersion(requestedVersion)) {
+ uassertStatusOK(sharding_util::createGlobalIndexesIndexes(opCtx));
+ }
+
hangWhileUpgrading.pauseWhileSet(opCtx);
}
@@ -670,6 +676,28 @@ private:
opCtx, DDLCoordinatorTypeEnum::kReshardCollection);
}
+ // TODO SERVER-67392: Remove when 7.0 branches out.
+ if (requestedVersion == GenericFCV::kLastLTS) {
+ NamespaceString indexCatalogNss;
+ if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+ indexCatalogNss = NamespaceString::kConfigsvrIndexCatalogNamespace;
+ } else {
+ indexCatalogNss = NamespaceString::kShardsIndexCatalogNamespace;
+ }
+ LOGV2(6280502, "Dropping global indexes collection", "nss"_attr = indexCatalogNss);
+ DropReply dropReply;
+ const auto deletionStatus =
+ dropCollection(opCtx,
+ indexCatalogNss,
+ &dropReply,
+ DropCollectionSystemCollectionMode::kAllowSystemCollectionDrops);
+ uassert(deletionStatus.code(),
+ str::stream() << "Failed to drop " << indexCatalogNss
+ << causedBy(deletionStatus.reason()),
+ deletionStatus.isOK() ||
+ deletionStatus.code() == ErrorCodes::NamespaceNotFound);
+ }
+
uassert(ErrorCodes::Error(549181),
"Failing downgrade due to 'failDowngrading' failpoint set",
!failDowngrading.shouldFail());
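The downgrade hunk above tolerates a missing collection so that a retried
setFeatureCompatibilityVersion run stays idempotent. A minimal sketch of that
pattern, reusing the same calls as the hunk; the wrapper name
dropIndexCatalogOnDowngrade is hypothetical, not part of the patch:

    Status dropIndexCatalogOnDowngrade(OperationContext* opCtx,
                                       const NamespaceString& indexCatalogNss) {
        // Drop the index catalog collection, allowing system-collection drops.
        DropReply dropReply;
        const auto status =
            dropCollection(opCtx,
                           indexCatalogNss,
                           &dropReply,
                           DropCollectionSystemCollectionMode::kAllowSystemCollectionDrops);
        // NamespaceNotFound means an earlier attempt already dropped it; treat as success.
        if (status.isOK() || status.code() == ErrorCodes::NamespaceNotFound) {
            return Status::OK();
        }
        return status;
    }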
diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp
index 633ab3ce8ce..399bebab90d 100644
--- a/src/mongo/db/namespace_string.cpp
+++ b/src/mongo/db/namespace_string.cpp
@@ -170,6 +170,12 @@ const NamespaceString NamespaceString::kClusterParametersNamespace(NamespaceStri
const NamespaceString NamespaceString::kConfigsvrShardsNamespace(NamespaceString::kConfigDb,
"shards");
+const NamespaceString NamespaceString::kConfigsvrIndexCatalogNamespace(NamespaceString::kConfigDb,
+ "csrs.indexes");
+
+const NamespaceString NamespaceString::kShardsIndexCatalogNamespace(NamespaceString::kConfigDb,
+ "shard.indexes");
+
NamespaceString NamespaceString::parseFromStringExpectTenantIdInMultitenancyMode(StringData ns) {
if (!gMultitenancySupport) {
return NamespaceString(ns, boost::none);
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index 91dbcd144b8..efc7edc372c 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -227,9 +227,15 @@ public:
// Namespace used for storing cluster wide parameters.
static const NamespaceString kClusterParametersNamespace;
- // Namespace used for storing the list of shards on the CSRS
+ // Namespace used for storing the list of shards on the CSRS.
static const NamespaceString kConfigsvrShardsNamespace;
+ // Namespace used for storing the index catalog on the CSRS.
+ static const NamespaceString kConfigsvrIndexCatalogNamespace;
+
+ // Namespace used for storing the index catalog on the shards.
+ static const NamespaceString kShardsIndexCatalogNamespace;
+
/**
* Constructs an empty NamespaceString.
*/
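Since kConfigDb is "config", the two new constants resolve to
"config.csrs.indexes" (the authoritative copy on the CSRS) and
"config.shard.indexes" (the per-shard copy). An illustrative check, not part
of the patch:

    invariant(NamespaceString::kConfigsvrIndexCatalogNamespace.ns() ==
              "config.csrs.indexes");
    invariant(NamespaceString::kShardsIndexCatalogNamespace.ns() ==
              "config.shard.indexes");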
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index de903787a74..4714f43c3c9 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -90,6 +90,7 @@
#include "mongo/db/s/shard_local.h"
#include "mongo/db/s/sharding_initialization_mongod.h"
#include "mongo/db/s/sharding_state_recovery.h"
+#include "mongo/db/s/sharding_util.h"
#include "mongo/db/s/transaction_coordinator_service.h"
#include "mongo/db/server_options.h"
#include "mongo/db/service_context.h"
@@ -933,30 +934,43 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
PeriodicShardedIndexConsistencyChecker::get(_service).onStepUp(_service);
TransactionCoordinatorService::get(_service)->onStepUp(opCtx);
- } else if (ShardingState::get(opCtx)->enabled()) {
- Status status = ShardingStateRecovery::recover(opCtx);
- VectorClockMutable::get(opCtx)->recoverDirect(opCtx);
-
- // If the node is shutting down or it lost quorum just as it was becoming primary, don't
- // run the sharding onStepUp machinery. The onStepDown counterpart to these methods is
- // already idempotent, so the machinery will remain in the stepped down state.
- if (ErrorCodes::isShutdownError(status.code()) ||
- ErrorCodes::isNotPrimaryError(status.code())) {
- return;
+ } else if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+ if (ShardingState::get(opCtx)->enabled()) {
+ Status status = ShardingStateRecovery::recover(opCtx);
+ VectorClockMutable::get(opCtx)->recoverDirect(opCtx);
+
+ // If the node is shutting down or it lost quorum just as it was becoming primary, don't
+ // run the sharding onStepUp machinery. The onStepDown counterpart to these methods is
+ // already idempotent, so the machinery will remain in the stepped down state.
+ if (ErrorCodes::isShutdownError(status.code()) ||
+ ErrorCodes::isNotPrimaryError(status.code())) {
+ return;
+ }
+ fassert(40107, status);
+
+ CatalogCacheLoader::get(_service).onStepUp();
+ ChunkSplitter::get(_service).onStepUp();
+ PeriodicBalancerConfigRefresher::get(_service).onStepUp(_service);
+ TransactionCoordinatorService::get(_service)->onStepUp(opCtx);
+
+ const auto configsvrConnStr =
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->getConnString();
+ ShardingInitializationMongoD::get(opCtx)->updateShardIdentityConfigString(
+ opCtx, configsvrConnStr);
+
+ // Note, these must be done after the configOpTime is recovered via
+ // ShardingStateRecovery::recover above, because they may trigger filtering metadata
+ // refreshes which should use the recovered configOpTime.
+ migrationutil::resubmitRangeDeletionsOnStepUp(_service);
+ migrationutil::resumeMigrationCoordinationsOnStepUp(opCtx);
+ migrationutil::resumeMigrationRecipientsOnStepUp(opCtx);
+
+ const bool scheduleAsyncRefresh = true;
+ resharding::clearFilteringMetadata(opCtx, scheduleAsyncRefresh);
}
- fassert(40107, status);
-
- const auto configsvrConnStr =
- Grid::get(opCtx)->shardRegistry()->getConfigShard()->getConnString();
- ShardingInitializationMongoD::get(opCtx)->updateShardIdentityConfigString(opCtx,
- configsvrConnStr);
-
- CatalogCacheLoader::get(_service).onStepUp();
- ChunkSplitter::get(_service).onStepUp();
- PeriodicBalancerConfigRefresher::get(_service).onStepUp(_service);
- TransactionCoordinatorService::get(_service)->onStepUp(opCtx);
-
- // Create uuid index on config.rangeDeletions if needed
+ // The code above runs only when the node steps up after having stepped down; the code
+ // below must also run at startup, when the ShardingState enabled check can still fail
+ // on shards. Create the uuid index on config.rangeDeletions if needed.
auto minKeyFieldName = RangeDeletionTask::kRangeFieldName + "." + ChunkRange::kMinKey;
auto maxKeyFieldName = RangeDeletionTask::kRangeFieldName + "." + ChunkRange::kMaxKey;
Status indexStatus = createIndexOnConfigCollection(
@@ -980,15 +994,24 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
"shard's first transition to primary"));
}
- // Note, these must be done after the configOpTime is recovered via
- // ShardingStateRecovery::recover above, because they may trigger filtering metadata
- // refreshes which should use the recovered configOpTime.
- migrationutil::resubmitRangeDeletionsOnStepUp(_service);
- migrationutil::resumeMigrationCoordinationsOnStepUp(opCtx);
- migrationutil::resumeMigrationRecipientsOnStepUp(opCtx);
-
- const bool scheduleAsyncRefresh = true;
- resharding::clearFilteringMetadata(opCtx, scheduleAsyncRefresh);
+ // Create indexes in config.shard.indexes if needed.
+ indexStatus = sharding_util::createGlobalIndexesIndexes(opCtx);
+ if (!indexStatus.isOK()) {
+ // If the node is shutting down or it lost quorum just as it was becoming primary,
+ // don't run the sharding onStepUp machinery. The onStepDown counterpart to these
+ // methods is already idempotent, so the machinery will remain in the stepped down
+ // state.
+ if (ErrorCodes::isShutdownError(indexStatus.code()) ||
+ ErrorCodes::isNotPrimaryError(indexStatus.code())) {
+ return;
+ }
+ fassertFailedWithStatus(
+ 6280501,
+ indexStatus.withContext(str::stream()
+ << "Failed to create index on "
+ << NamespaceString::kShardsIndexCatalogNamespace
+ << " on shard's first transition to primary"));
+ }
} else { // unsharded
if (auto validator = LogicalTimeValidator::get(_service)) {
validator->enableKeyGenerator(opCtx, true);
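The hook now applies the same error policy twice: step-up work interrupted by
a shutdown or a loss of primaryship ends quietly, while any other failure
takes the node down. A sketch of that policy as a standalone helper; the name
shouldAbortStepUp and the fassert code are hypothetical:

    // Returns true when the caller should abandon the remaining step-up work.
    bool shouldAbortStepUp(const Status& status) {
        // Shutting down or stepping down again mid-step-up is expected; the
        // onStepDown machinery is idempotent, so the hook can simply stop.
        if (ErrorCodes::isShutdownError(status.code()) ||
            ErrorCodes::isNotPrimaryError(status.code())) {
            return true;
        }
        fassert(1234567, status);  // hypothetical code; fatal on any other error
        return false;
    }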
diff --git a/src/mongo/db/s/config/index_on_config.cpp b/src/mongo/db/s/config/index_on_config.cpp
index 7a3560325bd..11daeb3d6cd 100644
--- a/src/mongo/db/s/config/index_on_config.cpp
+++ b/src/mongo/db/s/config/index_on_config.cpp
@@ -33,6 +33,7 @@
#include "mongo/db/catalog_raii.h"
#include "mongo/db/concurrency/exception_util.h"
#include "mongo/db/index_builds_coordinator.h"
+#include "mongo/db/s/sharding_util.h"
#include "mongo/logv2/log.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
@@ -45,69 +46,7 @@ Status createIndexOnConfigCollection(OperationContext* opCtx,
bool unique) {
invariant(ns.db() == NamespaceString::kConfigDb || ns.db() == NamespaceString::kAdminDb);
- try {
- // TODO SERVER-50983: Create abstraction for creating collection when using
- // AutoGetCollection
- AutoGetCollection autoColl(opCtx, ns, MODE_X);
- const Collection* collection = autoColl.getCollection().get();
- if (!collection) {
- CollectionOptions options;
- options.uuid = UUID::gen();
- writeConflictRetry(opCtx, "createIndexOnConfigCollection", ns.ns(), [&] {
- WriteUnitOfWork wunit(opCtx);
- auto db = autoColl.ensureDbExists(opCtx);
- collection = db->createCollection(opCtx, ns, options);
- invariant(collection,
- str::stream() << "Failed to create collection " << ns.ns()
- << " in config database for indexes: " << keys);
- wunit.commit();
- });
- }
- auto indexCatalog = collection->getIndexCatalog();
- IndexSpec index;
- index.addKeys(keys);
- index.unique(unique);
- index.version(int(IndexDescriptor::kLatestIndexVersion));
- auto removeIndexBuildsToo = false;
- auto indexSpecs = indexCatalog->removeExistingIndexes(
- opCtx,
- CollectionPtr(collection, CollectionPtr::NoYieldTag{}),
- uassertStatusOK(
- collection->addCollationDefaultsToIndexSpecsForCreate(opCtx, {index.toBSON()})),
- removeIndexBuildsToo);
-
- if (indexSpecs.empty()) {
- return Status::OK();
- }
-
- auto fromMigrate = false;
- if (!collection->isEmpty(opCtx)) {
- // We typically create indexes on config/admin collections for sharding while setting up
- // a sharded cluster, so we do not expect to see data in the collection.
- // Therefore, it is ok to log this index build.
- const auto& indexSpec = indexSpecs[0];
- LOGV2(5173300,
- "Creating index on sharding collection with existing data",
- logAttrs(ns),
- "uuid"_attr = collection->uuid(),
- "index"_attr = indexSpec);
- auto indexConstraints = IndexBuildsManager::IndexConstraints::kEnforce;
- IndexBuildsCoordinator::get(opCtx)->createIndex(
- opCtx, collection->uuid(), indexSpec, indexConstraints, fromMigrate);
- } else {
- writeConflictRetry(opCtx, "createIndexOnConfigCollection", ns.ns(), [&] {
- WriteUnitOfWork wunit(opCtx);
- CollectionWriter collWriter(opCtx, collection->uuid());
- IndexBuildsCoordinator::get(opCtx)->createIndexesOnEmptyCollection(
- opCtx, collWriter, indexSpecs, fromMigrate);
- wunit.commit();
- });
- }
- } catch (const DBException& e) {
- return e.toStatus();
- }
-
- return Status::OK();
+ return sharding_util::createIndexOnCollection(opCtx, ns, keys, unique);
}
} // namespace mongo
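createIndexOnConfigCollection keeps its signature and its config/admin-database
invariant and now simply forwards to the shared helper. An illustrative call,
with a hypothetical namespace and key pattern:

    Status status = createIndexOnConfigCollection(
        opCtx,
        NamespaceString(NamespaceString::kConfigDb, "rangeDeletions"),
        BSON("collectionUuid" << 1),
        false /* unique */);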
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index 9c412a3581c..3739fbaf277 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -62,6 +62,7 @@
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/database_version.h"
#include "mongo/s/grid.h"
+#include "mongo/s/sharding_feature_flags_gen.h"
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/transport/service_entry_point.h"
@@ -474,6 +475,14 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) {
return result.withContext("couldn't create ns_1_tag_1 index on config db");
}
+ if (feature_flags::gGlobalIndexesShardingCatalog.isEnabled(
+ serverGlobalParams.featureCompatibility)) {
+ result = sharding_util::createGlobalIndexesIndexes(opCtx);
+ if (!result.isOK()) {
+ return result;
+ }
+ }
+
return Status::OK();
}
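Note that this patch gates on the feature flag in two different ways: here, at
_initConfigIndexes time, the currently active FCV is consulted, whereas the
setFeatureCompatibilityVersion command checks the version being transitioned
to. Both calls as they appear in the patch:

    // Config server initialization: gate on the active FCV.
    if (feature_flags::gGlobalIndexesShardingCatalog.isEnabled(
            serverGlobalParams.featureCompatibility)) {
        uassertStatusOK(sharding_util::createGlobalIndexesIndexes(opCtx));
    }

    // setFeatureCompatibilityVersion: gate on the requested target version.
    if (feature_flags::gGlobalIndexesShardingCatalog.isEnabledOnVersion(requestedVersion)) {
        uassertStatusOK(sharding_util::createGlobalIndexesIndexes(opCtx));
    }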
diff --git a/src/mongo/db/s/sharding_util.cpp b/src/mongo/db/s/sharding_util.cpp
index c082038d714..ea4e1ca7d69 100644
--- a/src/mongo/db/s/sharding_util.cpp
+++ b/src/mongo/db/s/sharding_util.cpp
@@ -32,14 +32,19 @@
#include <fmt/format.h>
+#include "mongo/db/catalog/index_builds_manager.h"
+#include "mongo/db/catalog_raii.h"
#include "mongo/db/commands.h"
+#include "mongo/db/concurrency/exception_util.h"
+#include "mongo/db/index_builds_coordinator.h"
#include "mongo/logv2/log.h"
+#include "mongo/s/catalog/type_index_catalog_gen.h"
#include "mongo/s/request_types/flush_routing_table_cache_updates_gen.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
+
namespace sharding_util {
using namespace fmt::literals;
@@ -107,5 +112,105 @@ std::vector<AsyncRequestsSender::Response> sendCommandToShards(
return responses;
}
+// TODO SERVER-67593: Investigate if DBDirectClient can be used instead.
+Status createIndexOnCollection(OperationContext* opCtx,
+ const NamespaceString& ns,
+ const BSONObj& keys,
+ bool unique) {
+ try {
+ // TODO SERVER-50983: Create abstraction for creating collection when using
+ // AutoGetCollection
+ AutoGetCollection autoColl(opCtx, ns, MODE_X);
+ const Collection* collection = autoColl.getCollection().get();
+ if (!collection) {
+ CollectionOptions options;
+ options.uuid = UUID::gen();
+ writeConflictRetry(opCtx, "createIndexOnCollection", ns.ns(), [&] {
+ WriteUnitOfWork wunit(opCtx);
+ auto db = autoColl.ensureDbExists(opCtx);
+ collection = db->createCollection(opCtx, ns, options);
+ invariant(collection,
+ str::stream() << "Failed to create collection " << ns.ns()
+ << " for indexes: " << keys);
+ wunit.commit();
+ });
+ }
+ auto indexCatalog = collection->getIndexCatalog();
+ IndexSpec index;
+ index.addKeys(keys);
+ index.unique(unique);
+ index.version(int(IndexDescriptor::kLatestIndexVersion));
+ auto removeIndexBuildsToo = false;
+ auto indexSpecs = indexCatalog->removeExistingIndexes(
+ opCtx,
+ CollectionPtr(collection, CollectionPtr::NoYieldTag{}),
+ uassertStatusOK(
+ collection->addCollationDefaultsToIndexSpecsForCreate(opCtx, {index.toBSON()})),
+ removeIndexBuildsToo);
+
+ if (indexSpecs.empty()) {
+ return Status::OK();
+ }
+
+ auto fromMigrate = false;
+ if (!collection->isEmpty(opCtx)) {
+ // We typically create indexes on config/admin collections for sharding while setting up
+ // a sharded cluster, so we do not expect to see data in the collection.
+ // Therefore, it is ok to log this index build.
+ const auto& indexSpec = indexSpecs[0];
+ LOGV2(5173300,
+ "Creating index on sharding collection with existing data",
+ logAttrs(ns),
+ "uuid"_attr = collection->uuid(),
+ "index"_attr = indexSpec);
+ auto indexConstraints = IndexBuildsManager::IndexConstraints::kEnforce;
+ IndexBuildsCoordinator::get(opCtx)->createIndex(
+ opCtx, collection->uuid(), indexSpec, indexConstraints, fromMigrate);
+ } else {
+ writeConflictRetry(opCtx, "createIndexOnCollection", ns.ns(), [&] {
+ WriteUnitOfWork wunit(opCtx);
+ CollectionWriter collWriter(opCtx, collection->uuid());
+ IndexBuildsCoordinator::get(opCtx)->createIndexesOnEmptyCollection(
+ opCtx, collWriter, indexSpecs, fromMigrate);
+ wunit.commit();
+ });
+ }
+ } catch (const DBException& e) {
+ return e.toStatus();
+ }
+
+ return Status::OK();
+}
+
+Status createGlobalIndexesIndexes(OperationContext* opCtx) {
+ bool unique = true;
+ if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+ auto result =
+ createIndexOnCollection(opCtx,
+ NamespaceString::kConfigsvrIndexCatalogNamespace,
+ BSON(IndexCatalogType::kCollectionUUIDFieldName
+ << 1 << IndexCatalogType::kLastModFieldName << 1),
+ !unique);
+ if (!result.isOK()) {
+ return result.withContext(str::stream()
+ << "couldn't create collectionUUID_1_lastmod_1 index on "
+ << NamespaceString::kConfigsvrIndexCatalogNamespace);
+ }
+ } else {
+ auto result =
+ createIndexOnCollection(opCtx,
+ NamespaceString::kShardsIndexCatalogNamespace,
+ BSON(IndexCatalogType::kCollectionUUIDFieldName
+ << 1 << IndexCatalogType::kLastModFieldName << 1),
+ !unique);
+ if (!result.isOK()) {
+ return result.withContext(str::stream()
+ << "couldn't create collectionUUID_1_lastmod_1 index on "
+ << NamespaceString::kShardsIndexCatalogNamespace);
+ }
+ }
+ return Status::OK();
+}
+
} // namespace sharding_util
} // namespace mongo
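Despite the "bool unique = true; ... !unique" construction, the index created
on either catalog collection is non-unique. The config-server branch above,
spelled out for reference:

    const BSONObj keys = BSON(IndexCatalogType::kCollectionUUIDFieldName
                              << 1 << IndexCatalogType::kLastModFieldName << 1);
    // {collectionUUID: 1, lastmod: 1} with unique == false.
    Status status = sharding_util::createIndexOnCollection(
        opCtx, NamespaceString::kConfigsvrIndexCatalogNamespace, keys, false);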
diff --git a/src/mongo/db/s/sharding_util.h b/src/mongo/db/s/sharding_util.h
index 783c6703138..370f3f8ad75 100644
--- a/src/mongo/db/s/sharding_util.h
+++ b/src/mongo/db/s/sharding_util.h
@@ -61,5 +61,18 @@ std::vector<AsyncRequestsSender::Response> sendCommandToShards(
const std::shared_ptr<executor::TaskExecutor>& executor,
bool throwOnError = true);
+/**
+ * Creates the necessary indexes for the global indexes catalog collections.
+ */
+Status createGlobalIndexesIndexes(OperationContext* opCtx);
+
+/**
+ * Helper function to create an index on a collection locally.
+ */
+Status createIndexOnCollection(OperationContext* opCtx,
+ const NamespaceString& ns,
+ const BSONObj& keys,
+ bool unique);
+
} // namespace sharding_util
} // namespace mongo