author     Paolo Polato <paolo.polato@mongodb.com>           2023-05-08 14:32:40 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2023-05-08 15:56:35 +0000
commit     cc76b1e01aa0345bd7f06977c7365805e84d8c1e (patch)
tree       db81baefdb282bf362fc9067e16bbdaf6cb7aba9 /src/mongo/db/s/config
parent     0ee1538c37cf7004574b9e4a084e7bdc7bf6af96 (diff)
download   mongo-cc76b1e01aa0345bd7f06977c7365805e84d8c1e.tar.gz
SERVER-76045 Introduce the new resetPlacementHistory server command
Diffstat (limited to 'src/mongo/db/s/config')
7 files changed, 351 insertions, 117 deletions
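
The commit message above is terse, so a rough model of the data this change rebuilds may help when reading the diff. `initializePlacementHistory()` regenerates `config.placementHistory` from a snapshot of the sharding catalog and brackets it with two marker documents keyed by `kConfigPlacementHistoryInitializationMarker`: one at `Timestamp(0, 1)` carrying the full shard list (an approximate answer for queries that predate the rebuilt history) and one at the initialization time with an empty shard list. The sketch below is only an illustrative, standalone model of that query semantics; `PlacementHistoryModel`, `PlacementEntry`, and `getHistoricalPlacement` are invented names for the example and do not exist in the MongoDB code base.

```cpp
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Toy stand-ins for the real types; the names are illustrative only.
struct Timestamp {
    std::uint32_t secs = 0;
    std::uint32_t inc = 0;
    bool operator<(const Timestamp& o) const {
        return secs < o.secs || (secs == o.secs && inc < o.inc);
    }
};

struct PlacementEntry {
    std::string nss;                  // namespace the entry describes
    Timestamp validFrom;              // when this placement became true
    std::vector<std::string> shards;  // shards hosting the namespace at that time
};

// Models the two marker documents written at initialization time:
// - at (0, 1): all shards known at initialization, used as an approximate
//   answer for queries that predate the rebuilt history;
// - at initTime: the point from which regular entries are authoritative.
struct PlacementHistoryModel {
    Timestamp initTime;
    std::vector<std::string> allShardsAtInit;
    std::vector<PlacementEntry> entries;  // regular entries, oldest first

    // Returns the shards to target for `nss` as of `atTime`.
    std::vector<std::string> getHistoricalPlacement(const std::string& nss,
                                                    Timestamp atTime) const {
        if (atTime < initTime) {
            // Before the initialization marker the history is incomplete, so
            // the conservative answer is "every shard known at initialization".
            return allShardsAtInit;
        }
        std::optional<PlacementEntry> best;
        for (const auto& e : entries) {
            if (e.nss == nss && !(atTime < e.validFrom)) {
                best = e;  // entries are ordered, keep the most recent match
            }
        }
        return best ? best->shards : std::vector<std::string>{};
    }
};

int main() {
    PlacementHistoryModel history{{100, 0}, {"shard0", "shard1", "shard2"}};
    history.entries.push_back({"db.coll", {120, 0}, {"shard1"}});

    auto pre = history.getHistoricalPlacement("db.coll", {50, 0});    // approximate
    auto post = history.getHistoricalPlacement("db.coll", {130, 0});  // exact
    std::cout << pre.size() << " vs " << post.size() << "\n";         // prints "3 vs 1"
}
```
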
diff --git a/src/mongo/db/s/config/configsvr_get_historical_placement_info.cpp b/src/mongo/db/s/config/configsvr_get_historical_placement_info.cpp
index f6672440f6d..aecfe9d184c 100644
--- a/src/mongo/db/s/config/configsvr_get_historical_placement_info.cpp
+++ b/src/mongo/db/s/config/configsvr_get_historical_placement_info.cpp
@@ -30,7 +30,7 @@
 #include "mongo/db/auth/authorization_session.h"
 #include "mongo/db/commands.h"
 #include "mongo/db/s/config/sharding_catalog_manager.h"
-#include "mongo/s/request_types/get_historical_placement_info_gen.h"
+#include "mongo/s/request_types/placement_history_commands_gen.h"
 #include "mongo/s/sharding_feature_flags_gen.h"
diff --git a/src/mongo/db/s/config/configsvr_reset_placement_history_command.cpp b/src/mongo/db/s/config/configsvr_reset_placement_history_command.cpp
new file mode 100644
index 00000000000..0ca925fa770
--- /dev/null
+++ b/src/mongo/db/s/config/configsvr_reset_placement_history_command.cpp
@@ -0,0 +1,105 @@
+/**
+ * Copyright (C) 2023-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+
+#include "mongo/db/auth/action_type.h"
+#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/commands/feature_compatibility_version.h"
+#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/s/grid.h"
+#include "mongo/s/request_types/placement_history_commands_gen.h"
+#include "mongo/s/sharding_feature_flags_gen.h"
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand
+
+
+namespace mongo {
+namespace {
+
+class ConfigSvrResetPlacementHistoryCommand final
+    : public TypedCommand<ConfigSvrResetPlacementHistoryCommand> {
+public:
+    using Request = ConfigsvrResetPlacementHistory;
+
+    std::string help() const override {
+        return "Internal command only invokable on the config server. Do not call directly. "
" + "Reinitializes the content of config.placementHistory based on a recent snapshot of " + "the Sharding catalog."; + } + + AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { + return AllowedOnSecondary::kNever; + } + + bool adminOnly() const override { + return true; + } + + class Invocation final : public InvocationBase { + public: + using InvocationBase::InvocationBase; + + void typedRun(OperationContext* opCtx) { + uassert(ErrorCodes::IllegalOperation, + str::stream() << Request::kCommandName + << " can only be run on the config server", + serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); + + opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); + + uassert(ErrorCodes::TemporarilyUnavailable, + "feature compatibility version 7.0 or later is required to run this command", + feature_flags::gHistoricalPlacementShardingCatalog.isEnabled( + serverGlobalParams.featureCompatibility)); + + ShardingCatalogManager::get(opCtx)->initializePlacementHistory(opCtx); + } + + private: + NamespaceString ns() const override { + return NamespaceString(request().getDbName()); + } + + bool supportsWriteConcern() const override { + return true; + } + + void doCheckAuthorization(OperationContext* opCtx) const override { + uassert(ErrorCodes::Unauthorized, + "Unauthorized", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), + ActionType::internal)); + } + }; +} _cfgsvrResetPlacementHistory; + +} // namespace +} // namespace mongo diff --git a/src/mongo/db/s/config/placement_history_cleaner.cpp b/src/mongo/db/s/config/placement_history_cleaner.cpp index 2d8fded89b4..797de9ef6fd 100644 --- a/src/mongo/db/s/config/placement_history_cleaner.cpp +++ b/src/mongo/db/s/config/placement_history_cleaner.cpp @@ -98,17 +98,15 @@ PlacementHistoryCleaner* PlacementHistoryCleaner::get(OperationContext* opCtx) { } void PlacementHistoryCleaner::runOnce(Client* client, size_t minPlacementHistoryDocs) { - auto opCtx = client->makeOperationContext(); - opCtx.get()->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); + auto opCtxHolder = client->makeOperationContext(); + auto opCtx = opCtxHolder.get(); try { // Count the number of entries in the placementHistory collection; skip cleanup if below // threshold. - const auto numPlacementHistoryDocs = [&] { - PersistentTaskStore<NamespacePlacementType> store( - NamespaceString::kConfigsvrPlacementHistoryNamespace); - return store.count(opCtx.get(), BSONObj()); - }(); + PersistentTaskStore<NamespacePlacementType> store( + NamespaceString::kConfigsvrPlacementHistoryNamespace); + const auto numPlacementHistoryDocs = store.count(opCtx, BSONObj()); if (numPlacementHistoryDocs <= minPlacementHistoryDocs) { LOGV2_DEBUG(7068801, 3, "PlacementHistoryCleaner: nothing to be deleted on this round"); @@ -117,7 +115,7 @@ void PlacementHistoryCleaner::runOnce(Client* client, size_t minPlacementHistory // Get the time of the oldest op entry still persisted among the cluster shards; historical // placement entries that precede it may be safely dropped. 
-        auto earliestOplogTime = getEarliestOpLogTimestampAmongAllShards(opCtx.get());
+        auto earliestOplogTime = getEarliestOpLogTimestampAmongAllShards(opCtx);
 
         if (!earliestOplogTime) {
             LOGV2(7068802,
                   "Skipping cleanup of config.placementHistory - no earliestOplogTime could "
@@ -125,30 +123,73 @@ void PlacementHistoryCleaner::runOnce(Client* client, size_t minPlacementHistory
             return;
         }
 
-        ShardingCatalogManager::get(opCtx.get())
-            ->cleanUpPlacementHistory(opCtx.get(), *earliestOplogTime);
+        // Check the latest initialization time is not greater than the earliestOpTime.
+        // The clean-up must always move the new initialization time forward.
+        const auto match =
+            BSON(NamespacePlacementType::kNssFieldName
+                 << ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker.ns()
+                 << NamespacePlacementType::kTimestampFieldName
+                 << BSON("$gte" << earliestOplogTime->toBSON()));
+
+        if (store.count(opCtx, match) > 0) {
+            return;
+        }
+
+        ShardingCatalogManager::get(opCtx)->cleanUpPlacementHistory(opCtx, *earliestOplogTime);
     } catch (const DBException& e) {
         LOGV2(7068804, "Periodic cleanup of config.placementHistory failed", "error"_attr = e);
     }
 }
 
+void PlacementHistoryCleaner::onStepDown() {
+    _stop(true /* steppingDown*/);
+}
+
 void PlacementHistoryCleaner::onStepUpComplete(OperationContext* opCtx, long long term) {
-    auto periodicRunner = opCtx->getServiceContext()->getPeriodicRunner();
-    invariant(periodicRunner);
-
-    PeriodicRunner::PeriodicJob placementHistoryCleanerJob(
-        "PlacementHistoryCleanUpJob",
-        [](Client* client) { runOnce(client, kminPlacementHistoryEntries); },
-        kJobExecutionPeriod,
-        // TODO(SERVER-74658): Please revisit if this periodic job could be made killable.
-        false /*isKillableByStepdown*/);
-
-    _anchor = periodicRunner->makeJob(std::move(placementHistoryCleanerJob));
-    _anchor.start();
+    _start(opCtx, true /* steppingUp*/);
 }
 
-void PlacementHistoryCleaner::onStepDown() {
+
+void PlacementHistoryCleaner::pause() {
+    _stop(false /* steppingDown*/);
+}
+
+void PlacementHistoryCleaner::resume(OperationContext* opCtx) {
+    _start(opCtx, false /* steppingUp*/);
+}
+
+void PlacementHistoryCleaner::_start(OperationContext* opCtx, bool steppingUp) {
+    stdx::lock_guard<Latch> scopedLock(_mutex);
+
+    if (steppingUp) {
+        _runningAsPrimary = true;
+    }
+
+    if (_runningAsPrimary && !_anchor.isValid()) {
+        auto periodicRunner = opCtx->getServiceContext()->getPeriodicRunner();
+        invariant(periodicRunner);
+
+        PeriodicRunner::PeriodicJob placementHistoryCleanerJob(
+            "PlacementHistoryCleanUpJob",
+            [](Client* client) { runOnce(client, kminPlacementHistoryEntries); },
+            kJobExecutionPeriod,
+            // TODO(SERVER-74658): Please revisit if this periodic job could be made killable.
+            false /*isKillableByStepdown*/);
+
+        _anchor = periodicRunner->makeJob(std::move(placementHistoryCleanerJob));
+        _anchor.start();
+    }
+}
+
+void PlacementHistoryCleaner::_stop(bool steppingDown) {
+    stdx::lock_guard<Latch> scopedLock(_mutex);
+
+    if (steppingDown) {
+        _runningAsPrimary = false;
+    }
+
     if (_anchor.isValid()) {
         _anchor.stop();
     }
 }
+
 }  // namespace mongo
diff --git a/src/mongo/db/s/config/placement_history_cleaner.h b/src/mongo/db/s/config/placement_history_cleaner.h
index 0fec9a72a4e..82e90a1111a 100644
--- a/src/mongo/db/s/config/placement_history_cleaner.h
+++ b/src/mongo/db/s/config/placement_history_cleaner.h
@@ -30,6 +30,7 @@
 #pragma once
 
 #include "mongo/db/repl/replica_set_aware_service.h"
+#include "mongo/platform/mutex.h"
 
 namespace mongo {
 
@@ -47,12 +48,19 @@ public:
     static PlacementHistoryCleaner* get(ServiceContext* serviceContext);
     static PlacementHistoryCleaner* get(OperationContext* opCtx);
 
+    void pause();
+    void resume(OperationContext* opCtx);
+
 private:
     PlacementHistoryCleaner(const PlacementHistoryCleaner&) = delete;
     PlacementHistoryCleaner& operator=(const PlacementHistoryCleaner&) = delete;
 
    static void runOnce(Client* opCtx, size_t minPlacementHistoryEntries);
 
+    void _start(OperationContext* opCtx, bool steppingUp);
+
+    void _stop(bool steppingDown);
+
     /**
      * ReplicaSetAwareService entry points.
      */
@@ -76,6 +84,10 @@ private:
         return "PlacementHistoryCleaner";
     }
 
+    Mutex _mutex = MONGO_MAKE_LATCH("PlacementHistoryCleaner::_mutex");
+
     PeriodicJobAnchor _anchor;
+
+    bool _runningAsPrimary = false;
 };
 
 }  // namespace mongo
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index efd96236e44..58165c460a1 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -52,6 +52,7 @@
 #include "mongo/db/repl/repl_client_info.h"
 #include "mongo/db/repl/tenant_migration_access_blocker_util.h"
 #include "mongo/db/s/config/index_on_config.h"
+#include "mongo/db/s/config/placement_history_cleaner.h"
 #include "mongo/db/s/sharding_util.h"
 #include "mongo/db/vector_clock.h"
 #include "mongo/logv2/log.h"
@@ -499,7 +500,7 @@ void setInitializationTimeOnPlacementHistory(
     std::vector<ShardId> placementResponseForPreInitQueries) {
     /*
      * The initialization metadata of config.placementHistory is composed by two special docs,
-     * identified by kConfigsvrPlacementHistoryFcvMarkerNamespace:
+     * identified by kConfigPlacementHistoryInitializationMarker:
      * - initializationTimeInfo: contains the time of the initialization and an empty set of shards.
      *   It will allow ShardingCatalogClient to serve accurate responses to historical placement
      *   queries within the [initializationTime, +inf) range.
@@ -509,13 +510,14 @@ void setInitializationTimeOnPlacementHistory(
      *   placement queries within the [-inf, initializationTime) range.
      */
     NamespacePlacementType initializationTimeInfo;
-    initializationTimeInfo.setNss(NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace);
+    initializationTimeInfo.setNss(
+        ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker);
     initializationTimeInfo.setTimestamp(initializationTime);
     initializationTimeInfo.setShards({});
 
     NamespacePlacementType approximatedPlacementForPreInitQueries;
     approximatedPlacementForPreInitQueries.setNss(
-        NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace);
+        ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker);
     approximatedPlacementForPreInitQueries.setTimestamp(Timestamp(0, 1));
     approximatedPlacementForPreInitQueries.setShards(placementResponseForPreInitQueries);
 
@@ -530,7 +532,7 @@ void setInitializationTimeOnPlacementHistory(
     write_ops::DeleteOpEntry entryDelMarker;
     entryDelMarker.setQ(
         BSON(NamespacePlacementType::kNssFieldName
-             << NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns()));
+             << ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker.ns()));
     entryDelMarker.setMulti(true);
     deleteRequest.setDeletes({entryDelMarker});
 
@@ -613,7 +615,8 @@ ShardingCatalogManager::ShardingCatalogManager(
       _localCatalogClient(std::move(localCatalogClient)),
       _kShardMembershipLock("shardMembershipLock"),
       _kChunkOpLock("chunkOpLock"),
-      _kZoneOpLock("zoneOpLock") {
+      _kZoneOpLock("zoneOpLock"),
+      _kPlacementHistoryInitializationLock("placementHistoryInitializationOpLock") {
     startup();
 }
 
@@ -1247,6 +1250,42 @@ void ShardingCatalogManager::initializePlacementHistory(OperationContext* opCtx)
      * - incoming (or not yet materialized) DDLs will insert more recent placement information,
      *   which will have the effect of "updating" the snapshot produced by this function.
      */
+    Lock::ExclusiveLock lk(opCtx, _kPlacementHistoryInitializationLock);
+
+    // Suspend the periodic cleanup job that runs in background.
+    ScopeGuard restartHistoryCleaner(
+        [opCtx]() { PlacementHistoryCleaner::get(opCtx)->resume(opCtx); });
+
+    PlacementHistoryCleaner::get(opCtx)->pause();
+
+    // Delete any existing document that has been already majority committed.
+    {
+        repl::ReadConcernArgs::get(opCtx) =
+            repl::ReadConcernArgs(repl::ReadConcernLevel::kMajorityReadConcern);
+
+        write_ops::DeleteCommandRequest deleteOp(
+            NamespaceString::kConfigsvrPlacementHistoryNamespace);
+        deleteOp.setDeletes({[&] {
+            write_ops::DeleteOpEntry entry;
+            entry.setQ({});
+            entry.setMulti(true);
+            return entry;
+        }()});
+
+        uassertStatusOK(_localConfigShard->runCommandWithFixedRetryAttempts(
+            opCtx,
+            ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+            NamespaceString::kConfigsvrPlacementHistoryNamespace.db().toString(),
+            deleteOp.toBSON(BSON(WriteConcernOptions::kWriteConcernField
+                                 << ShardingCatalogClient::kLocalWriteConcern.toBSON())),
+            Shard::RetryPolicy::kNotIdempotent));
+
+        const auto& replClient = repl::ReplClientInfo::forClient(opCtx->getClient());
+        auto awaitReplicationResult = repl::ReplicationCoordinator::get(opCtx)->awaitReplication(
+            opCtx, replClient.getLastOp(), ShardingCatalogClient::kMajorityWriteConcern);
+    }
+
+    // Set the time of the initialization.
     Timestamp initializationTime;
     std::vector<ShardId> shardsAtInitializationTime;
     {
@@ -1356,7 +1395,7 @@ void ShardingCatalogManager::cleanUpPlacementHistory(OperationContext* opCtx,
      *     },
      *     {
      *         $match : {
-     *             _id : { $ne : "kConfigsvrPlacementHistoryFcvMarkerNamespace"}
+     *             _id : { $ne : "kConfigPlacementHistoryInitializationMarker"}
      *         }
      *     }
      * ])
@@ -1370,9 +1409,9 @@ void ShardingCatalogManager::cleanUpPlacementHistory(OperationContext* opCtx,
              << "$" + NamespacePlacementType::kNssFieldName << "mostRecentTimestamp"
              << BSON("$max"
                      << "$" + NamespacePlacementType::kTimestampFieldName)));
-    pipeline.addStage<DocumentSourceMatch>(
-        BSON("_id" << BSON(
-                 "$ne" << NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns())));
+    pipeline.addStage<DocumentSourceMatch>(BSON(
+        "_id" << BSON(
+            "$ne" << ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker.ns())));
 
     auto aggRequest = pipeline.buildAsAggregateCommandRequest();
 
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index 3fa5208a6ff..fe30c757109 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -885,6 +885,12 @@ private:
      * taking this.
      */
     Lock::ResourceMutex _kZoneOpLock;
+
+    /**
+     * Lock for serializing internal/external initialization requests of config.placementHistory.
+     * Regular DDL and chunk operations over the same collection may be run concurrently.
+     */
+    Lock::ResourceMutex _kPlacementHistoryInitializationLock;
 };
 
 }  // namespace mongo
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp
index 13051202030..7b0dbc44d55 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp
@@ -53,6 +53,7 @@
 #include "mongo/db/vector_clock.h"
 #include "mongo/db/vector_clock_mutable.h"
 #include "mongo/idl/server_parameter_test_util.h"
+#include "mongo/logv2/log.h"
 #include "mongo/s/catalog/sharding_catalog_client.h"
 #include "mongo/s/catalog/type_chunk.h"
 #include "mongo/s/catalog/type_config_version.h"
@@ -65,6 +66,8 @@
 #include "mongo/s/sharding_feature_flags_gen.h"
 #include "mongo/util/scopeguard.h"
 
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
+
 namespace mongo {
 namespace {
 
@@ -390,102 +393,130 @@ TEST_F(ConfigInitializationTest, InizializePlacementHistory) {
     opObserver.onMajorityCommitPointUpdate(getServiceContext(), majorityCommitPoint);
 
     now = VectorClock::get(operationContext())->getTime();
-    auto timeAtInitialization = now.configTime().asTimestamp();
+    const auto timeAtFirstInvocation = now.configTime().asTimestamp();
 
     // init placement history
     ShardingCatalogManager::get(operationContext())->initializePlacementHistory(operationContext());
 
-    // Verify the outcome
-    DBDirectClient dbClient(operationContext());
-
-    // The expected amount of documents has been generated
-    ASSERT_EQUALS(dbClient.count(NamespaceString::kConfigsvrPlacementHistoryNamespace, BSONObj()),
-                  3 /*numDatabases*/ + 3 /*numCollections*/ + 2 /*numMarkers*/);
+    auto verifyOutcome = [&,
+                          coll1 = coll1,
+                          coll1Chunks = coll1Chunks,
+                          coll2 = coll2,
+                          coll2Chunks = coll2Chunks,
+                          corruptedColl = corruptedColl](const Timestamp& timeAtInitialization) {
+        DBDirectClient dbClient(operationContext());
+
+        // The expected amount of documents has been generated
+        ASSERT_EQUALS(
+            dbClient.count(NamespaceString::kConfigsvrPlacementHistoryNamespace, BSONObj()),
+            3 /*numDatabases*/ + 3 /*numCollections*/ + 2 /*numMarkers*/);
+
+        // Each database is correctly described
+        for (const auto& [dbName, primaryShard, timeOfCreation] : databaseInfos) {
+            const NamespacePlacementType expectedEntry(
+                NamespaceString(dbName), timeOfCreation, {primaryShard});
+            const auto generatedEntry = findOneOnConfigCollection<NamespacePlacementType>(
+                operationContext(),
+                NamespaceString::kConfigsvrPlacementHistoryNamespace,
+                BSON("nss" << dbName));
+
+            assertSamePlacementInfo(expectedEntry, generatedEntry);
+        }
 
-    // Each database is correctly described
-    for (const auto& [dbName, primaryShard, timeOfCreation] : databaseInfos) {
-        const NamespacePlacementType expectedEntry(
-            NamespaceString(dbName), timeOfCreation, {primaryShard});
-        const auto generatedEntry = findOneOnConfigCollection<NamespacePlacementType>(
+        // Each collection is properly described:
+        const auto getExpectedTimestampForColl = [](const std::vector<ChunkType>& collChunks) {
+            return std::max_element(collChunks.begin(),
+                                    collChunks.end(),
+                                    [](const ChunkType& lhs, const ChunkType& rhs) {
+                                        return *lhs.getOnCurrentShardSince() <
+                                            *rhs.getOnCurrentShardSince();
+                                    })
+                ->getOnCurrentShardSince()
+                .value();
+        };
+
+        // - coll1
+        NamespacePlacementType expectedEntryForColl1(
+            coll1.getNss(), getExpectedTimestampForColl(coll1Chunks), expectedColl1Placement);
+        expectedEntryForColl1.setUuid(coll1.getUuid());
+        const auto generatedEntryForColl1 = findOneOnConfigCollection<NamespacePlacementType>(
             operationContext(),
             NamespaceString::kConfigsvrPlacementHistoryNamespace,
-            BSON("nss" << dbName));
+            BSON("nss" << coll1.getNss().ns()));
 
-        assertSamePlacementInfo(expectedEntry, generatedEntry);
-    }
+        assertSamePlacementInfo(expectedEntryForColl1, generatedEntryForColl1);
 
-    // Each collection is properly described:
-    const auto getExpectedTimestampForColl = [](const std::vector<ChunkType>& collChunks) {
-        return std::max_element(collChunks.begin(),
-                                collChunks.end(),
-                                [](const ChunkType& lhs, const ChunkType& rhs) {
-                                    return *lhs.getOnCurrentShardSince() <
-                                        *rhs.getOnCurrentShardSince();
-                                })
-            ->getOnCurrentShardSince()
-            .value();
-    };
-
-    // - coll1
-    NamespacePlacementType expectedEntryForColl1(
-        coll1.getNss(), getExpectedTimestampForColl(coll1Chunks), expectedColl1Placement);
-    expectedEntryForColl1.setUuid(coll1.getUuid());
-    const auto generatedEntryForColl1 = findOneOnConfigCollection<NamespacePlacementType>(
-        operationContext(),
-        NamespaceString::kConfigsvrPlacementHistoryNamespace,
-        BSON("nss" << coll1.getNss().ns()));
-
-    assertSamePlacementInfo(expectedEntryForColl1, generatedEntryForColl1);
-
-    // - coll2
-    NamespacePlacementType expectedEntryForColl2(
-        coll2.getNss(), getExpectedTimestampForColl(coll2Chunks), expectedColl2Placement);
-    expectedEntryForColl2.setUuid(coll2.getUuid());
-    const auto generatedEntryForColl2 = findOneOnConfigCollection<NamespacePlacementType>(
-        operationContext(),
-        NamespaceString::kConfigsvrPlacementHistoryNamespace,
-        BSON("nss" << coll2.getNss().ns()));
-
-    assertSamePlacementInfo(expectedEntryForColl2, generatedEntryForColl2);
-
-    // - corruptedColl
-    NamespacePlacementType expectedEntryForCorruptedColl(
-        corruptedColl.getNss(), timeAtInitialization, expectedCorruptedCollPlacement);
-    expectedEntryForCorruptedColl.setUuid(corruptedColl.getUuid());
-    const auto generatedEntryForCorruptedColl = findOneOnConfigCollection<NamespacePlacementType>(
-        operationContext(),
-        NamespaceString::kConfigsvrPlacementHistoryNamespace,
-        BSON("nss" << corruptedColl.getNss().ns()));
-
-    assertSamePlacementInfo(expectedEntryForCorruptedColl, generatedEntryForCorruptedColl);
-
-    // Check FCV special markers:
-    // - one entry at begin-of-time with all the currently existing shards (and no UUID set).
-    const NamespacePlacementType expectedMarkerForDawnOfTime(
-        NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace,
-        Timestamp(0, 1),
-        allShardIds);
-    const auto generatedMarkerForDawnOfTime = findOneOnConfigCollection<NamespacePlacementType>(
-        operationContext(),
-        NamespaceString::kConfigsvrPlacementHistoryNamespace,
-        BSON("nss" << NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns()
-                   << "timestamp" << Timestamp(0, 1)));
-
-    assertSamePlacementInfo(expectedMarkerForDawnOfTime, generatedMarkerForDawnOfTime);
-
-    // - one entry at the time the initialization is performed with an empty set of shards
-    // (and no UUID set).
-    const NamespacePlacementType expectedMarkerForInitializationTime(
-        NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace, timeAtInitialization, {});
-    const auto generatedMarkerForInitializationTime =
-        findOneOnConfigCollection<NamespacePlacementType>(
+        // - coll2
+        NamespacePlacementType expectedEntryForColl2(
+            coll2.getNss(), getExpectedTimestampForColl(coll2Chunks), expectedColl2Placement);
+        expectedEntryForColl2.setUuid(coll2.getUuid());
+        const auto generatedEntryForColl2 = findOneOnConfigCollection<NamespacePlacementType>(
+            operationContext(),
+            NamespaceString::kConfigsvrPlacementHistoryNamespace,
+            BSON("nss" << coll2.getNss().ns()));
+
+        assertSamePlacementInfo(expectedEntryForColl2, generatedEntryForColl2);
+
+        // - corruptedColl
+        NamespacePlacementType expectedEntryForCorruptedColl(
+            corruptedColl.getNss(), timeAtInitialization, expectedCorruptedCollPlacement);
+        expectedEntryForCorruptedColl.setUuid(corruptedColl.getUuid());
+        const auto generatedEntryForCorruptedColl =
+            findOneOnConfigCollection<NamespacePlacementType>(
+                operationContext(),
+                NamespaceString::kConfigsvrPlacementHistoryNamespace,
+                BSON("nss" << corruptedColl.getNss().ns()));
+
+        assertSamePlacementInfo(expectedEntryForCorruptedColl, generatedEntryForCorruptedColl);
+
+        // Check placement initialization markers:
+        // - one entry at begin-of-time with all the currently existing shards (and no UUID set).
+        const NamespacePlacementType expectedMarkerForDawnOfTime(
+            ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker,
+            Timestamp(0, 1),
+            allShardIds);
+        const auto generatedMarkerForDawnOfTime = findOneOnConfigCollection<NamespacePlacementType>(
             operationContext(),
             NamespaceString::kConfigsvrPlacementHistoryNamespace,
-            BSON("nss" << NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns()
-                       << "timestamp" << timeAtInitialization));
+            BSON("nss" << ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker.ns()
+                       << "timestamp" << Timestamp(0, 1)));
+
+        assertSamePlacementInfo(expectedMarkerForDawnOfTime, generatedMarkerForDawnOfTime);
+
+        // - one entry at the time the initialization is performed with an empty set of shards
+        // (and no UUID set).
+        const NamespacePlacementType expectedMarkerForInitializationTime(
+            ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker,
+            timeAtInitialization,
+            {});
+        const auto generatedMarkerForInitializationTime =
+            findOneOnConfigCollection<NamespacePlacementType>(
+                operationContext(),
+                NamespaceString::kConfigsvrPlacementHistoryNamespace,
+                BSON(
+                    "nss" << ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker.ns()
+                          << "timestamp" << timeAtInitialization));
+
+        assertSamePlacementInfo(expectedMarkerForInitializationTime,
+                                generatedMarkerForInitializationTime);
+    };
+
+    verifyOutcome(timeAtFirstInvocation);
+
+    // Perform a second invocation - the content created by the previous invocation should have been
+    // fully replaced by a new full representation with updated initialization markers
+
+    now = VectorClock::get(operationContext())->getTime();
+    majorityCommitPoint = repl::OpTime(now.clusterTime().asTimestamp(), 1);
+    opObserver.onMajorityCommitPointUpdate(getServiceContext(), majorityCommitPoint);
+
+    now = VectorClock::get(operationContext())->getTime();
+    const auto timeAtSecondInvocation = now.configTime().asTimestamp();
+    ASSERT_GT(timeAtSecondInvocation, timeAtFirstInvocation);
+
+    ShardingCatalogManager::get(operationContext())->initializePlacementHistory(operationContext());
 
-    assertSamePlacementInfo(expectedMarkerForInitializationTime,
-                            generatedMarkerForInitializationTime);
+    verifyOutcome(timeAtSecondInvocation);
 }
 
 }  // unnamed namespace
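
The `pause()`/`resume()` entry points added to `PlacementHistoryCleaner`, together with the `_runningAsPrimary` flag and the job anchor, form a small state machine: the periodic cleanup job runs only while the node is primary and no caller has paused it, and `initializePlacementHistory()` suspends it for the duration of the rebuild, using a `ScopeGuard` so that it is resumed even if the rebuild throws. The following standalone sketch models that pattern in plain C++; `BackgroundJob`, `CleanerController`, and `PauseGuard` are illustrative names, not the real MongoDB classes.

```cpp
#include <iostream>
#include <mutex>

// Stand-in for the periodic job anchor: started/stopped, nothing more.
class BackgroundJob {
public:
    bool isValid() const { return _running; }
    void start() { _running = true; }
    void stop() { _running = false; }

private:
    bool _running = false;
};

// Mirrors the start/stop logic added to PlacementHistoryCleaner: the job runs
// only when the node is primary AND nobody has asked for a pause.
class CleanerController {
public:
    void onStepUp() { _start(/*steppingUp=*/true); }
    void onStepDown() { _stop(/*steppingDown=*/true); }
    void pause() { _stop(/*steppingDown=*/false); }
    void resume() { _start(/*steppingUp=*/false); }

    bool running() const { return _job.isValid(); }

private:
    void _start(bool steppingUp) {
        std::lock_guard<std::mutex> lk(_mutex);
        if (steppingUp) {
            _runningAsPrimary = true;
        }
        // A resume() issued after a stepdown stays a no-op: the primary flag is false.
        if (_runningAsPrimary && !_job.isValid()) {
            _job.start();
        }
    }

    void _stop(bool steppingDown) {
        std::lock_guard<std::mutex> lk(_mutex);
        if (steppingDown) {
            _runningAsPrimary = false;
        }
        if (_job.isValid()) {
            _job.stop();
        }
    }

    std::mutex _mutex;
    BackgroundJob _job;
    bool _runningAsPrimary = false;
};

// RAII pause, playing the role of the ScopeGuard in initializePlacementHistory().
class PauseGuard {
public:
    explicit PauseGuard(CleanerController& c) : _c(c) {
        _c.pause();
    }
    ~PauseGuard() {
        _c.resume();  // runs even if the guarded work throws
    }

private:
    CleanerController& _c;
};

int main() {
    CleanerController cleaner;
    cleaner.onStepUp();
    {
        PauseGuard guard(cleaner);
        std::cout << "during rebuild, running=" << cleaner.running() << "\n";  // 0
    }
    std::cout << "after rebuild, running=" << cleaner.running() << "\n";  // 1
}
```

One design detail worth noting: because `resume()` only restarts the job when the primary flag is still set, a stepdown that races with the guarded rebuild cannot accidentally relaunch the cleanup job on a secondary, which appears to be the intent of the `_runningAsPrimary` check in the actual change.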