author    | Allison Easton <allison.easton@mongodb.com> | 2021-07-29 12:55:51 +0000
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2021-08-03 08:34:20 +0000
commit    | 64a119b69ce7ae34ec7eac15e52ad1a08beb5b08 (patch)
tree      | 25a7024099282a290c9229efc59f4b508de685b2 /src/mongo/db/s
parent    | 29e7de221b3e32c4a85dbff86e922553a5852ae8 (diff)
download  | mongo-64a119b69ce7ae34ec7eac15e52ad1a08beb5b08.tar.gz
SERVER-53283 Remove code that patches up sharding metadata on FCV upgrade/downgrade to/from 4.9
Diffstat (limited to 'src/mongo/db/s')
-rw-r--r-- | src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp |   9
-rw-r--r-- | src/mongo/db/s/config/sharding_catalog_manager.cpp                      | 639
-rw-r--r-- | src/mongo/db/s/config/sharding_catalog_manager.h                        | 110
-rw-r--r-- | src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp     |  12
-rw-r--r-- | src/mongo/db/s/sharding_ddl_coordinator_service.cpp                     |  24
-rw-r--r-- | src/mongo/db/s/sharding_ddl_coordinator_service.h                       |   9
-rw-r--r-- | src/mongo/db/s/sharding_util.cpp                                        |  12
-rw-r--r-- | src/mongo/db/s/sharding_util.h                                          |   9
8 files changed, 6 insertions, 818 deletions
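Aside from deleting the 4.9/5.0 metadata patch-up machinery, the commit drops the guard that made _configsvrRefineCollectionShardKey and commitChunkMigration bail out while an FCV upgrade or downgrade was in flight. A minimal sketch of that guard pattern, reassembled from the removed hunks below (the wrapper function is illustrative, not part of the original code):

```cpp
#include "mongo/db/commands/feature_compatibility_version.h"  // include removed by this commit

// Illustrative wrapper; the body mirrors the lines deleted from
// configsvr_refine_collection_shard_key_command.cpp and
// sharding_catalog_manager_chunk_operations.cpp.
void runWhileFCVIsStable(OperationContext* opCtx) {
    // FixedFCVRegion pins the feature compatibility version for its lifetime,
    // so a concurrent setFeatureCompatibilityVersion cannot start a transition.
    FixedFCVRegion fcvRegion(opCtx);
    uassert(ErrorCodes::ConflictingOperationInProgress,
            "Cannot run this operation while the cluster is being upgraded or downgraded",
            !fcvRegion->isUpgradingOrDowngrading());

    // ... command body runs here with the FCV held stable ...
}
```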
diff --git a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp index ce1065ddb0c..67a8843e5e2 100644 --- a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp +++ b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp @@ -32,7 +32,6 @@ #include "mongo/db/audit.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/feature_compatibility_version.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/shard_key_util.h" @@ -60,14 +59,6 @@ public: uassert(ErrorCodes::InvalidOptions, "_configsvrRefineCollectionShardKey must be called with majority writeConcern", opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority); - - // TODO (SERVER-53283): Delete this code when FCV 5.1 becomes the official one - FixedFCVRegion fcvRegion(opCtx); - uassert(ErrorCodes::ConflictingOperationInProgress, - "Cannot refine collection shard key while the node is being upgraded or " - "downgraded", - !fcvRegion->isUpgradingOrDowngrading()); - _internalRun(opCtx); } diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp index f91f3e89e12..1eb6949ae59 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp @@ -61,7 +61,6 @@ #include "mongo/s/grid.h" #include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_response.h" -#include "mongo/stdx/unordered_map.h" #include "mongo/transport/service_entry_point.h" #include "mongo/util/log_and_backoff.h" @@ -177,77 +176,7 @@ void abortTransaction(OperationContext* opCtx, TxnNumber txnNumber) { } } -// Updates documents in the config db using DBDirectClient -void updateConfigDocumentDBDirect(OperationContext* opCtx, - const mongo::NamespaceString& nss, - const BSONObj& query, - const BSONObj& update, - bool upsert, - bool multi) { - invariant(nss.db() == "config"); - - DBDirectClient client(opCtx); - - write_ops::UpdateCommandRequest updateOp(nss, [&] { - write_ops::UpdateOpEntry u; - u.setQ(query); - u.setU(write_ops::UpdateModification::parseFromClassicUpdate(update)); - u.setMulti(multi); - u.setUpsert(upsert); - return std::vector{u}; - }()); - updateOp.setWriteCommandRequestBase([] { - write_ops::WriteCommandRequestBase base; - base.setOrdered(false); - return base; - }()); - - auto commandResult = - client.runCommand(OpMsgRequest::fromDBAndBody(nss.db(), updateOp.toBSON({}))); - uassertStatusOK([&] { - BatchedCommandResponse response; - std::string unusedErrmsg; - response.parseBSON( - commandResult->getCommandReply(), - &unusedErrmsg); // Return value intentionally ignored, because response.toStatus() will - // contain any errors in more detail - return response.toStatus(); - }()); - uassertStatusOK(getWriteConcernStatusFromCommandResult(commandResult->getCommandReply())); -} - -Status createNsIndexesForConfigChunks(OperationContext* opCtx) { - const bool unique = true; - auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); - - Status result = configShard->createIndexOnConfig( - opCtx, ChunkType::ConfigNS, BSON(ChunkType::ns() << 1 << ChunkType::min() << 1), unique); - if (!result.isOK()) { - return result.withContext("couldn't create ns_1_min_1 index on config db"); - } - - result = configShard->createIndexOnConfig( - opCtx, 
- ChunkType::ConfigNS, - BSON(ChunkType::ns() << 1 << ChunkType::shard() << 1 << ChunkType::min() << 1), - unique); - if (!result.isOK()) { - return result.withContext("couldn't create ns_1_shard_1_min_1 index on config db"); - } - - result = - configShard->createIndexOnConfig(opCtx, - ChunkType::ConfigNS, - BSON(ChunkType::ns() << 1 << ChunkType::lastmod() << 1), - unique); - if (!result.isOK()) { - return result.withContext("couldn't create ns_1_lastmod_1 index on config db"); - } - - return Status::OK(); -} - -Status createUuidIndexesForConfigChunks(OperationContext* opCtx) { +Status createIndexesForConfigChunks(OperationContext* opCtx) { const bool unique = true; auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); Status result = configShard->createIndexOnConfig( @@ -279,46 +208,6 @@ Status createUuidIndexesForConfigChunks(OperationContext* opCtx) { return Status::OK(); } - -/** - * This step runs on FCV switch between 4.4 <-> 5.0, right after the shards have been told to enter - * Phase 1 and after all the local chunks' format has been changed to contain both UUID and - * Namespace. - * - * If isOnUpgrade is true, it will remove any config.chunks entries, which do not have UUID and if - * it is false, it will remove those which do not have Namespace. - */ -void removeIncompleteChunks(OperationContext* opCtx, bool isOnUpgrade) { - const auto catalogClient = Grid::get(opCtx)->catalogClient(); - - auto query = isOnUpgrade ? BSON(ChunkType::collectionUUID << BSON("$exists" << false)) - : BSON(ChunkType::ns << BSON("$exists" << false)); - uassertStatusOK(catalogClient->removeConfigDocuments( - opCtx, ChunkType::ConfigNS, query, ShardingCatalogClient::kLocalWriteConcern)); -} - -// When building indexes for existing collections during FCV upgrade, use the createIndexes command -// instead of Shard::createIndexesOnConfig, in order to leverage hybrid builds that do not require -// to hold the collection lock in X_MODE for the duration of the build. -// TODO SERVER-53283: Remove once 5.0 has been released. -void createIndexesForFCVUpgradeDowngrade(OperationContext* opCtx, - const std::vector<BSONObj>& keysVector) { - DBDirectClient client(opCtx); - const auto indexSpecs = [&keysVector]() -> std::vector<BSONObj> { - std::vector<BSONObj> indexSpecs; - for (const auto& keys : keysVector) { - IndexSpec spec; - spec.addKeys(keys); - spec.unique(true); - - indexSpecs.emplace_back(spec.toBSON()); - } - return indexSpecs; - }(); - - client.createIndexes(ChunkType::ConfigNS.ns(), indexSpecs); -} - } // namespace void ShardingCatalogManager::create(ServiceContext* serviceContext, @@ -353,8 +242,7 @@ ShardingCatalogManager::ShardingCatalogManager( _executorForAddShard(std::move(addShardExecutor)), _kShardMembershipLock("shardMembershipLock"), _kChunkOpLock("chunkOpLock"), - _kZoneOpLock("zoneOpLock"), - _kDatabaseOpLock("databaseOpLock") { + _kZoneOpLock("zoneOpLock") { startup(); } @@ -473,16 +361,12 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) { const bool unique = true; auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); - // (Generic FCV reference): TODO SERVER-53283 Remove the outermost 'if' statement once 5.0 has - // branched out. 
- if (!serverGlobalParams.featureCompatibility.isUpgradingOrDowngrading()) { - const auto result = createUuidIndexesForConfigChunks(opCtx); - if (result != Status::OK()) { - return result; - } + Status result = createIndexesForConfigChunks(opCtx); + if (result != Status::OK()) { + return result; } - Status result = configShard->createIndexOnConfig( + result = configShard->createIndexOnConfig( opCtx, MigrationType::ConfigNS, BSON(MigrationType::ns() << 1 << MigrationType::min() << 1), @@ -594,517 +478,6 @@ Status ShardingCatalogManager::setFeatureCompatibilityVersionOnShards(OperationC return Status::OK(); } -void ShardingCatalogManager::_removePre50LegacyMetadata(OperationContext* opCtx) { - const auto catalogClient = Grid::get(opCtx)->catalogClient(); - // Delete all documents which have {dropped: true} from config.collections - uassertStatusOK( - catalogClient->removeConfigDocuments(opCtx, - CollectionType::ConfigNS, - BSON("dropped" << true), - ShardingCatalogClient::kLocalWriteConcern)); - - // Clear the {dropped:true} and {distributionMode:sharded} fields from config.collections - updateConfigDocumentDBDirect(opCtx, - CollectionType::ConfigNS, - {} /* query */, - BSON("$unset" << BSON("dropped" - << "") - << "$unset" - << BSON("distributionMode" - << "")), - false /* upsert */, - true /* multi */); -} - -void ShardingCatalogManager::upgradeMetadataFor50Phase1(OperationContext* opCtx) { - LOGV2(5581200, "Starting metadata upgrade to 5.0 (phase 1)"); - - try { - _removePre50LegacyMetadata(opCtx); - } catch (const DBException& e) { - LOGV2(5276708, "Failed to upgrade sharding metadata: {error}", "error"_attr = e.toString()); - throw; - } - - try { - _upgradeDatabasesEntriesTo50(opCtx); - _upgradeCollectionsAndChunksEntriesTo50Phase1(opCtx); - } catch (const DBException& e) { - LOGV2(5581201, - "Failed to upgrade sharding metadata (phase 1): {error}", - "error"_attr = e.toString()); - throw; - } - - LOGV2(5581202, "Successfully upgraded metadata to 5.0 (phase 1)"); -} - -void ShardingCatalogManager::upgradeMetadataFor50Phase2(OperationContext* opCtx) { - LOGV2(5581206, "Starting metadata upgrade to 5.0 (phase 2)"); - - try { - _upgradeCollectionsAndChunksEntriesTo50Phase2(opCtx); - } catch (const DBException& e) { - LOGV2(5581207, - "Failed to upgrade sharding metadata (phase 2): {error}", - "error"_attr = e.toString()); - throw; - } - - LOGV2(5581208, "Successfully upgraded metadata to 5.0 (phase 2)"); -} - -void ShardingCatalogManager::downgradeMetadataToPre50Phase1(OperationContext* opCtx) { - LOGV2(5581203, "Starting metadata downgrade to pre 5.0 (phase 1)"); - - try { - _downgradeCollectionsAndChunksEntriesToPre50Phase1(opCtx); - _downgradeDatabasesEntriesToPre50(opCtx); - } catch (const DBException& e) { - LOGV2(5581204, - "Failed to downgrade sharding metadata (phase 1): {error}", - "error"_attr = e.toString()); - throw; - } - - LOGV2(5581205, "Successfully downgraded metadata to pre 5.0 (phase 1)"); -} - -void ShardingCatalogManager::downgradeMetadataToPre50Phase2(OperationContext* opCtx) { - LOGV2(5581209, "Starting metadata downgrade to pre 5.0 (phase 2)"); - - try { - _downgradeCollectionsAndChunksEntriesToPre50Phase2(opCtx); - } catch (const DBException& e) { - LOGV2(5581210, - "Failed to downgrade sharding metadata (phase 2): {error}", - "error"_attr = e.toString()); - throw; - } - - LOGV2(5581211, "Successfully downgraded metadata to pre 5.0 (phase 2)"); -} - -void ShardingCatalogManager::_upgradeDatabasesEntriesTo50(OperationContext* opCtx) { - LOGV2(5258802, "Starting 
upgrade of config.databases"); - - auto const catalogCache = Grid::get(opCtx)->catalogCache(); - auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); - const auto dbDocs = - uassertStatusOK( - configShard->exhaustiveFindOnConfig( - opCtx, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - repl::ReadConcernLevel::kLocalReadConcern, - DatabaseType::ConfigNS, - BSON(DatabaseType::version() + "." + DatabaseVersion::kTimestampFieldName - << BSON("$exists" << false)), - BSONObj(), - boost::none)) - .docs; - - for (const auto& doc : dbDocs) { - const DatabaseType db = uassertStatusOK(DatabaseType::fromBSON(doc)); - const auto name = db.getName(); - - auto now = VectorClock::get(opCtx)->getTime(); - auto clusterTime = now.clusterTime().asTimestamp(); - - Lock::ExclusiveLock lock(opCtx->lockState(), _kDatabaseOpLock); - updateConfigDocumentDBDirect( - opCtx, - DatabaseType::ConfigNS, - BSON(DatabaseType::name << name), - BSON("$set" << BSON(DatabaseType::version() + "." + DatabaseVersion::kTimestampFieldName - << clusterTime)), - false /* upsert */, - false /* multi*/); - } - - // Wait until the last operation is majority-committed - WriteConcernResult ignoreResult; - const auto latestOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); - uassertStatusOK(waitForWriteConcern( - opCtx, latestOpTime, ShardingCatalogClient::kMajorityWriteConcern, &ignoreResult)); - - // Forcing a refresh of each DB on each shard - const auto allDBDocs = uassertStatusOK(configShard->exhaustiveFindOnConfig( - opCtx, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - repl::ReadConcernLevel::kLocalReadConcern, - DatabaseType::ConfigNS, - BSONObj(), - BSONObj(), - boost::none)) - .docs; - const auto shardIds = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx); - const auto fixedExecutor = Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor(); - for (const auto& doc : allDBDocs) { - const DatabaseType db = uassertStatusOK(DatabaseType::fromBSON(doc)); - const auto name = db.getName(); - - catalogCache->invalidateDatabaseEntry_LINEARIZABLE(name); - sharding_util::tellShardsToRefreshDatabase(opCtx, shardIds, name, fixedExecutor); - } - - LOGV2(5258803, "Successfully upgraded config.databases"); -} - -void ShardingCatalogManager::_downgradeDatabasesEntriesToPre50(OperationContext* opCtx) { - LOGV2(5258806, "Starting downgrade of config.databases"); - - { - Lock::ExclusiveLock lock(opCtx->lockState(), _kDatabaseOpLock); - updateConfigDocumentDBDirect( - opCtx, - DatabaseType::ConfigNS, - {} /* query */, - BSON("$unset" << BSON( - DatabaseType::version() + "." 
+ DatabaseVersion::kTimestampFieldName << "")), - false /* upsert */, - true /* multi */); - } - - auto const catalogCache = Grid::get(opCtx)->catalogCache(); - auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); - const auto dbDocs = uassertStatusOK(configShard->exhaustiveFindOnConfig( - opCtx, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - repl::ReadConcernLevel::kLocalReadConcern, - DatabaseType::ConfigNS, - BSONObj(), - BSONObj(), - boost::none)) - .docs; - - // Wait until the last operation is majority-committed - WriteConcernResult ignoreResult; - const auto latestOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); - uassertStatusOK(waitForWriteConcern( - opCtx, latestOpTime, ShardingCatalogClient::kMajorityWriteConcern, &ignoreResult)); - - // Forcing a refresh of each DB on each shard - const auto shardIds = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx); - const auto fixedExecutor = Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor(); - for (const auto& doc : dbDocs) { - const DatabaseType db = uassertStatusOK(DatabaseType::fromBSON(doc)); - const auto name = db.getName(); - - catalogCache->invalidateDatabaseEntry_LINEARIZABLE(name); - sharding_util::tellShardsToRefreshDatabase(opCtx, shardIds, name, fixedExecutor); - } - - LOGV2(5258807, "Successfully downgraded config.databases"); -} - -void ShardingCatalogManager::_upgradeCollectionsAndChunksEntriesTo50Phase1( - OperationContext* opCtx) { - LOGV2(5276700, "Starting upgrade of config.collections and config.chunks (phase 1)"); - - auto const catalogCache = Grid::get(opCtx)->catalogCache(); - auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); - - auto collectionDocs = - uassertStatusOK( - configShard->exhaustiveFindOnConfig( - opCtx, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - repl::ReadConcernLevel::kLocalReadConcern, - CollectionType::ConfigNS, - BSONObj(BSON(CollectionType::kTimestampFieldName << BSON("$exists" << false))), - BSONObj(), - boost::none)) - .docs; - - stdx::unordered_map<NamespaceString, Timestamp> timestampMap; - - // Set timestamp and uuid for all chunks in all collections - for (const auto& doc : collectionDocs) { - const CollectionType coll(doc); - const auto uuid = coll.getUuid(); - const auto nss = coll.getNss(); - - uassert(547900, - str::stream() << "The 'allowMigrations' field of the " << nss - << " collection must be true", - coll.getAllowMigrations()); - - const auto now = VectorClock::get(opCtx)->getTime(); - const auto newTimestamp = now.clusterTime().asTimestamp(); - timestampMap.emplace(nss, newTimestamp); - - // Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and - // migrations. 
- Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock); - - updateConfigDocumentDBDirect( - opCtx, - ChunkType::ConfigNS, - BSON(ChunkType::ns(nss.ns())) /* query */, - BSON("$set" << BSON(ChunkType::timestamp(newTimestamp) - << ChunkType::collectionUUID() << uuid)) /* update */, - false /* upsert */, - true /* multi */); - } - - removeIncompleteChunks(opCtx, true /* isOnUpgrade */); - - // Create uuid_* indexes for config.chunks - createIndexesForFCVUpgradeDowngrade( - opCtx, - {BSON(ChunkType::collectionUUID << 1 << ChunkType::min() << 1), - BSON(ChunkType::collectionUUID << 1 << ChunkType::shard() << 1 << ChunkType::min() << 1), - BSON(ChunkType::collectionUUID << 1 << ChunkType::lastmod() << 1)}); - - // Set timestamp for all collections in config.collections - for (const auto& doc : collectionDocs) { - // Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and - // migrations. - Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock); - - const CollectionType coll(doc); - const auto nss = coll.getNss(); - - updateConfigDocumentDBDirect(opCtx, - CollectionType::ConfigNS, - BSON(CollectionType::kNssFieldName << nss.ns()) /* query */, - BSON("$set" << BSON(CollectionType::kTimestampFieldName - << timestampMap.at(nss))) /* update */, - false /* upsert */, - false /* multi */); - } - - // Wait until the last operation is majority-committed - WriteConcernResult ignoreResult; - const auto latestOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); - uassertStatusOK(waitForWriteConcern( - opCtx, latestOpTime, ShardingCatalogClient::kMajorityWriteConcern, &ignoreResult)); - - // Forcing a refresh of each collection on each shard - const auto allCollectionDocs = - uassertStatusOK( - configShard->exhaustiveFindOnConfig(opCtx, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - repl::ReadConcernLevel::kLocalReadConcern, - CollectionType::ConfigNS, - BSONObj(), - BSONObj(), - boost::none)) - .docs; - const auto shardIds = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx); - const auto fixedExecutor = Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor(); - for (const auto& doc : allCollectionDocs) { - const CollectionType coll(doc); - const auto nss = coll.getNss(); - catalogCache->invalidateCollectionEntry_LINEARIZABLE(nss); - try { - sharding_util::tellShardsToRefreshCollection(opCtx, shardIds, nss, fixedExecutor); - } catch (const ExceptionFor<ErrorCodes::ConflictingOperationInProgress>& ex) { - // Collection is being dropped by a legacy-path dropCollection - LOGV2(5617300, - "Failed to refresh collection on shards after metadata patch-up", - "nss"_attr = nss, - "exception"_attr = redact(ex)); - } - } - - // Drop ns_* indexes of config.chunks - { - DBDirectClient client(opCtx); - - const bool includeBuildUUIDs = false; - const int options = 0; - auto indexes = client.getIndexSpecs(ChunkType::ConfigNS, includeBuildUUIDs, options); - BSONArrayBuilder indexNamesToDrop; - for (const auto& index : indexes) { - const auto indexName = index.getStringField("name"); - if (indexName == "ns_1_min_1"_sd || indexName == "ns_1_shard_1_min_1"_sd || - indexName == "ns_1_lastmod_1"_sd) { - indexNamesToDrop.append(indexName); - } - } - - BSONObj info; - if (!client.runCommand(ChunkType::ConfigNS.db().toString(), - BSON("dropIndexes" << ChunkType::ConfigNS.coll() << "index" - << indexNamesToDrop.arr()), - info)) - uassertStatusOK(getStatusFromCommandResult(info)); - } - - LOGV2(5276701, "Successfully upgraded config.collections and 
config.chunks (phase 1)"); -} - -void ShardingCatalogManager::_upgradeCollectionsAndChunksEntriesTo50Phase2( - OperationContext* opCtx) { - LOGV2(5276706, "Starting upgrade of config.chunks (phase 2)"); - - // Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and - // migrations. - Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock); - - // Unset ns, epoch and timestamp for all chunks on config.chunks - updateConfigDocumentDBDirect( - opCtx, - ChunkType::ConfigNS, - {} /* query */, - BSON("$unset" << BSON(ChunkType::ns("") << ChunkType::epoch() << "" - << ChunkType::timestamp() << "")) /* update */, - false /* upsert */, - true /* multi */); - - LOGV2(5276707, "Successfully upgraded config.chunks (phase 2)"); -} - -void ShardingCatalogManager::_downgradeCollectionsAndChunksEntriesToPre50Phase1( - OperationContext* opCtx) { - LOGV2(5276702, "Starting downgrade of config.collections and config.chunks (phase 1)"); - - auto const catalogCache = Grid::get(opCtx)->catalogCache(); - auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); - - auto collectionDocs = - uassertStatusOK( - configShard->exhaustiveFindOnConfig( - opCtx, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - repl::ReadConcernLevel::kLocalReadConcern, - CollectionType::ConfigNS, - BSONObj(BSON(CollectionType::kTimestampFieldName << BSON("$exists" << true))), - BSONObj(), - boost::none)) - .docs; - - // Set ns on all chunks - for (const auto& doc : collectionDocs) { - // Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and - // migrations. - Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock); - - const CollectionType coll(doc); - const auto uuid = coll.getUuid(); - const auto nss = coll.getNss(); - - uassert(547901, - str::stream() << "The 'allowMigrations' field of the " << nss - << " collection must be true", - coll.getAllowMigrations()); - - updateConfigDocumentDBDirect( - opCtx, - ChunkType::ConfigNS, - BSON(ChunkType::collectionUUID << uuid) /* query */, - BSON("$set" << BSON(ChunkType::ns(nss.ns()) - << ChunkType::epoch() << coll.getEpoch())) /* update */, - false /* upsert */, - true /* multi */); - } - - removeIncompleteChunks(opCtx, false /* isOnUpgrade */); - - // Create ns_* indexes for config.chunks - createIndexesForFCVUpgradeDowngrade( - opCtx, - {BSON(ChunkType::ns << 1 << ChunkType::min() << 1), - BSON(ChunkType::ns << 1 << ChunkType::shard() << 1 << ChunkType::min() << 1), - BSON(ChunkType::ns << 1 << ChunkType::lastmod() << 1)}); - - // Unset the timestamp field on all collections - { - // Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and - // migrations. 
- Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock); - - updateConfigDocumentDBDirect( - opCtx, - CollectionType::ConfigNS, - {} /* query */, - BSON("$unset" << BSON(CollectionType::kTimestampFieldName << "")) /* update */, - false /* upsert */, - true /* multi */); - } - - // Wait until the last operation is majority-committed - WriteConcernResult ignoreResult; - const auto latestOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); - uassertStatusOK(waitForWriteConcern( - opCtx, latestOpTime, ShardingCatalogClient::kMajorityWriteConcern, &ignoreResult)); - - // Forcing a refresh of each collection on each shard - const auto allCollectionDocs = - uassertStatusOK( - configShard->exhaustiveFindOnConfig(opCtx, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - repl::ReadConcernLevel::kLocalReadConcern, - CollectionType::ConfigNS, - BSONObj(), - BSONObj(), - boost::none)) - .docs; - const auto shardIds = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx); - const auto fixedExecutor = Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor(); - for (const auto& doc : allCollectionDocs) { - const CollectionType coll(doc); - const auto nss = coll.getNss(); - catalogCache->invalidateCollectionEntry_LINEARIZABLE(nss); - try { - sharding_util::tellShardsToRefreshCollection(opCtx, shardIds, nss, fixedExecutor); - } catch (const ExceptionFor<ErrorCodes::ConflictingOperationInProgress>& ex) { - // Collection is being dropped by a legacy-path dropCollection - LOGV2(5617301, - "Failed to refresh collection on shards after metadata patch-up", - "nss"_attr = nss, - "exception"_attr = redact(ex)); - } - } - - // Drop uuid_* indexes for config.chunks - { - DBDirectClient client(opCtx); - - const bool includeBuildUUIDs = false; - const int options = 0; - auto indexes = client.getIndexSpecs(ChunkType::ConfigNS, includeBuildUUIDs, options); - BSONArrayBuilder indexNamesToDrop; - for (const auto& index : indexes) { - const auto indexName = index.getStringField("name"); - if (indexName == "uuid_1_min_1"_sd || indexName == "uuid_1_shard_1_min_1"_sd || - indexName == "uuid_1_lastmod_1"_sd) { - indexNamesToDrop.append(indexName); - } - } - - BSONObj info; - if (!client.runCommand(ChunkType::ConfigNS.db().toString(), - BSON("dropIndexes" << ChunkType::ConfigNS.coll() << "index" - << indexNamesToDrop.arr()), - info)) - uassertStatusOK(getStatusFromCommandResult(info)); - } - - LOGV2(5276703, "Successfully downgraded config.collections and config.chunks (phase 1)"); -} - -void ShardingCatalogManager::_downgradeCollectionsAndChunksEntriesToPre50Phase2( - OperationContext* opCtx) { - LOGV2(5276709, "Starting downgrade of config.chunks (phase 2)"); - - // Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and - // migrations. 
- Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock); - - // Unset the uuid for all chunks on config.chunks - updateConfigDocumentDBDirect( - opCtx, - ChunkType::ConfigNS, - {} /* query */, - BSON("$unset" << BSON(ChunkType::collectionUUID() << "")) /* update */, - false /* upsert */, - true /* multi */); - - LOGV2(5276710, "Successfully downgraded config.chunks (phase 2)"); -} - StatusWith<bool> ShardingCatalogManager::_isShardRequiredByZoneStillInUse( OperationContext* opCtx, const ReadPreferenceSetting& readPref, diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h index f357d88d8d8..9ff86dee0ca 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager.h +++ b/src/mongo/db/s/config/sharding_catalog_manager.h @@ -427,42 +427,6 @@ public: */ Status setFeatureCompatibilityVersionOnShards(OperationContext* opCtx, const BSONObj& cmdObj); - /** - * Patches-up persistent metadata for 5.0. - * - * It shall be called when upgrading to 5.0 or newer versions, when shards are in phase-1 of the - * setFCV protocol. - * TODO SERVER-53283: Remove once 5.0 has been released. - */ - void upgradeMetadataFor50Phase1(OperationContext* opCtx); - - /** - * Patches-up persistent metadata for 5.0. - * - * It shall be called when upgrading to 5.0 or newer versions, when shards are in phase-2 of the - * setFCV protocol. - * TODO SERVER-53283: Remove once 5.0 has been released. - */ - void upgradeMetadataFor50Phase2(OperationContext* opCtx); - - /** - * Patches-up persistent metadata for downgrade from 5.0. - * - * It shall be called when downgrading from 5.0 to an earlier version, when shards are in - * phase-1 of the setFCV protocol. - * TODO SERVER-53283: Remove once 5.0 has been released. - */ - void downgradeMetadataToPre50Phase1(OperationContext* opCtx); - - /** - * Patches-up persistent metadata for downgrade from 5.0. - * - * It shall be called when downgrading from 5.0 to an earlier version, when shards are in - * phase-2 of the setFCV protocol. - * TODO SERVER-53283: Remove once 5.0 has been released. - */ - void downgradeMetadataToPre50Phase2(OperationContext* opCtx); - /* * Rename collection metadata as part of a renameCollection operation. * @@ -598,70 +562,6 @@ private: const std::string& shardName, const std::string& zoneName); - /** - * Removes all entries from the config server's config.collections where 'dropped' is true. - * - * Before 5.0, when a collection was dropped, its entry in config.collections remained, tagged - * as 'dropped: true'. As those are no longer needed, this method cleans up the leftover - * metadata. - * - * It shall be called when upgrading to 5.0 or newer versions. - * - * TODO SERVER-53283: Remove once 5.0 has becomes last-lts. - */ - void _removePre50LegacyMetadata(OperationContext* opCtx); - - /** - * Creates a 'version.timestamp' for each one of the entries in the config server's - * config.databases where it didn't already exist before. - * - * TODO SERVER-53283: Remove once 5.0 becomes last-lts. - */ - void _upgradeDatabasesEntriesTo50(OperationContext* opCtx); - - /** - * Downgrades the config.databases entries to prior 4.9 version. More specifically, it removes - * the 'version.timestamp' field from all the documents in config.databases. - * - * TODO SERVER-53283: Remove once 5.0 becomes last-lts. 
- */ - void _downgradeDatabasesEntriesToPre50(OperationContext* opCtx); - - /** - * For each one of the entries in config.collections where there is no 'timestamp': - * - Patches-up the entries in config.chunks to set their 'collectionUUID' and 'timestamp' - * fields. - * - Creates a 'timestamp' in its entry in config.collections. - * , and builds the uuid_* indexes and drops the ns_* indexes on config.chunks. - * - * TODO SERVER-53283: Remove once 5.0 becomes last-lts. - */ - void _upgradeCollectionsAndChunksEntriesTo50Phase1(OperationContext* opCtx); - - /** - * Unsets the 'ns' field from all documents in config.chunks - * - * TODO SERVER-53283: Remove once 5.0 becomes last-lts. - */ - void _upgradeCollectionsAndChunksEntriesTo50Phase2(OperationContext* opCtx); - - /** - * For each one of the entries in config.collections where there is a 'timestamp': - * - Patches-up the entries in config.chunks to set their 'ns' field. - * - Unsets the 'timestamp' field from its entry in config.collections. - * , and builds the ns_* indexes and drops the uuid_* indexes on config.chunks. - * - * TODO SERVER-53283: Remove once 5.0 becomes last-lts. - */ - void _downgradeCollectionsAndChunksEntriesToPre50Phase1(OperationContext* opCtx); - - /** - * Unsets the 'collectionUUID' and 'timestamp' fields from all documents in config.chunks - * - * TODO SERVER-53283: Remove once 5.0 becomes last-lts. - */ - void _downgradeCollectionsAndChunksEntriesToPre50Phase2(OperationContext* opCtx); - // The owning service context ServiceContext* const _serviceContext; @@ -714,16 +614,6 @@ private: * taking this. */ Lock::ResourceMutex _kZoneOpLock; - - /** - * Lock for local database operations. This should be acquired when executing - * 'commitMovePrimary' and 'setFeatureCompatibilityVersion' commands which affect the - * config.databases collection. No other locks should be held when locking this. If an operation - * needs to take database locks (for example to write to a local collection) those locks should - * be taken after taking this. - * TODO (SERVER-53283): Remove once version 5.0 has been released. - */ - Lock::ResourceMutex _kDatabaseOpLock; }; } // namespace mongo diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp index 6f9584d4929..4d204afce63 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp @@ -39,7 +39,6 @@ #include "mongo/client/connection_string.h" #include "mongo/client/read_preference.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/commands/feature_compatibility_version.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/logical_session_cache.h" #include "mongo/db/namespace_string.h" @@ -68,8 +67,6 @@ namespace mongo { namespace { -using FeatureCompatibilityParams = ServerGlobalParams::FeatureCompatibility; - MONGO_FAIL_POINT_DEFINE(migrationCommitVersionError); MONGO_FAIL_POINT_DEFINE(migrateCommitInvalidChunkQuery); MONGO_FAIL_POINT_DEFINE(skipExpiringOldChunkHistory); @@ -1043,15 +1040,6 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration( const ShardId& toShard, const boost::optional<Timestamp>& validAfter) { - // TODO(SERVER-53283): Remove the logic around fcvRegion to re-enable - // the concurrent execution of moveChunk() and setFCV(). 
- FixedFCVRegion fcvRegion(opCtx); - uassert(ErrorCodes::ConflictingOperationInProgress, - "Cannot commit a chunk migration request " - "while the cluster is being upgraded or downgraded", - !fcvRegion->isUpgradingOrDowngrading()); - - auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); // Must hold the shard lock until the entire commit finishes to serialize with removeShard. diff --git a/src/mongo/db/s/sharding_ddl_coordinator_service.cpp b/src/mongo/db/s/sharding_ddl_coordinator_service.cpp index bf21a715361..7e239e6d2a9 100644 --- a/src/mongo/db/s/sharding_ddl_coordinator_service.cpp +++ b/src/mongo/db/s/sharding_ddl_coordinator_service.cpp @@ -96,12 +96,6 @@ ShardingDDLCoordinatorService* ShardingDDLCoordinatorService::getService(Operati std::shared_ptr<ShardingDDLCoordinatorService::Instance> ShardingDDLCoordinatorService::constructInstance(BSONObj initialState) { auto coord = constructShardingDDLCoordinatorInstance(this, std::move(initialState)); - - { - stdx::lock_guard lg(_completionMutex); - ++_numActiveCoordinators; - } - coord->getConstructionCompletionFuture() .thenRunOn(getInstanceCleanupExecutor()) .getAsync([this](auto status) { @@ -115,27 +109,9 @@ ShardingDDLCoordinatorService::constructInstance(BSONObj initialState) { _recoveredCV.notify_all(); } }); - - coord->getCompletionFuture() - .thenRunOn(getInstanceCleanupExecutor()) - .getAsync([this](auto status) { - stdx::lock_guard lg(_completionMutex); - if (--_numActiveCoordinators == 0) { - _completedCV.notify_all(); - } - }); - return coord; } -void ShardingDDLCoordinatorService::waitForAllCoordinatorsToComplete( - OperationContext* opCtx) const { - _waitForRecoveryCompletion(opCtx); - stdx::unique_lock lk(_completionMutex); - opCtx->waitForConditionOrInterrupt( - _completedCV, lk, [this]() { return _numActiveCoordinators == 0; }); -} - void ShardingDDLCoordinatorService::_afterStepDown() { stdx::lock_guard lg(_mutex); diff --git a/src/mongo/db/s/sharding_ddl_coordinator_service.h b/src/mongo/db/s/sharding_ddl_coordinator_service.h index 3f0163ae416..564f3821654 100644 --- a/src/mongo/db/s/sharding_ddl_coordinator_service.h +++ b/src/mongo/db/s/sharding_ddl_coordinator_service.h @@ -63,9 +63,6 @@ public: std::shared_ptr<Instance> getOrCreateInstance(OperationContext* opCtx, BSONObj initialState); - // TODO SERVER-53283 remove the following function after 5.0 became last LTS - void waitForAllCoordinatorsToComplete(OperationContext* opCtx) const; - private: ExecutorFuture<void> _rebuildService(std::shared_ptr<executor::ScopedTaskExecutor> executor, const CancellationToken& token) override; @@ -74,12 +71,6 @@ private: void _afterStepDown() override; size_t _countCoordinatorDocs(OperationContext* opCtx); - // TODO SERVER-53283 remove the following 3 variables after 5.0 became last LTS - mutable Mutex _completionMutex = - MONGO_MAKE_LATCH("ShardingDDLCoordinatorService::_completionMutex"); - size_t _numActiveCoordinators{0}; - mutable stdx::condition_variable _completedCV; - mutable Mutex _mutex = MONGO_MAKE_LATCH("ShardingDDLCoordinatorService::_mutex"); // When the node stepDown the state is set to kPaused and all the new DDL operation will be diff --git a/src/mongo/db/s/sharding_util.cpp b/src/mongo/db/s/sharding_util.cpp index 351da27f49c..26c8da56f8c 100644 --- a/src/mongo/db/s/sharding_util.cpp +++ b/src/mongo/db/s/sharding_util.cpp @@ -37,7 +37,6 @@ #include "mongo/db/commands.h" #include "mongo/logv2/log.h" #include "mongo/s/async_requests_sender.h" -#include 
"mongo/s/request_types/flush_database_cache_updates_gen.h" #include "mongo/s/request_types/flush_routing_table_cache_updates_gen.h" namespace mongo { @@ -56,17 +55,6 @@ void tellShardsToRefreshCollection(OperationContext* opCtx, sendCommandToShards(opCtx, NamespaceString::kAdminDb, cmdObj, shardIds, executor); } -void tellShardsToRefreshDatabase(OperationContext* opCtx, - const std::vector<ShardId>& shardIds, - const std::string& dbName, - const std::shared_ptr<executor::TaskExecutor>& executor) { - auto cmd = _flushDatabaseCacheUpdatesWithWriteConcern(dbName); - cmd.setSyncFromConfig(true); - cmd.setDbName(dbName); - auto cmdObj = CommandHelpers::appendMajorityWriteConcern(cmd.toBSON({})); - sendCommandToShards(opCtx, NamespaceString::kAdminDb, cmdObj, shardIds, executor); -} - void sendCommandToShards(OperationContext* opCtx, StringData dbName, const BSONObj& command, diff --git a/src/mongo/db/s/sharding_util.h b/src/mongo/db/s/sharding_util.h index 10b0c270edb..684568b0214 100644 --- a/src/mongo/db/s/sharding_util.h +++ b/src/mongo/db/s/sharding_util.h @@ -49,15 +49,6 @@ void tellShardsToRefreshCollection(OperationContext* opCtx, const std::shared_ptr<executor::TaskExecutor>& executor); /** - * Sends _flushDatabaseCacheUpdatesWithWriteConcern to a list of shards. Throws if one of the - * shards fails to refresh. - */ -void tellShardsToRefreshDatabase(OperationContext* opCtx, - const std::vector<ShardId>& shardIds, - const std::string& dbName, - const std::shared_ptr<executor::TaskExecutor>& executor); - -/** * Generic utility to send a command to a list of shards. Throws if one of the commands fails. */ void sendCommandToShards(OperationContext* opCtx, |