From 89306fde6167fa12ea6e30d61e05791e8e214e55 Mon Sep 17 00:00:00 2001
From: Paolo Polato
Date: Fri, 4 Jun 2021 14:45:40 +0000
Subject: Revert "SERVER-56307 Allow the donor to enter the critical section when the untransferred mods are within a convergence threshold."

This reverts commit 40e1693cb180fd17ec6fa0e1b32acc21769c0b85.
---
 .../db/s/migration_chunk_cloner_source_legacy.cpp  | 59 +++-------------------
 .../db/s/migration_chunk_cloner_source_legacy.h    |  3 --
 src/mongo/db/s/migration_destination_manager.cpp   | 28 +---------
 src/mongo/db/s/start_chunk_clone_request.h         |  3 --
 4 files changed, 7 insertions(+), 86 deletions(-)

diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index d65d7b6854d..e02a0ee23e3 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -41,13 +41,11 @@
 #include "mongo/db/dbhelpers.h"
 #include "mongo/db/exec/plan_stage.h"
 #include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/index/index_access_method.h"
 #include "mongo/db/index/index_descriptor.h"
 #include "mongo/db/query/internal_plans.h"
 #include "mongo/db/repl/optime.h"
 #include "mongo/db/repl/replication_process.h"
 #include "mongo/db/s/start_chunk_clone_request.h"
-#include "mongo/db/server_parameters.h"
 #include "mongo/db/service_context.h"
 #include "mongo/executor/remote_command_request.h"
 #include "mongo/executor/remote_command_response.h"
@@ -65,12 +63,6 @@ namespace mongo {
 
 namespace {
 
-/**
- * The maximum percentage of untrasferred chunk mods at the end of a catch up iteration
- * that may be deferred to the next phase of the migration protocol (where new writes get blocked).
- */
-MONGO_EXPORT_SERVER_PARAMETER(maxCatchUpPercentageBeforeBlockingWrites, int, 10);
-
 const char kRecvChunkStatus[] = "_recvChunkStatus";
 const char kRecvChunkCommit[] = "_recvChunkCommit";
 const char kRecvChunkAbort[] = "_recvChunkAbort";
@@ -304,32 +296,6 @@ Status MigrationChunkClonerSourceLegacy::awaitUntilCriticalSectionIsAppropriate(
             return Status::OK();
         }
 
-        bool supportsCriticalSectionDuringCatchUp = false;
-        if (auto featureSupportedField =
-                res[StartChunkCloneRequest::kSupportsCriticalSectionDuringCatchUp]) {
-            if (!featureSupportedField.booleanSafe()) {
-                return {ErrorCodes::Error(563070),
-                        str::stream()
-                            << "Illegal value for "
-                            << StartChunkCloneRequest::kSupportsCriticalSectionDuringCatchUp};
-            }
-            supportsCriticalSectionDuringCatchUp = true;
-        }
-
-        if (res["state"].String() == "catchup" && supportsCriticalSectionDuringCatchUp) {
-            int64_t estimatedUntransferredModsSize = _deleted.size() * _averageObjectIdSize +
-                _reload.size() * _averageObjectSizeForCloneLocs;
-            auto estimatedUntransferredChunkPercentage =
-                (std::min(_args.getMaxChunkSizeBytes(), estimatedUntransferredModsSize) * 100) /
-                _args.getMaxChunkSizeBytes();
-            if (estimatedUntransferredChunkPercentage <
-                maxCatchUpPercentageBeforeBlockingWrites.load()) {
-                // The recipient is sufficiently caught-up with the writes on the donor.
-                // Block writes, so that it can drain everything.
-                return Status::OK();
-            }
-        }
-
         if (res["state"].String() == "fail") {
             return {ErrorCodes::OperationFailed,
                     str::stream() << "Data transfer error: " << res["errmsg"].str()};
@@ -643,11 +609,11 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
 
     // Allow multiKey based on the invariant that shard keys must be single-valued. Therefore, any
     // multi-key index prefixed by shard key cannot be multikey over the shard key fields.
-    IndexDescriptor* const shardKeyIdx =
+    IndexDescriptor* const idx =
         collection->getIndexCatalog()->findShardKeyPrefixedIndex(opCtx,
                                                                  _shardKeyPattern.toBSON(),
                                                                  false);  // requireSingleKey
-    if (!shardKeyIdx) {
+    if (!idx) {
         return {ErrorCodes::IndexNotFound,
                 str::stream() << "can't find index with prefix " << _shardKeyPattern.toBSON()
                               << " in storeCurrentLocs for "
@@ -668,7 +634,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
     _deleteNotifyExec = std::move(statusWithDeleteNotificationPlanExecutor.getValue());
 
     // Assume both min and max non-empty, append MinKey's to make them fit chosen index
-    const KeyPattern kp(shardKeyIdx->keyPattern());
+    const KeyPattern kp(idx->keyPattern());
     BSONObj min = Helpers::toKeyFormat(kp.extendRangeBound(_args.getMinKey(), false));
     BSONObj max = Helpers::toKeyFormat(kp.extendRangeBound(_args.getMaxKey(), false));
 
@@ -677,7 +643,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
     // being queued and will migrate in the 'transferMods' stage.
    auto exec = InternalPlanner::indexScan(opCtx,
                                            collection,
-                                           shardKeyIdx,
+                                           idx,
                                            min,
                                            max,
                                            BoundInclusion::kIncludeStartKeyOnly,
@@ -732,19 +698,6 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
 
     const uint64_t collectionAverageObjectSize = collection->averageObjectSize(opCtx);
 
-    uint64_t averageObjectIdSize = 0;
-    const uint64_t defaultObjectIdSize = OID::kOIDSize;
-    if (totalRecs > 0) {
-        const auto indexCatalog = collection->getIndexCatalog();
-        const auto idIdx = indexCatalog->findIdIndex(opCtx);
-        if (!idIdx) {
-            return {ErrorCodes::IndexNotFound,
-                    str::stream() << "can't find index '_id' in storeCurrentLocs for "
-                                  << _args.getNss().ns()};
-        }
-        averageObjectIdSize = indexCatalog->getIndex(idIdx)->getSpaceUsedBytes(opCtx) / totalRecs;
-    }
-
     if (isLargeChunk) {
         return {
             ErrorCodes::ChunkTooBig,
@@ -766,8 +719,8 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
     }
 
     stdx::lock_guard<stdx::mutex> lk(_mutex);
-    _averageObjectSizeForCloneLocs = collectionAverageObjectSize + defaultObjectIdSize;
-    _averageObjectIdSize = std::max(averageObjectIdSize, defaultObjectIdSize);
+    _averageObjectSizeForCloneLocs = collectionAverageObjectSize + 12;
+
     return Status::OK();
 }
 
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
index aa9b5271119..0e8c53feab1 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
@@ -252,9 +252,6 @@ private:
     // pre-allocation (initial clone).
     uint64_t _averageObjectSizeForCloneLocs{0};
 
-    // The estimated average object _id size during the clone phase.
-    uint64_t _averageObjectIdSize{0};
-
     // List of _id of documents that were modified that must be re-cloned (xfer mods)
     std::list<BSONObj> _reload;
 
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index c2a19f9cf2b..cf0a076e3f4 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -69,9 +69,6 @@
 #include "mongo/util/scopeguard.h"
 
 namespace mongo {
-
-constexpr StringData StartChunkCloneRequest::kSupportsCriticalSectionDuringCatchUp;
-
 namespace {
 
 const auto getMigrationDestinationManager =
@@ -298,7 +295,6 @@ void MigrationDestinationManager::report(BSONObjBuilder& b,
     b.append("min", _min);
     b.append("max", _max);
     b.append("shardKeyPattern", _shardKeyPattern);
-    b.append(StartChunkCloneRequest::kSupportsCriticalSectionDuringCatchUp, true);
 
     b.append("state", stateToString(_state));
 
@@ -466,25 +462,6 @@ Status MigrationDestinationManager::startCommit(const MigrationSessionId& sessio
 
     stdx::unique_lock<stdx::mutex> lock(_mutex);
 
-    const auto convergenceTimeout =
-        Shard::kDefaultConfigCommandTimeout + Shard::kDefaultConfigCommandTimeout / 4;
-
-    // The donor may have started the commit while the recipient is still busy processing
-    // the last batch of mods sent in the catch up phase. Allow some time for synching up.
-    auto deadline = Date_t::now() + convergenceTimeout;
-
-    while (_state == CATCHUP) {
-        if (stdx::cv_status::timeout ==
-            _stateChangedCV.wait_until(lock, deadline.toSystemTimePoint())) {
-            return {ErrorCodes::CommandFailed,
-                    str::stream() << "startCommit timed out waiting for the catch up completion. "
-                                  << "Sender's session is "
-                                  << sessionId.toString()
-                                  << ". Current session is "
-                                  << (_sessionId ? _sessionId->toString() : "none.")};
-        }
-    }
-
     if (_state != STEADY) {
         return {ErrorCodes::CommandFailed,
                 str::stream() << "Migration startCommit attempted when not in STEADY state."
@@ -512,9 +489,7 @@ Status MigrationDestinationManager::startCommit(const MigrationSessionId& sessio
     _state = COMMIT_START;
     _stateChangedCV.notify_all();
 
-    // Assigning a timeout slightly higher than the one used for network requests to the config
-    // server. Enough time to retry at least once in case of network failures (SERVER-51397).
-    deadline = Date_t::now() + convergenceTimeout;
+    auto const deadline = Date_t::now() + Seconds(30);
     while (_sessionId) {
         if (stdx::cv_status::timeout ==
             _isActiveCV.wait_until(lock, deadline.toSystemTimePoint())) {
@@ -938,7 +913,6 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx) {
             const auto& mods = res.response;
 
             if (mods["size"].number() == 0) {
-                // There are no more pending modifications to be applied. End the catchup phase
                 break;
             }
 
diff --git a/src/mongo/db/s/start_chunk_clone_request.h b/src/mongo/db/s/start_chunk_clone_request.h
index 4a3e359372d..c3d63fc807b 100644
--- a/src/mongo/db/s/start_chunk_clone_request.h
+++ b/src/mongo/db/s/start_chunk_clone_request.h
@@ -49,9 +49,6 @@ class StatusWith;
  */
 class StartChunkCloneRequest {
 public:
-    static constexpr auto kSupportsCriticalSectionDuringCatchUp =
-        "supportsCriticalSectionDuringCatchUp"_sd;
-
     /**
     * Parses the input command and produces a request corresponding to its arguments.
     */
--
cgit v1.2.1