Author:    Paolo Polato <paolo.polato@mongodb.com>	2021-05-12 07:30:03 +0000
Committer: Evergreen Agent <no-reply@evergreen.mongodb.com>	2021-06-21 15:14:59 +0000
Commit:    c35c05f6e55a44a15972040f8c552a18a7725dfa
Tree:      d5d3a2da77e326b1df1e6653d7d799310102c9fe
Parent:    1cb38af8b458f14ff47a2c03fc769dcad4a90b76
SERVER-56307 Allow the donor to enter the critical section when the untransferred mods are within a convergence threshold.
(cherry picked from commit 44bdd2c38fd5c80881e15f40e2da6cacf5f35eb4)
 src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp | 48
 src/mongo/db/s/migration_chunk_cloner_source_legacy.h   |  3
 src/mongo/db/s/migration_destination_manager.cpp        | 23
 src/mongo/db/s/sharding_runtime_d_params.idl            | 13
 src/mongo/db/s/start_chunk_clone_request.h              |  3
 5 files changed, 83 insertions(+), 7 deletions(-)
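
The heart of this change is the donor-side convergence check: estimate the byte size of the mods not yet transferred (a delete is replayed by _id only, a reload resends the full document) and enter the critical section once that estimate drops below a configurable percentage of the maximum chunk size. A minimal standalone sketch of that computation, using illustrative parameter names rather than the actual member fields:

    #include <algorithm>
    #include <cstdint>

    // Returns true when the estimated backlog of untransferred mods is small
    // enough, relative to the max chunk size, for the donor to enter the
    // critical section.
    bool withinConvergenceThreshold(int64_t numDeletes,
                                    int64_t numReloads,
                                    int64_t avgObjectIdSize,
                                    int64_t avgObjectSize,
                                    int64_t maxChunkSizeBytes,
                                    int minCatchUpPercentageBeforeBlockingWrites) {
        // Deletes are forwarded by _id only; reloads resend whole documents.
        const int64_t estimatedUntransferredModsSize =
            numDeletes * avgObjectIdSize + numReloads * avgObjectSize;

        // Clamping to maxChunkSizeBytes keeps the percentage within [0, 100].
        const int64_t estimatedUntransferredChunkPercentage =
            (std::min(maxChunkSizeBytes, estimatedUntransferredModsSize) * 100) /
            maxChunkSizeBytes;

        return estimatedUntransferredChunkPercentage <
            minCatchUpPercentageBeforeBlockingWrites;
    }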
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index e6815d05889..6b65ecd7a50 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -39,11 +39,13 @@
 #include "mongo/db/catalog_raii.h"
 #include "mongo/db/dbhelpers.h"
 #include "mongo/db/exec/working_set_common.h"
+#include "mongo/db/index/index_access_method.h"
 #include "mongo/db/index/index_descriptor.h"
 #include "mongo/db/repl/optime.h"
 #include "mongo/db/repl/replication_process.h"
 #include "mongo/db/s/collection_sharding_runtime.h"
 #include "mongo/db/s/migration_source_manager.h"
+#include "mongo/db/s/sharding_runtime_d_params_gen.h"
 #include "mongo/db/s/sharding_statistics.h"
 #include "mongo/db/s/start_chunk_clone_request.h"
 #include "mongo/db/service_context.h"
@@ -796,18 +798,18 @@ MigrationChunkClonerSourceLegacy::_getIndexScanExecutor(
     InternalPlanner::IndexScanOptions scanOption) {
     // Allow multiKey based on the invariant that shard keys must be single-valued. Therefore, any
     // multi-key index prefixed by shard key cannot be multikey over the shard key fields.
-    const IndexDescriptor* idx =
+    const IndexDescriptor* shardKeyIdx =
         collection->getIndexCatalog()->findShardKeyPrefixedIndex(opCtx,
                                                                  _shardKeyPattern.toBSON(),
                                                                  false);  // requireSingleKey
-    if (!idx) {
+    if (!shardKeyIdx) {
         return {ErrorCodes::IndexNotFound,
                 str::stream() << "can't find index with prefix " << _shardKeyPattern.toBSON()
                               << " in storeCurrentLocs for " << _args.getNss().ns()};
     }

     // Assume both min and max non-empty, append MinKey's to make them fit chosen index
-    const KeyPattern kp(idx->keyPattern());
+    const KeyPattern kp(shardKeyIdx->keyPattern());

     BSONObj min = Helpers::toKeyFormat(kp.extendRangeBound(_args.getMinKey(), false));
     BSONObj max = Helpers::toKeyFormat(kp.extendRangeBound(_args.getMaxKey(), false));
@@ -816,7 +818,7 @@ MigrationChunkClonerSourceLegacy::_getIndexScanExecutor(
     // being queued and will migrate in the 'transferMods' stage.
     return InternalPlanner::indexScan(opCtx,
                                       &collection,
-                                      idx,
+                                      shardKeyIdx,
                                       min,
                                       max,
                                       BoundInclusion::kIncludeStartKeyOnly,
@@ -896,6 +898,17 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
     const uint64_t collectionAverageObjectSize = collection->averageObjectSize(opCtx);

+    uint64_t averageObjectIdSize = OID::kOIDSize;
+    if (totalRecs > 0) {
+        const auto idIdx = collection->getIndexCatalog()->findIdIndex(opCtx);
+        if (!idIdx) {
+            return {ErrorCodes::IndexNotFound,
+                    str::stream() << "can't find index '_id' in storeCurrentLocs for "
+                                  << _args.getNss().ns()};
+        }
+        averageObjectIdSize = idIdx->getEntry()->accessMethod()->getSpaceUsedBytes(opCtx) / totalRecs;
+    }
+
     if (isLargeChunk) {
         return {
             ErrorCodes::ChunkTooBig,
@@ -909,7 +922,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC

     stdx::lock_guard<Latch> lk(_mutex);
     _averageObjectSizeForCloneLocs = collectionAverageObjectSize + 12;
-
+    _averageObjectIdSize = averageObjectIdSize;
     return Status::OK();
 }
@@ -1023,6 +1036,31 @@ Status MigrationChunkClonerSourceLegacy::_checkRecipientCloningStatus(OperationC
         return Status::OK();
     }

+    bool supportsCriticalSectionDuringCatchUp = false;
+    if (auto featureSupportedField =
+            res[StartChunkCloneRequest::kSupportsCriticalSectionDuringCatchUp]) {
+        if (!featureSupportedField.booleanSafe()) {
+            return {ErrorCodes::Error(563070),
+                    str::stream()
+                        << "Illegal value for "
+                        << StartChunkCloneRequest::kSupportsCriticalSectionDuringCatchUp};
+        }
+        supportsCriticalSectionDuringCatchUp = true;
+    }
+
+    if (res["state"].String() == "catchup" && supportsCriticalSectionDuringCatchUp) {
+        int64_t estimatedUntransferredModsSize = _deleted.size() * _averageObjectIdSize +
+            _reload.size() * _averageObjectSizeForCloneLocs;
+        auto estimatedUntransferredChunkPercentage =
+            (std::min(_args.getMaxChunkSizeBytes(), estimatedUntransferredModsSize) * 100) /
+            _args.getMaxChunkSizeBytes();
+        if (estimatedUntransferredChunkPercentage < minCatchUpPercentageBeforeBlockingWrites) {
+            // The recipient is sufficiently caught up with the writes on the donor.
+            // Block writes, so that it can drain everything.
+            return Status::OK();
+        }
+    }
+
     if (res["state"].String() == "fail") {
         return {ErrorCodes::OperationFailed,
                 str::stream() << "Data transfer error: " << res["errmsg"].str()};
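
One detail worth calling out: the donor prices a pending delete at the average _id entry size, obtained above by dividing the _id index's space usage by the record count. A one-function sketch of that estimate (the helper name is illustrative; the 12-byte fallback mirrors MongoDB's OID::kOIDSize default for an empty collection):

    #include <cstdint>

    // Estimate the average per-document _id footprint from the _id index's
    // on-disk size, falling back to the 12-byte ObjectId size when the
    // collection holds no records, as _storeCurrentLocs does above.
    inline uint64_t estimateAverageObjectIdSize(uint64_t idIndexSpaceUsedBytes,
                                                uint64_t totalRecords) {
        const uint64_t kDefaultObjectIdSize = 12;  // OID::kOIDSize
        return totalRecords > 0 ? idIndexSpaceUsedBytes / totalRecords
                                : kDefaultObjectIdSize;
    }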
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
index 3f866595cda..f41127afad1 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
@@ -357,6 +357,9 @@ private:
     // pre-allocation (initial clone).
     uint64_t _averageObjectSizeForCloneLocs{0};

+    // The estimated average object _id size during the clone phase.
+    uint64_t _averageObjectIdSize{0};
+
     // Represents all of the requested but not yet fulfilled operations to be tracked, with regards
     // to the chunk being cloned.
     uint64_t _outstandingOperationTrackRequests{0};
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index a6ce98edcb7..877cc863702 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -360,6 +360,7 @@ void MigrationDestinationManager::report(BSONObjBuilder& b,
     b.append("min", _min);
     b.append("max", _max);
     b.append("shardKeyPattern", _shardKeyPattern);
+    b.append(StartChunkCloneRequest::kSupportsCriticalSectionDuringCatchUp, true);

     b.append("state", stateToString(_state));
@@ -547,6 +548,24 @@ Status MigrationDestinationManager::startCommit(const MigrationSessionId& sessio

     stdx::unique_lock<Latch> lock(_mutex);

+    const auto convergenceTimeout =
+        Shard::kDefaultConfigCommandTimeout + Shard::kDefaultConfigCommandTimeout / 4;
+
+    // The donor may have started the commit while the recipient is still busy processing
+    // the last batch of mods sent in the catch-up phase. Allow some time for syncing up.
+    auto deadline = Date_t::now() + convergenceTimeout;
+
+    while (_state == CATCHUP) {
+        if (stdx::cv_status::timeout ==
+            _stateChangedCV.wait_until(lock, deadline.toSystemTimePoint())) {
+            return {ErrorCodes::CommandFailed,
+                    str::stream() << "startCommit timed out waiting for the catch up completion. "
+                                  << "Sender's session is " << sessionId.toString()
+                                  << ". Current session is "
+                                  << (_sessionId ? _sessionId->toString() : "none.")};
+        }
+    }
+
     if (_state != STEADY) {
         return {ErrorCodes::CommandFailed,
                 str::stream() << "Migration startCommit attempted when not in STEADY state."
@@ -574,8 +593,7 @@ Status MigrationDestinationManager::startCommit(const MigrationSessionId& sessio
     // Assigning a timeout slightly higher than the one used for network requests to the config
     // server. Enough time to retry at least once in case of network failures (SERVER-51397).
-    auto const deadline = Date_t::now() + Shard::kDefaultConfigCommandTimeout +
-        Shard::kDefaultConfigCommandTimeout / 4;
+    deadline = Date_t::now() + convergenceTimeout;

     while (_sessionId) {
         if (stdx::cv_status::timeout ==
             _isActiveCV.wait_until(lock, deadline.toSystemTimePoint())) {
@@ -1174,6 +1192,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx) {
             const auto& mods = res.response;

             if (mods["size"].number() == 0) {
+                // No more pending modifications to be applied; end the catch-up phase.
                 break;
             }
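
The new wait in startCommit is the classic condition-variable idiom: loop on the predicate so that spurious wakeups and intermediate state changes are absorbed, and give up once an absolute deadline passes. A self-contained sketch of the same pattern with plain standard-library types (State, waitForCatchUpCompletion and friends are illustrative names, not MongoDB's):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    enum class State { CATCHUP, STEADY, FAIL };

    // Block until `state` leaves CATCHUP or `timeout` elapses; returns false
    // on timeout (the analogue of startCommit's CommandFailed error). The
    // predicate is re-checked after every wakeup, so spurious wakeups and
    // CATCHUP->CATCHUP notifications are harmless.
    bool waitForCatchUpCompletion(std::mutex& mutex,
                                  std::condition_variable& stateChangedCV,
                                  const State& state,
                                  std::chrono::milliseconds timeout) {
        std::unique_lock<std::mutex> lock(mutex);
        const auto deadline = std::chrono::steady_clock::now() + timeout;
        while (state == State::CATCHUP) {
            if (stateChangedCV.wait_until(lock, deadline) == std::cv_status::timeout) {
                return false;
            }
        }
        return true;
    }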
diff --git a/src/mongo/db/s/sharding_runtime_d_params.idl b/src/mongo/db/s/sharding_runtime_d_params.idl
index 58a7f95de2f..ef964aac54d 100644
--- a/src/mongo/db/s/sharding_runtime_d_params.idl
+++ b/src/mongo/db/s/sharding_runtime_d_params.idl
@@ -84,6 +84,19 @@ server_parameters:
         cpp_varname: migrationLockAcquisitionMaxWaitMS
         default: 500

+    minCatchUpPercentageBeforeBlockingWrites:
+        description: >-
+            The maximum percentage of untransferred chunk mods at the end of a catch-up
+            iteration that may be deferred to the next phase of the migration protocol
+            (where new writes get blocked).
+        set_at: [startup]
+        cpp_vartype: int
+        cpp_varname: minCatchUpPercentageBeforeBlockingWrites
+        validator:
+            gte: 0
+            lte: 100
+        default: 10
+
     orphanCleanupDelaySecs:
         description: 'How long to wait before starting cleanup of an emigrated chunk range.'
         set_at: [startup, runtime]
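
Because the parameter is settable only at startup, tuning it means restarting the shard's mongod with a different value, for example (the value 25 is arbitrary, chosen here purely for illustration):

    mongod --shardsvr --setParameter minCatchUpPercentageBeforeBlockingWrites=25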
diff --git a/src/mongo/db/s/start_chunk_clone_request.h b/src/mongo/db/s/start_chunk_clone_request.h
index c88986887fb..c6ecba1f839 100644
--- a/src/mongo/db/s/start_chunk_clone_request.h
+++ b/src/mongo/db/s/start_chunk_clone_request.h
@@ -49,6 +49,9 @@ class StatusWith;
  */
 class StartChunkCloneRequest {
 public:
+    static constexpr auto kSupportsCriticalSectionDuringCatchUp =
+        "supportsCriticalSectionDuringCatchUp"_sd;
+
     /**
      * Parses the input command and produces a request corresponding to its arguments.
      */