author     Paolo Polato <paolo.polato@mongodb.com>           2021-06-15 15:13:14 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-06-15 19:50:48 +0000
commit     6833d9572ee12cbf9dd2e74a006c38cc35416fde (patch)
tree       b168f1be9313ce1d68a19265f1ead3f5096915e1 /src/mongo
parent     539377ab3d632ab96e24874410a9b9b42ddaaea1 (diff)
download   mongo-6833d9572ee12cbf9dd2e74a006c38cc35416fde.tar.gz
Revert "SERVER-56307 Allow the donor to enter the critical section when the untransferred mods are within a convergence threshold."
This reverts commit 3394ccd20ee2a77f3526c481799943c7418bff00.
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp | 53
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.h   |  3
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp        | 27
-rw-r--r--  src/mongo/db/s/sharding_runtime_d_params.idl            | 11
-rw-r--r--  src/mongo/db/s/start_chunk_clone_request.h              |  3
5 files changed, 7 insertions(+), 90 deletions(-)
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index ab1ba26f7f2..b17f14298d7 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -39,14 +39,12 @@
#include "mongo/db/catalog_raii.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/index/index_access_method.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/replication_process.h"
#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/migration_source_manager.h"
-#include "mongo/db/s/sharding_runtime_d_params_gen.h"
#include "mongo/db/s/sharding_statistics.h"
#include "mongo/db/s/start_chunk_clone_request.h"
#include "mongo/db/service_context.h"
@@ -341,31 +339,6 @@ Status MigrationChunkClonerSourceLegacy::awaitUntilCriticalSectionIsAppropriate(
return Status::OK();
}
- bool supportsCriticalSectionDuringCatchUp = false;
- if (auto featureSupportedField =
- res[StartChunkCloneRequest::kSupportsCriticalSectionDuringCatchUp]) {
- if (!featureSupportedField.booleanSafe()) {
- return {ErrorCodes::Error(5576604),
- str::stream()
- << "Illegal value for "
- << StartChunkCloneRequest::kSupportsCriticalSectionDuringCatchUp};
- }
- supportsCriticalSectionDuringCatchUp = true;
- }
-
- if (res["state"].String() == "catchup" && supportsCriticalSectionDuringCatchUp) {
- int64_t estimatedUntransferredModsSize = _deleted.size() * _averageObjectIdSize +
- _reload.size() * _averageObjectSizeForCloneLocs;
- auto estimatedUntransferredChunkPercentage =
- (std::min(_args.getMaxChunkSizeBytes(), estimatedUntransferredModsSize) * 100) /
- _args.getMaxChunkSizeBytes();
- if (estimatedUntransferredChunkPercentage < maxCatchUpPercentageBeforeBlockingWrites) {
- // The recipient is sufficiently caught-up with the writes on the donor.
- // Block writes, so that it can drain everything.
- return Status::OK();
- }
- }
-
if (res["state"].String() == "fail") {
return {ErrorCodes::OperationFailed,
str::stream() << "Data transfer error: " << res["errmsg"].str()};
@@ -770,18 +743,18 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
// Allow multiKey based on the invariant that shard keys must be single-valued. Therefore, any
// multi-key index prefixed by shard key cannot be multikey over the shard key fields.
- const IndexDescriptor* shardKeyIdx =
+ const IndexDescriptor* idx =
collection->getIndexCatalog()->findShardKeyPrefixedIndex(opCtx,
_shardKeyPattern.toBSON(),
false); // requireSingleKey
- if (!shardKeyIdx) {
+ if (!idx) {
return {ErrorCodes::IndexNotFound,
str::stream() << "can't find index with prefix " << _shardKeyPattern.toBSON()
<< " in storeCurrentLocs for " << _args.getNss().ns()};
}
// Assume both min and max non-empty, append MinKey's to make them fit chosen index
- const KeyPattern kp(shardKeyIdx->keyPattern());
+ const KeyPattern kp(idx->keyPattern());
BSONObj min = Helpers::toKeyFormat(kp.extendRangeBound(_args.getMinKey(), false));
BSONObj max = Helpers::toKeyFormat(kp.extendRangeBound(_args.getMaxKey(), false));
@@ -790,7 +763,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
// being queued and will migrate in the 'transferMods' stage.
auto exec = InternalPlanner::indexScan(opCtx,
collection,
- shardKeyIdx,
+ idx,
min,
max,
BoundInclusion::kIncludeStartKeyOnly,
@@ -845,20 +818,6 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
const uint64_t collectionAverageObjectSize = collection->averageObjectSize(opCtx);
- uint64_t averageObjectIdSize = 0;
- const uint64_t defaultObjectIdSize = OID::kOIDSize;
- if (totalRecs > 0) {
- const auto indexCatalog = collection->getIndexCatalog();
- const auto idIdx = indexCatalog->findIdIndex(opCtx);
- if (!idIdx) {
- return {ErrorCodes::IndexNotFound,
- str::stream() << "can't find index '_id' in storeCurrentLocs for "
- << _args.getNss().ns()};
- }
- averageObjectIdSize =
- indexCatalog->getEntry(idIdx)->accessMethod()->getSpaceUsedBytes(opCtx) / totalRecs;
- }
-
if (isLargeChunk) {
return {
ErrorCodes::ChunkTooBig,
@@ -871,8 +830,8 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
}
stdx::lock_guard<Latch> lk(_mutex);
- _averageObjectSizeForCloneLocs = collectionAverageObjectSize + defaultObjectIdSize;
- _averageObjectIdSize = std::max(averageObjectIdSize, defaultObjectIdSize);
+ _averageObjectSizeForCloneLocs = collectionAverageObjectSize + 12;
+
return Status::OK();
}
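
For context, the hunks above remove the donor-side convergence check that let the migration enter the critical section while the recipient was still in the catchup state. A minimal standalone sketch of that computation, with the cloner's members and the server parameter passed as plain arguments (hypothetical helper name, not the real MigrationChunkClonerSourceLegacy API):

    #include <algorithm>
    #include <cstdint>

    // Sketch of the reverted convergence check. deletedCount/reloadCount stand in
    // for _deleted.size()/_reload.size(); maxChunkSizeBytes is assumed non-zero.
    bool recipientSufficientlyCaughtUp(uint64_t deletedCount,
                                       uint64_t averageObjectIdSize,
                                       uint64_t reloadCount,
                                       uint64_t averageObjectSizeForCloneLocs,
                                       uint64_t maxChunkSizeBytes,
                                       int maxCatchUpPercentageBeforeBlockingWrites) {
        // Estimate the bytes of mods not yet transferred to the recipient.
        const uint64_t estimatedUntransferredModsSize =
            deletedCount * averageObjectIdSize + reloadCount * averageObjectSizeForCloneLocs;

        // Express the backlog as a percentage of the max chunk size, capped at 100%.
        const uint64_t estimatedUntransferredChunkPercentage =
            std::min(maxChunkSizeBytes, estimatedUntransferredModsSize) * 100 / maxChunkSizeBytes;

        // Below the threshold, the donor may block writes so the recipient can
        // drain the remainder during the critical section.
        return estimatedUntransferredChunkPercentage <
            static_cast<uint64_t>(maxCatchUpPercentageBeforeBlockingWrites);
    }

For example, with the parameter's removed default of 10 and a 64 MB max chunk size, the donor would have entered the critical section once the estimated untransferred backlog dropped below roughly 6.4 MB.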
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
index de22918f89e..e5263466c11 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
@@ -347,9 +347,6 @@ private:
// Indicates whether new requests to track an operation are accepted.
bool _acceptingNewOperationTrackRequests{true};
- // The estimated average object _id size during the clone phase.
- uint64_t _averageObjectIdSize{0};
-
// List of _id of documents that were modified that must be re-cloned (xfer mods)
std::list<BSONObj> _reload;
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index f8307753044..bff99cf7029 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -70,9 +70,6 @@
#include "mongo/util/str.h"
namespace mongo {
-
-constexpr StringData StartChunkCloneRequest::kSupportsCriticalSectionDuringCatchUp;
-
namespace {
const auto getMigrationDestinationManager =
@@ -299,7 +296,6 @@ void MigrationDestinationManager::report(BSONObjBuilder& b,
b.append("min", _min);
b.append("max", _max);
b.append("shardKeyPattern", _shardKeyPattern);
- b.append(StartChunkCloneRequest::kSupportsCriticalSectionDuringCatchUp, true);
b.append("state", stateToString(_state));
@@ -475,24 +471,6 @@ Status MigrationDestinationManager::startCommit(const MigrationSessionId& sessio
stdx::unique_lock<Latch> lock(_mutex);
- const auto convergenceTimeout =
- Shard::kDefaultConfigCommandTimeout + Shard::kDefaultConfigCommandTimeout / 4;
-
- // The donor may have started the commit while the recipient is still busy processing
- // the last batch of mods sent in the catch up phase. Allow some time for synching up.
- auto deadline = Date_t::now() + convergenceTimeout;
-
- while (_state == CATCHUP) {
- if (stdx::cv_status::timeout ==
- _stateChangedCV.wait_until(lock, deadline.toSystemTimePoint())) {
- return {ErrorCodes::CommandFailed,
- str::stream() << "startCommit timed out waiting for the catch up completion. "
- << "Sender's session is " << sessionId.toString()
- << ". Current session is "
- << (_sessionId ? _sessionId->toString() : "none.")};
- }
- }
-
if (_state != STEADY) {
return {ErrorCodes::CommandFailed,
str::stream() << "Migration startCommit attempted when not in STEADY state."
@@ -518,9 +496,7 @@ Status MigrationDestinationManager::startCommit(const MigrationSessionId& sessio
_state = COMMIT_START;
_stateChangedCV.notify_all();
- // Assigning a timeout slightly higher than the one used for network requests to the config
- // server. Enough time to retry at least once in case of network failures (SERVER-51397).
- deadline = Date_t::now() + convergenceTimeout;
+ auto const deadline = Date_t::now() + Seconds(30);
while (_sessionId) {
if (stdx::cv_status::timeout ==
_isActiveCV.wait_until(lock, deadline.toSystemTimePoint())) {
@@ -925,7 +901,6 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx) {
const auto& mods = res.response;
if (mods["size"].number() == 0) {
- // There are no more pending modifications to be applied. End the catchup phase
break;
}
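
The startCommit hunks above drop a bounded wait that tolerated a recipient still in the CATCHUP state when the donor asked to commit. The underlying pattern is an ordinary condition-variable wait against an absolute deadline; a self-contained sketch with a simplified state machine (not the real MigrationDestinationManager):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    enum class State { kCatchup, kSteady, kCommitStart };

    struct DestinationManagerSketch {
        std::mutex mutex;
        std::condition_variable stateChangedCV;
        State state = State::kCatchup;

        // Give the recipient a bounded window to finish applying the last batch
        // of catch-up mods before failing the commit request, as the reverted
        // code did with kDefaultConfigCommandTimeout plus a quarter.
        bool waitUntilSteady(std::chrono::milliseconds convergenceTimeout) {
            std::unique_lock<std::mutex> lock(mutex);
            const auto deadline = std::chrono::steady_clock::now() + convergenceTimeout;
            while (state == State::kCatchup) {
                if (stateChangedCV.wait_until(lock, deadline) == std::cv_status::timeout) {
                    return false;  // startCommit timed out waiting for catch-up completion.
                }
            }
            return state == State::kSteady;
        }
    };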
diff --git a/src/mongo/db/s/sharding_runtime_d_params.idl b/src/mongo/db/s/sharding_runtime_d_params.idl
index e536fa5dd25..ee943867ba9 100644
--- a/src/mongo/db/s/sharding_runtime_d_params.idl
+++ b/src/mongo/db/s/sharding_runtime_d_params.idl
@@ -52,17 +52,6 @@ server_parameters:
gte: 0
default: 20
- maxCatchUpPercentageBeforeBlockingWrites:
- description: >-
- The maximum percentage of untransferred chunk mods at the end of a catch up iteration
- that may be deferred to the next phase of the migration protocol
- (where new writes get blocked).
- set_at: [startup]
- cpp_vartype: int
- cpp_varname: maxCatchUpPercentageBeforeBlockingWrites
- validator: { gte: 0, lte: 100 }
- default: 10
-
migrateCloneInsertionBatchSize:
description: >-
The maximum number of documents to insert in a single batch during the cloning step of
diff --git a/src/mongo/db/s/start_chunk_clone_request.h b/src/mongo/db/s/start_chunk_clone_request.h
index 751e1781b3b..40235a8dc0e 100644
--- a/src/mongo/db/s/start_chunk_clone_request.h
+++ b/src/mongo/db/s/start_chunk_clone_request.h
@@ -48,9 +48,6 @@ class StatusWith;
*/
class StartChunkCloneRequest {
public:
- static constexpr auto kSupportsCriticalSectionDuringCatchUp =
- "supportsCriticalSectionDuringCatchUp"_sd;
-
/**
* Parses the input command and produces a request corresponding to its arguments.
*/
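
The removed kSupportsCriticalSectionDuringCatchUp constant backed a simple capability handshake: the recipient advertised the flag in its migration status document, and the donor (first file above) treated an absent field as unsupported and a present-but-false value as an error. A generic sketch of that donor-side interpretation, assuming the field has already been parsed into an optional boolean rather than using MongoDB's BSON types:

    #include <optional>
    #include <stdexcept>

    // Donor-side reading of an optional capability flag reported by the recipient.
    bool supportsCriticalSectionDuringCatchUp(std::optional<bool> reportedFlag) {
        if (!reportedFlag) {
            return false;  // Older recipients simply omit the field.
        }
        if (!*reportedFlag) {
            // Mirrors the reverted error path (code 5576604): when present,
            // the field must be true.
            throw std::runtime_error(
                "Illegal value for supportsCriticalSectionDuringCatchUp");
        }
        return true;
    }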