author     Allison Easton <allison.easton@mongodb.com>       2022-05-17 11:12:35 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-05-17 12:03:40 +0000
commit     1a44e197ee6d2e8ebdec97f8fd817619e84aacaa (patch)
tree       ea53cb3641f99f37ae66cb7a02b38be9dd1cbc6b
parent     76fce4412c973c738a02333813dc394191a826f1 (diff)
SERVER-65930 DDL coordinators and rename participant initial checkpoint may incur a DuplicateKey error
-rw-r--r--  src/mongo/db/s/rename_collection_participant_service.cpp  19
-rw-r--r--  src/mongo/db/s/sharding_ddl_coordinator.cpp                 7
-rw-r--r--  src/mongo/db/s/sharding_ddl_coordinator.h                  17
3 files changed, 33 insertions(+), 10 deletions(-)
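The commit makes two related changes: every checkpoint persistence switches from the bounded sharding write-concern timeout to an untimed majority write concern, and the initial checkpoint insert becomes tolerant of DuplicateKey. If a step-down/step-up sequence re-runs the insert after the document was already persisted locally, the node skips the write but still waits for its last applied opTime to become majority committed. A minimal sketch of that pattern as a free-standing helper (the function name insertOrWaitForMajority is illustrative, not part of the patch; all calls are the internal APIs that appear in the hunks below):

    // Sketch only; helper name is hypothetical, APIs are those used by the patch.
    #include "mongo/db/persistent_task_store.h"
    #include "mongo/db/repl/replication_coordinator.h"
    #include "mongo/db/repl/wait_for_majority_service.h"

    template <typename StateDoc>
    void insertOrWaitForMajority(OperationContext* opCtx,
                                 PersistentTaskStore<StateDoc>& store,
                                 const StateDoc& doc) {
        try {
            // Normal path: insert the checkpoint and wait for majority, with no timeout.
            store.add(opCtx, doc, WriteConcerns::kMajorityWriteConcernNoTimeout);
        } catch (const ExceptionFor<ErrorCodes::DuplicateKey>&) {
            // The document already exists locally (this code was re-run after a
            // step-down/step-up). The insert is a no-op, but the majority-commit
            // guarantee must still hold, so wait on the last applied opTime.
            const auto replCoord = repl::ReplicationCoordinator::get(opCtx);
            const auto lastLocalOpTime = replCoord->getMyLastAppliedOpTime();
            WaitForMajorityService::get(opCtx->getServiceContext())
                .waitUntilMajority(lastLocalOpTime, opCtx->getCancellationToken())
                .get(opCtx);
        }
    }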
diff --git a/src/mongo/db/s/rename_collection_participant_service.cpp b/src/mongo/db/s/rename_collection_participant_service.cpp
index e09cb196980..e99c16dff44 100644
--- a/src/mongo/db/s/rename_collection_participant_service.cpp
+++ b/src/mongo/db/s/rename_collection_participant_service.cpp
@@ -201,12 +201,23 @@ void RenameParticipantInstance::_enterPhase(Phase newPhase) {
     PersistentTaskStore<StateDoc> store(NamespaceString::kShardingRenameParticipantsNamespace);
 
     if (_doc.getPhase() == Phase::kUnset) {
-        store.add(opCtx.get(), newDoc, WriteConcerns::kMajorityWriteConcernShardingTimeout);
+        try {
+            store.add(opCtx.get(), newDoc, WriteConcerns::kMajorityWriteConcernNoTimeout);
+        } catch (const ExceptionFor<ErrorCodes::DuplicateKey>&) {
+            // A series of step-up and step-down events can cause a node to try and insert the
+            // document when it has already been persisted locally, but we must still wait for
+            // majority commit.
+            const auto replCoord = repl::ReplicationCoordinator::get(opCtx.get());
+            const auto lastLocalOpTime = replCoord->getMyLastAppliedOpTime();
+            WaitForMajorityService::get(opCtx->getServiceContext())
+                .waitUntilMajority(lastLocalOpTime, opCtx.get()->getCancellationToken())
+                .get(opCtx.get());
+        }
     } else {
         store.update(opCtx.get(),
                      BSON(StateDoc::kFromNssFieldName << fromNss().ns()),
                      newDoc.toBSON(),
-                     WriteConcerns::kMajorityWriteConcernShardingTimeout);
+                     WriteConcerns::kMajorityWriteConcernNoTimeout);
     }
 
     _doc = std::move(newDoc);
@@ -222,7 +233,7 @@ void RenameParticipantInstance::_removeStateDocument(OperationContext* opCtx) {
     PersistentTaskStore<StateDoc> store(NamespaceString::kShardingRenameParticipantsNamespace);
     store.remove(opCtx,
                  BSON(StateDoc::kFromNssFieldName << fromNss().ns()),
-                 WriteConcerns::kMajorityWriteConcernShardingTimeout);
+                 WriteConcerns::kMajorityWriteConcernNoTimeout);
 
     _doc = {};
 }
@@ -375,7 +386,7 @@ SemiFuture<void> RenameParticipantInstance::_runImpl(
                 service->releaseRecoverableCriticalSection(
                     opCtx, fromNss(), reason, ShardingCatalogClient::kLocalWriteConcern);
                 service->releaseRecoverableCriticalSection(
-                    opCtx, toNss(), reason, ShardingCatalogClient::kMajorityWriteConcern);
+                    opCtx, toNss(), reason, WriteConcerns::kMajorityWriteConcernNoTimeout);
 
                 LOGV2(5515107, "CRUD unblocked", "fromNs"_attr = fromNss(), "toNs"_attr = toNss());
             }))
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.cpp b/src/mongo/db/s/sharding_ddl_coordinator.cpp
index 481a9868aa5..12925bad589 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.cpp
+++ b/src/mongo/db/s/sharding_ddl_coordinator.cpp
@@ -123,10 +123,9 @@ bool ShardingDDLCoordinator::_removeDocument(OperationContext* opCtx) {
     batchedResponse.parseBSON(commandReply, &unusedErrmsg);
 
     WriteConcernResult ignoreResult;
-    const WriteConcernOptions majorityWriteConcern{
-        WriteConcernOptions::kMajority,
-        WriteConcernOptions::SyncMode::UNSET,
-        WriteConcernOptions::kWriteConcernTimeoutSharding};
+    const WriteConcernOptions majorityWriteConcern{WriteConcernOptions::kMajority,
+                                                   WriteConcernOptions::SyncMode::UNSET,
+                                                   WriteConcernOptions::kNoTimeout};
     auto latestOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
     uassertStatusOK(waitForWriteConcern(opCtx, latestOpTime, majorityWriteConcern, &ignoreResult));
 
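The same swap in _removeDocument, shown in isolation: the only behavioral change is the timeout field of the write concern used when waiting on the client's last opTime. A sketch under the patch's own names (the exact value behind kWriteConcernTimeoutSharding lives in write_concern_options.h):

    // Before: majority acknowledgement bounded by the sharding timeout;
    // waitForWriteConcern can fail under slow replication.
    const WriteConcernOptions timedMajority{WriteConcernOptions::kMajority,
                                            WriteConcernOptions::SyncMode::UNSET,
                                            WriteConcernOptions::kWriteConcernTimeoutSharding};

    // After: wait indefinitely; interruption still applies through the opCtx.
    const WriteConcernOptions untimedMajority{WriteConcernOptions::kMajority,
                                              WriteConcernOptions::SyncMode::UNSET,
                                              WriteConcernOptions::kNoTimeout};

    // Either option is consumed the same way:
    WriteConcernResult ignoreResult;
    const auto latestOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
    uassertStatusOK(waitForWriteConcern(opCtx, latestOpTime, untimedMajority, &ignoreResult));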
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.h b/src/mongo/db/s/sharding_ddl_coordinator.h
index eb648ca0e8f..5972c7ce9e6 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.h
+++ b/src/mongo/db/s/sharding_ddl_coordinator.h
@@ -33,6 +33,8 @@
 #include "mongo/db/namespace_string.h"
 #include "mongo/db/operation_context.h"
 #include "mongo/db/persistent_task_store.h"
+#include "mongo/db/repl/replication_coordinator.h"
+#include "mongo/db/repl/wait_for_majority_service.h"
 #include "mongo/db/s/dist_lock_manager.h"
 #include "mongo/db/s/forwardable_operation_metadata.h"
 #include "mongo/db/s/sharding_ddl_coordinator_gen.h"
@@ -123,7 +125,18 @@ protected:
 
         auto opCtx = cc().makeOperationContext();
         PersistentTaskStore<StateDoc> store(NamespaceString::kShardingDDLCoordinatorsNamespace);
-        store.add(opCtx.get(), newDoc, WriteConcerns::kMajorityWriteConcernShardingTimeout);
+        try {
+            store.add(opCtx.get(), newDoc, WriteConcerns::kMajorityWriteConcernNoTimeout);
+        } catch (const ExceptionFor<ErrorCodes::DuplicateKey>&) {
+            // A series of step-up and step-down events can cause a node to try and insert the
+            // document when it has already been persisted locally, but we must still wait for
+            // majority commit.
+            const auto replCoord = repl::ReplicationCoordinator::get(opCtx.get());
+            const auto lastLocalOpTime = replCoord->getMyLastAppliedOpTime();
+            WaitForMajorityService::get(opCtx->getServiceContext())
+                .waitUntilMajority(lastLocalOpTime, opCtx.get()->getCancellationToken())
+                .get(opCtx.get());
+        }
 
         return std::move(newDoc);
     }
@@ -135,7 +148,7 @@ protected:
         store.update(opCtx,
                      BSON(StateDoc::kIdFieldName << newDoc.getId().toBSON()),
                      newDoc.toBSON(),
-                     WriteConcerns::kMajorityWriteConcernShardingTimeout);
+                     WriteConcerns::kMajorityWriteConcernNoTimeout);
 
         return std::move(newDoc);
     }
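For context, these two protected helpers are how a sharding DDL coordinator checkpoints its progress. A compressed illustration of the calling pattern, with signatures inferred from the hunks above (the phase value kHypotheticalNextPhase is invented for the example; the helper names and write-concern behavior are from the patch):

    // First execution, or a step-up re-run: the insert may now hit DuplicateKey
    // and fall back to waiting for majority commit of the last applied opTime.
    _doc = _insertStateDocument(std::move(initialDoc));

    // Every later phase transition persists the updated document with an untimed
    // majority write concern before the coordinator acts on the new phase, so a
    // failover can always resume from the last majority-committed checkpoint.
    newDoc.setPhase(Phase::kHypotheticalNextPhase);  // invented phase name
    _doc = _updateStateDocument(opCtx, std::move(newDoc));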