author     Marcos José Grillo Ramirez <marcos.grillo@mongodb.com>  2022-03-02 11:18:25 +0100
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>        2022-03-02 10:47:58 +0000
commit     cee57ffcdbf134093e465cb36f50941de25f6330 (patch)
tree       0ee3dbbdffa169a7b400cfbe2312d5a044516530
parent     2551b634920f2fec7e8d780e7ea99d078b3af3bd (diff)
download   mongo-cee57ffcdbf134093e465cb36f50941de25f6330.tar.gz
Revert "SERVER-62521 Ensure distributed locks are being released even if a remote stepdown error occurs"
This reverts commit 2551b634920f2fec7e8d780e7ea99d078b3af3bd.
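For reference, the sketch below is a standalone, illustrative comparison (not MongoDB code: the release* helper names and the plain bools standing in for Status and ErrorCategory are assumptions) of the lock-release condition introduced by the reverted change, the cleanup() lambda, against the condition this revert restores.

    // Standalone sketch: compares the release predicate removed by this revert
    // (the cleanup() lambda from SERVER-62521) with the one the revert restores.
    #include <cstdio>

    // Predicate from the reverted change:
    //   status.isOK() || _completeOnError || !isSteppingDown
    bool releaseReverted(bool statusOK, bool completeOnError, bool isSteppingDown) {
        return statusOK || completeOnError || !isSteppingDown;
    }

    // Predicate restored by this commit:
    //   !isSteppingDown || (!status.isOK() && _completeOnError)
    bool releaseRestored(bool statusOK, bool completeOnError, bool isSteppingDown) {
        return !isSteppingDown || (!statusOK && completeOnError);
    }

    int main() {
        std::printf("statusOK completeOnError steppingDown | reverted restored\n");
        for (int s = 0; s < 2; ++s)
            for (int c = 0; c < 2; ++c)
                for (int d = 0; d < 2; ++d)
                    std::printf("   %d           %d              %d      |    %d        %d\n",
                                s, c, d, releaseReverted(s, c, d), releaseRestored(s, c, d));
        // The predicates differ only when statusOK && isSteppingDown. At the first check in
        // the diff that combination cannot occur (isSteppingDown is derived from the status's
        // error category), but the catch block may later set isSteppingDown while the captured
        // status is still OK, which is where the behavior around releasing distributed locks
        // changes.
        return 0;
    }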
-rw-r--r--  src/mongo/db/s/sharding_ddl_coordinator.cpp  26
1 file changed, 8 insertions, 18 deletions
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.cpp b/src/mongo/db/s/sharding_ddl_coordinator.cpp
index b184f596053..95bee8bcfc0 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.cpp
+++ b/src/mongo/db/s/sharding_ddl_coordinator.cpp
@@ -269,22 +269,15 @@ SemiFuture<void> ShardingDDLCoordinator::run(std::shared_ptr<executor::ScopedTas
             bool isSteppingDown = status.isA<ErrorCategory::NotPrimaryError>() ||
                 status.isA<ErrorCategory::ShutdownError>();
 
-            // If we are stepping down the token MUST be cancelled. Each implementation of the
-            // coordinator must retry remote stepping down errors, unless, we allow finalizing the
-            // coordinator in the presence of errors.
-            dassert(!isSteppingDown || token.isCanceled() || _completeOnError);
+            // Release the coordinator only in case the node is not stepping down or in case of
+            // acceptable error
+            if (!isSteppingDown || (!status.isOK() && _completeOnError)) {
+                LOGV2(
+                    5565601, "Releasing sharding DDL coordinator", "coordinatorId"_attr = _coordId);
 
-            // Remove the ddl coordinator and release locks if the execution was successfull or if
-            // there was any error and we have the _completeOnError flag set or if we are not
-            // stepping down.
-            auto cleanup = [&]() { return status.isOK() || _completeOnError || !isSteppingDown; };
+                auto session = metadata().getSession();
 
-            if (cleanup()) {
                 try {
-                    LOGV2(5565601,
-                          "Releasing sharding DDL coordinator",
-                          "coordinatorId"_attr = _coordId);
-
                     // We need to execute this in another executor to ensure the remove work is
                     // done.
                     const auto docWasRemoved = _removeDocumentUntillSuccessOrStepdown(
@@ -298,8 +291,6 @@ SemiFuture<void> ShardingDDLCoordinator::run(std::shared_ptr<executor::ScopedTas
                             Status::OK());
                     }
 
-                    auto session = metadata().getSession();
-
                     if (status.isOK() && session) {
                         // Return lsid to the SessionCache. If status is not OK, let the lsid be
                         // discarded.
@@ -308,7 +299,6 @@ SemiFuture<void> ShardingDDLCoordinator::run(std::shared_ptr<executor::ScopedTas
                     }
                 } catch (const DBException& ex) {
                     completionStatus = ex.toStatus();
-                    // Ensure the only possible error is that we're stepping down.
                     isSteppingDown = completionStatus.isA<ErrorCategory::NotPrimaryError>() ||
                         completionStatus.isA<ErrorCategory::ShutdownError>() ||
                         completionStatus.isA<ErrorCategory::CancellationError>();
@@ -316,7 +306,7 @@ SemiFuture<void> ShardingDDLCoordinator::run(std::shared_ptr<executor::ScopedTas
                 }
             }
 
-            if (!cleanup()) {
+            if (isSteppingDown) {
                 LOGV2(5950000,
                       "Not releasing distributed locks because the node is stepping down or "
                       "shutting down",
@@ -325,7 +315,7 @@ SemiFuture<void> ShardingDDLCoordinator::run(std::shared_ptr<executor::ScopedTas
             }
 
             while (!_scopedLocks.empty()) {
-                if (cleanup()) {
+                if (!isSteppingDown) {
                     // (SERVER-59500) Only release the remote locks in case of no stepdown/shutdown
                     const auto& resource = _scopedLocks.top().getNs();
                     DistLockManager::get(opCtx)->unlock(opCtx, resource);