summaryrefslogtreecommitdiff
path: root/src/mongo/db/s/sharding_ddl_coordinator.cpp
diff options
context:
space:
mode:
author: Pierlauro Sciarelli <pierlauro.sciarelli@mongodb.com> 2021-08-27 12:48:56 +0000
committer: Evergreen Agent <no-reply@evergreen.mongodb.com> 2021-08-27 13:12:58 +0000
commit: 979ecf1842bb431badd5ad6b6025ac257742c684 (patch)
tree: e713b8b40cc5941e47735d4b4eb0ad0f49909366 /src/mongo/db/s/sharding_ddl_coordinator.cpp
parent: e502f2d3965ac4147d303e956a582b7c4eef8232 (diff)
download: mongo-979ecf1842bb431badd5ad6b6025ac257742c684.tar.gz
SERVER-59500 DDL coordinators must not release dist locks on stepdown
Diffstat (limited to 'src/mongo/db/s/sharding_ddl_coordinator.cpp')
-rw-r--r-- src/mongo/db/s/sharding_ddl_coordinator.cpp | 32
1 file changed, 25 insertions, 7 deletions
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.cpp b/src/mongo/db/s/sharding_ddl_coordinator.cpp
index 7285f42d37b..87c4fd2cb34 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.cpp
+++ b/src/mongo/db/s/sharding_ddl_coordinator.cpp
@@ -117,9 +117,13 @@ ExecutorFuture<void> ShardingDDLCoordinator::_acquireLockAsync(
auto distLockManager = DistLockManager::get(opCtx);
const auto coorName = DDLCoordinatorType_serializer(_coordId.getOperationType());
- auto distLock = uassertStatusOK(distLockManager->lock(
+
+ auto distLock = distLockManager->lockDirectLocally(
+ opCtx, resource, DistLockManager::kDefaultLockTimeout);
+ _scopedLocks.emplace(std::move(distLock));
+
+ uassertStatusOK(distLockManager->lockDirect(
opCtx, resource, coorName, DistLockManager::kDefaultLockTimeout));
- _scopedLocks.emplace(distLock.moveToAnotherThread());
})
.until([this](Status status) { return (!_recoveredFromDisk) || status.isOK(); })
.withBackoffBetweenIterations(kExponentialBackoff)
@@ -237,10 +241,12 @@ SemiFuture<void> ShardingDDLCoordinator::run(std::shared_ptr<executor::ScopedTas
auto completionStatus = status;
- // Release the coordinator only if we are not stepping down
- if ((!status.isA<ErrorCategory::NotPrimaryError>() &&
- !status.isA<ErrorCategory::ShutdownError>()) ||
- (!status.isOK() && _completeOnError)) {
+ bool isSteppingDown = status.isA<ErrorCategory::NotPrimaryError>() ||
+ status.isA<ErrorCategory::ShutdownError>();
+
+ // Release the coordinator only in case the node is not stepping down or in case of
+ // acceptable error
+ if (!isSteppingDown || (!status.isOK() && _completeOnError)) {
try {
LOGV2(5565601,
"Releasing sharding DDL coordinator",
@@ -271,8 +277,20 @@ SemiFuture<void> ShardingDDLCoordinator::run(std::shared_ptr<executor::ScopedTas
}
}
+ if (isSteppingDown) {
+ LOGV2(5950000,
+ "Not releasing distributed locks because the node is stepping down or "
+ "shutting down",
+ "coordinatorId"_attr = _coordId,
+ "status"_attr = status);
+ }
+
while (!_scopedLocks.empty()) {
- _scopedLocks.top().assignNewOpCtx(opCtx);
+ if (!isSteppingDown) {
+ // (SERVER-59500) Only release the remote locks in case of no stepdown/shutdown
+ const auto& resource = _scopedLocks.top().getNs();
+ DistLockManager::get(opCtx)->unlock(opCtx, resource);
+ }
_scopedLocks.pop();
}