author     Tommaso Tocci <tommaso.tocci@mongodb.com>          2022-08-10 10:57:07 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2022-08-10 12:17:20 +0000
commit     16f229e305e71b400382cbef94eedce4d412aadd (patch)
tree       65e34ed2de93f507181ed379685b001570884152 /src/mongo/db/s
parent     b4ce104d4eb4753c7b7e6286e2efce16fb0865be (diff)
download   mongo-16f229e305e71b400382cbef94eedce4d412aadd.tar.gz
SERVER-68592 Rename DDL lock manager
Diffstat (limited to 'src/mongo/db/s')
-rw-r--r--  src/mongo/db/s/README.md                                                  |  2
-rw-r--r--  src/mongo/db/s/SConscript                                                 |  2
-rw-r--r--  src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp             | 24
-rw-r--r--  src/mongo/db/s/balancer/balancer_dist_locks.cpp                           | 10
-rw-r--r--  src/mongo/db/s/balancer/balancer_dist_locks.h                             |  6
-rw-r--r--  src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp             | 10
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp   |  8
-rw-r--r--  src/mongo/db/s/ddl_lock_manager.cpp (renamed from src/mongo/db/s/dist_lock_manager.cpp) | 38
-rw-r--r--  src/mongo/db/s/ddl_lock_manager.h (renamed from src/mongo/db/s/dist_lock_manager.h)     | 58
-rw-r--r--  src/mongo/db/s/drop_database_coordinator.cpp                              |  4
-rw-r--r--  src/mongo/db/s/refine_collection_shard_key_coordinator.cpp                |  1
-rw-r--r--  src/mongo/db/s/sharding_ddl_coordinator.cpp                               |  6
-rw-r--r--  src/mongo/db/s/sharding_ddl_coordinator.h                                 |  4
-rw-r--r--  src/mongo/db/s/sharding_mongod_test_fixture.h                             |  1
-rw-r--r--  src/mongo/db/s/shardsvr_drop_indexes_command.cpp                          | 10
15 files changed, 84 insertions, 100 deletions
diff --git a/src/mongo/db/s/README.md b/src/mongo/db/s/README.md
index 3cf28692403..19ed6cdf624 100644
--- a/src/mongo/db/s/README.md
+++ b/src/mongo/db/s/README.md
@@ -514,7 +514,7 @@ collections, such as config.shards, config.chunks, and config.tags. For example,
mergeChunks, and moveChunk all take the chunk ResourceMutex.
#### Code references
-* [**DistLockManager class**](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/dist_lock_manager.h)
+* [**DDLLockManager class**](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/ddl_lock_manager.h)
* The
[**global ResourceMutexes**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/config/sharding_catalog_manager.h#L555-L581)
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 2eee9f26e94..832d0c2ae87 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -284,7 +284,7 @@ env.Library(
'config/sharding_catalog_manager_zone_operations.cpp',
'config/sharding_catalog_manager.cpp',
'config/index_on_config.cpp',
- 'dist_lock_manager.cpp',
+ 'ddl_lock_manager.cpp',
'participant_block.idl',
'remove_tags.idl',
'sharded_index_catalog_commands.idl',
diff --git a/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp b/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
index fbbdada8b03..a488c414c2c 100644
--- a/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
@@ -185,8 +185,8 @@ TEST_F(BalancerCommandsSchedulerTest, SuccessfulMoveChunkCommand) {
auto opCtx = Client::getCurrent()->getOperationContext();
const std::string whyMessage(str::stream()
<< "Test acquisition of distLock for " << kNss.ns());
- ASSERT_DOES_NOT_THROW(DistLockManager::get(opCtx)->lock(
- opCtx, kNss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout));
+ ASSERT_DOES_NOT_THROW(DDLLockManager::get(opCtx)->lock(
+ opCtx, kNss.ns(), whyMessage, DDLLockManager::kSingleLockAttemptTimeout));
}
deferredCleanupCompletedCheckpoint->setMode(FailPoint::off, 0);
_scheduler.stop();
@@ -219,8 +219,8 @@ TEST_F(BalancerCommandsSchedulerTest, SuccessfulMoveRangeCommand) {
auto opCtx = Client::getCurrent()->getOperationContext();
const std::string whyMessage(str::stream()
<< "Test acquisition of distLock for " << kNss.ns());
- ASSERT_DOES_NOT_THROW(DistLockManager::get(opCtx)->lock(
- opCtx, kNss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout));
+ ASSERT_DOES_NOT_THROW(DDLLockManager::get(opCtx)->lock(
+ opCtx, kNss.ns(), whyMessage, DDLLockManager::kSingleLockAttemptTimeout));
}
deferredCleanupCompletedCheckpoint->setMode(FailPoint::off, 0);
_scheduler.stop();
@@ -355,8 +355,8 @@ TEST_F(BalancerCommandsSchedulerTest, CommandFailsWhenNetworkReturnsError) {
auto opCtx = Client::getCurrent()->getOperationContext();
const std::string whyMessage(str::stream()
<< "Test acquisition of distLock for " << kNss.ns());
- ASSERT_DOES_NOT_THROW(DistLockManager::get(opCtx)->lock(
- opCtx, kNss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout));
+ ASSERT_DOES_NOT_THROW(DDLLockManager::get(opCtx)->lock(
+ opCtx, kNss.ns(), whyMessage, DDLLockManager::kSingleLockAttemptTimeout));
}
deferredCleanupCompletedCheckpoint->setMode(FailPoint::off, 0);
_scheduler.stop();
@@ -374,8 +374,8 @@ TEST_F(BalancerCommandsSchedulerTest, CommandFailsWhenSchedulerIsStopped) {
auto opCtx = Client::getCurrent()->getOperationContext();
const std::string whyMessage(str::stream()
<< "Test acquisition of distLock for " << kNss.ns());
- ASSERT_DOES_NOT_THROW(DistLockManager::get(opCtx)->lock(
- opCtx, kNss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout));
+ ASSERT_DOES_NOT_THROW(DDLLockManager::get(opCtx)->lock(
+ opCtx, kNss.ns(), whyMessage, DDLLockManager::kSingleLockAttemptTimeout));
}
}
@@ -401,8 +401,8 @@ TEST_F(BalancerCommandsSchedulerTest, CommandCanceledIfUnsubmittedBeforeBalancer
auto opCtx = Client::getCurrent()->getOperationContext();
const std::string whyMessage(str::stream()
<< "Test acquisition of distLock for " << kNss.ns());
- ASSERT_DOES_NOT_THROW(DistLockManager::get(opCtx)->lock(
- opCtx, kNss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout));
+ ASSERT_DOES_NOT_THROW(DDLLockManager::get(opCtx)->lock(
+ opCtx, kNss.ns(), whyMessage, DDLLockManager::kSingleLockAttemptTimeout));
}
}
@@ -503,8 +503,8 @@ TEST_F(BalancerCommandsSchedulerTest, DistLockPreventsMoveChunkWithConcurrentDDL
opCtx = Client::getCurrent()->getOperationContext();
const std::string whyMessage(str::stream()
<< "Test acquisition of distLock for " << kNss.ns());
- auto scopedDistLock = DistLockManager::get(opCtx)->lock(
- opCtx, kNss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);
+ auto scopedDDLLock = DDLLockManager::get(opCtx)->lock(
+ opCtx, kNss.ns(), whyMessage, DDLLockManager::kSingleLockAttemptTimeout);
failpoint->setMode(FailPoint::Mode::off);
MigrateInfo migrateInfo = makeMigrationInfo(0, kShardId1, kShardId0);
auto futureResponse = _scheduler.requestMoveChunk(operationContext(),
diff --git a/src/mongo/db/s/balancer/balancer_dist_locks.cpp b/src/mongo/db/s/balancer/balancer_dist_locks.cpp
index 7bf36edaaa9..58526875649 100644
--- a/src/mongo/db/s/balancer/balancer_dist_locks.cpp
+++ b/src/mongo/db/s/balancer/balancer_dist_locks.cpp
@@ -43,13 +43,13 @@ Status BalancerDistLocks::acquireFor(OperationContext* opCtx, const NamespaceStr
++it->second.references;
return Status::OK();
} else {
- boost::optional<DistLockManager::ScopedLock> scopedLock;
+ boost::optional<DDLLockManager::ScopedLock> scopedLock;
try {
scopedLock.emplace(
- DistLockManager::get(opCtx)->lock(opCtx,
- nss.ns(),
- "moveRange" /* reason */,
- DistLockManager::kSingleLockAttemptTimeout));
+ DDLLockManager::get(opCtx)->lock(opCtx,
+ nss.ns(),
+ "moveRange" /* reason */,
+ DDLLockManager::kSingleLockAttemptTimeout));
} catch (const DBException& ex) {
return ex.toStatus(str::stream() << "Could not acquire collection lock for " << nss.ns()
<< " to migrate chunks");
diff --git a/src/mongo/db/s/balancer/balancer_dist_locks.h b/src/mongo/db/s/balancer/balancer_dist_locks.h
index 891d5a4e323..9b64f15d247 100644
--- a/src/mongo/db/s/balancer/balancer_dist_locks.h
+++ b/src/mongo/db/s/balancer/balancer_dist_locks.h
@@ -29,7 +29,7 @@
#pragma once
-#include "mongo/db/s/dist_lock_manager.h"
+#include "mongo/db/s/ddl_lock_manager.h"
namespace mongo {
@@ -50,10 +50,10 @@ public:
private:
struct ReferenceCountedLock {
- ReferenceCountedLock(DistLockManager::ScopedLock&& lock)
+ ReferenceCountedLock(DDLLockManager::ScopedLock&& lock)
: lock(std::move(lock)), references(1) {}
- DistLockManager::ScopedLock lock;
+ DDLLockManager::ScopedLock lock;
int references;
};
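
The renamed ReferenceCountedLock above is how the balancer shares a single DDL lock across concurrent migrations of the same collection: the first acquisition takes the DDL lock, later acquisitions only bump a counter, and the ScopedLock is destroyed once the count drops back to zero. A minimal standalone sketch of that pattern, using a hypothetical SharedDdlLocks helper that is not part of this patch:

    // Hypothetical helper illustrating the reference-counting pattern: the first
    // caller takes the DDL lock, later callers for the same namespace only
    // increment the count.
    class SharedDdlLocks {
    public:
        Status acquireFor(OperationContext* opCtx, const NamespaceString& nss) {
            if (auto it = _locks.find(nss.ns()); it != _locks.end()) {
                ++it->second.references;  // lock already held for this namespace
                return Status::OK();
            }
            try {
                auto lock = DDLLockManager::get(opCtx)->lock(
                    opCtx, nss.ns(), "moveRange", DDLLockManager::kSingleLockAttemptTimeout);
                _locks.emplace(nss.ns(), Entry{std::move(lock), 1});
                return Status::OK();
            } catch (const DBException& ex) {
                return ex.toStatus();  // e.g. LockBusy while a DDL operation holds the lock
            }
        }

        void releaseFor(const NamespaceString& nss) {
            auto it = _locks.find(nss.ns());
            if (it != _locks.end() && --it->second.references == 0)
                _locks.erase(it);  // ScopedLock destructor releases the DDL lock
        }

    private:
        struct Entry {
            DDLLockManager::ScopedLock lock;
            int references;
        };
        StringMap<Entry> _locks;
    };
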
diff --git a/src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp b/src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp
index 15b5c67bbc2..4ef146d30d7 100644
--- a/src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp
+++ b/src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp
@@ -35,7 +35,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
-#include "mongo/db/s/dist_lock_manager.h"
+#include "mongo/db/s/ddl_lock_manager.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
@@ -70,10 +70,10 @@ public:
const auto catalogClient = Grid::get(opCtx)->catalogClient();
// Acquire distlocks on the namespace's database and collection.
- const auto dbDDLock = DistLockManager::get(opCtx)->lock(
- opCtx, nss.db(), "clearJumboFlag", DistLockManager::kDefaultLockTimeout);
- const auto collDDLLock = DistLockManager::get(opCtx)->lock(
- opCtx, nss.ns(), "clearJumboFlag", DistLockManager::kDefaultLockTimeout);
+ const auto dbDDLock = DDLLockManager::get(opCtx)->lock(
+ opCtx, nss.db(), "clearJumboFlag", DDLLockManager::kDefaultLockTimeout);
+ const auto collDDLLock = DDLLockManager::get(opCtx)->lock(
+ opCtx, nss.ns(), "clearJumboFlag", DDLLockManager::kDefaultLockTimeout);
CollectionType collType;
try {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
index 0f9092cddf8..19587e579e6 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
@@ -37,7 +37,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/ops/write_ops.h"
#include "mongo/db/repl/repl_client_info.h"
-#include "mongo/db/s/dist_lock_manager.h"
+#include "mongo/db/s/ddl_lock_manager.h"
#include "mongo/db/server_options.h"
#include "mongo/db/vector_clock.h"
#include "mongo/db/write_concern.h"
@@ -113,7 +113,7 @@ DatabaseType ShardingCatalogManager::createDatabase(
DBDirectClient client(opCtx);
- boost::optional<DistLockManager::ScopedLock> dbLock;
+ boost::optional<DDLLockManager::ScopedLock> dbLock;
const auto dbMatchFilter = [&] {
BSONObjBuilder filterBuilder;
@@ -143,8 +143,8 @@ DatabaseType ShardingCatalogManager::createDatabase(
// Do another loop, with the db lock held in order to avoid taking the expensive path on
// concurrent create database operations
- dbLock.emplace(DistLockManager::get(opCtx)->lock(
- opCtx, dbName, "createDatabase" /* reason */, DistLockManager::kDefaultLockTimeout));
+ dbLock.emplace(DDLLockManager::get(opCtx)->lock(
+ opCtx, dbName, "createDatabase" /* reason */, DDLLockManager::kDefaultLockTimeout));
}
// Expensive createDatabase code path
diff --git a/src/mongo/db/s/dist_lock_manager.cpp b/src/mongo/db/s/ddl_lock_manager.cpp
index 37f513d4d26..5437d1da4ab 100644
--- a/src/mongo/db/s/dist_lock_manager.cpp
+++ b/src/mongo/db/s/ddl_lock_manager.cpp
@@ -28,7 +28,7 @@
*/
-#include "mongo/db/s/dist_lock_manager.h"
+#include "mongo/db/s/ddl_lock_manager.h"
#include "mongo/db/operation_context.h"
#include "mongo/logv2/log.h"
@@ -42,25 +42,25 @@ namespace {
// TODO SERVER-68551: Remove once 7.0 becomes last-lts
MONGO_FAIL_POINT_DEFINE(disableReplSetDistLockManager);
-const auto distLockManagerDecorator = ServiceContext::declareDecoration<DistLockManager>();
+const auto ddlLockManagerDecorator = ServiceContext::declareDecoration<DDLLockManager>();
} // namespace
-const Minutes DistLockManager::kDefaultLockTimeout(5);
-const Milliseconds DistLockManager::kSingleLockAttemptTimeout(0);
+const Minutes DDLLockManager::kDefaultLockTimeout(5);
+const Milliseconds DDLLockManager::kSingleLockAttemptTimeout(0);
-DistLockManager* DistLockManager::get(ServiceContext* service) {
- return &distLockManagerDecorator(service);
+DDLLockManager* DDLLockManager::get(ServiceContext* service) {
+ return &ddlLockManagerDecorator(service);
}
-DistLockManager* DistLockManager::get(OperationContext* opCtx) {
+DDLLockManager* DDLLockManager::get(OperationContext* opCtx) {
return get(opCtx->getServiceContext());
}
-DistLockManager::ScopedLock DistLockManager::lock(OperationContext* opCtx,
- StringData ns,
- StringData reason,
- Milliseconds waitFor) {
+DDLLockManager::ScopedLock DDLLockManager::lock(OperationContext* opCtx,
+ StringData ns,
+ StringData reason,
+ Milliseconds timeout) {
stdx::unique_lock<Latch> lock(_mutex);
auto iter = _inProgressMap.find(ns);
@@ -71,12 +71,12 @@ DistLockManager::ScopedLock DistLockManager::lock(OperationContext* opCtx,
nsLock->numWaiting++;
ScopeGuard guard([&] { nsLock->numWaiting--; });
if (!opCtx->waitForConditionOrInterruptFor(
- nsLock->cvLocked, lock, waitFor, [nsLock]() { return !nsLock->isInProgress; })) {
+ nsLock->cvLocked, lock, timeout, [nsLock]() { return !nsLock->isInProgress; })) {
using namespace fmt::literals;
uasserted(
ErrorCodes::LockBusy,
"Failed to acquire DDL lock for namespace '{}' after {} that is currently locked with reason '{}'"_format(
- ns, waitFor.toString(), reason));
+ ns, timeout.toString(), reason));
}
guard.dismiss();
nsLock->reason = reason.toString();
@@ -87,19 +87,19 @@ DistLockManager::ScopedLock DistLockManager::lock(OperationContext* opCtx,
return {ns, reason, this};
}
-DistLockManager::ScopedLock::ScopedLock(StringData ns,
- StringData reason,
- DistLockManager* distLockManager)
- : _ns(ns.toString()), _reason(reason.toString()), _lockManager(distLockManager) {}
+DDLLockManager::ScopedLock::ScopedLock(StringData ns,
+ StringData reason,
+ DDLLockManager* lockManager)
+ : _ns(ns.toString()), _reason(reason.toString()), _lockManager(lockManager) {}
-DistLockManager::ScopedLock::ScopedLock(ScopedLock&& other)
+DDLLockManager::ScopedLock::ScopedLock(ScopedLock&& other)
: _ns(std::move(other._ns)),
_reason(std::move(other._reason)),
_lockManager(other._lockManager) {
other._lockManager = nullptr;
}
-DistLockManager::ScopedLock::~ScopedLock() {
+DDLLockManager::ScopedLock::~ScopedLock() {
if (_lockManager) {
stdx::unique_lock<Latch> lock(_lockManager->_mutex);
auto iter = _lockManager->_inProgressMap.find(_ns);
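
As before the rename, lock() waits on a per-namespace condition variable for at most the given timeout and throws LockBusy if another DDL operation still holds the namespace, or an Interruption error if the operation context is interrupted while waiting. A hedged sketch of how a caller can fold those exceptions into a Status, in the spirit of BalancerDistLocks::acquireFor (the helper name and signature are illustrative, not part of this patch):

    // Illustrative helper: run `work` under the collection DDL lock, converting the
    // exceptions thrown by DDLLockManager::lock() into a Status.
    Status withCollectionDDLLock(OperationContext* opCtx,
                                 const NamespaceString& nss,
                                 StringData reason,
                                 std::function<void()> work) {
        try {
            // Zero timeout: fail immediately instead of waiting if the lock is contended.
            auto scopedLock = DDLLockManager::get(opCtx)->lock(
                opCtx, nss.ns(), reason, DDLLockManager::kSingleLockAttemptTimeout);
            work();               // runs while the DDL lock is held
            return Status::OK();  // ScopedLock destructor releases the lock here
        } catch (const DBException& ex) {
            // LockBusy when the namespace is already locked, or an Interruption code
            // if the operation context was interrupted while waiting.
            return ex.toStatus();
        }
    }
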
diff --git a/src/mongo/db/s/dist_lock_manager.h b/src/mongo/db/s/ddl_lock_manager.h
index c683ff9b4f7..c40e5efb2c0 100644
--- a/src/mongo/db/s/dist_lock_manager.h
+++ b/src/mongo/db/s/ddl_lock_manager.h
@@ -37,22 +37,9 @@
namespace mongo {
/**
- * Interface for handling distributed locks.
- *
- * Usage:
- *
- * auto scopedDistLock = mgr->lock(...);
- *
- * if (!scopedDistLock.isOK()) {
- * // Did not get lock. scopedLockStatus destructor will not call unlock.
- * }
- *
- * if (!status.isOK()) {
- * // Someone took over the lock! Unlock will still be called at destructor, but will
- * // practically be a no-op since it doesn't own the lock anymore.
- * }
+ * Service to manage DDL locks.
*/
-class DistLockManager {
+class DDLLockManager {
public:
// Default timeout which will be used if one is not passed to the lock method.
static const Minutes kDefaultLockTimeout;
@@ -62,14 +49,14 @@ public:
static const Milliseconds kSingleLockAttemptTimeout;
/**
- * RAII type for the local lock.
+ * RAII type for the DDL lock.
*/
class ScopedLock {
ScopedLock(const ScopedLock&) = delete;
ScopedLock& operator=(const ScopedLock&) = delete;
public:
- ScopedLock(StringData lockName, StringData reason, DistLockManager* distLockManager);
+ ScopedLock(StringData lockName, StringData reason, DDLLockManager* lockManager);
~ScopedLock();
ScopedLock(ScopedLock&& other);
@@ -84,35 +71,34 @@ public:
private:
std::string _ns;
std::string _reason;
- DistLockManager* _lockManager;
+ DDLLockManager* _lockManager;
};
- DistLockManager() = default;
- ~DistLockManager() = default;
+ DDLLockManager() = default;
+ ~DDLLockManager() = default;
/**
- * Retrieves the DistLockManager singleton for the node.
+ * Retrieves the DDLLockManager singleton.
*/
- static DistLockManager* get(ServiceContext* service);
- static DistLockManager* get(OperationContext* opCtx);
+ static DDLLockManager* get(ServiceContext* service);
+ static DDLLockManager* get(OperationContext* opCtx);
/**
- * Tries multiple times to lock, using the specified lock try interval, until
- * a certain amount of time has passed or when any error that is not LockBusy
- * occurred.
+ * Returns a RAII style lock on the given namespace @ns.
+ *
+ * @ns Namespace to lock (either a database or a collection).
+ * @reason Reason for which the lock is being acquired (e.g. 'createCollection').
+ * @timeout Time after which this acquisition attempt will give up in case of lock contention.
+ * A timeout value of -1 means the acquisition will be retried forever.
*
- * waitFor = 0 indicates there should only be one attempt to acquire the lock, and
- * no waiting.
- * waitFor = -1 indicates we should retry indefinitely.
*
- * Returns OK if the lock was successfully acquired.
- * Returns ErrorCodes::DistributedClockSkewed when a clock skew is detected.
- * Returns ErrorCodes::LockBusy if the lock is being held.
+ * Throws ErrorCodes::LockBusy in case the timeout is reached.
+ * Throws ErrorCategory::Interruption in case the operation context is interrupted.
*/
ScopedLock lock(OperationContext* opCtx,
- StringData name,
- StringData whyMessage,
- Milliseconds waitFor);
+ StringData ns,
+ StringData reason,
+ Milliseconds timeout);
protected:
struct NSLock {
@@ -124,7 +110,7 @@ protected:
std::string reason;
};
- Mutex _mutex = MONGO_MAKE_LATCH("NamespaceSerializer::_mutex");
+ Mutex _mutex = MONGO_MAKE_LATCH("DDLLockManager::_mutex");
StringMap<std::shared_ptr<NSLock>> _inProgressMap;
};
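
Apart from the new names, the call pattern documented above is unchanged from the old DistLockManager; a minimal usage sketch with a placeholder reason string (the other hunks in this patch, e.g. clearJumboFlag and dropDatabase, follow the same shape):

    // Take the database DDL lock, then the collection DDL lock, for the duration of a
    // DDL operation; both are released when the ScopedLocks go out of scope.
    const auto dbDDLLock = DDLLockManager::get(opCtx)->lock(
        opCtx, nss.db(), "exampleDDLOperation" /* reason */, DDLLockManager::kDefaultLockTimeout);
    const auto collDDLLock = DDLLockManager::get(opCtx)->lock(
        opCtx, nss.ns(), "exampleDDLOperation" /* reason */, DDLLockManager::kDefaultLockTimeout);
    // ... perform the DDL work under both locks ...
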
diff --git a/src/mongo/db/s/drop_database_coordinator.cpp b/src/mongo/db/s/drop_database_coordinator.cpp
index da3dec33ec0..64c365d784a 100644
--- a/src/mongo/db/s/drop_database_coordinator.cpp
+++ b/src/mongo/db/s/drop_database_coordinator.cpp
@@ -123,8 +123,8 @@ void DropDatabaseCoordinator::_dropShardedCollection(
// Acquire the collection distributed lock in order to synchronize with an eventual ongoing
// moveChunk and to prevent new ones from happening.
const auto coorName = DDLCoordinatorType_serializer(_coordId.getOperationType());
- auto collDDLLock = DistLockManager::get(opCtx)->lock(
- opCtx, nss.ns(), coorName, DistLockManager::kDefaultLockTimeout);
+ auto collDDLLock = DDLLockManager::get(opCtx)->lock(
+ opCtx, nss.ns(), coorName, DDLLockManager::kDefaultLockTimeout);
sharding_ddl_util::removeCollAndChunksMetadataFromConfig(
opCtx, coll, ShardingCatalogClient::kMajorityWriteConcern);
diff --git a/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp b/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp
index a58b18aedbd..0a617517986 100644
--- a/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp
+++ b/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp
@@ -35,7 +35,6 @@
#include "mongo/db/concurrency/exception_util.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/op_observer/op_observer.h"
-#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/db/s/shard_key_util.h"
#include "mongo/db/s/sharding_ddl_util.h"
#include "mongo/logv2/log.h"
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.cpp b/src/mongo/db/s/sharding_ddl_coordinator.cpp
index 43ef37b7b39..f80127798fb 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.cpp
+++ b/src/mongo/db/s/sharding_ddl_coordinator.cpp
@@ -199,7 +199,7 @@ ExecutorFuture<void> ShardingDDLCoordinator::_acquireLockAsync(
return AsyncTry([this, resource = resource.toString()] {
auto opCtxHolder = cc().makeOperationContext();
auto* opCtx = opCtxHolder.get();
- auto distLockManager = DistLockManager::get(opCtx);
+ auto ddlLockManager = DDLLockManager::get(opCtx);
const auto coorName = DDLCoordinatorType_serializer(_coordId.getOperationType());
@@ -213,10 +213,10 @@ ExecutorFuture<void> ShardingDDLCoordinator::_acquireLockAsync(
return timeoutMillisecs;
}
}
- return DistLockManager::kDefaultLockTimeout;
+ return DDLLockManager::kDefaultLockTimeout;
}();
- _scopedLocks.emplace(distLockManager->lock(opCtx, resource, coorName, lockTimeOut));
+ _scopedLocks.emplace(ddlLockManager->lock(opCtx, resource, coorName, lockTimeOut));
})
.until([this, resource = resource.toString()](Status status) {
if (!status.isOK()) {
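
The coordinator wraps the acquisition in an AsyncTry/until loop so a busy DDL lock is retried on the executor instead of failing the whole coordinator. A simplified, synchronous sketch of the same retry idea (this is not the coordinator's actual code path, which stays asynchronous):

    // Simplified, synchronous version of the retry-until-acquired idea used by
    // ShardingDDLCoordinator::_acquireLockAsync.
    boost::optional<DDLLockManager::ScopedLock> scopedLock;
    while (!scopedLock) {
        try {
            scopedLock.emplace(DDLLockManager::get(opCtx)->lock(
                opCtx, resource, coorName, DDLLockManager::kDefaultLockTimeout));
        } catch (const ExceptionFor<ErrorCodes::LockBusy>&) {
            // Another DDL operation holds the lock: back off briefly and try again.
            opCtx->sleepFor(Milliseconds(100));  // illustrative backoff, not the real policy
        }
    }
    // The acquired lock is then pushed onto _scopedLocks so it is released in reverse
    // acquisition order when the coordinator completes.
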
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.h b/src/mongo/db/s/sharding_ddl_coordinator.h
index 19060b384e3..569a8c2fc87 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.h
+++ b/src/mongo/db/s/sharding_ddl_coordinator.h
@@ -35,7 +35,7 @@
#include "mongo/db/persistent_task_store.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/repl/wait_for_majority_service.h"
-#include "mongo/db/s/dist_lock_manager.h"
+#include "mongo/db/s/ddl_lock_manager.h"
#include "mongo/db/s/forwardable_operation_metadata.h"
#include "mongo/db/s/sharding_ddl_coordinator_gen.h"
#include "mongo/db/s/sharding_ddl_coordinator_service.h"
@@ -170,7 +170,7 @@ private:
SharedPromise<void> _constructionCompletionPromise;
SharedPromise<void> _completionPromise;
- std::stack<DistLockManager::ScopedLock> _scopedLocks;
+ std::stack<DDLLockManager::ScopedLock> _scopedLocks;
};
template <class StateDoc>
diff --git a/src/mongo/db/s/sharding_mongod_test_fixture.h b/src/mongo/db/s/sharding_mongod_test_fixture.h
index c55827c4f97..16b208ba123 100644
--- a/src/mongo/db/s/sharding_mongod_test_fixture.h
+++ b/src/mongo/db/s/sharding_mongod_test_fixture.h
@@ -30,7 +30,6 @@
#pragma once
#include "mongo/db/repl/replication_coordinator_mock.h"
-#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/s/sharding_test_fixture_common.h"
diff --git a/src/mongo/db/s/shardsvr_drop_indexes_command.cpp b/src/mongo/db/s/shardsvr_drop_indexes_command.cpp
index 4e78d4af873..dd08a353a44 100644
--- a/src/mongo/db/s/shardsvr_drop_indexes_command.cpp
+++ b/src/mongo/db/s/shardsvr_drop_indexes_command.cpp
@@ -34,7 +34,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/curop.h"
#include "mongo/db/s/database_sharding_state.h"
-#include "mongo/db/s/dist_lock_manager.h"
+#include "mongo/db/s/ddl_lock_manager.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/timeseries/catalog_helper.h"
#include "mongo/db/timeseries/timeseries_commands_conversion_helper.h"
@@ -153,13 +153,13 @@ ShardsvrDropIndexesCommand::Invocation::Response ShardsvrDropIndexesCommand::Inv
return timeoutMillisecs;
}
}
- return DistLockManager::kDefaultLockTimeout;
+ return DDLLockManager::kDefaultLockTimeout;
}();
static constexpr StringData lockReason{"dropIndexes"_sd};
- auto distLockManager = DistLockManager::get(opCtx);
- auto dbDDLLock = distLockManager->lock(opCtx, ns().db(), lockReason, lockTimeout);
+ auto ddlLockManager = DDLLockManager::get(opCtx);
+ auto dbDDLLock = ddlLockManager->lock(opCtx, ns().db(), lockReason, lockTimeout);
// Check under the dbLock if this is still the primary shard for the database
catalog_helper::assertIsPrimaryShardForDb(opCtx, ns().db());
@@ -177,7 +177,7 @@ ShardsvrDropIndexesCommand::Invocation::Response ShardsvrDropIndexesCommand::Inv
resolvedNs = ns().makeTimeseriesBucketsNamespace();
}
- auto collDDLLock = distLockManager->lock(opCtx, resolvedNs.ns(), lockReason, lockTimeout);
+ auto collDDLLock = ddlLockManager->lock(opCtx, resolvedNs.ns(), lockReason, lockTimeout);
StaleConfigRetryState retryState;
return shardVersionRetry(