-rw-r--r--  jstests/sharding/libs/mongos_api_params_util.js | 3
-rw-r--r--  src/mongo/db/s/README.md | 3
-rw-r--r--  src/mongo/db/s/SConscript | 7
-rw-r--r--  src/mongo/db/s/balancer/migration_manager.cpp | 43
-rw-r--r--  src/mongo/db/s/balancer/migration_manager.h | 16
-rw-r--r--  src/mongo/db/s/balancer/migration_manager_test.cpp | 9
-rw-r--r--  src/mongo/db/s/config/configsvr_create_database_command.cpp | 3
-rw-r--r--  src/mongo/db/s/config/configsvr_drop_collection_command.cpp | 5
-rw-r--r--  src/mongo/db/s/config/configsvr_drop_database_command.cpp | 2
-rw-r--r--  src/mongo/db/s/config/namespace_serializer.cpp | 89
-rw-r--r--  src/mongo/db/s/config/namespace_serializer.h | 86
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.cpp | 10
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.h | 22
-rw-r--r--  src/mongo/db/s/dist_lock_catalog.h | 8
-rw-r--r--  src/mongo/db/s/dist_lock_catalog_mock.cpp | 20
-rw-r--r--  src/mongo/db/s/dist_lock_catalog_mock.h | 4
-rw-r--r--  src/mongo/db/s/dist_lock_catalog_replset.cpp | 10
-rw-r--r--  src/mongo/db/s/dist_lock_catalog_replset.h | 2
-rw-r--r--  src/mongo/db/s/dist_lock_catalog_replset_test.cpp | 24
-rw-r--r--  src/mongo/db/s/dist_lock_manager.cpp | 85
-rw-r--r--  src/mongo/db/s/dist_lock_manager.h | 76
-rw-r--r--  src/mongo/db/s/dist_lock_manager_replset.cpp | 93
-rw-r--r--  src/mongo/db/s/dist_lock_manager_replset.h | 32
-rw-r--r--  src/mongo/db/s/dist_lock_manager_replset_test.cpp | 182
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp | 2
-rw-r--r--  src/mongo/db/s/shard_collection_legacy.h | 2
-rw-r--r--  src/mongo/db/s/sharding_mongod_test_fixture.cpp | 26
-rw-r--r--  src/mongo/db/s/shardsvr_shard_collection_command.cpp (renamed from src/mongo/db/s/shardsvr_shard_collection.cpp) | 0
-rw-r--r--  src/mongo/s/commands/cluster_shard_collection_cmd.cpp | 123
-rw-r--r--  src/mongo/s/sharded_collections_ddl_parameters.idl | 6
30 files changed, 347 insertions, 646 deletions
diff --git a/jstests/sharding/libs/mongos_api_params_util.js b/jstests/sharding/libs/mongos_api_params_util.js
index 57c32fd30dd..40e89db2e4b 100644
--- a/jstests/sharding/libs/mongos_api_params_util.js
+++ b/jstests/sharding/libs/mongos_api_params_util.js
@@ -1095,8 +1095,7 @@ let MongosAPIParametersUtil = (function() {
commandName: "shardCollection",
run: {
inAPIVersion1: false,
- configServerCommandName: "_configsvrShardCollection",
- shardCommandName: "_shardsvrShardCollection",
+ shardCommandName: "_shardsvrCreateCollection",
runsAgainstAdminDb: true,
permittedInTxn: false,
permittedOnShardedCollection: false,
diff --git a/src/mongo/db/s/README.md b/src/mongo/db/s/README.md
index 1c8712233c8..ea1857f5674 100644
--- a/src/mongo/db/s/README.md
+++ b/src/mongo/db/s/README.md
@@ -523,9 +523,6 @@ mergeChunks, and moveChunk all take the chunk ResourceMutex.
#### Code references
* [**DistLockManager class**](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/dist_lock_manager.h)
* [**DistLockCatalog class**](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/dist_lock_catalog.h)
-* [**NamespaceSerializer class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/config/namespace_serializer.h)
-* The interface for acquiring NamespaceSerializer locks
-[**via the ShardingCatalogManager**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/config/sharding_catalog_manager.h#L276)
* The
[**global ResourceMutexes**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/config/sharding_catalog_manager.h#L555-L581)
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index e14b58c834f..1163deccec7 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -250,7 +250,6 @@ env.Library(
'balancer/scoped_migration_request.cpp',
'balancer/type_migration.cpp',
'config/initial_split_policy.cpp',
- 'config/namespace_serializer.cpp',
'config/sharding_catalog_manager_chunk_operations.cpp',
'config/sharding_catalog_manager_collection_operations.cpp',
'config/sharding_catalog_manager_database_operations.cpp',
@@ -327,6 +326,7 @@ env.Library(
'config/configsvr_shard_collection_command.cpp',
'config/configsvr_split_chunk_command.cpp',
'config/configsvr_update_zone_key_range_command.cpp',
+ 'drop_collection_coordinator.cpp',
'flush_database_cache_updates_command.cpp',
'flush_routing_table_cache_updates_command.cpp',
'get_database_version_command.cpp',
@@ -337,7 +337,6 @@ env.Library(
'move_chunk_command.cpp',
'move_primary_command.cpp',
'set_shard_version_command.cpp',
- 'drop_collection_coordinator.cpp',
'sharding_ddl_coordinator.cpp',
'sharding_server_status.cpp',
'sharding_state_command.cpp',
@@ -345,9 +344,9 @@ env.Library(
'shardsvr_drop_collection_command.cpp',
'shardsvr_drop_collection_participant_command.cpp',
'shardsvr_drop_database_command.cpp',
- 'shardsvr_rename_collection.cpp',
'shardsvr_refine_collection_shard_key_command.cpp',
- 'shardsvr_shard_collection.cpp',
+ 'shardsvr_rename_collection.cpp',
+ 'shardsvr_shard_collection_command.cpp',
'split_chunk_command.cpp',
'split_vector_command.cpp',
'txn_two_phase_commit_cmds.cpp',
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index 60bda5d988e..090df0de70c 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -269,10 +269,9 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* opCtx)
const std::string whyMessage(str::stream() << "Migrating chunk(s) in collection "
<< migrateType.getNss().ns());
- auto statusWithDistLockHandle =
- DistLockManager::get(opCtx)->tryLockWithLocalWriteConcern(
- opCtx, migrateType.getNss().ns(), whyMessage, _lockSessionID);
- if (!statusWithDistLockHandle.isOK()) {
+ auto status = DistLockManager::get(opCtx)->tryLockDirectWithLocalWriteConcern(
+ opCtx, migrateType.getNss().ns(), whyMessage);
+ if (!status.isOK()) {
LOGV2(21898,
"Failed to acquire distributed lock for collection {namespace} "
"during balancer recovery of an active migration. Abandoning balancer "
@@ -280,7 +279,7 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* opCtx)
"Failed to acquire distributed lock for collection "
"during balancer recovery of an active migration",
"namespace"_attr = migrateType.getNss().ns(),
- "error"_attr = redact(statusWithDistLockHandle.getStatus()));
+ "error"_attr = redact(status));
return;
}
}
@@ -373,7 +372,7 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
// If no migrations were scheduled for this namespace, free the dist lock
if (!scheduledMigrations) {
- DistLockManager::get(opCtx)->unlock(opCtx, _lockSessionID, nss.ns());
+ DistLockManager::get(opCtx)->unlock(opCtx, nss.ns());
}
}
@@ -520,26 +519,22 @@ void MigrationManager::_schedule(WithLock lock,
const std::string whyMessage(str::stream()
<< "Migrating chunk(s) in collection " << nss.ns());
- // Acquire the NamespaceSerializer lock for this nss (blocking call)
- auto scopedCollLock =
- ShardingCatalogManager::get(opCtx)->serializeCreateOrDropCollection(opCtx, nss);
+ // Acquire the local lock for this nss (blocking call)
+ auto scopedLock = DistLockManager::get(opCtx)->lockDirectLocally(
+ opCtx, nss.ns(), DistLockManager::kSingleLockAttemptTimeout);
// Acquire the collection distributed lock (blocking call)
- auto statusWithDistLockHandle = DistLockManager::get(opCtx)->lockWithSessionID(
- opCtx,
- nss.ns(),
- whyMessage,
- _lockSessionID,
- DistLockManager::kSingleLockAttemptTimeout);
-
- if (!statusWithDistLockHandle.isOK()) {
- migration.completionNotification->set(statusWithDistLockHandle.getStatus().withContext(
- str::stream() << "Could not acquire collection lock for " << nss.ns()
- << " to migrate chunks"));
+ auto status = DistLockManager::get(opCtx)->lockDirect(
+ opCtx, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);
+
+ if (!status.isOK()) {
+ migration.completionNotification->set(
+ status.withContext(str::stream() << "Could not acquire collection lock for "
+ << nss.ns() << " to migrate chunks"));
return;
}
- MigrationsState migrationsState(std::move(scopedCollLock));
+ MigrationsState migrationsState(std::move(scopedLock));
it = _activeMigrations.insert(std::make_pair(nss, std::move(migrationsState))).first;
}
@@ -611,7 +606,7 @@ void MigrationManager::_complete(WithLock lock,
migrations->erase(itMigration);
if (migrations->empty()) {
- DistLockManager::get(opCtx)->unlock(opCtx, _lockSessionID, nss.ns());
+ DistLockManager::get(opCtx)->unlock(opCtx, nss.ns());
_activeMigrations.erase(it);
_checkDrained(lock);
}
@@ -713,7 +708,7 @@ MigrationManager::Migration::~Migration() {
invariant(completionNotification);
}
-MigrationManager::MigrationsState::MigrationsState(NamespaceSerializer::ScopedLock lock)
- : nsSerializerLock(std::move(lock)) {}
+MigrationManager::MigrationsState::MigrationsState(DistLockManager::ScopedLock lock)
+ : lock(std::move(lock)) {}
} // namespace mongo
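The MigrationsState change above swaps the NamespaceSerializer lock for the DistLockManager local lock but keeps the same ownership idiom: the move-only RAII guard lives inside the _activeMigrations map entry, so the local lock is held exactly as long as migrations are outstanding for the namespace and is released automatically when the entry is erased. A minimal standalone sketch of that idiom, with a hypothetical ScopedGuard standing in for DistLockManager::ScopedLock:

```cpp
#include <map>
#include <string>
#include <utility>

// Hypothetical stand-in for DistLockManager::ScopedLock: move-only, releases
// the underlying per-namespace lock when the owning instance is destroyed.
class ScopedGuard {
public:
    explicit ScopedGuard(std::string name) : _name(std::move(name)), _owns(true) {}
    ScopedGuard(ScopedGuard&& other) : _name(std::move(other._name)), _owns(other._owns) {
        other._owns = false;  // The moved-from guard must not release
    }
    ScopedGuard(const ScopedGuard&) = delete;
    ~ScopedGuard() {
        if (_owns) {
            // Release the underlying per-namespace lock here
        }
    }

private:
    std::string _name;
    bool _owns;
};

struct MigrationsState {
    explicit MigrationsState(ScopedGuard lock) : lock(std::move(lock)) {}
    ScopedGuard lock;  // Held for the lifetime of the map entry
};

int main() {
    std::map<std::string, MigrationsState> activeMigrations;
    activeMigrations.emplace("db.coll", MigrationsState(ScopedGuard("db.coll")));
    // ... migrations for db.coll run ...
    activeMigrations.erase("db.coll");  // ~MigrationsState -> ~ScopedGuard releases
}
```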
diff --git a/src/mongo/db/s/balancer/migration_manager.h b/src/mongo/db/s/balancer/migration_manager.h
index fd5db687100..ce02f008fab 100644
--- a/src/mongo/db/s/balancer/migration_manager.h
+++ b/src/mongo/db/s/balancer/migration_manager.h
@@ -38,6 +38,7 @@
#include "mongo/db/s/balancer/balancer_policy.h"
#include "mongo/db/s/balancer/type_migration.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/executor/task_executor.h"
#include "mongo/platform/mutex.h"
#include "mongo/s/request_types/migration_secondary_throttle_options.h"
@@ -48,12 +49,7 @@
namespace mongo {
-class OperationContext;
class ScopedMigrationRequest;
-class ServiceContext;
-class Status;
-template <typename T>
-class StatusWith;
// Uniquely identifies a migration, regardless of shard and version.
typedef std::string MigrationIdentifier;
@@ -178,12 +174,11 @@ private:
    // DistLockManager local lock for the corresponding nss, which will be released when all of the
// scheduled chunk migrations for this collection have completed.
struct MigrationsState {
- MigrationsState(NamespaceSerializer::ScopedLock lock);
+ MigrationsState(DistLockManager::ScopedLock lock);
MigrationsList migrationsList;
- NamespaceSerializer::ScopedLock nsSerializerLock;
+ DistLockManager::ScopedLock lock;
};
-
using CollectionMigrationsStateMap = stdx::unordered_map<NamespaceString, MigrationsState>;
using ScopedMigrationRequestsMap =
@@ -271,11 +266,6 @@ private:
// The service context under which this migration manager runs.
ServiceContext* const _serviceContext;
- // Used as a constant session ID for all distributed locks that this MigrationManager holds.
- // Currently required so that locks can be reacquired for the balancer in startRecovery and then
- // overtaken in later operations.
- const OID _lockSessionID{OID::gen()};
-
// Carries migration information over from startRecovery to finishRecovery. Should only be set
// in startRecovery and then accessed in finishRecovery.
stdx::unordered_map<NamespaceString, std::list<MigrationType>> _migrationRecoveryMap;
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index 9f77f4ddd36..b65feb6d514 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -586,11 +586,10 @@ TEST_F(MigrationManagerTest, FailMigrationRecovery) {
    // it fails. Any dist lock held by the config server will be released via processId, so the
// session ID used here doesn't matter.
ASSERT_OK(DistLockManager::get(operationContext())
- ->lockWithSessionID(operationContext(),
- collName.ns(),
- "MigrationManagerTest",
- OID::gen(),
- DistLockManager::kSingleLockAttemptTimeout));
+ ->lockDirect(operationContext(),
+ collName.ns(),
+ "MigrationManagerTest",
+ DistLockManager::kSingleLockAttemptTimeout));
_migrationManager->startRecoveryAndAcquireDistLocks(operationContext());
_migrationManager->finishRecovery(operationContext(), 0, kDefaultSecondaryThrottle);
diff --git a/src/mongo/db/s/config/configsvr_create_database_command.cpp b/src/mongo/db/s/config/configsvr_create_database_command.cpp
index 17725f4451d..d875dd50482 100644
--- a/src/mongo/db/s/config/configsvr_create_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_create_database_command.cpp
@@ -88,9 +88,6 @@ public:
ON_BLOCK_EXIT(
[opCtx, dbname] { Grid::get(opCtx)->catalogCache()->purgeDatabase(dbname); });
- auto scopedLock =
- ShardingCatalogManager::get(opCtx)->serializeCreateOrDropDatabase(opCtx, dbname);
-
auto dbDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
opCtx, dbname, "createDatabase", DistLockManager::kDefaultLockTimeout));
diff --git a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
index aa8b22651e6..11657f40f17 100644
--- a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
@@ -121,11 +121,6 @@ public:
setDropCollDistLockWait.execute(
[&](const BSONObj& data) { waitFor = Seconds(data["waitForSecs"].numberInt()); });
- auto scopedDbLock =
- ShardingCatalogManager::get(opCtx)->serializeCreateOrDropDatabase(opCtx, nss.db());
- auto scopedCollLock =
- ShardingCatalogManager::get(opCtx)->serializeCreateOrDropCollection(opCtx, nss);
-
auto dbDistLock = uassertStatusOK(
DistLockManager::get(opCtx)->lock(opCtx, nss.db(), "dropCollection", waitFor));
auto collDistLock = uassertStatusOK(
diff --git a/src/mongo/db/s/config/configsvr_drop_database_command.cpp b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
index 7ef3658490c..79d1f5abd8c 100644
--- a/src/mongo/db/s/config/configsvr_drop_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
@@ -120,8 +120,6 @@ public:
auto const catalogClient = Grid::get(opCtx)->catalogClient();
auto const catalogManager = ShardingCatalogManager::get(opCtx);
- auto scopedLock = catalogManager->serializeCreateOrDropDatabase(opCtx, dbname);
-
auto dbDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
opCtx, dbname, "dropDatabase", DistLockManager::kDefaultLockTimeout));
diff --git a/src/mongo/db/s/config/namespace_serializer.cpp b/src/mongo/db/s/config/namespace_serializer.cpp
deleted file mode 100644
index 89ce4b1b0b1..00000000000
--- a/src/mongo/db/s/config/namespace_serializer.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/s/config/namespace_serializer.h"
-
-#include <map>
-#include <memory>
-#include <string>
-
-#include "mongo/db/namespace_string.h"
-#include "mongo/db/operation_context.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
-#include "mongo/util/scopeguard.h"
-
-namespace mongo {
-
-NamespaceSerializer::NamespaceSerializer() {}
-
-NamespaceSerializer::ScopedLock::ScopedLock(StringData ns, NamespaceSerializer& nsSerializer)
- : _ns(ns.toString()), _nsSerializer(nsSerializer), _owns(true) {}
-
-NamespaceSerializer::ScopedLock::ScopedLock(ScopedLock&& other)
- : _ns(std::move(other._ns)), _nsSerializer(other._nsSerializer), _owns(true) {
- other._owns = false;
-}
-
-NamespaceSerializer::ScopedLock::~ScopedLock() {
- if (_owns) {
- stdx::unique_lock<Latch> lock(_nsSerializer._mutex);
- auto iter = _nsSerializer._inProgressMap.find(_ns);
-
- iter->second->numWaiting--;
- iter->second->isInProgress = false;
- iter->second->cvLocked.notify_all();
-
- if (iter->second->numWaiting == 0) {
- _nsSerializer._inProgressMap.erase(_ns);
- }
- }
-}
-
-NamespaceSerializer::ScopedLock NamespaceSerializer::lock(OperationContext* opCtx, StringData nss) {
- stdx::unique_lock<Latch> lock(_mutex);
- auto iter = _inProgressMap.find(nss);
-
- if (iter == _inProgressMap.end()) {
- _inProgressMap.try_emplace(nss, std::make_shared<NSLock>());
- } else {
- auto nsLock = iter->second;
- nsLock->numWaiting++;
- auto guard = makeGuard([&] { nsLock->numWaiting--; });
- opCtx->waitForConditionOrInterrupt(
- nsLock->cvLocked, lock, [nsLock]() { return !nsLock->isInProgress; });
- guard.dismiss();
- nsLock->isInProgress = true;
- }
-
- return ScopedLock(nss, *this);
-}
-
-} // namespace mongo
diff --git a/src/mongo/db/s/config/namespace_serializer.h b/src/mongo/db/s/config/namespace_serializer.h
deleted file mode 100644
index ce17c30fbb0..00000000000
--- a/src/mongo/db/s/config/namespace_serializer.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-
-#include "mongo/base/status.h"
-#include "mongo/base/status_with.h"
-#include "mongo/db/namespace_string.h"
-#include "mongo/platform/mutex.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/util/string_map.h"
-
-namespace mongo {
-
-class OperationContext;
-
-class NamespaceSerializer {
- NamespaceSerializer(const NamespaceSerializer&) = delete;
- NamespaceSerializer& operator=(const NamespaceSerializer&) = delete;
-
-public:
- class ScopedLock {
- public:
- ~ScopedLock();
-
- ScopedLock(const ScopedLock&) = delete;
- ScopedLock& operator=(const ScopedLock&) = delete;
-
- ScopedLock(ScopedLock&& other);
- ScopedLock& operator=(ScopedLock&& other) = delete;
-
- private:
- friend class NamespaceSerializer;
- ScopedLock(StringData ns, NamespaceSerializer& nsSerializer);
-
- std::string _ns;
- NamespaceSerializer& _nsSerializer;
- bool _owns;
- };
-
- NamespaceSerializer();
-
- ScopedLock lock(OperationContext* opCtx, StringData ns);
-
-private:
- struct NSLock {
- stdx::condition_variable cvLocked;
- int numWaiting = 1;
- bool isInProgress = true;
- };
-
- Mutex _mutex = MONGO_MAKE_LATCH("NamespaceSerializer::_mutex");
- StringMap<std::shared_ptr<NSLock>> _inProgressMap;
-};
-
-} // namespace mongo
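Taken together, the two files deleted above implemented a per-namespace serializer: one condition variable per in-flight namespace, a participant count, and erasure of the map entry once the last participant leaves. The same logic now moves into DistLockManager::lockDirectLocally() (see dist_lock_manager.cpp further down). A minimal standalone sketch of the pattern, using plain std:: types instead of the mongo:: Latch/OperationContext machinery, so interruption and timeout handling are omitted:

```cpp
#include <condition_variable>
#include <map>
#include <memory>
#include <mutex>
#include <string>

// Per-key serializer: at most one owner per key; waiters park on a per-key
// condition variable; the map entry is dropped when the last participant leaves.
class KeySerializer {
public:
    // Blocks until the calling thread exclusively owns 'key'.
    void lock(const std::string& key) {
        std::unique_lock<std::mutex> lk(_mutex);
        auto it = _inProgress.find(key);
        if (it == _inProgress.end()) {
            // First participant: entry starts with numWaiting = 1, inProgress = true
            _inProgress.emplace(key, std::make_shared<Entry>());
            return;
        }
        auto entry = it->second;
        entry->numWaiting++;
        entry->cv.wait(lk, [&] { return !entry->inProgress; });
        entry->inProgress = true;  // We are now the owner; our count stays registered
    }

    void unlock(const std::string& key) {
        std::lock_guard<std::mutex> lk(_mutex);
        auto it = _inProgress.find(key);
        it->second->numWaiting--;
        it->second->inProgress = false;
        it->second->cv.notify_all();
        if (it->second->numWaiting == 0)
            _inProgress.erase(it);  // Last participant gone: drop the entry
    }

private:
    struct Entry {
        std::condition_variable cv;
        int numWaiting = 1;
        bool inProgress = true;
    };

    std::mutex _mutex;
    std::map<std::string, std::shared_ptr<Entry>> _inProgress;
};
```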
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index 9bdb508247e..5c94431b8f3 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -207,16 +207,6 @@ void ShardingCatalogManager::create(ServiceContext* serviceContext,
shardingCatalogManager.emplace(serviceContext, std::move(addShardExecutor));
}
-NamespaceSerializer::ScopedLock ShardingCatalogManager::serializeCreateOrDropDatabase(
- OperationContext* opCtx, StringData dbName) {
- return _namespaceSerializer.lock(opCtx, dbName);
-}
-
-NamespaceSerializer::ScopedLock ShardingCatalogManager::serializeCreateOrDropCollection(
- OperationContext* opCtx, const NamespaceString& nss) {
- return _namespaceSerializer.lock(opCtx, nss.ns());
-}
-
void ShardingCatalogManager::clearForTests(ServiceContext* serviceContext) {
auto& shardingCatalogManager = getShardingCatalogManager(serviceContext);
invariant(shardingCatalogManager);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index 85e6b6f9158..ca39305b048 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -34,7 +34,6 @@
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/logical_session_cache.h"
#include "mongo/db/repl/optime_with.h"
-#include "mongo/db/s/config/namespace_serializer.h"
#include "mongo/executor/task_executor.h"
#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/type_chunk.h"
@@ -311,13 +310,6 @@ public:
const ShardId& primaryShard);
/**
- * Creates a ScopedLock on the database name in _namespaceSerializer. This is to prevent
- * timeouts waiting on the dist lock if multiple threads attempt to create or drop the same db.
- */
- NamespaceSerializer::ScopedLock serializeCreateOrDropDatabase(OperationContext* opCtx,
- StringData dbName);
-
- /**
* Creates the database if it does not exist, then marks its entry in config.databases as
* sharding-enabled.
*
@@ -386,14 +378,6 @@ public:
const bool upsert,
TxnNumber txnNumber);
- /**
- * Creates a ScopedLock on the collection name in _namespaceSerializer. This is to prevent
- * timeouts waiting on the dist lock if multiple threads attempt to create or drop the same
- * collection.
- */
- NamespaceSerializer::ScopedLock serializeCreateOrDropCollection(OperationContext* opCtx,
- const NamespaceString& nss);
-
//
// Shard Operations
//
@@ -714,12 +698,6 @@ private:
* requests).
*/
Lock::ResourceMutex _kShardMembershipLock;
-
- /**
- * Optimization for DDL operations, which might be tried concurrently by multiple threads.
- * Avoids convoying and timeouts on the database/collection distributed lock.
- */
- NamespaceSerializer _namespaceSerializer;
};
} // namespace mongo
diff --git a/src/mongo/db/s/dist_lock_catalog.h b/src/mongo/db/s/dist_lock_catalog.h
index 7f774915b66..8c6810ba957 100644
--- a/src/mongo/db/s/dist_lock_catalog.h
+++ b/src/mongo/db/s/dist_lock_catalog.h
@@ -133,14 +133,6 @@ public:
StringData why) = 0;
/**
- * Attempts to set the state of the lock document with lockSessionID to unlocked. Returns OK,
- * if at the end of this call it is determined that the lock is definitely not owned by the
- * specified session (i.e., it is not owned at all or if it is owned by a different session).
- * Otherwise, it returns an error status. Common errors include socket errors.
- */
- virtual Status unlock(OperationContext* opCtx, const OID& lockSessionID) = 0;
-
- /**
* Same as unlock() above except that it unlocks the lock document that matches "lockSessionID"
* AND "name", rather than just "lockSessionID". This is necessary if multiple documents have
* been locked with the same lockSessionID.
diff --git a/src/mongo/db/s/dist_lock_catalog_mock.cpp b/src/mongo/db/s/dist_lock_catalog_mock.cpp
index fba94f75502..1e536a4b0f2 100644
--- a/src/mongo/db/s/dist_lock_catalog_mock.cpp
+++ b/src/mongo/db/s/dist_lock_catalog_mock.cpp
@@ -65,9 +65,9 @@ void noOvertakeLockFuncSet(StringData lockID,
<< ", who: " << who << ", processId: " << processId << ", why: " << why);
}
-void noUnLockFuncSet(const OID& lockSessionID) {
+void noUnLockFuncSet(const OID& lockSessionID, StringData name) {
FAIL(str::stream() << "unlock not expected to be called. "
- << "lockSessionID: " << lockSessionID);
+ << "lockSessionID: " << lockSessionID << "name: " << name);
}
void noPingFuncSet(StringData processID, Date_t ping) {
@@ -193,20 +193,6 @@ StatusWith<LocksType> DistLockCatalogMock::overtakeLock(OperationContext* opCtx,
return ret;
}
-Status DistLockCatalogMock::unlock(OperationContext* opCtx, const OID& lockSessionID) {
- auto ret = kBadRetValue;
- UnlockFunc checkerFunc = noUnLockFuncSet;
-
- {
- stdx::lock_guard<Latch> lk(_mutex);
- ret = _unlockReturnValue;
- checkerFunc = _unlockChecker;
- }
-
- checkerFunc(lockSessionID);
- return ret;
-}
-
Status DistLockCatalogMock::unlock(OperationContext* opCtx,
const OID& lockSessionID,
StringData name) {
@@ -219,7 +205,7 @@ Status DistLockCatalogMock::unlock(OperationContext* opCtx,
checkerFunc = _unlockChecker;
}
- checkerFunc(lockSessionID);
+ checkerFunc(lockSessionID, name);
return ret;
}
diff --git a/src/mongo/db/s/dist_lock_catalog_mock.h b/src/mongo/db/s/dist_lock_catalog_mock.h
index e97621f8b2f..2d71657a62a 100644
--- a/src/mongo/db/s/dist_lock_catalog_mock.h
+++ b/src/mongo/db/s/dist_lock_catalog_mock.h
@@ -82,7 +82,7 @@ public:
StringData processId,
Date_t time,
StringData why)>;
- using UnlockFunc = std::function<void(const OID& lockSessionID)>;
+ using UnlockFunc = std::function<void(const OID& lockSessionID, StringData name)>;
using PingFunc = std::function<void(StringData processID, Date_t ping)>;
using StopPingFunc = std::function<void(StringData processID)>;
using GetPingFunc = StopPingFunc;
@@ -113,8 +113,6 @@ public:
Date_t time,
StringData why) override;
- virtual Status unlock(OperationContext* opCtx, const OID& lockSessionID) override;
-
virtual Status unlock(OperationContext* opCtx,
const OID& lockSessionID,
StringData name) override;
diff --git a/src/mongo/db/s/dist_lock_catalog_replset.cpp b/src/mongo/db/s/dist_lock_catalog_replset.cpp
index 862a2a24844..b0587b2664e 100644
--- a/src/mongo/db/s/dist_lock_catalog_replset.cpp
+++ b/src/mongo/db/s/dist_lock_catalog_replset.cpp
@@ -324,16 +324,6 @@ StatusWith<LocksType> DistLockCatalogImpl::overtakeLock(OperationContext* opCtx,
return locksTypeResult.getValue();
}
-Status DistLockCatalogImpl::unlock(OperationContext* opCtx, const OID& lockSessionID) {
- auto request =
- makeFindAndModifyRequest(_locksNS,
- BSON(LocksType::lockID(lockSessionID)),
- write_ops::UpdateModification::parseFromClassicUpdate(
- BSON("$set" << BSON(LocksType::state(LocksType::UNLOCKED)))));
- request.setWriteConcern(kMajorityWriteConcern.toBSON());
- return _unlock(opCtx, request);
-}
-
Status DistLockCatalogImpl::unlock(OperationContext* opCtx,
const OID& lockSessionID,
StringData name) {
diff --git a/src/mongo/db/s/dist_lock_catalog_replset.h b/src/mongo/db/s/dist_lock_catalog_replset.h
index 9421d290af3..6f8a0d34546 100644
--- a/src/mongo/db/s/dist_lock_catalog_replset.h
+++ b/src/mongo/db/s/dist_lock_catalog_replset.h
@@ -71,8 +71,6 @@ public:
Date_t time,
StringData why) override;
- Status unlock(OperationContext* opCtx, const OID& lockSessionID) override;
-
Status unlock(OperationContext* opCtx, const OID& lockSessionID, StringData name) override;
Status unlockAll(OperationContext* opCtx, const std::string& processID) override;
diff --git a/src/mongo/db/s/dist_lock_catalog_replset_test.cpp b/src/mongo/db/s/dist_lock_catalog_replset_test.cpp
index 67fff471f77..1ecaec25bd4 100644
--- a/src/mongo/db/s/dist_lock_catalog_replset_test.cpp
+++ b/src/mongo/db/s/dist_lock_catalog_replset_test.cpp
@@ -922,7 +922,8 @@ TEST_F(DistLockCatalogReplSetTest, OvertakeLockUnsupportedResponseFormat) {
TEST_F(DistLockCatalogReplSetTest, BasicUnlock) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = _distLockCatalog.unlock(operationContext(), OID("555f99712c99a78c5b083358"));
+ auto status = _distLockCatalog.unlock(
+ operationContext(), OID("555f99712c99a78c5b083358"), "TestName");
ASSERT_OK(status);
});
@@ -932,7 +933,7 @@ TEST_F(DistLockCatalogReplSetTest, BasicUnlock) {
BSONObj expectedCmd(fromjson(R"({
findAndModify: "locks",
- query: { ts: ObjectId("555f99712c99a78c5b083358") },
+ query: { ts: ObjectId("555f99712c99a78c5b083358"), _id: "TestName" },
update: { $set: { state: 0 }},
writeConcern: { w: "majority", wtimeout: 15000 },
maxTimeMS: 30000
@@ -989,7 +990,8 @@ TEST_F(DistLockCatalogReplSetTest, BasicUnlockWithName) {
TEST_F(DistLockCatalogReplSetTest, UnlockWithNoNewDoc) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = _distLockCatalog.unlock(operationContext(), OID("555f99712c99a78c5b083358"));
+ auto status = _distLockCatalog.unlock(
+ operationContext(), OID("555f99712c99a78c5b083358"), "TestName");
ASSERT_OK(status);
});
@@ -999,7 +1001,7 @@ TEST_F(DistLockCatalogReplSetTest, UnlockWithNoNewDoc) {
BSONObj expectedCmd(fromjson(R"({
findAndModify: "locks",
- query: { ts: ObjectId("555f99712c99a78c5b083358") },
+ query: { ts: ObjectId("555f99712c99a78c5b083358"), _id: "TestName" },
update: { $set: { state: 0 }},
writeConcern: { w: "majority", wtimeout: 15000 },
maxTimeMS: 30000
@@ -1048,21 +1050,21 @@ TEST_F(DistLockCatalogReplSetTest, UnlockWithNameWithNoNewDoc) {
TEST_F(DistLockCatalogReplSetTest, UnlockTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
- auto status = _distLockCatalog.unlock(operationContext(), OID());
+ auto status = _distLockCatalog.unlock(operationContext(), OID(), "TestName");
ASSERT_NOT_OK(status);
}
TEST_F(DistLockCatalogReplSetTest, UnlockRunCmdError) {
shutdownExecutorPool();
- auto status = _distLockCatalog.unlock(operationContext(), OID());
+ auto status = _distLockCatalog.unlock(operationContext(), OID(), "TestName");
ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, status.code());
ASSERT_FALSE(status.reason().empty());
}
TEST_F(DistLockCatalogReplSetTest, UnlockCommandError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = _distLockCatalog.unlock(operationContext(), OID());
+ auto status = _distLockCatalog.unlock(operationContext(), OID(), "TestName");
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1080,7 +1082,7 @@ TEST_F(DistLockCatalogReplSetTest, UnlockCommandError) {
TEST_F(DistLockCatalogReplSetTest, UnlockWriteError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = _distLockCatalog.unlock(operationContext(), OID());
+ auto status = _distLockCatalog.unlock(operationContext(), OID(), "TestName");
ASSERT_EQUALS(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1098,7 +1100,7 @@ TEST_F(DistLockCatalogReplSetTest, UnlockWriteError) {
TEST_F(DistLockCatalogReplSetTest, UnlockWriteConcernError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = _distLockCatalog.unlock(operationContext(), OID());
+ auto status = _distLockCatalog.unlock(operationContext(), OID(), "TestName");
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1123,7 +1125,7 @@ TEST_F(DistLockCatalogReplSetTest, UnlockWriteConcernError) {
TEST_F(DistLockCatalogReplSetTest, UnlockUnsupportedWriteConcernResponse) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = _distLockCatalog.unlock(operationContext(), OID());
+ auto status = _distLockCatalog.unlock(operationContext(), OID(), "TestName");
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1145,7 +1147,7 @@ TEST_F(DistLockCatalogReplSetTest, UnlockUnsupportedWriteConcernResponse) {
TEST_F(DistLockCatalogReplSetTest, UnlockUnsupportedResponseFormat) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = _distLockCatalog.unlock(operationContext(), OID());
+ auto status = _distLockCatalog.unlock(operationContext(), OID(), "TestName");
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
});
diff --git a/src/mongo/db/s/dist_lock_manager.cpp b/src/mongo/db/s/dist_lock_manager.cpp
index f58c54d8d56..39c08cc1dd7 100644
--- a/src/mongo/db/s/dist_lock_manager.cpp
+++ b/src/mongo/db/s/dist_lock_manager.cpp
@@ -47,23 +47,26 @@ const Seconds DistLockManager::kDefaultLockTimeout(20);
const Milliseconds DistLockManager::kSingleLockAttemptTimeout(0);
DistLockManager::ScopedDistLock::ScopedDistLock(OperationContext* opCtx,
- DistLockHandle lockHandle,
+ StringData lockName,
+ ScopedLock&& scopedLock,
DistLockManager* lockManager)
- : _opCtx(opCtx), _lockID(std::move(lockHandle)), _lockManager(lockManager) {}
+ : _opCtx(opCtx),
+ _lockName(lockName.toString()),
+ _scopedLock(std::move(scopedLock)),
+ _lockManager(lockManager) {}
DistLockManager::ScopedDistLock::~ScopedDistLock() {
if (_lockManager) {
- _lockManager->unlock(_opCtx, _lockID);
+ _lockManager->unlock(_opCtx, _lockName);
}
}
-DistLockManager::ScopedDistLock::ScopedDistLock(ScopedDistLock&& other) {
- _opCtx = other._opCtx;
+DistLockManager::ScopedDistLock::ScopedDistLock(ScopedDistLock&& other)
+ : ScopedDistLock(other._opCtx,
+ std::move(other._lockName),
+ std::move(other._scopedLock),
+ other._lockManager) {
other._opCtx = nullptr;
-
- _lockID = std::move(other._lockID);
-
- _lockManager = other._lockManager;
other._lockManager = nullptr;
}
@@ -73,6 +76,8 @@ DistLockManager::ScopedDistLock DistLockManager::ScopedDistLock::moveToAnotherTh
return unownedScopedDistLock;
}
+DistLockManager::DistLockManager(OID lockSessionID) : _lockSessionID(std::move(lockSessionID)) {}
+
DistLockManager* DistLockManager::get(ServiceContext* service) {
return getDistLockManager(service).get();
}
@@ -91,12 +96,66 @@ StatusWith<DistLockManager::ScopedDistLock> DistLockManager::lock(OperationConte
StringData name,
StringData whyMessage,
Milliseconds waitFor) {
- auto distLockHandleStatus = lockWithSessionID(opCtx, name, whyMessage, OID::gen(), waitFor);
- if (!distLockHandleStatus.isOK()) {
- return distLockHandleStatus.getStatus();
+ boost::optional<ScopedLock> scopedLock;
+ try {
+ scopedLock.emplace(lockDirectLocally(opCtx, name, waitFor));
+ } catch (const DBException& ex) {
+ return ex.toStatus();
+ }
+
+ auto status = lockDirect(opCtx, name, whyMessage, waitFor);
+ if (!status.isOK()) {
+ return status;
}
- return DistLockManager::ScopedDistLock(opCtx, std::move(distLockHandleStatus.getValue()), this);
+ return DistLockManager::ScopedDistLock(opCtx, name, std::move(*scopedLock), this);
+}
+
+DistLockManager::ScopedLock DistLockManager::lockDirectLocally(OperationContext* opCtx,
+ StringData ns,
+ Milliseconds waitFor) {
+ stdx::unique_lock<Latch> lock(_mutex);
+ auto iter = _inProgressMap.find(ns);
+
+ if (iter == _inProgressMap.end()) {
+ _inProgressMap.try_emplace(ns, std::make_shared<NSLock>());
+ } else {
+ auto nsLock = iter->second;
+ nsLock->numWaiting++;
+ auto guard = makeGuard([&] { nsLock->numWaiting--; });
+ if (!opCtx->waitForConditionOrInterruptFor(
+ nsLock->cvLocked, lock, waitFor, [nsLock]() { return !nsLock->isInProgress; })) {
+ uasserted(ErrorCodes::LockBusy,
+ str::stream() << "Failed to acquire dist lock " << ns << " locally");
+ }
+ guard.dismiss();
+ nsLock->isInProgress = true;
+ }
+
+ return ScopedLock(ns, this);
+}
+
+DistLockManager::ScopedLock::ScopedLock(StringData ns, DistLockManager* distLockManager)
+ : _ns(ns.toString()), _lockManager(distLockManager) {}
+
+DistLockManager::ScopedLock::ScopedLock(ScopedLock&& other)
+ : _ns(std::move(other._ns)), _lockManager(other._lockManager) {
+ other._lockManager = nullptr;
+}
+
+DistLockManager::ScopedLock::~ScopedLock() {
+ if (_lockManager) {
+ stdx::unique_lock<Latch> lock(_lockManager->_mutex);
+ auto iter = _lockManager->_inProgressMap.find(_ns);
+
+ iter->second->numWaiting--;
+ iter->second->isInProgress = false;
+ iter->second->cvLocked.notify_all();
+
+ if (iter->second->numWaiting == 0) {
+ _lockManager->_inProgressMap.erase(_ns);
+ }
+ }
}
} // namespace mongo
diff --git a/src/mongo/db/s/dist_lock_manager.h b/src/mongo/db/s/dist_lock_manager.h
index bffb49eaf7e..120aea7a55e 100644
--- a/src/mongo/db/s/dist_lock_manager.h
+++ b/src/mongo/db/s/dist_lock_manager.h
@@ -32,12 +32,13 @@
#include "mongo/base/string_data.h"
#include "mongo/bson/oid.h"
#include "mongo/db/service_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/chrono.h"
+#include "mongo/stdx/condition_variable.h"
namespace mongo {
-using DistLockHandle = OID;
-
/**
* Interface for handling distributed locks.
*
@@ -64,7 +64,25 @@ public:
static const Milliseconds kSingleLockAttemptTimeout;
/**
- * RAII type for distributed lock. Not meant to be shared across multiple threads.
+ * RAII type for the local lock.
+ */
+ class ScopedLock {
+ ScopedLock(const ScopedLock&) = delete;
+ ScopedLock& operator=(const ScopedLock&) = delete;
+
+ public:
+ ScopedLock(StringData lockName, DistLockManager* distLockManager);
+ ~ScopedLock();
+
+ ScopedLock(ScopedLock&& other);
+
+ private:
+ std::string _ns;
+ DistLockManager* _lockManager;
+ };
+
+ /**
+ * RAII type for the distributed lock.
*/
class ScopedDistLock {
ScopedDistLock(const ScopedDistLock&) = delete;
@@ -72,7 +90,8 @@ public:
public:
ScopedDistLock(OperationContext* opCtx,
- DistLockHandle lockHandle,
+ StringData lockName,
+ ScopedLock&& scopedLock,
DistLockManager* lockManager);
~ScopedDistLock();
@@ -82,8 +101,9 @@ public:
private:
OperationContext* _opCtx;
- DistLockHandle _lockID;
- DistLockManager* _lockManager; // Not owned here.
+ std::string _lockName;
+ ScopedLock _scopedLock;
+ DistLockManager* _lockManager;
};
virtual ~DistLockManager() = default;
@@ -131,28 +151,31 @@ public:
Milliseconds waitFor);
/**
- * Same behavior as lock(...) above, except takes a specific lock session ID "lockSessionID"
- * instead of randomly generating one internally.
+     * Ensures that dist lock acquisitions for the same name within the same process serialise with each other.
+ */
+ ScopedLock lockDirectLocally(OperationContext* opCtx, StringData ns, Milliseconds waitFor);
+
+ /**
+     * Same behavior as lock(...) above, except it doesn't return a scoped object, so it is the
+     * caller's responsibility to call unlock for the same name.
     *
     * This is useful for a process running on the config primary after a failover. A lock can be
     * immediately reacquired if the manager's fixed lock session ID matches that of the lock,
     * rather than waiting for the inactive lock to expire.
*/
- virtual StatusWith<DistLockHandle> lockWithSessionID(OperationContext* opCtx,
- StringData name,
- StringData whyMessage,
- const OID& lockSessionID,
- Milliseconds waitFor) = 0;
+ virtual Status lockDirect(OperationContext* opCtx,
+ StringData name,
+ StringData whyMessage,
+ Milliseconds waitFor) = 0;
/**
* Specialized locking method, which only succeeds if the specified lock name is not held by
* anyone. Uses local write concern and does not attempt to overtake the lock or check whether
* the lock lease has expired.
*/
- virtual StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* opCtx,
- StringData name,
- StringData whyMessage,
- const OID& lockSessionID) = 0;
+ virtual Status tryLockDirectWithLocalWriteConcern(OperationContext* opCtx,
+ StringData name,
+ StringData whyMessage) = 0;
/**
     * Unlocks the lock with the given name. Will keep retrying (asynchronously) until the lock is freed or
@@ -161,13 +184,28 @@ public:
* The provided interruptible object can be nullptr in which case the method will not attempt to
* wait for the unlock to be confirmed.
*/
- virtual void unlock(Interruptible* intr, const DistLockHandle& lockHandle) = 0;
- virtual void unlock(Interruptible* intr, const DistLockHandle& lockHandle, StringData name) = 0;
+ virtual void unlock(Interruptible* intr, StringData name) = 0;
/**
* Makes a best-effort attempt to unlock all locks owned by the given processID.
*/
virtual void unlockAll(OperationContext* opCtx) = 0;
+
+protected:
+ friend class MigrationManager;
+
+ DistLockManager(OID lockSessionID);
+
+ const OID _lockSessionID;
+
+ struct NSLock {
+ stdx::condition_variable cvLocked;
+ int numWaiting = 1;
+ bool isInProgress = true;
+ };
+
+    Mutex _mutex = MONGO_MAKE_LATCH("DistLockManager::_mutex");
+ StringMap<std::shared_ptr<NSLock>> _inProgressMap;
};
} // namespace mongo
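The refactored header above leaves two acquisition styles. A schematic of both, modeled on call sites visible elsewhere in this diff (configsvr_drop_database_command.cpp and MigrationManager::_schedule); this is a sketch against the in-tree API as changed by this patch, not a standalone program:

```cpp
// Style 1: RAII. lock() takes the local ScopedLock first, then the remote
// lease; both are released when the returned ScopedDistLock goes out of scope.
auto distLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
    opCtx, dbname, "dropDatabase", DistLockManager::kDefaultLockTimeout));

// Style 2: direct. lockDirectLocally() returns the local RAII guard (throwing
// LockBusy on timeout), while lockDirect() only returns a Status, so the
// caller owns the remote lease and must unlock() the same name explicitly
// (as MigrationManager does in _complete()).
auto localLock = DistLockManager::get(opCtx)->lockDirectLocally(
    opCtx, nss.ns(), DistLockManager::kSingleLockAttemptTimeout);
uassertStatusOK(DistLockManager::get(opCtx)->lockDirect(
    opCtx, nss.ns(), "Migrating chunk(s)", DistLockManager::kSingleLockAttemptTimeout));
// ... work under the lock ...
DistLockManager::get(opCtx)->unlock(opCtx, nss.ns());
```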
diff --git a/src/mongo/db/s/dist_lock_manager_replset.cpp b/src/mongo/db/s/dist_lock_manager_replset.cpp
index a2c6622d4fe..cde47a53a82 100644
--- a/src/mongo/db/s/dist_lock_manager_replset.cpp
+++ b/src/mongo/db/s/dist_lock_manager_replset.cpp
@@ -58,17 +58,39 @@ const Milliseconds kLockRetryInterval(500);
MONGO_FAIL_POINT_DEFINE(setDistLockTimeout);
MONGO_FAIL_POINT_DEFINE(disableReplSetDistLockManager);
+/**
+ * With this logic, the LockID handle for the config server is always fixed and different from that
+ * of the shards, but all shards have the same LockID handle. This means that locks taken from
+ * different shards OR different nodes from the same shard are always compatible.
+ *
+ * This is OK and is only needed as a step for upgrade from 4.4 to 4.9+ in order to ensure that the
+ * new DDL operations, which are driven by the DB primaries, lock out the legacy DDL operations,
+ * which are driven by a 4.4 config server.
+ */
+OID shardNameToOID(StringData name) {
+ std::string oidData(OID::kOIDSize, 0);
+
+ if (name == ShardId::kConfigServerId) {
+ oidData[0] = 1;
+ } else {
+ oidData[0] = 2;
+ }
+
+ return OID::from(oidData.c_str());
+}
+
} // namespace
const Seconds ReplSetDistLockManager::kDistLockPingInterval{30};
const Minutes ReplSetDistLockManager::kDistLockExpirationTime{15};
-ReplSetDistLockManager::ReplSetDistLockManager(ServiceContext* globalContext,
+ReplSetDistLockManager::ReplSetDistLockManager(ServiceContext* service,
StringData processID,
std::unique_ptr<DistLockCatalog> catalog,
Milliseconds pingInterval,
Milliseconds lockExpiration)
- : _serviceContext(globalContext),
+ : DistLockManager(shardNameToOID(processID)),
+ _serviceContext(service),
_processID(processID.toString()),
_catalog(std::move(catalog)),
_pingInterval(pingInterval),
@@ -175,10 +197,7 @@ void ReplSetDistLockManager::doTask() {
continue;
}
- Status unlockStatus = toUnlock.name
- ? _catalog->unlock(opCtx, toUnlock.lockId, *toUnlock.name)
- : _catalog->unlock(opCtx, toUnlock.lockId);
-
+ Status unlockStatus = _catalog->unlock(opCtx, toUnlock.lockId, toUnlock.name);
toUnlock.unlockCompleted.setFrom(unlockStatus);
if (!unlockStatus.isOK()) {
@@ -328,11 +347,10 @@ StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* opCtx,
return false;
}
-StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationContext* opCtx,
- StringData name,
- StringData whyMessage,
- const OID& lockSessionID,
- Milliseconds waitFor) {
+Status ReplSetDistLockManager::lockDirect(OperationContext* opCtx,
+ StringData name,
+ StringData whyMessage,
+ Milliseconds waitFor) {
Timer timer(_serviceContext->getTickSource());
Timer msgTimer(_serviceContext->getTickSource());
@@ -365,17 +383,16 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
"reason: {reason} )",
"Trying to acquire new distributed lock",
"lockName"_attr = name,
- "lockSessionId"_attr = lockSessionID,
+ "lockSessionId"_attr = _lockSessionID,
"processId"_attr = _processID,
"lockExpirationTimeout"_attr = lockExpiration,
"pingInterval"_attr = _pingInterval,
"reason"_attr = whyMessage);
auto lockResult = _catalog->grabLock(
- opCtx, name, lockSessionID, who, _processID, Date_t::now(), whyMessage.toString());
+ opCtx, name, _lockSessionID, who, _processID, Date_t::now(), whyMessage.toString());
auto status = lockResult.getStatus();
-
if (status.isOK()) {
// Lock is acquired since findAndModify was able to successfully modify
// the lock document.
@@ -384,9 +401,9 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
"{reason}",
"Acquired distributed lock",
"lockName"_attr = name,
- "lockSessionId"_attr = lockSessionID,
+ "lockSessionId"_attr = _lockSessionID,
"reason"_attr = whyMessage);
- return lockSessionID;
+ return Status::OK();
}
// If a network error occurred, unlock the lock synchronously and try again
@@ -404,7 +421,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
networkErrorRetries++;
- status = _catalog->unlock(opCtx, lockSessionID, name);
+ status = _catalog->unlock(opCtx, _lockSessionID, name);
if (status.isOK()) {
// We certainly do not own the lock, so we can retry
continue;
@@ -423,7 +440,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
if (status != ErrorCodes::LockStateChangeFailed) {
// An error occurred but the write might have actually been applied on the
// other side. Schedule an unlock to clean it up just in case.
- (void)queueUnlock(lockSessionID, name.toString());
+ (void)queueUnlock(_lockSessionID, name.toString());
return status;
}
@@ -445,10 +462,10 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
return isLockExpiredResult.getStatus();
}
- if (isLockExpiredResult.getValue() || (lockSessionID == currentLock.getLockID())) {
+ if (isLockExpiredResult.getValue() || (_lockSessionID == currentLock.getLockID())) {
auto overtakeResult = _catalog->overtakeLock(opCtx,
name,
- lockSessionID,
+ _lockSessionID,
currentLock.getLockID(),
who,
_processID,
@@ -465,14 +482,14 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
"Acquired distributed lock {lockName} with sessionId {lockSessionId}",
"Acquired distributed lock",
"lockName"_attr = name,
- "lockSessionId"_attr = lockSessionID);
- return lockSessionID;
+ "lockSessionId"_attr = _lockSessionID);
+ return Status::OK();
}
if (overtakeStatus != ErrorCodes::LockStateChangeFailed) {
// An error occurred but the write might have actually been applied on the
// other side. Schedule an unlock to clean it up just in case.
- (void)queueUnlock(lockSessionID, boost::none);
+ (void)queueUnlock(_lockSessionID, name.toString());
return overtakeStatus;
}
}
@@ -512,8 +529,9 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
return {ErrorCodes::LockBusy, str::stream() << "timed out waiting for " << name};
}
-StatusWith<DistLockHandle> ReplSetDistLockManager::tryLockWithLocalWriteConcern(
- OperationContext* opCtx, StringData name, StringData whyMessage, const OID& lockSessionID) {
+Status ReplSetDistLockManager::tryLockDirectWithLocalWriteConcern(OperationContext* opCtx,
+ StringData name,
+ StringData whyMessage) {
const std::string who = str::stream() << _processID << ":" << getThreadName();
LOGV2_DEBUG(22662,
@@ -526,7 +544,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::tryLockWithLocalWriteConcern(
"reason: {reason} )",
"Trying to acquire new distributed lock",
"lockName"_attr = name,
- "lockSessionId"_attr = lockSessionID,
+ "lockSessionId"_attr = _lockSessionID,
"processId"_attr = _processID,
"lockExpirationTimeout"_attr = _lockExpiration,
"pingInterval"_attr = _pingInterval,
@@ -534,22 +552,21 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::tryLockWithLocalWriteConcern(
auto lockStatus = _catalog->grabLock(opCtx,
name,
- lockSessionID,
+ _lockSessionID,
who,
_processID,
Date_t::now(),
whyMessage.toString(),
DistLockCatalog::kLocalWriteConcern);
-
if (lockStatus.isOK()) {
LOGV2(22663,
"Acquired distributed lock {lockName} with session ID {lockSessionId} for "
"{reason}",
"Acquired distributed lock",
"lockName"_attr = name,
- "lockSessionId"_attr = lockSessionID,
+ "lockSessionId"_attr = _lockSessionID,
"reason"_attr = whyMessage);
- return lockSessionID;
+ return Status::OK();
}
LOGV2_DEBUG(22664,
@@ -565,16 +582,8 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::tryLockWithLocalWriteConcern(
return lockStatus.getStatus();
}
-void ReplSetDistLockManager::unlock(Interruptible* intr, const DistLockHandle& lockSessionID) {
- auto unlockFuture = queueUnlock(lockSessionID, boost::none);
- if (intr)
- unlockFuture.getNoThrow(intr).ignore();
-}
-
-void ReplSetDistLockManager::unlock(Interruptible* intr,
- const DistLockHandle& lockSessionID,
- StringData name) {
- auto unlockFuture = queueUnlock(lockSessionID, name.toString());
+void ReplSetDistLockManager::unlock(Interruptible* intr, StringData name) {
+ auto unlockFuture = queueUnlock(_lockSessionID, name.toString());
if (intr)
unlockFuture.getNoThrow(intr).ignore();
}
@@ -591,8 +600,8 @@ void ReplSetDistLockManager::unlockAll(OperationContext* opCtx) {
}
}
-SharedSemiFuture<void> ReplSetDistLockManager::queueUnlock(
- const DistLockHandle& lockSessionID, const boost::optional<std::string>& name) {
+SharedSemiFuture<void> ReplSetDistLockManager::queueUnlock(const OID& lockSessionID,
+ const std::string& name) {
stdx::unique_lock<Latch> lk(_mutex);
auto& req = _unlockList.emplace_back(lockSessionID, name);
_shutDownCV.notify_all();
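The effect of shardNameToOID() at the top of this file is easiest to see on concrete bytes: the config server always locks under session ID 01 followed by eleven zero bytes, while every shard, and every node of every shard, locks under 02 followed by eleven zero bytes, which is what makes their acquisitions mutually compatible. A standalone illustration using a plain 12-byte buffer in place of mongo::OID (the "config" shard name and the 12-byte OID size are carried over from ShardId::kConfigServerId and OID::kOIDSize):

```cpp
#include <cstdio>
#include <string>

// Mirrors shardNameToOID(): byte 0 is 1 for the config server and 2 for any
// shard; the remaining 11 bytes are zero.
std::string lockSessionBytes(const std::string& shardName) {
    std::string oidData(12, '\0');  // OID::kOIDSize == 12
    oidData[0] = (shardName == "config") ? 1 : 2;
    return oidData;
}

int main() {
    for (const auto& name : {"config", "shard0000", "shard0001"}) {
        std::printf("%-10s -> ", name);
        for (unsigned char c : lockSessionBytes(name))
            std::printf("%02x", c);
        std::printf("\n");  // config -> 0100...00; every shard -> 0200...00
    }
}
```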
diff --git a/src/mongo/db/s/dist_lock_manager_replset.h b/src/mongo/db/s/dist_lock_manager_replset.h
index 2ad6aa1662d..2eddb76326b 100644
--- a/src/mongo/db/s/dist_lock_manager_replset.h
+++ b/src/mongo/db/s/dist_lock_manager_replset.h
@@ -33,8 +33,6 @@
#include "mongo/db/s/dist_lock_catalog.h"
#include "mongo/db/s/dist_lock_manager.h"
-#include "mongo/platform/mutex.h"
-#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/thread.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/future.h"
@@ -50,7 +48,7 @@ public:
// How long should the lease on a distributed lock last
static const Minutes kDistLockExpirationTime;
- ReplSetDistLockManager(ServiceContext* globalContext,
+ ReplSetDistLockManager(ServiceContext* service,
StringData processID,
std::unique_ptr<DistLockCatalog> catalog,
Milliseconds pingInterval,
@@ -63,19 +61,16 @@ public:
std::string getProcessID() override;
- StatusWith<DistLockHandle> lockWithSessionID(OperationContext* opCtx,
- StringData name,
- StringData whyMessage,
- const OID& lockSessionID,
- Milliseconds waitFor) override;
+ Status lockDirect(OperationContext* opCtx,
+ StringData name,
+ StringData whyMessage,
+ Milliseconds waitFor) override;
- StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* opCtx,
- StringData name,
- StringData whyMessage,
- const OID& lockSessionID) override;
+ Status tryLockDirectWithLocalWriteConcern(OperationContext* opCtx,
+ StringData name,
+ StringData whyMessage) override;
- void unlock(Interruptible* intr, const DistLockHandle& lockSessionID) override;
- void unlock(Interruptible* intr, const DistLockHandle& lockSessionID, StringData name) override;
+ void unlock(Interruptible* intr, StringData name) override;
void unlockAll(OperationContext* opCtx) override;
@@ -83,8 +78,7 @@ private:
/**
* Queue a lock to be unlocked asynchronously with retry until it doesn't error.
*/
- SharedSemiFuture<void> queueUnlock(const DistLockHandle& lockSessionID,
- const boost::optional<std::string>& name);
+ SharedSemiFuture<void> queueUnlock(const OID& lockSessionID, const std::string& name);
/**
* Periodically pings and checks if there are locks queued that needs unlocking.
@@ -159,11 +153,11 @@ private:
// whether the modification was actually applied or not, and call unlock to make
// sure that it was cleaned up.
struct UnlockRequest {
- UnlockRequest(DistLockHandle lockId, boost::optional<std::string> name)
+ UnlockRequest(OID lockId, std::string name)
: lockId(std::move(lockId)), name(std::move(name)) {}
- DistLockHandle lockId;
- boost::optional<std::string> name;
+ OID lockId;
+ std::string name;
// Will be signaled when the unlock request has completed
SharedPromise<void> unlockCompleted;
diff --git a/src/mongo/db/s/dist_lock_manager_replset_test.cpp b/src/mongo/db/s/dist_lock_manager_replset_test.cpp
index 6649937895e..d3408d3d3f3 100644
--- a/src/mongo/db/s/dist_lock_manager_replset_test.cpp
+++ b/src/mongo/db/s/dist_lock_manager_replset_test.cpp
@@ -61,19 +61,19 @@ const Seconds kJoinTimeout(30);
const Milliseconds kPingInterval(2);
const Seconds kLockExpiration(10);
-std::string mapToString(const std::map<OID, int>& map) {
+std::string mapToString(const StringMap<int>& map) {
StringBuilder str;
for (const auto& entry : map) {
- str << "(" << entry.first.toString() << ": " << entry.second << ")";
+ str << "(" << entry.first << ": " << entry.second << ")";
}
return str.str();
}
-std::string vectorToString(const std::vector<OID>& list) {
+std::string vectorToString(const std::vector<std::string>& list) {
StringBuilder str;
for (const auto& entry : list) {
- str << "(" << entry.toString() << ")";
+ str << "(" << entry << ")";
}
return str.str();
@@ -180,7 +180,7 @@ TEST_F(DistLockManagerReplSetTest, BasicLockLifeCycle) {
getMockCatalog()->expectNoGrabLock();
getMockCatalog()->expectUnLock(
- [&unlockCallCount, &unlockSessionIDPassed](const OID& lockSessionID) {
+ [&unlockCallCount, &unlockSessionIDPassed](const OID& lockSessionID, StringData name) {
unlockCallCount++;
unlockSessionIDPassed = lockSessionID;
},
@@ -229,7 +229,7 @@ TEST_F(DistLockManagerReplSetTest, MustUnlockOnLockError) {
getMockCatalog()->expectUnLock(
[&unlockMutex, &unlockCV, &unlockCallCount, &unlockSessionIDPassed](
- const OID& lockSessionID) {
+ const OID& lockSessionID, StringData name) {
stdx::unique_lock<Latch> lk(unlockMutex);
unlockCallCount++;
unlockSessionIDPassed = lockSessionID;
@@ -331,13 +331,14 @@ TEST_F(DistLockManagerReplSetTest, UnlockUntilNoError) {
getMockCatalog()->expectUnLock(
[this, &unlockMutex, &unlockCV, &kUnlockErrorCount, &lockSessionIDPassed](
- const OID& lockSessionID) {
+ const OID& lockSessionID, StringData name) {
stdx::unique_lock<Latch> lk(unlockMutex);
lockSessionIDPassed.push_back(lockSessionID);
if (lockSessionIDPassed.size() >= kUnlockErrorCount) {
getMockCatalog()->expectUnLock(
- [&lockSessionIDPassed, &unlockMutex, &unlockCV](const OID& lockSessionID) {
+ [&lockSessionIDPassed, &unlockMutex, &unlockCV](const OID& lockSessionID,
+ StringData name) {
stdx::unique_lock<Latch> lk(unlockMutex);
lockSessionIDPassed.push_back(lockSessionID);
unlockCV.notify_all();
@@ -409,14 +410,14 @@ TEST_F(DistLockManagerReplSetTest, UnlockUntilNoError) {
TEST_F(DistLockManagerReplSetTest, MultipleQueuedUnlock) {
auto testMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
- std::vector<OID> lockSessionIDPassed;
- std::map<OID, int> unlockIDMap; // id -> count
+ std::vector<std::string> lockIdsPassed;
+ StringMap<int> unlockNameMap; // id -> count
/**
* Returns true if all values in the map are greater than 2.
*/
- auto mapEntriesGreaterThanTwo = [](const decltype(unlockIDMap)& map) -> bool {
- auto iter = find_if(
+ auto mapEntriesGreaterThanTwo = [](const decltype(unlockNameMap)& map) -> bool {
+ auto iter = std::find_if(
map.begin(),
map.end(),
[](const std::remove_reference<decltype(map)>::type::value_type& entry) -> bool {
@@ -427,15 +428,15 @@ TEST_F(DistLockManagerReplSetTest, MultipleQueuedUnlock) {
};
getMockCatalog()->expectUnLock(
- [this, &unlockIDMap, &testMutex, &unlockCV, &mapEntriesGreaterThanTwo](
- const OID& lockSessionID) {
+ [this, &unlockNameMap, &testMutex, &unlockCV, &mapEntriesGreaterThanTwo](
+ const OID& lockSessionID, StringData name) {
stdx::unique_lock<Latch> lk(testMutex);
- unlockIDMap[lockSessionID]++;
+ unlockNameMap[name]++;
            // Wait until we see at least 2 unique lock names more than twice.
- if (unlockIDMap.size() >= 2 && mapEntriesGreaterThanTwo(unlockIDMap)) {
+ if (unlockNameMap.size() >= 2 && mapEntriesGreaterThanTwo(unlockNameMap)) {
getMockCatalog()->expectUnLock(
- [&testMutex, &unlockCV](const OID& lockSessionID) {
+ [&testMutex, &unlockCV](const OID& lockSessionID, StringData name) {
stdx::unique_lock<Latch> lk(testMutex);
unlockCV.notify_all();
},
@@ -454,14 +455,14 @@ TEST_F(DistLockManagerReplSetTest, MultipleQueuedUnlock) {
retLockDoc.setLockID(OID::gen());
getMockCatalog()->expectGrabLock(
- [&testMutex, &lockSessionIDPassed](StringData lockID,
- const OID& lockSessionIDArg,
- StringData who,
- StringData processId,
- Date_t time,
- StringData why) {
+ [&testMutex, &lockIdsPassed](StringData lockID,
+ const OID& lockSessionIDArg,
+ StringData who,
+ StringData processId,
+ Date_t time,
+ StringData why) {
stdx::unique_lock<Latch> lk(testMutex);
- lockSessionIDPassed.push_back(lockSessionIDArg);
+ lockIdsPassed.push_back(lockID.toString());
},
retLockDoc);
@@ -476,7 +477,7 @@ TEST_F(DistLockManagerReplSetTest, MultipleQueuedUnlock) {
{
stdx::unique_lock<Latch> lk(testMutex);
- if (unlockIDMap.size() < 2 || !mapEntriesGreaterThanTwo(unlockIDMap)) {
+ if (unlockNameMap.size() < 2 || !mapEntriesGreaterThanTwo(unlockNameMap)) {
didTimeout =
unlockCV.wait_for(lk, kJoinTimeout.toSystemDuration()) == stdx::cv_status::timeout;
}
@@ -493,12 +494,11 @@ TEST_F(DistLockManagerReplSetTest, MultipleQueuedUnlock) {
// No need to grab testMutex since there is only one thread running at this point.
ASSERT_FALSE(didTimeout);
- ASSERT_EQUALS(2u, lockSessionIDPassed.size());
+ ASSERT_EQUALS(2u, lockIdsPassed.size());
- for (const auto& id : lockSessionIDPassed) {
- ASSERT_GREATER_THAN(unlockIDMap[id], 2)
- << "lockIDList: " << vectorToString(lockSessionIDPassed)
- << ", map: " << mapToString(unlockIDMap);
+ for (const auto& id : lockIdsPassed) {
+ ASSERT_GREATER_THAN(unlockNameMap[id], 2) << "lockIDList: " << vectorToString(lockIdsPassed)
+ << ", map: " << mapToString(unlockNameMap);
}
}
@@ -540,7 +540,7 @@ TEST_F(DistLockManagerReplSetTest, CheckLockStatusOK) {
getMockCatalog()->expectNoGrabLock();
getMockCatalog()->expectUnLock(
- [](const OID&) {
+ [](const OID&, StringData) {
// Don't care
},
Status::OK());
@@ -573,7 +573,7 @@ TEST_F(DistLockManagerReplSetTest, CheckLockStatusNoLongerOwn) {
getMockCatalog()->expectNoGrabLock();
getMockCatalog()->expectUnLock(
- [](const OID&) {
+ [](const OID&, StringData) {
// Don't care
},
Status::OK());
@@ -606,7 +606,7 @@ TEST_F(DistLockManagerReplSetTest, CheckLockStatusError) {
getMockCatalog()->expectNoGrabLock();
getMockCatalog()->expectUnLock(
- [](const OID&) {
+ [](const OID&, StringData) {
// Don't care
},
Status::OK());
@@ -695,7 +695,7 @@ TEST_F(DistLockManagerReplSetTest, LockOvertakingAfterLockExpiration) {
getMockCatalog()->expectNoGrabLock();
getMockCatalog()->expectUnLock(
- [&unlockCallCount, &unlockSessionIDPassed](const OID& lockSessionID) {
+ [&unlockCallCount, &unlockSessionIDPassed](const OID& lockSessionID, StringData name) {
unlockCallCount++;
unlockSessionIDPassed = lockSessionID;
},
@@ -706,66 +706,6 @@ TEST_F(DistLockManagerReplSetTest, LockOvertakingAfterLockExpiration) {
ASSERT_EQUALS(lastTS, unlockSessionIDPassed);
}
-/**
- * Test scenario:
- * 1. Attempt to grab lock with lockSessionID fails because lock is already owned.
- * 2. Then the lock is overtaken because the lockSessionID matches the lock owner.
- */
-TEST_F(DistLockManagerReplSetTest, LockOvertakingWithSessionID) {
- OID passedLockSessionID("5572007fda9e476582bf3716");
-
- LocksType currentLockDoc;
- currentLockDoc.setName("bar");
- currentLockDoc.setState(LocksType::LOCKED);
- currentLockDoc.setProcess("otherProcess");
- currentLockDoc.setLockID(passedLockSessionID);
- currentLockDoc.setWho("me");
- currentLockDoc.setWhy("why");
-
- getMockCatalog()->expectGrabLock(
- [&passedLockSessionID, &currentLockDoc](
- StringData, const OID& lockSessionID, StringData, StringData, Date_t, StringData) {
- ASSERT_EQUALS(passedLockSessionID, lockSessionID);
- },
- {ErrorCodes::LockStateChangeFailed, "nMod 0"});
-
- getMockCatalog()->expectGetLockByName([](StringData name) { ASSERT_EQUALS("bar", name); },
- currentLockDoc);
-
- LockpingsType pingDoc;
- pingDoc.setProcess("otherProcess");
- pingDoc.setPing(Date_t());
-
- getMockCatalog()->expectGetPing(
- [](StringData process) { ASSERT_EQUALS("otherProcess", process); }, pingDoc);
-
- getMockCatalog()->expectGetServerInfo([]() {}, DistLockCatalog::ServerInfo(Date_t(), OID()));
-
- getMockCatalog()->expectOvertakeLock(
- [this, &passedLockSessionID, &currentLockDoc](StringData lockID,
- const OID& lockSessionID,
- const OID& currentHolderTS,
- StringData who,
- StringData processId,
- Date_t time,
- StringData why) {
- ASSERT_EQUALS("bar", lockID);
- ASSERT_EQUALS(passedLockSessionID, lockSessionID);
- ASSERT_EQUALS(currentLockDoc.getLockID(), currentHolderTS);
- ASSERT_EQUALS(getProcessID(), processId);
- ASSERT_EQUALS("foo", why);
- },
- currentLockDoc);
-
- auto distLockHandleStatus =
- DistLockManager::get(operationContext())
- ->lockWithSessionID(
- operationContext(), "bar", "foo", passedLockSessionID, Milliseconds(0));
- ASSERT_OK(distLockHandleStatus.getStatus());
-
- getMockCatalog()->expectNoGrabLock();
-}
-
TEST_F(DistLockManagerReplSetTest, CannotOvertakeIfExpirationHasNotElapsed) {
getMockCatalog()->expectGrabLock(
[](StringData, const OID&, StringData, StringData, Date_t, StringData) {
@@ -1033,7 +973,7 @@ TEST_F(DistLockManagerReplSetTest, CannotOvertakeIfPingIsActive) {
getMockCatalog()->expectNoGrabLock();
getMockCatalog()->expectUnLock(
- [&unlockCallCount, &unlockSessionIDPassed](const OID& lockSessionID) {
+ [&unlockCallCount, &unlockSessionIDPassed](const OID& lockSessionID, StringData name) {
unlockCallCount++;
unlockSessionIDPassed = lockSessionID;
},
@@ -1133,7 +1073,7 @@ TEST_F(DistLockManagerReplSetTest, CannotOvertakeIfOwnerJustChanged) {
getMockCatalog()->expectNoGrabLock();
getMockCatalog()->expectUnLock(
- [&unlockCallCount, &unlockSessionIDPassed](const OID& lockSessionID) {
+ [&unlockCallCount, &unlockSessionIDPassed](const OID& lockSessionID, StringData name) {
unlockCallCount++;
unlockSessionIDPassed = lockSessionID;
},
@@ -1236,7 +1176,7 @@ TEST_F(DistLockManagerReplSetTest, CannotOvertakeIfElectionIdChanged) {
getMockCatalog()->expectNoGrabLock();
getMockCatalog()->expectUnLock(
- [&unlockCallCount, &unlockSessionIDPassed](const OID& lockSessionID) {
+ [&unlockCallCount, &unlockSessionIDPassed](const OID& lockSessionID, StringData name) {
unlockCallCount++;
unlockSessionIDPassed = lockSessionID;
},
@@ -1343,7 +1283,7 @@ TEST_F(DistLockManagerReplSetTest, CannotOvertakeIfNoMaster) {
getMockCatalog()->expectNoGrabLock();
getMockCatalog()->expectUnLock(
- [&unlockCallCount, &unlockSessionIDPassed](const OID& lockSessionID) {
+ [&unlockCallCount, &unlockSessionIDPassed](const OID& lockSessionID, StringData name) {
unlockCallCount++;
unlockSessionIDPassed = lockSessionID;
},
@@ -1428,7 +1368,8 @@ TEST_F(DistLockManagerReplSetTest, LockOvertakingResultsInError) {
auto unlockMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
getMockCatalog()->expectUnLock(
- [&unlockSessionIDPassed, &unlockMutex, &unlockCV](const OID& lockSessionID) {
+ [&unlockSessionIDPassed, &unlockMutex, &unlockCV](const OID& lockSessionID,
+ StringData name) {
stdx::unique_lock<Latch> lk(unlockMutex);
unlockSessionIDPassed = lockSessionID;
unlockCV.notify_all();
@@ -1621,7 +1562,7 @@ TEST_F(DistLockManagerReplSetTest, LockAcquisitionRetriesOnNetworkErrorSuccess)
},
{ErrorCodes::NetworkTimeout, "network error"});
- getMockCatalog()->expectUnLock([&](const OID& lockSessionID) {}, Status::OK());
+ getMockCatalog()->expectUnLock([&](const OID& lockSessionID, StringData name) {}, Status::OK());
auto status = DistLockManager::get(operationContext())
->lock(operationContext(), "LockName", "Lock reason", Milliseconds(0))
@@ -1634,7 +1575,7 @@ TEST_F(DistLockManagerReplSetTest, LockAcquisitionRetriesOnInterruptionNeverSucc
[&](StringData, const OID&, StringData, StringData, Date_t, StringData) {},
{ErrorCodes::Interrupted, "operation interrupted"});
- getMockCatalog()->expectUnLock([&](const OID& lockSessionID) {}, Status::OK());
+ getMockCatalog()->expectUnLock([&](const OID& lockSessionID, StringData name) {}, Status::OK());
auto status = DistLockManager::get(operationContext())
->lock(operationContext(), "bar", "foo", Milliseconds(0))
@@ -1780,7 +1721,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockSuccessAfterRetry) {
getMockCatalog()->expectNoGrabLock();
getMockCatalog()->expectUnLock(
- [&unlockCallCount, &unlockSessionIDPassed](const OID& lockSessionID) {
+ [&unlockCallCount, &unlockSessionIDPassed](const OID& lockSessionID, StringData name) {
unlockCallCount++;
unlockSessionIDPassed = lockSessionID;
},
@@ -1868,7 +1809,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
getMockCatalog()->expectUnLock(
[&unlockMutex, &unlockCV, &unlockCallCount, &unlockSessionIDPassed](
- const OID& lockSessionID) {
+ const OID& lockSessionID, StringData name) {
stdx::unique_lock<Latch> lk(unlockMutex);
unlockCallCount++;
unlockSessionIDPassed = lockSessionID;
@@ -2056,7 +1997,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, CanOvertakeIfNoPingDocument) {
currentLockDoc); // return arbitrary valid lock document, for testing purposes only.
getMockCatalog()->expectUnLock(
- [](const OID&) {
+ [](const OID&, StringData) {
// Don't care
},
Status::OK());
@@ -2074,40 +2015,27 @@ TEST_F(DistLockManagerReplSetTest, TryLockWithLocalWriteConcernBusy) {
Date_t now(Date_t::now());
std::string whyMsg("because");
- LocksType retLockDoc;
- retLockDoc.setName(lockName);
- retLockDoc.setState(LocksType::LOCKED);
- retLockDoc.setProcess(getProcessID());
- retLockDoc.setWho("me");
- retLockDoc.setWhy(whyMsg);
- // Will be different from the actual lock session id. For testing only.
- retLockDoc.setLockID(OID::gen());
-
- OID lockSessionIDPassed = OID::gen();
-
getMockCatalog()->expectGrabLock(
- [this, &lockName, &now, &whyMsg, &lockSessionIDPassed](StringData lockID,
- const OID& lockSessionID,
- StringData who,
- StringData processId,
- Date_t time,
- StringData why) {
+ [this, &lockName, &now, &whyMsg](StringData lockID,
+ const OID& lockSessionID,
+ StringData who,
+ StringData processId,
+ Date_t time,
+ StringData why) {
ASSERT_EQUALS(lockName, lockID);
ASSERT_TRUE(lockSessionID.isSet());
ASSERT_EQUALS(getProcessID(), processId);
ASSERT_GREATER_THAN_OR_EQUALS(time, now);
ASSERT_EQUALS(whyMsg, why);
- ASSERT_EQUALS(lockSessionIDPassed, lockSessionID);
getMockCatalog()->expectNoGrabLock(); // Call only once.
},
{ErrorCodes::LockStateChangeFailed, "Unable to take lock"});
- auto lockStatus = DistLockManager::get(operationContext())
- ->tryLockWithLocalWriteConcern(
- operationContext(), lockName, whyMsg, lockSessionIDPassed);
- ASSERT_EQ(ErrorCodes::LockBusy, lockStatus.getStatus());
+ ASSERT_EQ(ErrorCodes::LockBusy,
+ DistLockManager::get(operationContext())
+ ->tryLockDirectWithLocalWriteConcern(operationContext(), lockName, whyMsg));
}
-} // unnamed namespace
+} // namespace
} // namespace mongo
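
The dominant change in this test file is the widened unlock callback: the mock catalog's expectUnLock hook now receives the lock name alongside the session OID, so tests such as MultipleQueuedUnlock can key their bookkeeping on the stable string name rather than on per-acquisition OIDs. A minimal, self-contained sketch of that shape (simplified standard-library stand-ins, not the real mongo types):

#include <functional>
#include <iostream>
#include <map>
#include <string>

// Stand-in for the mock's new unlock hook: the callback receives the
// lock *name* as well as the session id.
struct MockDistLockCatalog {
    using UnlockFunc =
        std::function<void(const std::string& sessionId, const std::string& name)>;
    void expectUnLock(UnlockFunc func) {
        _onUnlock = std::move(func);
    }
    void unlock(const std::string& sessionId, const std::string& name) {
        if (_onUnlock)
            _onUnlock(sessionId, name);
    }
private:
    UnlockFunc _onUnlock;
};

int main() {
    MockDistLockCatalog catalog;
    std::map<std::string, int> unlockNameMap;  // name -> count
    catalog.expectUnLock([&](const std::string&, const std::string& name) {
        ++unlockNameMap[name];  // bookkeeping keyed by name, as in MultipleQueuedUnlock
    });
    catalog.unlock("session-1", "test.coll");
    catalog.unlock("session-2", "test.coll");
    std::cout << unlockNameMap["test.coll"] << std::endl;  // prints 2
    return 0;
}
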
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 5bcdcaff9b9..20050bddd72 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -82,7 +82,7 @@ void mergeChunks(OperationContext* opCtx,
<< redact(minKey) << " to " << redact(maxKey);
auto scopedDistLock = uassertStatusOKWithContext(
DistLockManager::get(opCtx)->lock(
- opCtx, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout),
+ opCtx, nss.ns(), whyMessage, DistLockManager::kDefaultLockTimeout),
str::stream() << "could not acquire collection lock for " << nss.ns()
<< " to merge chunks in [" << redact(minKey) << ", " << redact(maxKey)
<< ")");
diff --git a/src/mongo/db/s/shard_collection_legacy.h b/src/mongo/db/s/shard_collection_legacy.h
index e9ab1034cbc..2cf109feabf 100644
--- a/src/mongo/db/s/shard_collection_legacy.h
+++ b/src/mongo/db/s/shard_collection_legacy.h
@@ -39,4 +39,4 @@ CreateCollectionResponse shardCollectionLegacy(OperationContext* opCtx,
const BSONObj& cmdObj,
bool mustTakeDistLock);
-} // namespace mongo \ No newline at end of file
+} // namespace mongo
diff --git a/src/mongo/db/s/sharding_mongod_test_fixture.cpp b/src/mongo/db/s/sharding_mongod_test_fixture.cpp
index b31b0e07d86..7de0b0b5825 100644
--- a/src/mongo/db/s/sharding_mongod_test_fixture.cpp
+++ b/src/mongo/db/s/sharding_mongod_test_fixture.cpp
@@ -220,28 +220,24 @@ std::unique_ptr<ShardRegistry> ShardingMongodTestFixture::makeShardRegistry(
std::unique_ptr<DistLockManager> ShardingMongodTestFixture::makeDistLockManager() {
class DistLockManagerNoop : public DistLockManager {
public:
+ DistLockManagerNoop() : DistLockManager(OID::gen()) {}
void startUp() override {}
void shutDown(OperationContext* opCtx) {}
std::string getProcessID() override {
return "DistLockManagerNoop";
}
- StatusWith<DistLockHandle> lockWithSessionID(OperationContext* opCtx,
- StringData name,
- StringData whyMessage,
- const OID& lockSessionID,
- Milliseconds waitFor) override {
- return DistLockHandle::gen();
+ Status lockDirect(OperationContext* opCtx,
+ StringData name,
+ StringData whyMessage,
+ Milliseconds waitFor) override {
+ return Status::OK();
}
- StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* opCtx,
- StringData name,
- StringData whyMessage,
- const OID& lockSessionID) override {
- return DistLockHandle::gen();
+ Status tryLockDirectWithLocalWriteConcern(OperationContext* opCtx,
+ StringData name,
+ StringData whyMessage) override {
+ return Status::OK();
}
- void unlock(Interruptible* intr, const DistLockHandle& lockHandle) override {}
- void unlock(Interruptible* intr,
- const DistLockHandle& lockHandle,
- StringData name) override {}
+ void unlock(Interruptible* intr, StringData name) override {}
void unlockAll(OperationContext* opCtx) override {}
};
return std::make_unique<DistLockManagerNoop>();
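
As the noop fixture above shows, the refactored DistLockManager drops the DistLockHandle plumbing entirely: locking returns a plain Status, unlocking takes only the lock name, and the session OID is fixed at construction (OID::gen() in the noop). A compile-only sketch of that slimmed interface, using standard-library stand-ins for Status, StringData and friends:

#include <string>
#include <utility>

// Simplified stand-ins; the real types live in the mongo codebase.
struct Status { bool ok = true; };
struct OperationContext;
struct Interruptible;

class DistLockManager {
public:
    // One session id per manager instance, supplied by the subclass.
    explicit DistLockManager(std::string sessionId) : _sessionId(std::move(sessionId)) {}
    virtual ~DistLockManager() = default;

    // Acquire by name. Success is a plain Status; there is no handle to keep.
    virtual Status lockDirect(OperationContext* opCtx,
                              const std::string& name,
                              const std::string& whyMessage,
                              long long waitForMillis) = 0;

    virtual Status tryLockDirectWithLocalWriteConcern(OperationContext* opCtx,
                                                      const std::string& name,
                                                      const std::string& whyMessage) = 0;

    // Release by name; callers no longer thread a DistLockHandle around.
    virtual void unlock(Interruptible* intr, const std::string& name) = 0;

protected:
    std::string _sessionId;
};

The practical consequence for fixtures like the noop above is that there is nothing per-acquisition to fabricate: OK statuses and empty unlock bodies suffice.
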
diff --git a/src/mongo/db/s/shardsvr_shard_collection.cpp b/src/mongo/db/s/shardsvr_shard_collection_command.cpp
index b6c6e659135..b6c6e659135 100644
--- a/src/mongo/db/s/shardsvr_shard_collection.cpp
+++ b/src/mongo/db/s/shardsvr_shard_collection_command.cpp
diff --git a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
index 2c26ba02add..00f080c75fa 100644
--- a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
+++ b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
@@ -53,86 +53,6 @@
namespace mongo {
namespace {
-void createCollection(OperationContext* opCtx,
- const NamespaceString& nss,
- const ShardCollection& shardCollRequest,
- const BSONObj& cmdObj,
- BSONObjBuilder& result) {
- ShardsvrCreateCollection shardsvrCollRequest(nss);
- shardsvrCollRequest.setShardKey(shardCollRequest.getKey());
- shardsvrCollRequest.setUnique(shardCollRequest.getUnique());
- shardsvrCollRequest.setNumInitialChunks(shardCollRequest.getNumInitialChunks());
- shardsvrCollRequest.setPresplitHashedZones(shardCollRequest.getPresplitHashedZones());
- shardsvrCollRequest.setCollation(shardCollRequest.getCollation());
- shardsvrCollRequest.setDbName(nss.db());
-
- auto catalogCache = Grid::get(opCtx)->catalogCache();
- const auto dbInfo = uassertStatusOK(catalogCache->getDatabase(opCtx, nss.db()));
-
- ShardId shardId;
- if (nss.db() == NamespaceString::kConfigDb) {
- const auto shardIds = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx);
- uassert(ErrorCodes::IllegalOperation, "there are no shards to target", !shardIds.empty());
- shardId = shardIds[0];
- } else {
- shardId = dbInfo.primaryId();
- }
-
- auto shard = uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
-
- auto cmdResponse = uassertStatusOK(shard->runCommandWithFixedRetryAttempts(
- opCtx,
- ReadPreferenceSetting(ReadPreference::PrimaryOnly),
- nss.db().toString(),
- CommandHelpers::appendMajorityWriteConcern(
- CommandHelpers::appendGenericCommandArgs(cmdObj, shardsvrCollRequest.toBSON({})),
- opCtx->getWriteConcern()),
- Shard::RetryPolicy::kIdempotent));
-
- uassertStatusOK(cmdResponse.commandStatus);
-
- CommandHelpers::filterCommandReplyForPassthrough(cmdResponse.response, &result);
-
- result.append("collectionsharded", nss.toString());
-
- auto createCollResp = CreateCollectionResponse::parse(IDLParserErrorContext("createCollection"),
- cmdResponse.response);
- catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection(
- nss, createCollResp.getCollectionVersion(), dbInfo.primaryId());
-}
-
-void shardCollection(OperationContext* opCtx,
- const NamespaceString& nss,
- const ShardCollection& shardCollRequest,
- const BSONObj& cmdObj,
- BSONObjBuilder& result) {
- ConfigsvrShardCollectionRequest configShardCollRequest;
- configShardCollRequest.set_configsvrShardCollection(nss);
- configShardCollRequest.setKey(shardCollRequest.getKey());
- configShardCollRequest.setUnique(shardCollRequest.getUnique());
- configShardCollRequest.setNumInitialChunks(shardCollRequest.getNumInitialChunks());
- configShardCollRequest.setPresplitHashedZones(shardCollRequest.getPresplitHashedZones());
- configShardCollRequest.setCollation(shardCollRequest.getCollation());
-
- // Invalidate the routing table cache entry for this collection so that we reload the
- // collection the next time it's accessed, even if we receive a failure, e.g. NetworkError.
- ON_BLOCK_EXIT([opCtx, nss] {
- Grid::get(opCtx)->catalogCache()->invalidateCollectionEntry_LINEARIZABLE(nss);
- });
-
- auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- auto cmdResponse = uassertStatusOK(configShard->runCommandWithFixedRetryAttempts(
- opCtx,
- ReadPreferenceSetting(ReadPreference::PrimaryOnly),
- "admin",
- CommandHelpers::appendMajorityWriteConcern(
- CommandHelpers::appendGenericCommandArgs(cmdObj, configShardCollRequest.toBSON()),
- opCtx->getWriteConcern()),
- Shard::RetryPolicy::kIdempotent));
-
- CommandHelpers::filterCommandReplyForPassthrough(cmdResponse.response, &result);
-}
-
class ShardCollectionCmd : public BasicCommand {
public:
ShardCollectionCmd() : BasicCommand("shardCollection", "shardcollection") {}
@@ -179,13 +99,48 @@ public:
auto shardCollRequest =
ShardCollection::parse(IDLParserErrorContext("ShardCollection"), cmdObj);
- if (feature_flags::gShardingFullDDLSupportDistLocksOnStepDown.isEnabled(
- serverGlobalParams.featureCompatibility)) {
- createCollection(opCtx, nss, shardCollRequest, cmdObj, result);
+ ShardsvrCreateCollection shardsvrCollRequest(nss);
+ shardsvrCollRequest.setShardKey(shardCollRequest.getKey());
+ shardsvrCollRequest.setUnique(shardCollRequest.getUnique());
+ shardsvrCollRequest.setNumInitialChunks(shardCollRequest.getNumInitialChunks());
+ shardsvrCollRequest.setPresplitHashedZones(shardCollRequest.getPresplitHashedZones());
+ shardsvrCollRequest.setCollation(shardCollRequest.getCollation());
+ shardsvrCollRequest.setDbName(nss.db());
+
+ auto catalogCache = Grid::get(opCtx)->catalogCache();
+ const auto dbInfo = uassertStatusOK(catalogCache->getDatabase(opCtx, nss.db()));
+
+ ShardId shardId;
+ if (nss.db() == NamespaceString::kConfigDb) {
+ const auto shardIds = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx);
+ uassert(
+ ErrorCodes::IllegalOperation, "there are no shards to target", !shardIds.empty());
+ shardId = shardIds[0];
} else {
- shardCollection(opCtx, nss, shardCollRequest, cmdObj, result);
+ shardId = dbInfo.primaryId();
}
+ auto shard = uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
+
+ auto cmdResponse = uassertStatusOK(shard->runCommandWithFixedRetryAttempts(
+ opCtx,
+ ReadPreferenceSetting(ReadPreference::PrimaryOnly),
+ nss.db().toString(),
+ CommandHelpers::appendMajorityWriteConcern(
+ CommandHelpers::appendGenericCommandArgs(cmdObj, shardsvrCollRequest.toBSON({})),
+ opCtx->getWriteConcern()),
+ Shard::RetryPolicy::kIdempotent));
+ uassertStatusOK(cmdResponse.commandStatus);
+
+ CommandHelpers::filterCommandReplyForPassthrough(cmdResponse.response, &result);
+ result.append("collectionsharded", nss.toString());
+
+ auto createCollResp = CreateCollectionResponse::parse(
+ IDLParserErrorContext("createCollection"), cmdResponse.response);
+
+ catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection(
+ nss, createCollResp.getCollectionVersion(), dbInfo.primaryId());
+
return true;
}
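
With the feature flag gone (see the .idl hunk below), mongos now unconditionally translates shardCollection into a _shardsvrCreateCollection request and targets the shard itself. The targeting rule inlined above is small enough to restate as a standalone sketch (simplified names, not the real Grid/ShardRegistry API):

#include <stdexcept>
#include <string>
#include <vector>

// Sketch of the shard-selection logic: collections in the config
// database go to the first listed shard; anything else goes to the
// database's primary shard.
std::string chooseTargetShard(const std::string& dbName,
                              const std::string& dbPrimaryShard,
                              const std::vector<std::string>& allShardIds) {
    if (dbName == "config") {
        if (allShardIds.empty())
            throw std::runtime_error("there are no shards to target");
        return allShardIds.front();
    }
    return dbPrimaryShard;
}

After the shard replies, the command appends "collectionsharded" for drivers and invalidates the cached routing entry using the collection version returned by the shard, replacing the old path's blanket ON_BLOCK_EXIT invalidation.
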
diff --git a/src/mongo/s/sharded_collections_ddl_parameters.idl b/src/mongo/s/sharded_collections_ddl_parameters.idl
index 18b5b8251cb..cf35479884b 100644
--- a/src/mongo/s/sharded_collections_ddl_parameters.idl
+++ b/src/mongo/s/sharded_collections_ddl_parameters.idl
@@ -41,13 +41,7 @@ feature_flags:
cpp_varname: gDisableIncompleteShardingDDLSupport
default: false
- shardingFullDDLSupportDistLocksOnStepDown:
- description: "Once implemented allow usage of Distributed locks on shards."
- cpp_varname: gShardingFullDDLSupportDistLocksOnStepDown
- default: false
-
featureFlagShardingFullDDLSupportTimestampedVersion:
description: "Enables the usage of timestamps in Database/Chunk versions."
cpp_varname: gShardingFullDDLSupportTimestampedVersion
default: false
- #version: 4.9