-rw-r--r--  jstests/noPassthrough/readConcern_atClusterTime_noop_write.js | 5
-rw-r--r--  src/mongo/client/remote_command_targeter_factory.h | 6
-rw-r--r--  src/mongo/db/SConscript | 1
-rw-r--r--  src/mongo/db/auth/SConscript | 1
-rw-r--r--  src/mongo/db/key_generator_update_test.cpp | 10
-rw-r--r--  src/mongo/db/keys_collection_cache_test.cpp | 1
-rw-r--r--  src/mongo/db/keys_collection_manager_sharding_test.cpp | 14
-rw-r--r--  src/mongo/db/logical_time_validator_test.cpp | 10
-rw-r--r--  src/mongo/db/repl/SConscript | 1
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp | 3
-rw-r--r--  src/mongo/db/s/README.md | 4
-rw-r--r--  src/mongo/db/s/SConscript | 16
-rw-r--r--  src/mongo/db/s/add_shard_cmd.cpp | 2
-rw-r--r--  src/mongo/db/s/balancer/migration_manager.cpp | 81
-rw-r--r--  src/mongo/db/s/balancer/migration_manager.h | 1
-rw-r--r--  src/mongo/db/s/balancer/migration_manager_test.cpp | 18
-rw-r--r--  src/mongo/db/s/balancer/migration_test_fixture.h | 1
-rw-r--r--  src/mongo/db/s/balancer/scoped_migration_request_test.cpp | 1
-rw-r--r--  src/mongo/db/s/clone_catalog_data_command.cpp | 4
-rw-r--r--  src/mongo/db/s/config/config_server_test_fixture.cpp | 20
-rw-r--r--  src/mongo/db/s/config/config_server_test_fixture.h | 8
-rw-r--r--  src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp | 6
-rw-r--r--  src/mongo/db/s/config/configsvr_create_database_command.cpp | 6
-rw-r--r--  src/mongo/db/s/config/configsvr_drop_collection_command.cpp | 8
-rw-r--r--  src/mongo/db/s/config/configsvr_drop_database_command.cpp | 10
-rw-r--r--  src/mongo/db/s/config/configsvr_enable_sharding_command.cpp | 11
-rw-r--r--  src/mongo/db/s/config/configsvr_move_primary_command.cpp | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp | 18
-rw-r--r--  src/mongo/db/s/config/configsvr_shard_collection_command.cpp | 5
-rw-r--r--  src/mongo/db/s/config/initial_split_policy.h | 53
-rw-r--r--  src/mongo/db/s/config/initial_split_policy_test.cpp | 12
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.h | 8
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp | 34
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp | 2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp | 1
-rw-r--r--  src/mongo/db/s/dist_lock_catalog.cpp (renamed from src/mongo/s/catalog/dist_lock_catalog.cpp) | 2
-rw-r--r--  src/mongo/db/s/dist_lock_catalog.h (renamed from src/mongo/s/catalog/dist_lock_catalog.h) | 0
-rw-r--r--  src/mongo/db/s/dist_lock_catalog_mock.cpp (renamed from src/mongo/s/catalog/dist_lock_catalog_mock.cpp) | 6
-rw-r--r--  src/mongo/db/s/dist_lock_catalog_mock.h (renamed from src/mongo/s/catalog/dist_lock_catalog_mock.h) | 4
-rw-r--r--  src/mongo/db/s/dist_lock_catalog_replset.cpp (renamed from src/mongo/s/catalog/dist_lock_catalog_impl.cpp) | 2
-rw-r--r--  src/mongo/db/s/dist_lock_catalog_replset.h (renamed from src/mongo/s/catalog/dist_lock_catalog_impl.h) | 2
-rw-r--r--  src/mongo/db/s/dist_lock_catalog_replset_test.cpp (renamed from src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp) | 470
-rw-r--r--  src/mongo/db/s/dist_lock_manager.cpp (renamed from src/mongo/s/catalog/dist_lock_manager.cpp) | 33
-rw-r--r--  src/mongo/db/s/dist_lock_manager.h (renamed from src/mongo/s/catalog/dist_lock_manager.h) | 13
-rw-r--r--  src/mongo/db/s/dist_lock_manager_mock.cpp (renamed from src/mongo/s/catalog/dist_lock_manager_mock.cpp) | 13
-rw-r--r--  src/mongo/db/s/dist_lock_manager_mock.h (renamed from src/mongo/s/catalog/dist_lock_manager_mock.h) | 17
-rw-r--r--  src/mongo/db/s/dist_lock_manager_replset.cpp (renamed from src/mongo/s/catalog/replset_dist_lock_manager.cpp) | 35
-rw-r--r--  src/mongo/db/s/dist_lock_manager_replset.h (renamed from src/mongo/s/catalog/replset_dist_lock_manager.h) | 44
-rw-r--r--  src/mongo/db/s/dist_lock_manager_replset_test.cpp (renamed from src/mongo/s/catalog/replset_dist_lock_manager_test.cpp) | 267
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp | 3
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp | 5
-rw-r--r--  src/mongo/db/s/migration_util_test.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp | 8
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_service_test.cpp | 12
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp | 1
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding_destined_recipient_test.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding_util.cpp | 1
-rw-r--r--  src/mongo/db/s/session_catalog_migration_destination_test.cpp | 5
-rw-r--r--  src/mongo/db/s/shard_collection_legacy.cpp | 7
-rw-r--r--  src/mongo/db/s/shard_server_test_fixture.cpp | 18
-rw-r--r--  src/mongo/db/s/shard_server_test_fixture.h | 17
-rw-r--r--  src/mongo/db/s/sharding_initialization_mongod.cpp | 131
-rw-r--r--  src/mongo/db/s/sharding_initialization_mongod.h | 12
-rw-r--r--  src/mongo/db/s/sharding_initialization_mongod_test.cpp | 18
-rw-r--r--  src/mongo/db/s/sharding_initialization_op_observer_test.cpp | 1
-rw-r--r--  src/mongo/db/s/sharding_mongod_test_fixture.cpp | 34
-rw-r--r--  src/mongo/db/s/sharding_mongod_test_fixture.h | 21
-rw-r--r--  src/mongo/db/s/shardsvr_drop_collection_command.cpp | 2
-rw-r--r--  src/mongo/db/s/shardsvr_drop_database_command.cpp | 1
-rw-r--r--  src/mongo/db/s/shardsvr_refine_collection_shard_key_command.cpp | 1
-rw-r--r--  src/mongo/db/s/split_chunk.cpp | 3
-rw-r--r--  src/mongo/db/s/split_chunk_test.cpp | 2
-rw-r--r--  src/mongo/db/s/transaction_coordinator_futures_util_test.cpp | 5
-rw-r--r--  src/mongo/db/s/transaction_coordinator_test_fixture.cpp | 7
-rw-r--r--  src/mongo/db/s/transaction_coordinator_test_fixture.h | 3
-rw-r--r--  src/mongo/db/s/vector_clock_config_server_test.cpp | 8
-rw-r--r--  src/mongo/s/SConscript | 18
-rw-r--r--  src/mongo/s/append_raw_responses_test.cpp | 10
-rw-r--r--  src/mongo/s/catalog/SConscript | 100
-rw-r--r--  src/mongo/s/catalog/dist_lock_ping_info.cpp | 45
-rw-r--r--  src/mongo/s/catalog/dist_lock_ping_info.h | 67
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client.h | 26
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp | 31
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.h | 35
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.cpp | 24
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.h | 10
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_test.cpp | 17
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp | 1
-rw-r--r--  src/mongo/s/mongos_main.cpp | 5
-rw-r--r--  src/mongo/s/sharding_initialization.cpp | 21
-rw-r--r--  src/mongo/s/sharding_router_test_fixture.cpp | 19
-rw-r--r--  src/mongo/s/sharding_router_test_fixture.h | 4
-rw-r--r--  src/mongo/s/sharding_test_fixture_common.cpp | 4
-rw-r--r--  src/mongo/s/sharding_test_fixture_common.h | 36
96 files changed, 831 insertions, 1286 deletions
diff --git a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
index 417dca14a61..968b43b773d 100644
--- a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
+++ b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
@@ -39,10 +39,7 @@ const shardingUptimeFailpointName = jsTestOptions().mongosBinVersion == 'last-lt
? "failpoint.disableShardingUptimeReporterPeriodicThread"
: "failpoint.disableShardingUptimeReporting";
const mongosFailpointParams = {
- setParameter: {
- "failpoint.disableReplSetDistLockManager": "{mode: 'alwaysOn'}",
- [shardingUptimeFailpointName]: "{mode: 'alwaysOn'}"
- }
+ setParameter: {[shardingUptimeFailpointName]: "{mode: 'alwaysOn'}"}
};
const st = new ShardingTest({
diff --git a/src/mongo/client/remote_command_targeter_factory.h b/src/mongo/client/remote_command_targeter_factory.h
index f8cc15ec02e..4518b9abec4 100644
--- a/src/mongo/client/remote_command_targeter_factory.h
+++ b/src/mongo/client/remote_command_targeter_factory.h
@@ -29,14 +29,10 @@
#pragma once
-#include <memory>
-
+#include "mongo/client/remote_command_targeter.h"
namespace mongo {
-class ConnectionString;
-class RemoteCommandTargeter;
-
/**
* Constructs RemoteCommandTargeters based on the specific type of the target (standalone,
* replica set, etc).
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index e1b65d58e63..6b70018a274 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -2401,7 +2401,6 @@ envWithAsio.CppUnitTest(
'logical_time_validator_test.cpp',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/s/catalog/dist_lock_manager_mock',
'auth/authmocks',
's/config_server_test_fixture',
'vector_clock',
diff --git a/src/mongo/db/auth/SConscript b/src/mongo/db/auth/SConscript
index 01817351d97..39ed7017f5b 100644
--- a/src/mongo/db/auth/SConscript
+++ b/src/mongo/db/auth/SConscript
@@ -334,7 +334,6 @@ env.Library(
'user_cache_invalidator_job_parameters.idl',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/s/catalog/dist_lock_manager',
'$BUILD_DIR/mongo/s/coreshard',
'authservercommon',
],
diff --git a/src/mongo/db/key_generator_update_test.cpp b/src/mongo/db/key_generator_update_test.cpp
index d0ed2cace3f..9590d3fda9a 100644
--- a/src/mongo/db/key_generator_update_test.cpp
+++ b/src/mongo/db/key_generator_update_test.cpp
@@ -39,7 +39,6 @@
#include "mongo/db/keys_collection_document.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
#include "mongo/db/vector_clock_mutable.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/clock_source_mock.h"
#include "mongo/util/fail_point.h"
@@ -62,15 +61,6 @@ protected:
return _catalogClient.get();
}
- /**
- * Intentionally create a DistLockManagerMock, even though this is a config serfver test in
- * order to avoid the lock pinger thread from executing and accessing uninitialized state.
- */
- std::unique_ptr<DistLockManager> makeDistLockManager(
- std::unique_ptr<DistLockCatalog> distLockCatalog) override {
- return std::make_unique<DistLockManagerMock>(std::move(distLockCatalog));
- }
-
private:
std::unique_ptr<KeysCollectionClient> _catalogClient;
};
diff --git a/src/mongo/db/keys_collection_cache_test.cpp b/src/mongo/db/keys_collection_cache_test.cpp
index add4a06e40b..51ada59e365 100644
--- a/src/mongo/db/keys_collection_cache_test.cpp
+++ b/src/mongo/db/keys_collection_cache_test.cpp
@@ -35,7 +35,6 @@
#include "mongo/db/keys_collection_document.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/grid.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/clock_source_mock.h"
diff --git a/src/mongo/db/keys_collection_manager_sharding_test.cpp b/src/mongo/db/keys_collection_manager_sharding_test.cpp
index ced4613ef26..fb2cf3d98e0 100644
--- a/src/mongo/db/keys_collection_manager_sharding_test.cpp
+++ b/src/mongo/db/keys_collection_manager_sharding_test.cpp
@@ -29,17 +29,12 @@
#include "mongo/platform/basic.h"
-#include <memory>
-#include <set>
-#include <string>
-
#include "mongo/db/jsobj.h"
#include "mongo/db/keys_collection_client_sharded.h"
#include "mongo/db/keys_collection_document.h"
#include "mongo/db/keys_collection_manager.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
#include "mongo/db/vector_clock_mutable.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/clock_source_mock.h"
#include "mongo/util/fail_point.h"
@@ -75,15 +70,6 @@ protected:
ConfigServerTestFixture::tearDown();
}
- /**
- * Intentionally create a DistLockManagerMock, even though this is a config serfver test in
- * order to avoid the lock pinger thread from executing and accessing uninitialized state.
- */
- std::unique_ptr<DistLockManager> makeDistLockManager(
- std::unique_ptr<DistLockCatalog> distLockCatalog) override {
- return std::make_unique<DistLockManagerMock>(std::move(distLockCatalog));
- }
-
private:
std::unique_ptr<KeysCollectionManager> _keyManager;
};
diff --git a/src/mongo/db/logical_time_validator_test.cpp b/src/mongo/db/logical_time_validator_test.cpp
index faddd8aa802..cad15fb328a 100644
--- a/src/mongo/db/logical_time_validator_test.cpp
+++ b/src/mongo/db/logical_time_validator_test.cpp
@@ -41,7 +41,6 @@
#include "mongo/db/signed_logical_time.h"
#include "mongo/db/time_proof_service.h"
#include "mongo/db/vector_clock_mutable.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/clock_source_mock.h"
@@ -78,15 +77,6 @@ protected:
}
/**
- * Intentionally create a DistLockManagerMock, even though this is a config serfver test in
- * order to avoid the lock pinger thread from executing and accessing uninitialized state.
- */
- std::unique_ptr<DistLockManager> makeDistLockManager(
- std::unique_ptr<DistLockCatalog> distLockCatalog) override {
- return std::make_unique<DistLockManagerMock>(std::move(distLockCatalog));
- }
-
- /**
* Forces KeyManager to refresh cache and generate new keys.
*/
void refreshKeyManager() {
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript
index 6459f563fb7..e3e5c2a4e11 100644
--- a/src/mongo/db/repl/SConscript
+++ b/src/mongo/db/repl/SConscript
@@ -1389,7 +1389,6 @@ env.Library(
'$BUILD_DIR/mongo/db/query_exec',
'$BUILD_DIR/mongo/db/repl/oplog_buffer_proxy',
'$BUILD_DIR/mongo/db/repl/replication_metrics',
- '$BUILD_DIR/mongo/db/s/sharding_catalog_manager',
'$BUILD_DIR/mongo/db/s/sharding_runtime_d',
'$BUILD_DIR/mongo/db/service_context',
'$BUILD_DIR/mongo/db/stats/counters',
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 1c883e80166..e7d8355a7db 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -78,6 +78,7 @@
#include "mongo/db/s/balancer/balancer.h"
#include "mongo/db/s/chunk_splitter.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/db/s/migration_util.h"
#include "mongo/db/s/periodic_balancer_config_refresher.h"
#include "mongo/db/s/periodic_sharded_index_consistency_checker.h"
@@ -862,7 +863,7 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
}
// Free any leftover locks from previous instantiations.
- auto distLockManager = Grid::get(opCtx)->catalogClient()->getDistLockManager();
+ auto distLockManager = DistLockManager::get(opCtx);
distLockManager->unlockAll(opCtx, distLockManager->getProcessID());
if (auto validator = LogicalTimeValidator::get(_service)) {
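[Editor's note on the hunk above: this change swaps the long access chain through the Grid's catalog client for a direct DistLockManager::get(opCtx) accessor, decoupling dist-lock access from ShardingCatalogClient. A minimal sketch of the accessor pattern, assuming a process-wide owner behind the static get(); the real wiring lives in the relocated dist_lock_manager.cpp and is not shown in this diff.]

    #include <memory>
    #include <utility>

    struct OperationContext;  // stand-in for mongo::OperationContext

    class DistLockManager {
    public:
        virtual ~DistLockManager() = default;

        // Installed once during startup; 'create' is a hypothetical helper name.
        static void create(std::unique_ptr<DistLockManager> manager) {
            owner() = std::move(manager);
        }

        // Call sites now write DistLockManager::get(opCtx) instead of
        // Grid::get(opCtx)->catalogClient()->getDistLockManager().
        static DistLockManager* get(OperationContext*) {
            return owner().get();
        }

    private:
        static std::unique_ptr<DistLockManager>& owner() {
            static std::unique_ptr<DistLockManager> instance;
            return instance;
        }
    };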
diff --git a/src/mongo/db/s/README.md b/src/mongo/db/s/README.md
index 0f906fee8d7..1c8712233c8 100644
--- a/src/mongo/db/s/README.md
+++ b/src/mongo/db/s/README.md
@@ -521,8 +521,8 @@ collections, such as config.shards, config.chunks, and config.tags. For example,
mergeChunks, and moveChunk all take the chunk ResourceMutex.
#### Code references
-* [**DistLockManager class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/s/catalog/dist_lock_manager.h)
-* [**DistLockCatalog class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/s/catalog/dist_lock_catalog.h)
+* [**DistLockManager class**](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/dist_lock_manager.h)
+* [**DistLockCatalog class**](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/dist_lock_catalog.h)
* [**NamespaceSerializer class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/config/namespace_serializer.h)
* The interface for acquiring NamespaceSerializer locks
[**via the ShardingCatalogManager**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/config/sharding_catalog_manager.h#L276)
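[Editor's note: the DistLockManager documented above hands out RAII guards. lock() returns a ScopedDistLock that releases the distributed lock on destruction, which is why the command hunks later in this diff can simply let dbDistLock and collDistLock fall out of scope. A self-contained sketch of that shape, with acquisition and release reduced to prints and the namespaces illustrative.]

    #include <iostream>
    #include <string>
    #include <utility>

    // Simplified stand-in for DistLockManager::ScopedDistLock: the lock is
    // held for the guard's lifetime and released when it is destroyed.
    class ScopedDistLock {
    public:
        explicit ScopedDistLock(std::string name) : _name(std::move(name)) {
            std::cout << "acquired distlock: " << _name << "\n";
        }
        ~ScopedDistLock() {
            std::cout << "released distlock: " << _name << "\n";
        }

    private:
        std::string _name;
    };

    int main() {
        // Mirrors the database-then-collection ordering used by the configsvr
        // commands in this diff (namespace names are illustrative).
        ScopedDistLock dbLock("test");
        ScopedDistLock collLock("test.foo");
        // Locks release in reverse order as the scope unwinds.
    }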
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 83d1e7488ee..2cbb4cc140e 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -263,6 +263,10 @@ env.Library(
'config/sharding_catalog_manager_shard_operations.cpp',
'config/sharding_catalog_manager_zone_operations.cpp',
'config/sharding_catalog_manager.cpp',
+ 'dist_lock_catalog_replset.cpp',
+ 'dist_lock_catalog.cpp',
+ 'dist_lock_manager_replset.cpp',
+ 'dist_lock_manager.cpp',
],
LIBDEPS=[
'$BUILD_DIR/mongo/db/catalog_raii',
@@ -289,7 +293,6 @@ env.Library(
'$BUILD_DIR/mongo/db/repl/replica_set_aware_service',
'$BUILD_DIR/mongo/db/snapshot_window_options',
'$BUILD_DIR/mongo/db/vector_clock_mongod',
- '$BUILD_DIR/mongo/s/catalog/dist_lock_manager',
'$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_impl',
'$BUILD_DIR/mongo/util/log_and_backoff',
'$BUILD_DIR/mongo/util/options_parser/options_parser',
@@ -366,7 +369,6 @@ env.Library(
'$BUILD_DIR/mongo/s/sharding_initialization',
'$BUILD_DIR/mongo/s/sharding_router_api',
'resharding_util',
- 'sharding_catalog_manager',
'sharding_runtime_d',
],
)
@@ -400,6 +402,8 @@ env.Library(
env.Library(
target='sharding_mongod_test_fixture',
source=[
+ 'dist_lock_catalog_mock.cpp',
+ 'dist_lock_manager_mock.cpp',
'sharding_mongod_test_fixture.cpp',
],
LIBDEPS=[
@@ -417,8 +421,6 @@ env.Library(
'shard_server_test_fixture.cpp',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/s/catalog/dist_lock_catalog_mock',
- '$BUILD_DIR/mongo/s/catalog/dist_lock_manager_mock',
'sharding_mongod_test_fixture',
],
)
@@ -429,7 +431,6 @@ env.Library(
'config/config_server_test_fixture.cpp',
],
LIBDEPS=[
- 'sharding_catalog_manager',
'sharding_mongod_test_fixture',
],
)
@@ -444,6 +445,8 @@ env.CppUnitTest(
'collection_metadata_filtering_test.cpp',
'collection_metadata_test.cpp',
'collection_sharding_runtime_test.cpp',
+ 'dist_lock_catalog_replset_test.cpp',
+ 'dist_lock_manager_replset_test.cpp',
'metadata_manager_test.cpp',
'migration_chunk_cloner_source_legacy_test.cpp',
'migration_destination_manager_test.cpp',
@@ -501,7 +504,6 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/repl/storage_interface_impl',
'$BUILD_DIR/mongo/db/repl/wait_for_majority_service',
'$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
- '$BUILD_DIR/mongo/s/catalog/dist_lock_manager_mock',
'$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_mock',
'$BUILD_DIR/mongo/s/sharding_router_test_fixture',
'resharding_util',
@@ -554,10 +556,8 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/pipeline/document_source_mock',
'$BUILD_DIR/mongo/db/read_write_concern_defaults_mock',
'$BUILD_DIR/mongo/db/repl/replication_info',
- '$BUILD_DIR/mongo/s/catalog/dist_lock_manager_mock',
'$BUILD_DIR/mongo/util/version_impl',
'config_server_test_fixture',
'resharding_util',
- 'sharding_catalog_manager',
],
)
diff --git a/src/mongo/db/s/add_shard_cmd.cpp b/src/mongo/db/s/add_shard_cmd.cpp
index ab4c58a0272..38b39b7bd47 100644
--- a/src/mongo/db/s/add_shard_cmd.cpp
+++ b/src/mongo/db/s/add_shard_cmd.cpp
@@ -39,13 +39,13 @@
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/s/add_shard_cmd_gen.h"
#include "mongo/db/s/add_shard_util.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/grid.h"
namespace mongo {
namespace {
+
/**
* Internal sharding command run on mongod to initialize itself as a shard in the cluster.
*/
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index f20edc35e43..49b2dcd90c6 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -33,8 +33,6 @@
#include "mongo/db/s/balancer/migration_manager.h"
-#include <memory>
-
#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/client/remote_command_targeter.h"
@@ -42,6 +40,7 @@
#include "mongo/db/repl/repl_set_config.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/balancer/scoped_migration_request.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/logv2/log.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -53,14 +52,10 @@
#include "mongo/util/scopeguard.h"
namespace mongo {
+namespace {
using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
-using std::shared_ptr;
-using std::vector;
-using str::stream;
-
-namespace {
const char kChunkTooBig[] = "chunkTooBig"; // TODO: delete in 3.8
@@ -86,7 +81,6 @@ Status extractMigrationStatusFromCommandResponse(const BSONObj& commandResponse)
return commandStatus;
}
-
/**
* Returns whether the specified status is an error caused by stepdown of the primary config node
* currently running the balancer.
@@ -109,7 +103,7 @@ MigrationManager::~MigrationManager() {
MigrationStatuses MigrationManager::executeMigrationsForAutoBalance(
OperationContext* opCtx,
- const vector<MigrateInfo>& migrateInfos,
+ const std::vector<MigrateInfo>& migrateInfos,
uint64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle,
bool waitForDelete) {
@@ -117,7 +111,8 @@ MigrationStatuses MigrationManager::executeMigrationsForAutoBalance(
MigrationStatuses migrationStatuses;
ScopedMigrationRequestsMap scopedMigrationRequests;
- vector<std::pair<shared_ptr<Notification<RemoteCommandResponse>>, MigrateInfo>> responses;
+ std::vector<std::pair<std::shared_ptr<Notification<RemoteCommandResponse>>, MigrateInfo>>
+ responses;
for (const auto& migrateInfo : migrateInfos) {
responses.emplace_back(_schedule(opCtx,
@@ -229,8 +224,6 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* opCtx)
_abandonActiveMigrationsAndEnableManager(opCtx);
});
- auto distLockManager = Grid::get(opCtx)->catalogClient()->getDistLockManager();
-
// Load the active migrations from the config.migrations collection.
auto statusWithMigrationsQueryResponse =
Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
@@ -273,11 +266,12 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* opCtx)
it = _migrationRecoveryMap.insert(std::make_pair(migrateType.getNss(), list)).first;
// Reacquire the matching distributed lock for this namespace.
- const std::string whyMessage(stream() << "Migrating chunk(s) in collection "
- << migrateType.getNss().ns());
+ const std::string whyMessage(str::stream() << "Migrating chunk(s) in collection "
+ << migrateType.getNss().ns());
- auto statusWithDistLockHandle = distLockManager->tryLockWithLocalWriteConcern(
- opCtx, migrateType.getNss().ns(), whyMessage, _lockSessionID);
+ auto statusWithDistLockHandle =
+ DistLockManager::get(opCtx)->tryLockWithLocalWriteConcern(
+ opCtx, migrateType.getNss().ns(), whyMessage, _lockSessionID);
if (!statusWithDistLockHandle.isOK()) {
LOGV2(21898,
"Failed to acquire distributed lock for collection {namespace} "
@@ -322,8 +316,8 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
});
// Schedule recovered migrations.
- vector<ScopedMigrationRequest> scopedMigrationRequests;
- vector<shared_ptr<Notification<RemoteCommandResponse>>> responses;
+ std::vector<ScopedMigrationRequest> scopedMigrationRequests;
+ std::vector<std::shared_ptr<Notification<RemoteCommandResponse>>> responses;
for (auto& nssAndMigrateInfos : _migrationRecoveryMap) {
auto& nss = nssAndMigrateInfos.first;
@@ -379,8 +373,7 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
// If no migrations were scheduled for this namespace, free the dist lock
if (!scheduledMigrations) {
- Grid::get(opCtx)->catalogClient()->getDistLockManager()->unlock(
- opCtx, _lockSessionID, nss.ns());
+ DistLockManager::get(opCtx)->unlock(opCtx, _lockSessionID, nss.ns());
}
}
@@ -432,7 +425,7 @@ void MigrationManager::drainActiveMigrations() {
_state = State::kStopped;
}
-shared_ptr<Notification<RemoteCommandResponse>> MigrationManager::_schedule(
+std::shared_ptr<Notification<RemoteCommandResponse>> MigrationManager::_schedule(
OperationContext* opCtx,
const MigrateInfo& migrateInfo,
uint64_t maxChunkSizeBytes,
@@ -524,25 +517,25 @@ void MigrationManager::_schedule(WithLock lock,
auto it = _activeMigrations.find(nss);
if (it == _activeMigrations.end()) {
- const std::string whyMessage(stream() << "Migrating chunk(s) in collection " << nss.ns());
+ const std::string whyMessage(str::stream()
+ << "Migrating chunk(s) in collection " << nss.ns());
// Acquire the NamespaceSerializer lock for this nss (blocking call)
auto scopedCollLock =
ShardingCatalogManager::get(opCtx)->serializeCreateOrDropCollection(opCtx, nss);
// Acquire the collection distributed lock (blocking call)
- auto statusWithDistLockHandle =
- Grid::get(opCtx)->catalogClient()->getDistLockManager()->lockWithSessionID(
- opCtx,
- nss.ns(),
- whyMessage,
- _lockSessionID,
- DistLockManager::kSingleLockAttemptTimeout);
+ auto statusWithDistLockHandle = DistLockManager::get(opCtx)->lockWithSessionID(
+ opCtx,
+ nss.ns(),
+ whyMessage,
+ _lockSessionID,
+ DistLockManager::kSingleLockAttemptTimeout);
if (!statusWithDistLockHandle.isOK()) {
migration.completionNotification->set(statusWithDistLockHandle.getStatus().withContext(
- stream() << "Could not acquire collection lock for " << nss.ns()
- << " to migrate chunks"));
+ str::stream() << "Could not acquire collection lock for " << nss.ns()
+ << " to migrate chunks"));
return;
}
@@ -618,8 +611,7 @@ void MigrationManager::_complete(WithLock lock,
migrations->erase(itMigration);
if (migrations->empty()) {
- Grid::get(opCtx)->catalogClient()->getDistLockManager()->unlock(
- opCtx, _lockSessionID, nss.ns());
+ DistLockManager::get(opCtx)->unlock(opCtx, _lockSessionID, nss.ns());
_activeMigrations.erase(it);
_checkDrained(lock);
}
@@ -651,15 +643,14 @@ void MigrationManager::_abandonActiveMigrationsAndEnableManager(OperationContext
}
invariant(_state == State::kRecovering);
- auto catalogClient = Grid::get(opCtx)->catalogClient();
-
// Unlock all balancer distlocks we aren't using anymore.
- auto distLockManager = catalogClient->getDistLockManager();
+ auto distLockManager = DistLockManager::get(opCtx);
distLockManager->unlockAll(opCtx, distLockManager->getProcessID());
// Clear the config.migrations collection so that those chunks can be scheduled for migration
// again.
- catalogClient
+ Grid::get(opCtx)
+ ->catalogClient()
->removeConfigDocuments(
opCtx, MigrationType::ConfigNS, BSONObj(), ShardingCatalogClient::kLocalWriteConcern)
.transitional_ignore();
@@ -680,8 +671,8 @@ Status MigrationManager::_processRemoteCommandResponse(
_state != State::kEnabled && _state != State::kRecovering)) {
scopedMigrationRequest->keepDocumentOnDestruct();
return {ErrorCodes::BalancerInterrupted,
- stream() << "Migration interrupted because the balancer is stopping."
- << " Command status: " << remoteCommandResponse.status.toString()};
+ str::stream() << "Migration interrupted because the balancer is stopping."
+ << " Command status: " << remoteCommandResponse.status.toString()};
}
if (!remoteCommandResponse.isOK()) {
@@ -693,8 +684,8 @@ Status MigrationManager::_processRemoteCommandResponse(
if (!Shard::shouldErrorBePropagated(commandStatus.code())) {
commandStatus = {ErrorCodes::OperationFailed,
- stream() << "moveChunk command failed on source shard."
- << causedBy(commandStatus)};
+ str::stream() << "moveChunk command failed on source shard."
+ << causedBy(commandStatus)};
}
// Any failure to remove the migration document should be because the config server is
@@ -705,10 +696,10 @@ Status MigrationManager::_processRemoteCommandResponse(
if (!status.isOK()) {
commandStatus = {
ErrorCodes::BalancerInterrupted,
- stream() << "Migration interrupted because the balancer is stopping"
- << " and failed to remove the config.migrations document."
- << " Command status: "
- << (commandStatus.isOK() ? status.toString() : commandStatus.toString())};
+ str::stream() << "Migration interrupted because the balancer is stopping"
+ << " and failed to remove the config.migrations document."
+ << " Command status: "
+ << (commandStatus.isOK() ? status.toString() : commandStatus.toString())};
}
return commandStatus;
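[Editor's note: in the _schedule hunk above, the collection distlock is taken as a single bounded attempt (DistLockManager::kSingleLockAttemptTimeout), and failure is reported through the migration's completion notification rather than by throwing. A reduced sketch of that control flow, with the manager and status types stubbed out and all names illustrative.]

    #include <iostream>
    #include <string>

    struct Status {
        bool ok;
        std::string reason;
    };

    // Stand-in for DistLockManager::lockWithSessionID: a single bounded
    // attempt, unlike lock(), which retries until its timeout expires.
    Status lockWithSessionID(const std::string& ns) {
        bool acquired = true;  // pretend the config server granted the lock
        return acquired ? Status{true, ""}
                        : Status{false,
                                 "Could not acquire collection lock for " + ns +
                                     " to migrate chunks"};
    }

    int main() {
        auto status = lockWithSessionID("test.foo");
        if (!status.ok) {
            // MigrationManager::_schedule surfaces this through the
            // migration's completion notification rather than throwing.
            std::cout << status.reason << "\n";
            return 1;
        }
    }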
diff --git a/src/mongo/db/s/balancer/migration_manager.h b/src/mongo/db/s/balancer/migration_manager.h
index 1f0f3511198..fd5db687100 100644
--- a/src/mongo/db/s/balancer/migration_manager.h
+++ b/src/mongo/db/s/balancer/migration_manager.h
@@ -40,7 +40,6 @@
#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/executor/task_executor.h"
#include "mongo/platform/mutex.h"
-#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/request_types/migration_secondary_throttle_options.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/unordered_map.h"
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index 4948ac24e48..961036e6f92 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -29,14 +29,12 @@
#include "mongo/platform/basic.h"
-#include <memory>
-
#include "mongo/db/commands.h"
#include "mongo/db/read_write_concern_defaults.h"
#include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h"
#include "mongo/db/s/balancer/migration_manager.h"
#include "mongo/db/s/balancer/migration_test_fixture.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/s/request_types/move_chunk_request.h"
namespace mongo {
@@ -521,7 +519,7 @@ TEST_F(MigrationManagerTest, MigrationRecovery) {
setUpMigration(chunk2, kShardId3.toString());
// Mimic all config distlocks being released on config server stepup to primary.
- auto distLockManager = catalogClient()->getDistLockManager();
+ auto distLockManager = DistLockManager::get(operationContext());
distLockManager->unlockAll(operationContext(), distLockManager->getProcessID());
_migrationManager->startRecoveryAndAcquireDistLocks(operationContext());
@@ -588,12 +586,12 @@ TEST_F(MigrationManagerTest, FailMigrationRecovery) {
// Take the distributed lock for the collection, which should be released during recovery when
// it fails. Any dist lock held by the config server will be released via proccessId, so the
// session ID used here doesn't matter.
- ASSERT_OK(catalogClient()->getDistLockManager()->lockWithSessionID(
- operationContext(),
- collName.ns(),
- "MigrationManagerTest",
- OID::gen(),
- DistLockManager::kSingleLockAttemptTimeout));
+ ASSERT_OK(DistLockManager::get(operationContext())
+ ->lockWithSessionID(operationContext(),
+ collName.ns(),
+ "MigrationManagerTest",
+ OID::gen(),
+ DistLockManager::kSingleLockAttemptTimeout));
_migrationManager->startRecoveryAndAcquireDistLocks(operationContext());
_migrationManager->finishRecovery(operationContext(), 0, kDefaultSecondaryThrottle);
diff --git a/src/mongo/db/s/balancer/migration_test_fixture.h b/src/mongo/db/s/balancer/migration_test_fixture.h
index d2fef72b513..2e6dc250e1e 100644
--- a/src/mongo/db/s/balancer/migration_test_fixture.h
+++ b/src/mongo/db/s/balancer/migration_test_fixture.h
@@ -37,7 +37,6 @@
#include "mongo/db/s/balancer/type_migration.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
#include "mongo/db/write_concern_options.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_locks.h"
diff --git a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
index 75442f5c09a..3731321ad2d 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
@@ -32,7 +32,6 @@
#include "mongo/db/s/balancer/scoped_migration_request.h"
#include "mongo/db/s/balancer/type_migration.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/request_types/migration_secondary_throttle_options.h"
diff --git a/src/mongo/db/s/clone_catalog_data_command.cpp b/src/mongo/db/s/clone_catalog_data_command.cpp
index a10c599d499..a182aaeaa7f 100644
--- a/src/mongo/db/s/clone_catalog_data_command.cpp
+++ b/src/mongo/db/s/clone_catalog_data_command.cpp
@@ -35,8 +35,8 @@
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/cloner.h"
#include "mongo/db/commands.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/dbdirectclient.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/grid.h"
@@ -136,7 +136,7 @@ public:
return true;
}
-} CloneCatalogDataCmd;
+} cloneCatalogDataCmd;
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/s/config/config_server_test_fixture.cpp b/src/mongo/db/s/config/config_server_test_fixture.cpp
index ab331cb2f4c..b8a21753d7e 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.cpp
+++ b/src/mongo/db/s/config/config_server_test_fixture.cpp
@@ -51,13 +51,13 @@
#include "mongo/db/repl/repl_settings.h"
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/dist_lock_catalog_replset.h"
+#include "mongo/db/s/dist_lock_manager_replset.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/rpc/metadata/tracking_metadata.h"
#include "mongo/s/balancer_configuration.h"
-#include "mongo/s/catalog/dist_lock_catalog_impl.h"
-#include "mongo/s/catalog/replset_dist_lock_manager.h"
#include "mongo/s/catalog/sharding_catalog_client_impl.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
@@ -161,25 +161,17 @@ void ConfigServerTestFixture::tearDown() {
ShardingMongodTestFixture::tearDown();
}
-std::unique_ptr<DistLockCatalog> ConfigServerTestFixture::makeDistLockCatalog() {
- return std::make_unique<DistLockCatalogImpl>();
-}
-
-std::unique_ptr<DistLockManager> ConfigServerTestFixture::makeDistLockManager(
- std::unique_ptr<DistLockCatalog> distLockCatalog) {
- invariant(distLockCatalog);
+std::unique_ptr<DistLockManager> ConfigServerTestFixture::makeDistLockManager() {
return std::make_unique<ReplSetDistLockManager>(
getServiceContext(),
"distLockProcessId",
- std::move(distLockCatalog),
+ std::make_unique<DistLockCatalogImpl>(),
ReplSetDistLockManager::kDistLockPingInterval,
ReplSetDistLockManager::kDistLockExpirationTime);
}
-std::unique_ptr<ShardingCatalogClient> ConfigServerTestFixture::makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) {
- invariant(distLockManager);
- return std::make_unique<ShardingCatalogClientImpl>(std::move(distLockManager));
+std::unique_ptr<ShardingCatalogClient> ConfigServerTestFixture::makeShardingCatalogClient() {
+ return std::make_unique<ShardingCatalogClientImpl>();
}
std::unique_ptr<BalancerConfiguration> ConfigServerTestFixture::makeBalancerConfiguration() {
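[Editor's note: the fixture factory loses its DistLockCatalog parameter; the catalog is now constructed inside makeDistLockManager() instead of being threaded through the base class. A minimal compile-only sketch of the reshaped virtual, with every class reduced to a stub (an approximation of the fixture hierarchy, not the real one).]

    #include <memory>
    #include <utility>

    class DistLockCatalog {
    public:
        virtual ~DistLockCatalog() = default;
    };
    class DistLockCatalogImpl : public DistLockCatalog {};

    class DistLockManager {
    public:
        virtual ~DistLockManager() = default;
    };

    class ReplSetDistLockManager : public DistLockManager {
    public:
        explicit ReplSetDistLockManager(std::unique_ptr<DistLockCatalog> catalog)
            : _catalog(std::move(catalog)) {}

    private:
        std::unique_ptr<DistLockCatalog> _catalog;
    };

    class ShardingMongodTestFixture {
    protected:
        // Was: makeDistLockManager(std::unique_ptr<DistLockCatalog>).
        virtual std::unique_ptr<DistLockManager> makeDistLockManager() = 0;
    };

    class ConfigServerTestFixture : public ShardingMongodTestFixture {
    protected:
        std::unique_ptr<DistLockManager> makeDistLockManager() override {
            // The catalog is created here rather than passed in, as in the
            // hunk above.
            return std::make_unique<ReplSetDistLockManager>(
                std::make_unique<DistLockCatalogImpl>());
        }
    };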
diff --git a/src/mongo/db/s/config/config_server_test_fixture.h b/src/mongo/db/s/config/config_server_test_fixture.h
index b5264d9131c..3e82cd8fcc3 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.h
+++ b/src/mongo/db/s/config/config_server_test_fixture.h
@@ -167,13 +167,9 @@ protected:
*/
void setUpAndInitializeConfigDb();
- std::unique_ptr<DistLockCatalog> makeDistLockCatalog() override;
+ std::unique_ptr<DistLockManager> makeDistLockManager() override;
- std::unique_ptr<DistLockManager> makeDistLockManager(
- std::unique_ptr<DistLockCatalog> distLockCatalog) override;
-
- std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) override;
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() override;
std::unique_ptr<ClusterCursorManager> makeClusterCursorManager() override;
diff --git a/src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp b/src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp
index ed955b9f209..c25d4dc9bdf 100644
--- a/src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp
+++ b/src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp
@@ -36,7 +36,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
-#include "mongo/s/catalog/dist_lock_manager.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/clear_jumbo_flag_gen.h"
@@ -70,10 +70,10 @@ public:
// Acquire distlocks on the namespace's database and collection.
DistLockManager::ScopedDistLock dbDistLock(
- uassertStatusOK(catalogClient->getDistLockManager()->lock(
+ uassertStatusOK(DistLockManager::get(opCtx)->lock(
opCtx, nss.db(), "clearJumboFlag", DistLockManager::kDefaultLockTimeout)));
DistLockManager::ScopedDistLock collDistLock(
- uassertStatusOK(catalogClient->getDistLockManager()->lock(
+ uassertStatusOK(DistLockManager::get(opCtx)->lock(
opCtx, nss.ns(), "clearJumboFlag", DistLockManager::kDefaultLockTimeout)));
CollectionType collType;
diff --git a/src/mongo/db/s/config/configsvr_create_database_command.cpp b/src/mongo/db/s/config/configsvr_create_database_command.cpp
index ec6793676b6..17725f4451d 100644
--- a/src/mongo/db/s/config/configsvr_create_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_create_database_command.cpp
@@ -42,6 +42,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/grid.h"
@@ -90,9 +91,8 @@ public:
auto scopedLock =
ShardingCatalogManager::get(opCtx)->serializeCreateOrDropDatabase(opCtx, dbname);
- auto dbDistLock =
- uassertStatusOK(Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
- opCtx, dbname, "createDatabase", DistLockManager::kDefaultLockTimeout));
+ auto dbDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
+ opCtx, dbname, "createDatabase", DistLockManager::kDefaultLockTimeout));
ShardingCatalogManager::get(opCtx)->createDatabase(opCtx, dbname, ShardId());
}
diff --git a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
index 005746b79f6..aa8b22651e6 100644
--- a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
@@ -36,8 +36,8 @@
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/db/s/operation_sharding_state.h"
-#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/client/shard_registry.h"
@@ -121,17 +121,15 @@ public:
setDropCollDistLockWait.execute(
[&](const BSONObj& data) { waitFor = Seconds(data["waitForSecs"].numberInt()); });
- auto const catalogClient = Grid::get(opCtx)->catalogClient();
-
auto scopedDbLock =
ShardingCatalogManager::get(opCtx)->serializeCreateOrDropDatabase(opCtx, nss.db());
auto scopedCollLock =
ShardingCatalogManager::get(opCtx)->serializeCreateOrDropCollection(opCtx, nss);
auto dbDistLock = uassertStatusOK(
- catalogClient->getDistLockManager()->lock(opCtx, nss.db(), "dropCollection", waitFor));
+ DistLockManager::get(opCtx)->lock(opCtx, nss.db(), "dropCollection", waitFor));
auto collDistLock = uassertStatusOK(
- catalogClient->getDistLockManager()->lock(opCtx, nss.ns(), "dropCollection", waitFor));
+ DistLockManager::get(opCtx)->lock(opCtx, nss.ns(), "dropCollection", waitFor));
ON_BLOCK_EXIT([opCtx, nss] {
Grid::get(opCtx)->catalogCache()->invalidateCollectionEntry_LINEARIZABLE(nss);
diff --git a/src/mongo/db/s/config/configsvr_drop_database_command.cpp b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
index e2e69a6f042..7ef3658490c 100644
--- a/src/mongo/db/s/config/configsvr_drop_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
@@ -37,15 +37,14 @@
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/db/s/sharding_logging.h"
-#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/grid.h"
#include "mongo/util/scopeguard.h"
namespace mongo {
-
namespace {
/**
@@ -121,10 +120,9 @@ public:
auto const catalogClient = Grid::get(opCtx)->catalogClient();
auto const catalogManager = ShardingCatalogManager::get(opCtx);
- auto scopedLock =
- ShardingCatalogManager::get(opCtx)->serializeCreateOrDropDatabase(opCtx, dbname);
+ auto scopedLock = catalogManager->serializeCreateOrDropDatabase(opCtx, dbname);
- auto dbDistLock = uassertStatusOK(catalogClient->getDistLockManager()->lock(
+ auto dbDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
opCtx, dbname, "dropDatabase", DistLockManager::kDefaultLockTimeout));
// Invalidate the database metadata so the next access kicks off a full reload.
@@ -153,7 +151,7 @@ public:
// Drop the database's collections.
for (const auto& nss : catalogClient->getAllShardedCollectionsForDb(
opCtx, dbname, repl::ReadConcernArgs::get(opCtx).getLevel())) {
- auto collDistLock = uassertStatusOK(catalogClient->getDistLockManager()->lock(
+ auto collDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
opCtx, nss.ns(), "dropCollection", DistLockManager::kDefaultLockTimeout));
catalogManager->dropCollection(opCtx, nss);
}
diff --git a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
index aed8625b1b1..512448bf572 100644
--- a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
+++ b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
@@ -43,17 +43,13 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/grid.h"
#include "mongo/util/scopeguard.h"
namespace mongo {
-
-using std::set;
-using std::shared_ptr;
-using std::string;
-
namespace {
/**
@@ -137,9 +133,8 @@ public:
// Make sure to force update of any stale metadata
ON_BLOCK_EXIT([opCtx, dbname] { Grid::get(opCtx)->catalogCache()->purgeDatabase(dbname); });
- auto dbDistLock =
- uassertStatusOK(Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
- opCtx, dbname, "enableSharding", DistLockManager::kDefaultLockTimeout));
+ auto dbDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
+ opCtx, dbname, "enableSharding", DistLockManager::kDefaultLockTimeout));
ShardingCatalogManager::get(opCtx)->enableSharding(opCtx, dbname, shardId);
audit::logEnableSharding(Client::getCurrent(), dbname);
diff --git a/src/mongo/db/s/config/configsvr_move_primary_command.cpp b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
index aabe58ef3a4..dc714ee9211 100644
--- a/src/mongo/db/s/config/configsvr_move_primary_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
@@ -42,7 +42,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/repl/repl_client_info.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/db/server_options.h"
#include "mongo/logv2/log.h"
#include "mongo/s/catalog/type_database.h"
@@ -145,7 +145,7 @@ public:
auto const catalogClient = Grid::get(opCtx)->catalogClient();
auto const shardRegistry = Grid::get(opCtx)->shardRegistry();
- auto dbDistLock = uassertStatusOK(catalogClient->getDistLockManager()->lock(
+ auto dbDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
opCtx, dbname, "movePrimary", DistLockManager::kDefaultLockTimeout));
auto dbType =
diff --git a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
index d3fa6bc250e..05b82b1af50 100644
--- a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
+++ b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
@@ -36,9 +36,9 @@
#include "mongo/db/commands.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/db/s/shard_key_util.h"
#include "mongo/logv2/log.h"
-#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/refine_collection_shard_key_gen.h"
#include "mongo/s/stale_shard_version_helpers.h"
@@ -74,15 +74,15 @@ public:
// Acquire distlocks on the namespace's database and collection.
DistLockManager::ScopedDistLock dbDistLock(uassertStatusOK(
- catalogClient->getDistLockManager()->lock(opCtx,
- nss.db(),
- "refineCollectionShardKey",
- DistLockManager::kDefaultLockTimeout)));
+ DistLockManager::get(opCtx)->lock(opCtx,
+ nss.db(),
+ "refineCollectionShardKey",
+ DistLockManager::kDefaultLockTimeout)));
DistLockManager::ScopedDistLock collDistLock(uassertStatusOK(
- catalogClient->getDistLockManager()->lock(opCtx,
- nss.ns(),
- "refineCollectionShardKey",
- DistLockManager::kDefaultLockTimeout)));
+ DistLockManager::get(opCtx)->lock(opCtx,
+ nss.ns(),
+ "refineCollectionShardKey",
+ DistLockManager::kDefaultLockTimeout)));
// Validate the given namespace is (i) sharded, (ii) doesn't already have the proposed
// key, and (iii) has the same epoch as the router that received
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
index 7cfc6445161..816369f690a 100644
--- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/db/s/shard_key_util.h"
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/type_database.h"
@@ -248,10 +249,10 @@ public:
// Make the distlocks boost::optional so that they can be released by being reset below.
boost::optional<DistLockManager::ScopedDistLock> dbDistLock(
- uassertStatusOK(catalogClient->getDistLockManager()->lock(
+ uassertStatusOK(DistLockManager::get(opCtx)->lock(
opCtx, nss.db(), "shardCollection", DistLockManager::kDefaultLockTimeout)));
boost::optional<DistLockManager::ScopedDistLock> collDistLock(
- uassertStatusOK(catalogClient->getDistLockManager()->lock(
+ uassertStatusOK(DistLockManager::get(opCtx)->lock(
opCtx, nss.ns(), "shardCollection", DistLockManager::kDefaultLockTimeout)));
// Ensure sharding is allowed on the database.
diff --git a/src/mongo/db/s/config/initial_split_policy.h b/src/mongo/db/s/config/initial_split_policy.h
index 75f2380baff..6687c53c933 100644
--- a/src/mongo/db/s/config/initial_split_policy.h
+++ b/src/mongo/db/s/config/initial_split_policy.h
@@ -66,6 +66,31 @@ public:
size_t numShards,
bool collectionIsEmpty);
+ virtual ~InitialSplitPolicy() {}
+
+ /**
+ * Generates a list of initial chunks to be created during a shardCollection operation.
+ */
+ struct ShardCollectionConfig {
+ std::vector<ChunkType> chunks;
+ Timestamp creationTime;
+
+ const auto& collVersion() const {
+ return chunks.back().getVersion();
+ }
+ };
+ virtual ShardCollectionConfig createFirstChunks(OperationContext* opCtx,
+ const ShardKeyPattern& shardKeyPattern,
+ SplitPolicyParams params) = 0;
+
+ /**
+ * Returns whether the chunk generation strategy being used is optimized or not. Since there is
+ * only a single unoptimized policy, we return true by default here.
+ */
+ virtual bool isOptimized() {
+ return true;
+ }
+
/**
* Returns split points to use for creating chunks in cases where the shard key contains a
* hashed field. For new collections which use hashed shard keys, we can can pre-split the range
@@ -79,15 +104,6 @@ public:
BSONObj prefix,
int numInitialChunks);
- struct ShardCollectionConfig {
- std::vector<ChunkType> chunks;
- Timestamp creationTime;
-
- const auto& collVersion() const {
- return chunks.back().getVersion();
- }
- };
-
/**
* Produces the initial chunks that need to be written for an *empty* collection which is being
* sharded based on a set of 'splitPoints' and 'numContiguousChunksPerShard'.
@@ -111,24 +127,7 @@ public:
const Timestamp& validAfter,
const std::vector<BSONObj>& splitPoints,
const std::vector<ShardId>& allShardIds,
- const int numContiguousChunksPerShard = 1);
-
- /**
- * Generates a list of initial chunks to be created during a shardCollection operation.
- */
- virtual ShardCollectionConfig createFirstChunks(OperationContext* opCtx,
- const ShardKeyPattern& shardKeyPattern,
- SplitPolicyParams params) = 0;
-
- /**
- * Returns whether the chunk generation strategy being used is optimized or not. Since there is
- * only a single unoptimized policy, we return true by default here.
- */
- virtual bool isOptimized() {
- return true;
- }
-
- virtual ~InitialSplitPolicy() {}
+ int numContiguousChunksPerShard);
};
/**
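[Editor's note: in the header hunk above, the trailing numContiguousChunksPerShard parameter loses its default of 1, so callers must now pass the value explicitly, as the test changes in the next hunk do. A toy illustration of the signature change, with names shortened and bodies stubbed.]

    #include <vector>

    struct ChunkType {};

    // Before: int numContiguousChunksPerShard = 1 (callers could omit it).
    // After: the argument is required at every call site.
    std::vector<ChunkType> generateInitialChunks(int numContiguousChunksPerShard) {
        return std::vector<ChunkType>(numContiguousChunksPerShard);
    }

    int main() {
        auto chunks = generateInitialChunks(1);  // the formerly-implicit value
        return chunks.empty() ? 1 : 0;
    }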
diff --git a/src/mongo/db/s/config/initial_split_policy_test.cpp b/src/mongo/db/s/config/initial_split_policy_test.cpp
index b583c6b87c4..b653e8f59c4 100644
--- a/src/mongo/db/s/config/initial_split_policy_test.cpp
+++ b/src/mongo/db/s/config/initial_split_policy_test.cpp
@@ -278,8 +278,13 @@ private:
TEST_F(GenerateInitialHashedSplitChunksTest, NoSplitPoints) {
const std::vector<BSONObj> splitPoints;
const std::vector<ShardId> shardIds = makeShardIds(2);
- const auto shardCollectionConfig = InitialSplitPolicy::generateShardCollectionInitialChunks(
- {nss(), boost::none, shardIds[0]}, shardKeyPattern(), timeStamp(), splitPoints, shardIds);
+ const auto shardCollectionConfig =
+ InitialSplitPolicy::generateShardCollectionInitialChunks({nss(), boost::none, shardIds[0]},
+ shardKeyPattern(),
+ timeStamp(),
+ splitPoints,
+ shardIds,
+ 1);
// there should only be one chunk
const auto expectedChunks =
@@ -296,7 +301,8 @@ TEST_F(GenerateInitialHashedSplitChunksTest, SplitPointsMoreThanAvailableShards)
shardKeyPattern(),
timeStamp(),
hashedSplitPoints(),
- shardIds);
+ shardIds,
+ 1);
// // chunks should be distributed in a round-robin manner
const std::vector<ChunkType> expectedChunks = makeChunks(
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index d68bd3a4dad..e9f0a69269a 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -47,14 +47,7 @@
namespace mongo {
-struct CollectionOptions;
-class OperationContext;
-class RemoteCommandTargeter;
-class ServiceContext;
-class UUID;
-
struct RemoveShardProgress {
-
/**
* Used to indicate to the caller of the removeShard method whether draining of chunks for
* a particular shard has started, is ongoing, or has been completed.
@@ -89,7 +82,6 @@ struct RemoveShardProgress {
class ShardingCatalogManager {
ShardingCatalogManager(const ShardingCatalogManager&) = delete;
ShardingCatalogManager& operator=(const ShardingCatalogManager&) = delete;
- friend class ConfigSvrShardCollectionCommand;
public:
ShardingCatalogManager(ServiceContext* serviceContext,
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 83af868f7f7..89246451c01 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -218,40 +218,6 @@ void triggerFireAndForgetShardRefreshes(OperationContext* opCtx, const Namespace
} // namespace
-void checkForExistingChunks(OperationContext* opCtx, const NamespaceString& nss) {
- BSONObjBuilder countBuilder;
- countBuilder.append("count", ChunkType::ConfigNS.coll());
- countBuilder.append("query", BSON(ChunkType::ns(nss.ns())));
-
- // OK to use limit=1, since if any chunks exist, we will fail.
- countBuilder.append("limit", 1);
-
- // Use readConcern local to guarantee we see any chunks that have been written and may
- // become committed; readConcern majority will not see the chunks if they have not made it
- // to the majority snapshot.
- repl::ReadConcernArgs readConcern(repl::ReadConcernLevel::kLocalReadConcern);
- readConcern.appendInfo(&countBuilder);
-
- auto cmdResponse = uassertStatusOK(
- Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- opCtx,
- kConfigReadSelector,
- ChunkType::ConfigNS.db().toString(),
- countBuilder.done(),
- Shard::kDefaultConfigCommandTimeout,
- Shard::RetryPolicy::kIdempotent));
- uassertStatusOK(cmdResponse.commandStatus);
-
- long long numChunks;
- uassertStatusOK(bsonExtractIntegerField(cmdResponse.response, "n", &numChunks));
- uassert(ErrorCodes::ManualInterventionRequired,
- str::stream() << "A previous attempt to shard collection " << nss.ns()
- << " failed after writing some initial chunks to config.chunks. Please "
- "manually delete the partially written chunks for collection "
- << nss.ns() << " from config.chunks",
- numChunks == 0);
-}
-
void sendDropCollectionToAllShards(OperationContext* opCtx, const NamespaceString& nss) {
const auto catalogClient = Grid::get(opCtx)->catalogClient();
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp
index 8e0cd0745f3..bc6d12ae396 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp
@@ -38,9 +38,9 @@
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/dist_lock_catalog_replset.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata/tracking_metadata.h"
-#include "mongo/s/catalog/dist_lock_catalog_impl.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/util/time_support.h"
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
index 393e8533e50..6a09a6f1358 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata/tracking_metadata.h"
-#include "mongo/s/catalog/dist_lock_catalog_impl.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
diff --git a/src/mongo/s/catalog/dist_lock_catalog.cpp b/src/mongo/db/s/dist_lock_catalog.cpp
index f52a0cfe248..cb9ec13c1a9 100644
--- a/src/mongo/s/catalog/dist_lock_catalog.cpp
+++ b/src/mongo/db/s/dist_lock_catalog.cpp
@@ -29,7 +29,7 @@
#include "mongo/platform/basic.h"
-#include "mongo/s/catalog/dist_lock_catalog.h"
+#include "mongo/db/s/dist_lock_catalog.h"
namespace mongo {
diff --git a/src/mongo/s/catalog/dist_lock_catalog.h b/src/mongo/db/s/dist_lock_catalog.h
index 7f774915b66..7f774915b66 100644
--- a/src/mongo/s/catalog/dist_lock_catalog.h
+++ b/src/mongo/db/s/dist_lock_catalog.h
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp b/src/mongo/db/s/dist_lock_catalog_mock.cpp
index 1a22526aa14..fba94f75502 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
+++ b/src/mongo/db/s/dist_lock_catalog_mock.cpp
@@ -29,12 +29,8 @@
#include "mongo/platform/basic.h"
-#include "mongo/s/catalog/dist_lock_catalog_mock.h"
+#include "mongo/db/s/dist_lock_catalog_mock.h"
-#include "mongo/base/status.h"
-#include "mongo/base/status_with.h"
-#include "mongo/s/catalog/type_lockpings.h"
-#include "mongo/s/catalog/type_locks.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/str.h"
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.h b/src/mongo/db/s/dist_lock_catalog_mock.h
index 1eab733dc85..04996f05633 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.h
+++ b/src/mongo/db/s/dist_lock_catalog_mock.h
@@ -31,9 +31,8 @@
#include <functional>
-#include "mongo/base/status_with.h"
+#include "mongo/db/s/dist_lock_catalog.h"
#include "mongo/platform/mutex.h"
-#include "mongo/s/catalog/dist_lock_catalog.h"
#include "mongo/s/catalog/type_lockpings.h"
#include "mongo/s/catalog/type_locks.h"
@@ -220,4 +219,5 @@ private:
GetServerInfoFunc _getServerInfoChecker;
StatusWith<DistLockCatalog::ServerInfo> _getServerInfoReturnValue;
};
+
} // namespace mongo
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp b/src/mongo/db/s/dist_lock_catalog_replset.cpp
index 9dc62b9d360..a9997a6fd60 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
+++ b/src/mongo/db/s/dist_lock_catalog_replset.cpp
@@ -29,7 +29,7 @@
#include "mongo/platform/basic.h"
-#include "mongo/s/catalog/dist_lock_catalog_impl.h"
+#include "mongo/db/s/dist_lock_catalog_replset.h"
#include <string>
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl.h b/src/mongo/db/s/dist_lock_catalog_replset.h
index cdbb4bff12c..9421d290af3 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl.h
+++ b/src/mongo/db/s/dist_lock_catalog_replset.h
@@ -36,8 +36,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/ops/find_and_modify_command_gen.h"
+#include "mongo/db/s/dist_lock_catalog.h"
#include "mongo/db/write_concern_options.h"
-#include "mongo/s/catalog/dist_lock_catalog.h"
#include "mongo/util/time_support.h"
namespace mongo {
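
For orientation before the large test diff that follows: the updated tests call grabLock with an explicitly supplied write concern. A minimal sketch of such a call site, assuming only the signature implied by those test calls (the helper name is hypothetical, not part of this patch):

#include "mongo/bson/oid.h"
#include "mongo/db/s/dist_lock_catalog.h"
#include "mongo/s/catalog/type_locks.h"
#include "mongo/util/time_support.h"

namespace mongo {

// Hypothetical helper; the argument order mirrors the grabLock calls in the
// updated tests below.
StatusWith<LocksType> grabLockWithMajority(OperationContext* opCtx,
                                           DistLockCatalog* catalog) {
    return catalog->grabLock(opCtx,
                             "test",                                  // lock name
                             OID::gen(),                              // lock session id
                             "me",                                    // who is locking
                             "mongos",                                // process id
                             Date_t::now(),                           // acquisition time
                             "because",                               // why message
                             DistLockCatalog::kMajorityWriteConcern); // explicit write concern
}

}  // namespace mongo
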
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp b/src/mongo/db/s/dist_lock_catalog_replset_test.cpp
index a491d5710c9..c440d207085 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
+++ b/src/mongo/db/s/dist_lock_catalog_replset_test.cpp
@@ -36,11 +36,11 @@
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/commands.h"
#include "mongo/db/repl/read_concern_args.h"
+#include "mongo/db/s/dist_lock_catalog_replset.h"
+#include "mongo/db/s/dist_lock_manager_mock.h"
#include "mongo/db/s/shard_server_test_fixture.h"
#include "mongo/db/storage/duplicate_key_error_info.h"
#include "mongo/executor/network_test_env.h"
-#include "mongo/s/catalog/dist_lock_catalog_impl.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/catalog/sharding_catalog_client_mock.h"
#include "mongo/s/catalog/type_lockpings.h"
#include "mongo/s/catalog/type_locks.h"
@@ -67,20 +67,10 @@ const HostAndPort dummyHost("dummy", 123);
* NOTE: Even though the dist lock manager only runs on the config server, these tests use the
* ShardServerTestFixture and emulate the network for legacy reasons.
*/
-class DistLockCatalogTest : public ShardServerTestFixture {
+class DistLockCatalogReplSetTest : public ShardServerTestFixture {
protected:
- std::unique_ptr<DistLockCatalog> makeDistLockCatalog() override {
- return std::make_unique<DistLockCatalogImpl>();
- }
-
- std::unique_ptr<DistLockManager> makeDistLockManager(
- std::unique_ptr<DistLockCatalog> distLockCatalog) override {
- return std::make_unique<DistLockManagerMock>(std::move(distLockCatalog));
- }
-
- std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) override {
- return std::make_unique<ShardingCatalogClientMock>(std::move(distLockManager));
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() override {
+ return std::make_unique<ShardingCatalogClientMock>();
}
std::shared_ptr<RemoteCommandTargeterMock> configTargeter() {
@@ -95,6 +85,8 @@ protected:
func(opCtx.get());
});
}
+
+ DistLockCatalogImpl _distLockCatalog;
};
void checkReadConcern(const BSONObj& findCmd) {
@@ -103,10 +95,10 @@ void checkReadConcern(const BSONObj& findCmd) {
ASSERT(repl::ReadConcernLevel::kMajorityReadConcern == readConcernArgs.getLevel());
}
-TEST_F(DistLockCatalogTest, BasicPing) {
+TEST_F(DistLockCatalogReplSetTest, BasicPing) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
Date_t ping(dateFromISOString("2014-03-11T09:17:18.098Z").getValue());
- auto status = distLockCatalog()->ping(opCtx, "abcd", ping);
+ auto status = _distLockCatalog.ping(opCtx, "abcd", ping);
ASSERT_OK(status);
});
@@ -141,23 +133,23 @@ TEST_F(DistLockCatalogTest, BasicPing) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, PingTargetError) {
+TEST_F(DistLockCatalogReplSetTest, PingTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
- auto status = distLockCatalog()->ping(operationContext(), "abcd", Date_t::now());
+ auto status = _distLockCatalog.ping(operationContext(), "abcd", Date_t::now());
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogTest, PingRunCmdError) {
+TEST_F(DistLockCatalogReplSetTest, PingRunCmdError) {
shutdownExecutorPool();
- auto status = distLockCatalog()->ping(operationContext(), "abcd", Date_t::now());
+ auto status = _distLockCatalog.ping(operationContext(), "abcd", Date_t::now());
ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, status.code());
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogTest, PingCommandError) {
+TEST_F(DistLockCatalogReplSetTest, PingCommandError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->ping(opCtx, "abcd", Date_t::now());
+ auto status = _distLockCatalog.ping(opCtx, "abcd", Date_t::now());
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -173,9 +165,9 @@ TEST_F(DistLockCatalogTest, PingCommandError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, PingWriteError) {
+TEST_F(DistLockCatalogReplSetTest, PingWriteError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->ping(opCtx, "abcd", Date_t::now());
+ auto status = _distLockCatalog.ping(opCtx, "abcd", Date_t::now());
ASSERT_EQUALS(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -191,9 +183,9 @@ TEST_F(DistLockCatalogTest, PingWriteError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, PingWriteConcernError) {
+TEST_F(DistLockCatalogReplSetTest, PingWriteConcernError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->ping(opCtx, "abcd", Date_t::now());
+ auto status = _distLockCatalog.ping(opCtx, "abcd", Date_t::now());
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -212,9 +204,9 @@ TEST_F(DistLockCatalogTest, PingWriteConcernError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, PingUnsupportedWriteConcernResponse) {
+TEST_F(DistLockCatalogReplSetTest, PingUnsupportedWriteConcernResponse) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->ping(opCtx, "abcd", Date_t::now());
+ auto status = _distLockCatalog.ping(opCtx, "abcd", Date_t::now());
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -234,9 +226,9 @@ TEST_F(DistLockCatalogTest, PingUnsupportedWriteConcernResponse) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, PingUnsupportedResponseFormat) {
+TEST_F(DistLockCatalogReplSetTest, PingUnsupportedResponseFormat) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->ping(opCtx, "abcd", Date_t::now());
+ auto status = _distLockCatalog.ping(opCtx, "abcd", Date_t::now());
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
});
@@ -248,12 +240,19 @@ TEST_F(DistLockCatalogTest, PingUnsupportedResponseFormat) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GrabLockNoOp) {
+TEST_F(DistLockCatalogReplSetTest, GrabLockNoOp) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID myID("555f80be366c194b13fb0372");
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
- auto resultStatus = distLockCatalog()
- ->grabLock(opCtx, "test", myID, "me", "mongos", now, "because")
+ auto resultStatus = _distLockCatalog
+ .grabLock(opCtx,
+ "test",
+ myID,
+ "me",
+ "mongos",
+ now,
+ "because",
+ DistLockCatalog::kMajorityWriteConcern)
.getStatus();
ASSERT_EQUALS(ErrorCodes::LockStateChangeFailed, resultStatus.code());
@@ -290,12 +289,18 @@ TEST_F(DistLockCatalogTest, GrabLockNoOp) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GrabLockWithNewDoc) {
+TEST_F(DistLockCatalogReplSetTest, GrabLockWithNewDoc) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID myID("555f80be366c194b13fb0372");
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
- auto resultStatus =
- distLockCatalog()->grabLock(opCtx, "test", myID, "me", "mongos", now, "because");
+ auto resultStatus = _distLockCatalog.grabLock(opCtx,
+ "test",
+ myID,
+ "me",
+ "mongos",
+ now,
+ "because",
+ DistLockCatalog::kMajorityWriteConcern);
ASSERT_OK(resultStatus.getStatus());
const auto& lockDoc = resultStatus.getValue();
@@ -354,11 +359,14 @@ TEST_F(DistLockCatalogTest, GrabLockWithNewDoc) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GrabLockWithBadLockDoc) {
+TEST_F(DistLockCatalogReplSetTest, GrabLockWithBadLockDoc) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
auto resultStatus =
- distLockCatalog()->grabLock(opCtx, "test", OID(), "", "", now, "").getStatus();
+ _distLockCatalog
+ .grabLock(
+ opCtx, "test", OID(), "", "", now, "", DistLockCatalog::kMajorityWriteConcern)
+ .getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, resultStatus.code());
});
@@ -387,28 +395,50 @@ TEST_F(DistLockCatalogTest, GrabLockWithBadLockDoc) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GrabLockTargetError) {
+TEST_F(DistLockCatalogReplSetTest, GrabLockTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
- auto status = distLockCatalog()
- ->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
+
+ auto status = _distLockCatalog
+ .grabLock(operationContext(),
+ "",
+ OID::gen(),
+ "",
+ "",
+ Date_t::now(),
+ "",
+ DistLockCatalog::kMajorityWriteConcern)
.getStatus();
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogTest, GrabLockRunCmdError) {
+TEST_F(DistLockCatalogReplSetTest, GrabLockRunCmdError) {
shutdownExecutorPool();
- auto status = distLockCatalog()
- ->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
+ auto status = _distLockCatalog
+ .grabLock(operationContext(),
+ "",
+ OID::gen(),
+ "",
+ "",
+ Date_t::now(),
+ "",
+ DistLockCatalog::kMajorityWriteConcern)
.getStatus();
ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, status.code());
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogTest, GrabLockCommandError) {
+TEST_F(DistLockCatalogReplSetTest, GrabLockCommandError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()
- ->grabLock(opCtx, "", OID::gen(), "", "", Date_t::now(), "")
+ auto status = _distLockCatalog
+ .grabLock(opCtx,
+ "",
+ OID::gen(),
+ "",
+ "",
+ Date_t::now(),
+ "",
+ DistLockCatalog::kMajorityWriteConcern)
.getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -425,10 +455,17 @@ TEST_F(DistLockCatalogTest, GrabLockCommandError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GrabLockDupKeyError) {
+TEST_F(DistLockCatalogReplSetTest, GrabLockDupKeyError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()
- ->grabLock(opCtx, "", OID::gen(), "", "", Date_t::now(), "")
+ auto status = _distLockCatalog
+ .grabLock(opCtx,
+ "",
+ OID::gen(),
+ "",
+ "",
+ Date_t::now(),
+ "",
+ DistLockCatalog::kMajorityWriteConcern)
.getStatus();
ASSERT_EQUALS(ErrorCodes::LockStateChangeFailed, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -442,10 +479,17 @@ TEST_F(DistLockCatalogTest, GrabLockDupKeyError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GrabLockWriteError) {
+TEST_F(DistLockCatalogReplSetTest, GrabLockWriteError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()
- ->grabLock(opCtx, "", OID::gen(), "", "", Date_t::now(), "")
+ auto status = _distLockCatalog
+ .grabLock(opCtx,
+ "",
+ OID::gen(),
+ "",
+ "",
+ Date_t::now(),
+ "",
+ DistLockCatalog::kMajorityWriteConcern)
.getStatus();
ASSERT_EQUALS(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -462,10 +506,17 @@ TEST_F(DistLockCatalogTest, GrabLockWriteError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GrabLockWriteConcernError) {
+TEST_F(DistLockCatalogReplSetTest, GrabLockWriteConcernError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()
- ->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
+ auto status = _distLockCatalog
+ .grabLock(operationContext(),
+ "",
+ OID::gen(),
+ "",
+ "",
+ Date_t::now(),
+ "",
+ DistLockCatalog::kMajorityWriteConcern)
.getStatus();
ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -485,10 +536,17 @@ TEST_F(DistLockCatalogTest, GrabLockWriteConcernError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GrabLockWriteConcernErrorBadType) {
+TEST_F(DistLockCatalogReplSetTest, GrabLockWriteConcernErrorBadType) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()
- ->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
+ auto status = _distLockCatalog
+ .grabLock(operationContext(),
+ "",
+ OID::gen(),
+ "",
+ "",
+ Date_t::now(),
+ "",
+ DistLockCatalog::kMajorityWriteConcern)
.getStatus();
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -506,10 +564,17 @@ TEST_F(DistLockCatalogTest, GrabLockWriteConcernErrorBadType) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GrabLockResponseMissingValueField) {
+TEST_F(DistLockCatalogReplSetTest, GrabLockResponseMissingValueField) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()
- ->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
+ auto status = _distLockCatalog
+ .grabLock(operationContext(),
+ "",
+ OID::gen(),
+ "",
+ "",
+ Date_t::now(),
+ "",
+ DistLockCatalog::kMajorityWriteConcern)
.getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -524,10 +589,17 @@ TEST_F(DistLockCatalogTest, GrabLockResponseMissingValueField) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GrabLockUnsupportedWriteConcernResponse) {
+TEST_F(DistLockCatalogReplSetTest, GrabLockUnsupportedWriteConcernResponse) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()
- ->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
+ auto status = _distLockCatalog
+ .grabLock(operationContext(),
+ "",
+ OID::gen(),
+ "",
+ "",
+ Date_t::now(),
+ "",
+ DistLockCatalog::kMajorityWriteConcern)
.getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -548,10 +620,17 @@ TEST_F(DistLockCatalogTest, GrabLockUnsupportedWriteConcernResponse) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GrabLockUnsupportedResponseFormat) {
+TEST_F(DistLockCatalogReplSetTest, GrabLockUnsupportedResponseFormat) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()
- ->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
+ auto status = _distLockCatalog
+ .grabLock(operationContext(),
+ "",
+ OID::gen(),
+ "",
+ "",
+ Date_t::now(),
+ "",
+ DistLockCatalog::kMajorityWriteConcern)
.getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
});
@@ -564,14 +643,14 @@ TEST_F(DistLockCatalogTest, GrabLockUnsupportedResponseFormat) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, OvertakeLockNoOp) {
+TEST_F(DistLockCatalogReplSetTest, OvertakeLockNoOp) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID myID("555f80be366c194b13fb0372");
OID currentOwner("555f99712c99a78c5b083358");
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
auto resultStatus =
- distLockCatalog()
- ->overtakeLock(
+ _distLockCatalog
+ .overtakeLock(
operationContext(), "test", myID, currentOwner, "me", "mongos", now, "because")
.getStatus();
@@ -613,12 +692,12 @@ TEST_F(DistLockCatalogTest, OvertakeLockNoOp) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, OvertakeLockWithNewDoc) {
+TEST_F(DistLockCatalogReplSetTest, OvertakeLockWithNewDoc) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID myID("555f80be366c194b13fb0372");
OID currentOwner("555f99712c99a78c5b083358");
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
- auto resultStatus = distLockCatalog()->overtakeLock(
+ auto resultStatus = _distLockCatalog.overtakeLock(
operationContext(), "test", myID, currentOwner, "me", "mongos", now, "because");
ASSERT_OK(resultStatus.getStatus());
@@ -682,12 +761,11 @@ TEST_F(DistLockCatalogTest, OvertakeLockWithNewDoc) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, OvertakeLockWithBadLockDoc) {
+TEST_F(DistLockCatalogReplSetTest, OvertakeLockWithBadLockDoc) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
auto resultStatus =
- distLockCatalog()
- ->overtakeLock(operationContext(), "test", OID(), OID(), "", "", now, "")
+ _distLockCatalog.overtakeLock(operationContext(), "test", OID(), OID(), "", "", now, "")
.getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, resultStatus.code());
});
@@ -717,31 +795,29 @@ TEST_F(DistLockCatalogTest, OvertakeLockWithBadLockDoc) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, OvertakeLockTargetError) {
+TEST_F(DistLockCatalogReplSetTest, OvertakeLockTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
- auto status =
- distLockCatalog()
- ->overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
- .getStatus();
+ auto status = _distLockCatalog
+ .overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
+ .getStatus();
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogTest, OvertakeLockRunCmdError) {
+TEST_F(DistLockCatalogReplSetTest, OvertakeLockRunCmdError) {
shutdownExecutorPool();
- auto status =
- distLockCatalog()
- ->overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
- .getStatus();
+ auto status = _distLockCatalog
+ .overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
+ .getStatus();
ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, status.code());
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogTest, OvertakeLockCommandError) {
+TEST_F(DistLockCatalogReplSetTest, OvertakeLockCommandError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
- distLockCatalog()
- ->overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
+ _distLockCatalog
+ .overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
.getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -758,11 +834,11 @@ TEST_F(DistLockCatalogTest, OvertakeLockCommandError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, OvertakeLockWriteError) {
+TEST_F(DistLockCatalogReplSetTest, OvertakeLockWriteError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
- distLockCatalog()
- ->overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
+ _distLockCatalog
+ .overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
.getStatus();
ASSERT_EQUALS(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -779,11 +855,11 @@ TEST_F(DistLockCatalogTest, OvertakeLockWriteError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, OvertakeLockWriteConcernError) {
+TEST_F(DistLockCatalogReplSetTest, OvertakeLockWriteConcernError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
- distLockCatalog()
- ->overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
+ _distLockCatalog
+ .overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
.getStatus();
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -803,11 +879,11 @@ TEST_F(DistLockCatalogTest, OvertakeLockWriteConcernError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, OvertakeLockUnsupportedWriteConcernResponse) {
+TEST_F(DistLockCatalogReplSetTest, OvertakeLockUnsupportedWriteConcernResponse) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
- distLockCatalog()
- ->overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
+ _distLockCatalog
+ .overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
.getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -828,11 +904,11 @@ TEST_F(DistLockCatalogTest, OvertakeLockUnsupportedWriteConcernResponse) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, OvertakeLockUnsupportedResponseFormat) {
+TEST_F(DistLockCatalogReplSetTest, OvertakeLockUnsupportedResponseFormat) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
- distLockCatalog()
- ->overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
+ _distLockCatalog
+ .overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
.getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
});
@@ -845,10 +921,9 @@ TEST_F(DistLockCatalogTest, OvertakeLockUnsupportedResponseFormat) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, BasicUnlock) {
+TEST_F(DistLockCatalogReplSetTest, BasicUnlock) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status =
- distLockCatalog()->unlock(operationContext(), OID("555f99712c99a78c5b083358"));
+ auto status = _distLockCatalog.unlock(operationContext(), OID("555f99712c99a78c5b083358"));
ASSERT_OK(status);
});
@@ -879,9 +954,9 @@ TEST_F(DistLockCatalogTest, BasicUnlock) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, BasicUnlockWithName) {
+TEST_F(DistLockCatalogReplSetTest, BasicUnlockWithName) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->unlock(
+ auto status = _distLockCatalog.unlock(
operationContext(), OID("555f99712c99a78c5b083358"), "TestDB.TestColl");
ASSERT_OK(status);
});
@@ -913,10 +988,9 @@ TEST_F(DistLockCatalogTest, BasicUnlockWithName) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, UnlockWithNoNewDoc) {
+TEST_F(DistLockCatalogReplSetTest, UnlockWithNoNewDoc) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status =
- distLockCatalog()->unlock(operationContext(), OID("555f99712c99a78c5b083358"));
+ auto status = _distLockCatalog.unlock(operationContext(), OID("555f99712c99a78c5b083358"));
ASSERT_OK(status);
});
@@ -943,9 +1017,9 @@ TEST_F(DistLockCatalogTest, UnlockWithNoNewDoc) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, UnlockWithNameWithNoNewDoc) {
+TEST_F(DistLockCatalogReplSetTest, UnlockWithNameWithNoNewDoc) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->unlock(
+ auto status = _distLockCatalog.unlock(
operationContext(), OID("555f99712c99a78c5b083358"), "TestDB.TestColl");
ASSERT_OK(status);
});
@@ -973,23 +1047,23 @@ TEST_F(DistLockCatalogTest, UnlockWithNameWithNoNewDoc) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, UnlockTargetError) {
+TEST_F(DistLockCatalogReplSetTest, UnlockTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
- auto status = distLockCatalog()->unlock(operationContext(), OID());
+ auto status = _distLockCatalog.unlock(operationContext(), OID());
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogTest, UnlockRunCmdError) {
+TEST_F(DistLockCatalogReplSetTest, UnlockRunCmdError) {
shutdownExecutorPool();
- auto status = distLockCatalog()->unlock(operationContext(), OID());
+ auto status = _distLockCatalog.unlock(operationContext(), OID());
ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, status.code());
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogTest, UnlockCommandError) {
+TEST_F(DistLockCatalogReplSetTest, UnlockCommandError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->unlock(operationContext(), OID());
+ auto status = _distLockCatalog.unlock(operationContext(), OID());
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1005,9 +1079,9 @@ TEST_F(DistLockCatalogTest, UnlockCommandError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, UnlockWriteError) {
+TEST_F(DistLockCatalogReplSetTest, UnlockWriteError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->unlock(operationContext(), OID());
+ auto status = _distLockCatalog.unlock(operationContext(), OID());
ASSERT_EQUALS(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1023,9 +1097,9 @@ TEST_F(DistLockCatalogTest, UnlockWriteError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, UnlockWriteConcernError) {
+TEST_F(DistLockCatalogReplSetTest, UnlockWriteConcernError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->unlock(operationContext(), OID());
+ auto status = _distLockCatalog.unlock(operationContext(), OID());
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1048,9 +1122,9 @@ TEST_F(DistLockCatalogTest, UnlockWriteConcernError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, UnlockUnsupportedWriteConcernResponse) {
+TEST_F(DistLockCatalogReplSetTest, UnlockUnsupportedWriteConcernResponse) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->unlock(operationContext(), OID());
+ auto status = _distLockCatalog.unlock(operationContext(), OID());
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1070,9 +1144,9 @@ TEST_F(DistLockCatalogTest, UnlockUnsupportedWriteConcernResponse) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, UnlockUnsupportedResponseFormat) {
+TEST_F(DistLockCatalogReplSetTest, UnlockUnsupportedResponseFormat) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->unlock(operationContext(), OID());
+ auto status = _distLockCatalog.unlock(operationContext(), OID());
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
});
@@ -1084,9 +1158,9 @@ TEST_F(DistLockCatalogTest, UnlockUnsupportedResponseFormat) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, BasicUnlockAll) {
+TEST_F(DistLockCatalogReplSetTest, BasicUnlockAll) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->unlockAll(operationContext(), "processID");
+ auto status = _distLockCatalog.unlockAll(operationContext(), "processID");
ASSERT_OK(status);
});
@@ -1118,9 +1192,9 @@ TEST_F(DistLockCatalogTest, BasicUnlockAll) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, UnlockAllWriteFailed) {
+TEST_F(DistLockCatalogReplSetTest, UnlockAllWriteFailed) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->unlockAll(operationContext(), "processID");
+ auto status = _distLockCatalog.unlockAll(operationContext(), "processID");
ASSERT_EQUALS(ErrorCodes::IllegalOperation, status);
});
@@ -1132,9 +1206,9 @@ TEST_F(DistLockCatalogTest, UnlockAllWriteFailed) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, UnlockAllNetworkError) {
+TEST_F(DistLockCatalogReplSetTest, UnlockAllNetworkError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->unlockAll(operationContext(), "processID");
+ auto status = _distLockCatalog.unlockAll(operationContext(), "processID");
ASSERT_EQUALS(ErrorCodes::NetworkTimeout, status);
});
@@ -1147,11 +1221,11 @@ TEST_F(DistLockCatalogTest, UnlockAllNetworkError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, BasicGetServerInfo) {
+TEST_F(DistLockCatalogReplSetTest, BasicGetServerInfo) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
Date_t localTime(dateFromISOString("2015-05-26T13:06:27.293Z").getValue());
OID electionID("555fa85d4d8640862a0fc79b");
- auto resultStatus = distLockCatalog()->getServerInfo(operationContext());
+ auto resultStatus = _distLockCatalog.getServerInfo(operationContext());
ASSERT_OK(resultStatus.getStatus());
const auto& serverInfo = resultStatus.getValue();
@@ -1176,23 +1250,23 @@ TEST_F(DistLockCatalogTest, BasicGetServerInfo) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GetServerTargetError) {
+TEST_F(DistLockCatalogReplSetTest, GetServerTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
- auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
+ auto status = _distLockCatalog.getServerInfo(operationContext()).getStatus();
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogTest, GetServerRunCmdError) {
+TEST_F(DistLockCatalogReplSetTest, GetServerRunCmdError) {
shutdownExecutorPool();
- auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
+ auto status = _distLockCatalog.getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, status.code());
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogTest, GetServerCommandError) {
+TEST_F(DistLockCatalogReplSetTest, GetServerCommandError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
+ auto status = _distLockCatalog.getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1208,9 +1282,9 @@ TEST_F(DistLockCatalogTest, GetServerCommandError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GetServerBadElectionId) {
+TEST_F(DistLockCatalogReplSetTest, GetServerBadElectionId) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
+ auto status = _distLockCatalog.getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1229,9 +1303,9 @@ TEST_F(DistLockCatalogTest, GetServerBadElectionId) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GetServerBadLocalTime) {
+TEST_F(DistLockCatalogReplSetTest, GetServerBadLocalTime) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
+ auto status = _distLockCatalog.getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1250,9 +1324,9 @@ TEST_F(DistLockCatalogTest, GetServerBadLocalTime) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GetServerNoGLEStats) {
+TEST_F(DistLockCatalogReplSetTest, GetServerNoGLEStats) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
+ auto status = _distLockCatalog.getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1267,9 +1341,9 @@ TEST_F(DistLockCatalogTest, GetServerNoGLEStats) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GetServerNoElectionId) {
+TEST_F(DistLockCatalogReplSetTest, GetServerNoElectionId) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
+ auto status = _distLockCatalog.getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1288,9 +1362,9 @@ TEST_F(DistLockCatalogTest, GetServerNoElectionId) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GetServerInvalidReplSubsectionShouldFail) {
+TEST_F(DistLockCatalogReplSetTest, GetServerInvalidReplSubsectionShouldFail) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
+ auto status = _distLockCatalog.getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1308,9 +1382,9 @@ TEST_F(DistLockCatalogTest, GetServerInvalidReplSubsectionShouldFail) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GetServerNoElectionIdButMasterShouldFail) {
+TEST_F(DistLockCatalogReplSetTest, GetServerNoElectionIdButMasterShouldFail) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
+ auto status = _distLockCatalog.getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_NOT_EQUALS(std::string::npos, status.reason().find("me:1234"));
});
@@ -1329,9 +1403,9 @@ TEST_F(DistLockCatalogTest, GetServerNoElectionIdButMasterShouldFail) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, BasicStopPing) {
+TEST_F(DistLockCatalogReplSetTest, BasicStopPing) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->stopPing(operationContext(), "test");
+ auto status = _distLockCatalog.stopPing(operationContext(), "test");
ASSERT_OK(status);
});
@@ -1361,23 +1435,23 @@ TEST_F(DistLockCatalogTest, BasicStopPing) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, StopPingTargetError) {
+TEST_F(DistLockCatalogReplSetTest, StopPingTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
- auto status = distLockCatalog()->stopPing(operationContext(), "");
+ auto status = _distLockCatalog.stopPing(operationContext(), "");
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogTest, StopPingRunCmdError) {
+TEST_F(DistLockCatalogReplSetTest, StopPingRunCmdError) {
shutdownExecutorPool();
- auto status = distLockCatalog()->stopPing(operationContext(), "");
+ auto status = _distLockCatalog.stopPing(operationContext(), "");
ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, status.code());
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogTest, StopPingCommandError) {
+TEST_F(DistLockCatalogReplSetTest, StopPingCommandError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->stopPing(operationContext(), "");
+ auto status = _distLockCatalog.stopPing(operationContext(), "");
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1393,9 +1467,9 @@ TEST_F(DistLockCatalogTest, StopPingCommandError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, StopPingWriteError) {
+TEST_F(DistLockCatalogReplSetTest, StopPingWriteError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->stopPing(operationContext(), "");
+ auto status = _distLockCatalog.stopPing(operationContext(), "");
ASSERT_EQUALS(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1411,9 +1485,9 @@ TEST_F(DistLockCatalogTest, StopPingWriteError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, StopPingWriteConcernError) {
+TEST_F(DistLockCatalogReplSetTest, StopPingWriteConcernError) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->stopPing(operationContext(), "");
+ auto status = _distLockCatalog.stopPing(operationContext(), "");
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1432,9 +1506,9 @@ TEST_F(DistLockCatalogTest, StopPingWriteConcernError) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, StopPingUnsupportedWriteConcernResponse) {
+TEST_F(DistLockCatalogReplSetTest, StopPingUnsupportedWriteConcernResponse) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->stopPing(operationContext(), "");
+ auto status = _distLockCatalog.stopPing(operationContext(), "");
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1454,9 +1528,9 @@ TEST_F(DistLockCatalogTest, StopPingUnsupportedWriteConcernResponse) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, StopPingUnsupportedResponseFormat) {
+TEST_F(DistLockCatalogReplSetTest, StopPingUnsupportedResponseFormat) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->stopPing(operationContext(), "");
+ auto status = _distLockCatalog.stopPing(operationContext(), "");
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
});
@@ -1468,10 +1542,10 @@ TEST_F(DistLockCatalogTest, StopPingUnsupportedResponseFormat) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, BasicGetPing) {
+TEST_F(DistLockCatalogReplSetTest, BasicGetPing) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
Date_t ping(dateFromISOString("2015-05-26T13:06:27.293Z").getValue());
- auto resultStatus = distLockCatalog()->getPing(operationContext(), "test");
+ auto resultStatus = _distLockCatalog.getPing(operationContext(), "test");
ASSERT_OK(resultStatus.getStatus());
const auto& pingDoc = resultStatus.getValue();
@@ -1505,23 +1579,23 @@ TEST_F(DistLockCatalogTest, BasicGetPing) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GetPingTargetError) {
+TEST_F(DistLockCatalogReplSetTest, GetPingTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
- auto status = distLockCatalog()->getPing(operationContext(), "").getStatus();
+ auto status = _distLockCatalog.getPing(operationContext(), "").getStatus();
ASSERT_EQUALS(ErrorCodes::InternalError, status.code());
}
-TEST_F(DistLockCatalogTest, GetPingRunCmdError) {
+TEST_F(DistLockCatalogReplSetTest, GetPingRunCmdError) {
shutdownExecutorPool();
- auto status = distLockCatalog()->getPing(operationContext(), "").getStatus();
+ auto status = _distLockCatalog.getPing(operationContext(), "").getStatus();
ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, status.code());
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogTest, GetPingNotFound) {
+TEST_F(DistLockCatalogReplSetTest, GetPingNotFound) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->getPing(operationContext(), "").getStatus();
+ auto status = _distLockCatalog.getPing(operationContext(), "").getStatus();
ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1533,9 +1607,9 @@ TEST_F(DistLockCatalogTest, GetPingNotFound) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GetPingUnsupportedFormat) {
+TEST_F(DistLockCatalogReplSetTest, GetPingUnsupportedFormat) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->getPing(operationContext(), "test").getStatus();
+ auto status = _distLockCatalog.getPing(operationContext(), "test").getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1556,10 +1630,10 @@ TEST_F(DistLockCatalogTest, GetPingUnsupportedFormat) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, BasicGetLockByTS) {
+TEST_F(DistLockCatalogReplSetTest, BasicGetLockByTS) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID ts("555f99712c99a78c5b083358");
- auto resultStatus = distLockCatalog()->getLockByTS(operationContext(), ts);
+ auto resultStatus = _distLockCatalog.getLockByTS(operationContext(), ts);
ASSERT_OK(resultStatus.getStatus());
const auto& lockDoc = resultStatus.getValue();
@@ -1591,22 +1665,22 @@ TEST_F(DistLockCatalogTest, BasicGetLockByTS) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GetLockByTSTargetError) {
+TEST_F(DistLockCatalogReplSetTest, GetLockByTSTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
- auto status = distLockCatalog()->getLockByTS(operationContext(), OID()).getStatus();
+ auto status = _distLockCatalog.getLockByTS(operationContext(), OID()).getStatus();
ASSERT_EQUALS(ErrorCodes::InternalError, status.code());
}
-TEST_F(DistLockCatalogTest, GetLockByTSRunCmdError) {
+TEST_F(DistLockCatalogReplSetTest, GetLockByTSRunCmdError) {
shutdownExecutorPool();
- auto status = distLockCatalog()->getLockByTS(operationContext(), OID()).getStatus();
+ auto status = _distLockCatalog.getLockByTS(operationContext(), OID()).getStatus();
ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, status.code());
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogTest, GetLockByTSNotFound) {
+TEST_F(DistLockCatalogReplSetTest, GetLockByTSNotFound) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->getLockByTS(operationContext(), OID()).getStatus();
+ auto status = _distLockCatalog.getLockByTS(operationContext(), OID()).getStatus();
ASSERT_EQUALS(ErrorCodes::LockNotFound, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1618,9 +1692,9 @@ TEST_F(DistLockCatalogTest, GetLockByTSNotFound) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GetLockByTSUnsupportedFormat) {
+TEST_F(DistLockCatalogReplSetTest, GetLockByTSUnsupportedFormat) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->getLockByTS(operationContext(), OID()).getStatus();
+ auto status = _distLockCatalog.getLockByTS(operationContext(), OID()).getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1641,10 +1715,10 @@ TEST_F(DistLockCatalogTest, GetLockByTSUnsupportedFormat) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, BasicGetLockByName) {
+TEST_F(DistLockCatalogReplSetTest, BasicGetLockByName) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID ts("555f99712c99a78c5b083358");
- auto resultStatus = distLockCatalog()->getLockByName(operationContext(), "abc");
+ auto resultStatus = _distLockCatalog.getLockByName(operationContext(), "abc");
ASSERT_OK(resultStatus.getStatus());
const auto& lockDoc = resultStatus.getValue();
@@ -1678,23 +1752,23 @@ TEST_F(DistLockCatalogTest, BasicGetLockByName) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GetLockByNameTargetError) {
+TEST_F(DistLockCatalogReplSetTest, GetLockByNameTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
- auto status = distLockCatalog()->getLockByName(operationContext(), "x").getStatus();
+ auto status = _distLockCatalog.getLockByName(operationContext(), "x").getStatus();
ASSERT_EQUALS(ErrorCodes::InternalError, status.code());
}
-TEST_F(DistLockCatalogTest, GetLockByNameRunCmdError) {
+TEST_F(DistLockCatalogReplSetTest, GetLockByNameRunCmdError) {
shutdownExecutorPool();
- auto status = distLockCatalog()->getLockByName(operationContext(), "x").getStatus();
+ auto status = _distLockCatalog.getLockByName(operationContext(), "x").getStatus();
ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, status.code());
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogTest, GetLockByNameNotFound) {
+TEST_F(DistLockCatalogReplSetTest, GetLockByNameNotFound) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->getLockByName(operationContext(), "x").getStatus();
+ auto status = _distLockCatalog.getLockByName(operationContext(), "x").getStatus();
ASSERT_EQUALS(ErrorCodes::LockNotFound, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -1706,9 +1780,9 @@ TEST_F(DistLockCatalogTest, GetLockByNameNotFound) {
future.default_timed_get();
}
-TEST_F(DistLockCatalogTest, GetLockByNameUnsupportedFormat) {
+TEST_F(DistLockCatalogReplSetTest, GetLockByNameUnsupportedFormat) {
auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
- auto status = distLockCatalog()->getLockByName(operationContext(), "x").getStatus();
+ auto status = _distLockCatalog.getLockByName(operationContext(), "x").getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
});
diff --git a/src/mongo/s/catalog/dist_lock_manager.cpp b/src/mongo/db/s/dist_lock_manager.cpp
index f97ce124b7a..00893d912e4 100644
--- a/src/mongo/s/catalog/dist_lock_manager.cpp
+++ b/src/mongo/db/s/dist_lock_manager.cpp
@@ -31,11 +31,17 @@
#include "mongo/platform/basic.h"
-#include "mongo/s/catalog/dist_lock_manager.h"
+#include "mongo/db/s/dist_lock_manager.h"
-#include <memory>
+#include "mongo/db/operation_context.h"
namespace mongo {
+namespace {
+
+const auto getDistLockManager =
+ ServiceContext::declareDecoration<std::unique_ptr<DistLockManager>>();
+
+} // namespace
const Seconds DistLockManager::kDefaultLockTimeout(20);
const Milliseconds DistLockManager::kSingleLockAttemptTimeout(0);
@@ -71,6 +77,21 @@ DistLockManager::ScopedDistLock& DistLockManager::ScopedDistLock::operator=(
return *this;
}
+Status DistLockManager::ScopedDistLock::checkStatus() {
+ invariant(_lockManager);
+ return _lockManager->checkStatus(_opCtx, _lockID);
+}
+
+DistLockManager* DistLockManager::get(OperationContext* opCtx) {
+ return getDistLockManager(opCtx->getServiceContext()).get();
+}
+
+void DistLockManager::create(ServiceContext* service,
+ std::unique_ptr<DistLockManager> distLockManager) {
+ invariant(!getDistLockManager(service));
+ getDistLockManager(service) = std::move(distLockManager);
+}
+
StatusWith<DistLockManager::ScopedDistLock> DistLockManager::lock(OperationContext* opCtx,
StringData name,
StringData whyMessage,
@@ -83,12 +104,4 @@ StatusWith<DistLockManager::ScopedDistLock> DistLockManager::lock(OperationConte
return DistLockManager::ScopedDistLock(opCtx, std::move(distLockHandleStatus.getValue()), this);
}
-Status DistLockManager::ScopedDistLock::checkStatus() {
- if (!_lockManager) {
- return Status(ErrorCodes::IllegalOperation, "no lock manager, lock was not acquired");
- }
-
- return _lockManager->checkStatus(_opCtx, _lockID);
-}
-
} // namespace mongo
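
The get()/create() pair added above is the usual ServiceContext decoration idiom. A self-contained sketch of the same pattern for a hypothetical service type (MyService, installMyService, and fetchMyService are illustrative names, not part of this patch):

#include <memory>

#include "mongo/db/operation_context.h"
#include "mongo/db/service_context.h"

namespace mongo {

class MyService {};  // hypothetical decorated type

namespace {
// One decoration slot per ServiceContext, default-constructed to nullptr.
const auto getMyService =
    ServiceContext::declareDecoration<std::unique_ptr<MyService>>();
}  // namespace

// Installs the instance exactly once, mirroring DistLockManager::create().
void installMyService(ServiceContext* service, std::unique_ptr<MyService> svc) {
    invariant(!getMyService(service));
    getMyService(service) = std::move(svc);
}

// Fetches the instance through the OperationContext, mirroring get().
MyService* fetchMyService(OperationContext* opCtx) {
    return getMyService(opCtx->getServiceContext()).get();
}

}  // namespace mongo
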
diff --git a/src/mongo/s/catalog/dist_lock_manager.h b/src/mongo/db/s/dist_lock_manager.h
index 97a63a8f096..61da53109b1 100644
--- a/src/mongo/s/catalog/dist_lock_manager.h
+++ b/src/mongo/db/s/dist_lock_manager.h
@@ -31,15 +31,12 @@
#include "mongo/base/string_data.h"
#include "mongo/bson/oid.h"
+#include "mongo/db/service_context.h"
#include "mongo/stdx/chrono.h"
namespace mongo {
using DistLockHandle = OID;
-class OperationContext;
-class Status;
-template <typename T>
-class StatusWith;
/**
* Interface for handling distributed locks.
@@ -99,6 +96,12 @@ public:
virtual ~DistLockManager() = default;
/**
+ * Retrieves the DistLockManager singleton for the node.
+ */
+ static DistLockManager* get(OperationContext* opCtx);
+ static void create(ServiceContext* service, std::unique_ptr<DistLockManager> distLockManager);
+
+ /**
* Performs bootstrapping for the manager. Implementations do not need to guarantee
* thread safety, so callers should employ proper synchronization when calling this method.
*/
@@ -176,7 +179,7 @@ public:
*/
virtual void unlockAll(OperationContext* opCtx, const std::string& processID) = 0;
-protected:
+private:
/**
* Checks if the lockHandle still exists in the config server.
*/
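
With the accessor in place, a call site would look roughly like the sketch below; runUnderDistLock and the lock name are hypothetical, while lock(), kDefaultLockTimeout, and ScopedDistLock::checkStatus() come from this header:

#include "mongo/db/s/dist_lock_manager.h"

namespace mongo {

// Hypothetical call site; only the declarations above are assumed.
Status runUnderDistLock(OperationContext* opCtx) {
    auto scopedLock = DistLockManager::get(opCtx)->lock(
        opCtx, "exampleLock", "demo critical section",
        DistLockManager::kDefaultLockTimeout);
    if (!scopedLock.isOK()) {
        return scopedLock.getStatus();
    }

    // ... work that requires the distributed lock ...

    // Re-verify ownership before committing anything durable.
    return scopedLock.getValue().checkStatus();
}

}  // namespace mongo
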
diff --git a/src/mongo/s/catalog/dist_lock_manager_mock.cpp b/src/mongo/db/s/dist_lock_manager_mock.cpp
index aa910150612..951bd4e9c97 100644
--- a/src/mongo/s/catalog/dist_lock_manager_mock.cpp
+++ b/src/mongo/db/s/dist_lock_manager_mock.cpp
@@ -31,7 +31,7 @@
#include "mongo/platform/basic.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
+#include "mongo/db/s/dist_lock_manager_mock.h"
#include <algorithm>
@@ -40,10 +40,9 @@
#include "mongo/util/time_support.h"
namespace mongo {
-
namespace {
-void NoLockFuncSet(StringData name, StringData whyMessage, Milliseconds waitFor) {
+void noLockFuncSet(StringData name, StringData whyMessage, Milliseconds waitFor) {
FAIL(str::stream() << "Lock not expected to be called. "
<< "Name: " << name << ", whyMessage: " << whyMessage
<< ", waitFor: " << waitFor);
@@ -51,8 +50,10 @@ void NoLockFuncSet(StringData name, StringData whyMessage, Milliseconds waitFor)
} // namespace
-DistLockManagerMock::DistLockManagerMock(std::unique_ptr<DistLockCatalog> catalog)
- : _catalog(std::move(catalog)), _lockReturnStatus{Status::OK()}, _lockChecker{NoLockFuncSet} {}
+DistLockManagerMock::DistLockManagerMock()
+ : _lockReturnStatus{Status::OK()}, _lockChecker{noLockFuncSet} {}
+
+DistLockManagerMock::~DistLockManagerMock() = default;
void DistLockManagerMock::startUp() {}
@@ -70,7 +71,7 @@ StatusWith<DistLockHandle> DistLockManagerMock::lockWithSessionID(OperationConte
const OID& lockSessionID,
Milliseconds waitFor) {
_lockChecker(name, whyMessage, waitFor);
- _lockChecker = NoLockFuncSet;
+ _lockChecker = noLockFuncSet;
if (!_lockReturnStatus.isOK()) {
return _lockReturnStatus;
diff --git a/src/mongo/s/catalog/dist_lock_manager_mock.h b/src/mongo/db/s/dist_lock_manager_mock.h
index bf8081b50c5..824a7a3b0ca 100644
--- a/src/mongo/s/catalog/dist_lock_manager_mock.h
+++ b/src/mongo/db/s/dist_lock_manager_mock.h
@@ -30,19 +30,16 @@
#pragma once
#include <functional>
-#include <string>
#include <vector>
-#include "mongo/s/catalog/dist_lock_catalog.h"
-#include "mongo/s/catalog/dist_lock_manager.h"
+#include "mongo/db/s/dist_lock_manager.h"
namespace mongo {
class DistLockManagerMock : public DistLockManager {
public:
- DistLockManagerMock(std::unique_ptr<DistLockCatalog> catalog);
-
- virtual ~DistLockManagerMock() = default;
+ DistLockManagerMock();
+ virtual ~DistLockManagerMock();
void startUp() override;
void shutDown(OperationContext* opCtx) override;
@@ -67,25 +64,19 @@ public:
void expectLock(LockFunc checkerFunc, Status lockStatus);
-protected:
void unlock(OperationContext* opCtx, const DistLockHandle& lockHandle) override;
void unlock(OperationContext* opCtx,
const DistLockHandle& lockHandle,
StringData name) override;
- Status checkStatus(OperationContext* opCtx, const DistLockHandle& lockHandle) override;
-
private:
struct LockInfo {
DistLockHandle lockID;
std::string name;
};
- /**
- * Unused, but needed so that test code mirrors the ownership semantics of production code.
- */
- const std::unique_ptr<DistLockCatalog> _catalog;
+ Status checkStatus(OperationContext* opCtx, const DistLockHandle& lockHandle) override;
std::vector<LockInfo> _locks;
Status _lockReturnStatus;
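
A hedged sketch of driving the slimmed-down mock from a test; the checker signature is taken from the noLockFuncSet default in the .cpp above, and the assertions are illustrative:

// Illustrative test body, not part of the patch.
DistLockManagerMock mockMgr;  // no DistLockCatalog argument anymore
mockMgr.expectLock(
    [](StringData name, StringData whyMessage, Milliseconds waitFor) {
        ASSERT_EQUALS("exampleLock", name);
    },
    Status::OK());  // the next lock attempt reports success
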
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.cpp b/src/mongo/db/s/dist_lock_manager_replset.cpp
index d0017bcc630..a3c09d5fbdc 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.cpp
+++ b/src/mongo/db/s/dist_lock_manager_replset.cpp
@@ -31,15 +31,9 @@
#include "mongo/platform/basic.h"
-#include "mongo/s/catalog/replset_dist_lock_manager.h"
+#include "mongo/db/s/dist_lock_manager_replset.h"
-#include <memory>
-
-#include "mongo/base/status.h"
-#include "mongo/base/status_with.h"
-#include "mongo/db/service_context.h"
#include "mongo/logv2/log.h"
-#include "mongo/s/catalog/dist_lock_catalog.h"
#include "mongo/s/catalog/type_lockpings.h"
#include "mongo/s/catalog/type_locks.h"
#include "mongo/s/client/shard_registry.h"
@@ -53,22 +47,17 @@
#include "mongo/util/timer.h"
namespace mongo {
-
-MONGO_FAIL_POINT_DEFINE(setDistLockTimeout);
-
-using std::string;
-using std::unique_ptr;
-
namespace {
-MONGO_FAIL_POINT_DEFINE(disableReplSetDistLockManager);
-
// How many times to retry acquiring the lock after the first attempt fails
const int kMaxNumLockAcquireRetries = 2;
// How frequently to poll the distributed lock when it is found to be locked
const Milliseconds kLockRetryInterval(500);
+MONGO_FAIL_POINT_DEFINE(setDistLockTimeout);
+MONGO_FAIL_POINT_DEFINE(disableReplSetDistLockManager);
+
} // namespace
const Seconds ReplSetDistLockManager::kDistLockPingInterval{30};
@@ -76,7 +65,7 @@ const Minutes ReplSetDistLockManager::kDistLockExpirationTime{15};
ReplSetDistLockManager::ReplSetDistLockManager(ServiceContext* globalContext,
StringData processID,
- unique_ptr<DistLockCatalog> catalog,
+ std::unique_ptr<DistLockCatalog> catalog,
Milliseconds pingInterval,
Milliseconds lockExpiration)
: _serviceContext(globalContext),
@@ -351,7 +340,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
// until the lockTryInterval has been reached. If a network error occurs at each lock
// acquisition attempt, the lock acquisition will be retried immediately.
while (waitFor <= Milliseconds::zero() || Milliseconds(timer.millis()) < waitFor) {
- const string who = str::stream() << _processID << ":" << getThreadName();
+ const std::string who = str::stream() << _processID << ":" << getThreadName();
auto lockExpiration = _lockExpiration;
setDistLockTimeout.execute([&](const BSONObj& data) {
@@ -517,7 +506,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
StatusWith<DistLockHandle> ReplSetDistLockManager::tryLockWithLocalWriteConcern(
OperationContext* opCtx, StringData name, StringData whyMessage, const OID& lockSessionID) {
- const string who = str::stream() << _processID << ":" << getThreadName();
+ const std::string who = str::stream() << _processID << ":" << getThreadName();
LOGV2_DEBUG(22662,
1,
@@ -620,4 +609,14 @@ void ReplSetDistLockManager::queueUnlock(const DistLockHandle& lockSessionID,
_unlockList.push_back(std::make_pair(lockSessionID, name));
}
+ReplSetDistLockManager::DistLockPingInfo::DistLockPingInfo() = default;
+
+ReplSetDistLockManager::DistLockPingInfo::DistLockPingInfo(
+ StringData idArg, Date_t lastPingArg, Date_t remoteArg, OID tsArg, OID electionIdArg)
+ : processId(idArg.toString()),
+ lastPing(lastPingArg),
+ configLocalTime(remoteArg),
+ lockSessionId(std::move(tsArg)),
+ electionId(std::move(electionIdArg)) {}
+
} // namespace mongo
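
Both fail points are now file-local but remain reachable by name. An illustrative way a test might shorten the lock expiration, assuming the standard FailPointEnableBlock helper and that setDistLockTimeout reads a timeoutMs field (the field name is an assumption, not shown in this hunk):

// Illustrative only: shorten the dist lock expiration for one test section.
FailPointEnableBlock fp("setDistLockTimeout", BSON("timeoutMs" << 1000));
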
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.h b/src/mongo/db/s/dist_lock_manager_replset.h
index 1dfd878703b..3c0ce75c6c2 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.h
+++ b/src/mongo/db/s/dist_lock_manager_replset.h
@@ -30,24 +30,17 @@
#pragma once
#include <deque>
-#include <memory>
-#include <string>
-#include "mongo/base/string_data.h"
+#include "mongo/db/s/dist_lock_catalog.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/platform/mutex.h"
-#include "mongo/s/catalog/dist_lock_catalog.h"
-#include "mongo/s/catalog/dist_lock_manager.h"
-#include "mongo/s/catalog/dist_lock_ping_info.h"
-#include "mongo/stdx/chrono.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/thread.h"
#include "mongo/stdx/unordered_map.h"
namespace mongo {
-class ServiceContext;
-
-class ReplSetDistLockManager final : public DistLockManager {
+class ReplSetDistLockManager : public DistLockManager {
public:
// How frequently the dist lock pinger thread should run and write liveness information about
// this instance of the dist lock manager.
@@ -88,10 +81,9 @@ public:
void unlockAll(OperationContext* opCtx, const std::string& processID) override;
-protected:
+private:
Status checkStatus(OperationContext* opCtx, const DistLockHandle& lockSessionID) override;
-private:
/**
* Queues a lock to be unlocked asynchronously, retrying until the unlock no longer errors.
*/
@@ -115,6 +107,34 @@ private:
const LocksType lockDoc,
const Milliseconds& lockExpiration);
+ /**
+ * Data structure for storing information about distributed lock pings.
+ */
+ struct DistLockPingInfo {
+ DistLockPingInfo();
+ DistLockPingInfo(StringData processId,
+ Date_t lastPing,
+ Date_t configLocalTime,
+ OID lockSessionId,
+ OID electionId);
+
+ // the process id of the last known owner of the lock.
+ std::string processId;
+
+ // the ping value from the last owner of the lock.
+ Date_t lastPing;
+
+ // the config server local time when this object was updated.
+ Date_t configLocalTime;
+
+ // last known owner of the lock.
+ OID lockSessionId;
+
+ // the election id of the config server when this object was updated.
+ // Note: unused by legacy dist lock.
+ OID electionId;
+ };
+
//
// All member variables are labeled with one of the following codes indicating the
// synchronization rules for accessing them.
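
Note: DistLockPingInfo, formerly published from its own header (dist_lock_ping_info.h, removed from the includes above), is now a private detail of ReplSetDistLockManager. A sketch of how the manager can record a ping observation per process, assuming a member map named _pingHistory (the member name is an assumption; it is not shown in this hunk):

    // Remember the last ping seen for each process so a later lock attempt can
    // judge whether the current owner's lease has gone stale.
    stdx::unordered_map<std::string, DistLockPingInfo> _pingHistory;

    void recordPing(StringData processId, Date_t lastPing, Date_t configTime, OID sessionId) {
        _pingHistory[processId.toString()] =
            DistLockPingInfo(processId, lastPing, configTime, sessionId, OID());
    }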
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp b/src/mongo/db/s/dist_lock_manager_replset_test.cpp
index 64020eeb9f4..94e0a7be9a9 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
+++ b/src/mongo/db/s/dist_lock_manager_replset_test.cpp
@@ -31,16 +31,15 @@
#include <boost/optional.hpp>
#include <map>
-#include <memory>
#include <string>
#include <vector>
#include "mongo/bson/json.h"
+#include "mongo/db/s/dist_lock_catalog_mock.h"
+#include "mongo/db/s/dist_lock_manager_replset.h"
#include "mongo/db/s/shard_server_test_fixture.h"
#include "mongo/platform/mutex.h"
#include "mongo/s/balancer_configuration.h"
-#include "mongo/s/catalog/dist_lock_catalog_mock.h"
-#include "mongo/s/catalog/replset_dist_lock_manager.h"
#include "mongo/s/catalog/sharding_catalog_client_mock.h"
#include "mongo/s/catalog/type_lockpings.h"
#include "mongo/s/catalog/type_locks.h"
@@ -86,7 +85,7 @@ std::string vectorToString(const std::vector<OID>& list) {
* Basic fixture for ReplSetDistLockManager that starts it up before the test begins
* and shuts it down when a test finishes.
*/
-class ReplSetDistLockManagerFixture : public ShardServerTestFixture {
+class DistLockManagerReplSetTest : public ShardServerTestFixture {
protected:
void tearDown() override {
// Don't care about what shutDown passes to stopPing here.
@@ -95,32 +94,26 @@ protected:
ShardServerTestFixture::tearDown();
}
- std::unique_ptr<DistLockManager> makeDistLockManager(
- std::unique_ptr<DistLockCatalog> distLockCatalog) override {
- invariant(distLockCatalog);
+ std::unique_ptr<DistLockManager> makeDistLockManager() override {
+ auto distLockCatalogMock = std::make_unique<DistLockCatalogMock>();
+ _distLockCatalogMock = distLockCatalogMock.get();
return std::make_unique<ReplSetDistLockManager>(getServiceContext(),
_processID,
- std::move(distLockCatalog),
+ std::move(distLockCatalogMock),
kPingInterval,
kLockExpiration);
}
- std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) override {
- return std::make_unique<ShardingCatalogClientMock>(std::move(distLockManager));
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() override {
+ return std::make_unique<ShardingCatalogClientMock>();
}
std::unique_ptr<BalancerConfiguration> makeBalancerConfiguration() override {
return std::make_unique<BalancerConfiguration>();
}
- /**
- * Returns the mocked catalog used by the lock manager being tested.
- */
- DistLockCatalogMock* getMockCatalog() {
- auto distLockCatalogMock = dynamic_cast<DistLockCatalogMock*>(distLockCatalog());
- invariant(distLockCatalogMock);
- return distLockCatalogMock;
+ DistLockCatalogMock* getMockCatalog() const {
+ return _distLockCatalogMock;
}
/**
@@ -131,7 +124,9 @@ protected:
}
private:
- std::string _processID = "test";
+ const std::string _processID = "test";
+
+ DistLockCatalogMock* _distLockCatalogMock{nullptr};
};
/**
@@ -140,7 +135,7 @@ private:
* 2. Unlock (on destructor of ScopedDistLock).
* 3. Check lock id used in lock and unlock are the same.
*/
-TEST_F(ReplSetDistLockManagerFixture, BasicLockLifeCycle) {
+TEST_F(DistLockManagerReplSetTest, BasicLockLifeCycle) {
std::string lockName("test");
Date_t now(Date_t::now());
std::string whyMsg("because");
@@ -178,8 +173,11 @@ TEST_F(ReplSetDistLockManagerFixture, BasicLockLifeCycle) {
OID unlockSessionIDPassed;
{
- auto lockStatus = distLock()->lock(
- operationContext(), lockName, whyMsg, DistLockManager::kSingleLockAttemptTimeout);
+ auto lockStatus = DistLockManager::get(operationContext())
+ ->lock(operationContext(),
+ lockName,
+ whyMsg,
+ DistLockManager::kSingleLockAttemptTimeout);
ASSERT_OK(lockStatus.getStatus());
getMockCatalog()->expectNoGrabLock();
@@ -202,7 +200,7 @@ TEST_F(ReplSetDistLockManagerFixture, BasicLockLifeCycle) {
* 3. Wait for unlock to be called.
* 4. Check that the lockSessionID used for every unlock is the same as the one used to grab the lock.
*/
-TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
+TEST_F(DistLockManagerReplSetTest, MustUnlockOnLockError) {
std::string lockName("test");
std::string me("me");
OID lastTS;
@@ -241,8 +239,9 @@ TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
},
Status::OK());
- auto lockStatus =
- distLock()->lock(operationContext(), lockName, whyMsg, Milliseconds(10)).getStatus();
+ auto lockStatus = DistLockManager::get(operationContext())
+ ->lock(operationContext(), lockName, whyMsg, Milliseconds(10))
+ .getStatus();
ASSERT_NOT_OK(lockStatus);
ASSERT_EQUALS(ErrorCodes::ExceededMemoryLimit, lockStatus.code());
@@ -258,7 +257,7 @@ TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
// Join the background thread before trying to call asserts. Shutdown calls
// stopPing and we don't care in this test.
getMockCatalog()->expectStopPing([](StringData) {}, Status::OK());
- distLock()->shutDown(operationContext());
+ DistLockManager::get(operationContext())->shutDown(operationContext());
// No assert until shutDown has been called to make sure that the background thread
// won't be trying to access the local variables that were captured by lambdas that
@@ -276,7 +275,7 @@ TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
* 2. Wait until ping was called at least 3 times.
* 3. Check that correct process is being pinged.
*/
-TEST_F(ReplSetDistLockManagerFixture, LockPinging) {
+TEST_F(DistLockManagerReplSetTest, LockPinging) {
auto testMutex = MONGO_MAKE_LATCH();
stdx::condition_variable ping3TimesCV;
std::vector<std::string> processIDList;
@@ -304,7 +303,7 @@ TEST_F(ReplSetDistLockManagerFixture, LockPinging) {
// Join the background thread before trying to call asserts. Shutdown calls
// stopPing and we don't care in this test.
getMockCatalog()->expectStopPing([](StringData) {}, Status::OK());
- distLock()->shutDown(operationContext());
+ DistLockManager::get(operationContext())->shutDown(operationContext());
// No assert until shutDown has been called to make sure that the background thread
// won't be trying to access the local variables that were captured by lambdas that
@@ -326,7 +325,7 @@ TEST_F(ReplSetDistLockManagerFixture, LockPinging) {
* 3. Unlock finally succeeds at the 4th time.
* 4. Check that the lockSessionID used for every unlock is the same as the one used to grab the lock.
*/
-TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
+TEST_F(DistLockManagerReplSetTest, UnlockUntilNoError) {
auto unlockMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
const unsigned int kUnlockErrorCount = 3;
@@ -369,7 +368,10 @@ TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
StringData why) { lockSessionID = lockSessionIDArg; },
retLockDoc);
- { auto lockStatus = distLock()->lock(operationContext(), "test", "why", Milliseconds(0)); }
+ {
+ auto lockStatus = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "test", "why", Milliseconds(0));
+ }
bool didTimeout = false;
{
@@ -383,7 +385,7 @@ TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
// Join the background thread before trying to call asserts. Shutdown calls
// stopPing and we don't care in this test.
getMockCatalog()->expectStopPing([](StringData) {}, Status::OK());
- distLock()->shutDown(operationContext());
+ DistLockManager::get(operationContext())->shutDown(operationContext());
// No assert until shutDown has been called to make sure that the background thread
// won't be trying to access the local variables that were captured by lambdas that
@@ -406,7 +408,7 @@ TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
* than once. This implies that both ids have been retried at least 3 times.
* 5. Check that the lock session id used when lock was called matches the one used for unlock.
*/
-TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
+TEST_F(DistLockManagerReplSetTest, MultipleQueuedUnlock) {
auto testMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
std::vector<OID> lockSessionIDPassed;
@@ -466,8 +468,10 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
retLockDoc);
{
- auto lockStatus = distLock()->lock(operationContext(), "test", "why", Milliseconds(0));
- auto otherStatus = distLock()->lock(operationContext(), "lock", "why", Milliseconds(0));
+ auto lockStatus = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "test", "why", Milliseconds(0));
+ auto otherStatus = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "lock", "why", Milliseconds(0));
}
bool didTimeout = false;
@@ -483,7 +487,7 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
// Join the background thread before trying to call asserts. Shutdown calls
// stopPing and we don't care in this test.
getMockCatalog()->expectStopPing([](StringData) {}, Status::OK());
- distLock()->shutDown(operationContext());
+ DistLockManager::get(operationContext())->shutDown(operationContext());
// No assert until shutDown has been called to make sure that the background thread
// won't be trying to access the local variables that were captured by lambdas that
@@ -500,7 +504,7 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
}
}
-TEST_F(ReplSetDistLockManagerFixture, CleanupPingOnShutdown) {
+TEST_F(DistLockManagerReplSetTest, CleanupPingOnShutdown) {
bool stopPingCalled = false;
getMockCatalog()->expectStopPing(
[this, &stopPingCalled](StringData processID) {
@@ -509,11 +513,11 @@ TEST_F(ReplSetDistLockManagerFixture, CleanupPingOnShutdown) {
},
Status::OK());
- distLock()->shutDown(operationContext());
+ DistLockManager::get(operationContext())->shutDown(operationContext());
ASSERT_TRUE(stopPingCalled);
}
-TEST_F(ReplSetDistLockManagerFixture, CheckLockStatusOK) {
+TEST_F(DistLockManagerReplSetTest, CheckLockStatusOK) {
LocksType retLockDoc;
retLockDoc.setName("test");
retLockDoc.setState(LocksType::LOCKED);
@@ -532,7 +536,8 @@ TEST_F(ReplSetDistLockManagerFixture, CheckLockStatusOK) {
retLockDoc);
- auto lockStatus = distLock()->lock(operationContext(), "a", "", Milliseconds(0));
+ auto lockStatus = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "a", "", Milliseconds(0));
ASSERT_OK(lockStatus.getStatus());
getMockCatalog()->expectNoGrabLock();
@@ -551,7 +556,7 @@ TEST_F(ReplSetDistLockManagerFixture, CheckLockStatusOK) {
ASSERT_OK(scopedLock.checkStatus());
}
-TEST_F(ReplSetDistLockManagerFixture, CheckLockStatusNoLongerOwn) {
+TEST_F(DistLockManagerReplSetTest, CheckLockStatusNoLongerOwn) {
LocksType retLockDoc;
retLockDoc.setName("test");
retLockDoc.setState(LocksType::LOCKED);
@@ -570,7 +575,8 @@ TEST_F(ReplSetDistLockManagerFixture, CheckLockStatusNoLongerOwn) {
retLockDoc);
- auto lockStatus = distLock()->lock(operationContext(), "a", "", Milliseconds(0));
+ auto lockStatus = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "a", "", Milliseconds(0));
ASSERT_OK(lockStatus.getStatus());
getMockCatalog()->expectNoGrabLock();
@@ -590,7 +596,7 @@ TEST_F(ReplSetDistLockManagerFixture, CheckLockStatusNoLongerOwn) {
ASSERT_NOT_OK(scopedLock.checkStatus());
}
-TEST_F(ReplSetDistLockManagerFixture, CheckLockStatusError) {
+TEST_F(DistLockManagerReplSetTest, CheckLockStatusError) {
LocksType retLockDoc;
retLockDoc.setName("test");
retLockDoc.setState(LocksType::LOCKED);
@@ -609,7 +615,8 @@ TEST_F(ReplSetDistLockManagerFixture, CheckLockStatusError) {
retLockDoc);
- auto lockStatus = distLock()->lock(operationContext(), "a", "", Milliseconds(0));
+ auto lockStatus = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "a", "", Milliseconds(0));
ASSERT_OK(lockStatus.getStatus());
getMockCatalog()->expectNoGrabLock();
@@ -639,7 +646,7 @@ TEST_F(ReplSetDistLockManagerFixture, CheckLockStatusError) {
* 5. 2nd attempt to grab lock still fails for the same reason.
* 6. But since the ping is not fresh anymore, the dist lock manager should overtake the lock.
*/
-TEST_F(ReplSetDistLockManagerFixture, LockOvertakingAfterLockExpiration) {
+TEST_F(DistLockManagerReplSetTest, LockOvertakingAfterLockExpiration) {
OID lastTS;
getMockCatalog()->expectGrabLock(
@@ -671,7 +678,9 @@ TEST_F(ReplSetDistLockManagerFixture, LockOvertakingAfterLockExpiration) {
// First attempt will record the ping data.
{
- auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::LockBusy, status.code());
}
@@ -701,7 +710,8 @@ TEST_F(ReplSetDistLockManagerFixture, LockOvertakingAfterLockExpiration) {
// Second attempt should overtake lock.
{
- auto lockStatus = distLock()->lock(operationContext(), "bar", "foo", Milliseconds(0));
+ auto lockStatus = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "foo", Milliseconds(0));
ASSERT_OK(lockStatus.getStatus());
@@ -723,7 +733,7 @@ TEST_F(ReplSetDistLockManagerFixture, LockOvertakingAfterLockExpiration) {
* 1. Attempt to grab lock with lockSessionID fails because lock is already owned.
* 2. Then the lock is overtaken because the lockSessionID matches the lock owner.
*/
-TEST_F(ReplSetDistLockManagerFixture, LockOvertakingWithSessionID) {
+TEST_F(DistLockManagerReplSetTest, LockOvertakingWithSessionID) {
OID passedLockSessionID("5572007fda9e476582bf3716");
LocksType currentLockDoc;
@@ -769,14 +779,16 @@ TEST_F(ReplSetDistLockManagerFixture, LockOvertakingWithSessionID) {
},
currentLockDoc);
- auto distLockHandleStatus = distLock()->lockWithSessionID(
- operationContext(), "bar", "foo", passedLockSessionID, Milliseconds(0));
+ auto distLockHandleStatus =
+ DistLockManager::get(operationContext())
+ ->lockWithSessionID(
+ operationContext(), "bar", "foo", passedLockSessionID, Milliseconds(0));
ASSERT_OK(distLockHandleStatus.getStatus());
getMockCatalog()->expectNoGrabLock();
}
-TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfExpirationHasNotElapsed) {
+TEST_F(DistLockManagerReplSetTest, CannotOvertakeIfExpirationHasNotElapsed) {
getMockCatalog()->expectGrabLock(
[](StringData, const OID&, StringData, StringData, Date_t, StringData) {
// Don't care.
@@ -805,7 +817,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfExpirationHasNotElapsed) {
// First attempt will record the ping data.
{
- auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::LockBusy, status.code());
}
@@ -816,13 +830,15 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfExpirationHasNotElapsed) {
// Second attempt should still not overtake lock.
{
- auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::LockBusy, status.code());
}
}
-TEST_F(ReplSetDistLockManagerFixture, GetPingErrorWhileOvertaking) {
+TEST_F(DistLockManagerReplSetTest, GetPingErrorWhileOvertaking) {
getMockCatalog()->expectGrabLock(
[](StringData, const OID&, StringData, StringData, Date_t, StringData) {
// Don't care
@@ -844,12 +860,14 @@ TEST_F(ReplSetDistLockManagerFixture, GetPingErrorWhileOvertaking) {
[](StringData process) { ASSERT_EQUALS("otherProcess", process); },
{ErrorCodes::NetworkTimeout, "bad test network"});
- auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::NetworkTimeout, status.code());
}
-TEST_F(ReplSetDistLockManagerFixture, GetInvalidPingDocumentWhileOvertaking) {
+TEST_F(DistLockManagerReplSetTest, GetInvalidPingDocumentWhileOvertaking) {
getMockCatalog()->expectGrabLock(
[](StringData, const OID&, StringData, StringData, Date_t, StringData) {
// Don't care
@@ -871,12 +889,14 @@ TEST_F(ReplSetDistLockManagerFixture, GetInvalidPingDocumentWhileOvertaking) {
getMockCatalog()->expectGetPing(
[](StringData process) { ASSERT_EQUALS("otherProcess", process); }, invalidPing);
- auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
}
-TEST_F(ReplSetDistLockManagerFixture, GetServerInfoErrorWhileOvertaking) {
+TEST_F(DistLockManagerReplSetTest, GetServerInfoErrorWhileOvertaking) {
getMockCatalog()->expectGrabLock(
[](StringData, const OID&, StringData, StringData, Date_t, StringData) {
// Don't care
@@ -904,12 +924,14 @@ TEST_F(ReplSetDistLockManagerFixture, GetServerInfoErrorWhileOvertaking) {
getMockCatalog()->expectGetServerInfo([]() {},
{ErrorCodes::NetworkTimeout, "bad test network"});
- auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::NetworkTimeout, status.code());
}
-TEST_F(ReplSetDistLockManagerFixture, GetLockErrorWhileOvertaking) {
+TEST_F(DistLockManagerReplSetTest, GetLockErrorWhileOvertaking) {
getMockCatalog()->expectGrabLock(
[](StringData, const OID&, StringData, StringData, Date_t, StringData) {
// Don't care
@@ -919,12 +941,14 @@ TEST_F(ReplSetDistLockManagerFixture, GetLockErrorWhileOvertaking) {
getMockCatalog()->expectGetLockByName([](StringData name) { ASSERT_EQUALS("bar", name); },
{ErrorCodes::NetworkTimeout, "bad test network"});
- auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::NetworkTimeout, status.code());
}
-TEST_F(ReplSetDistLockManagerFixture, GetLockDisappearedWhileOvertaking) {
+TEST_F(DistLockManagerReplSetTest, GetLockDisappearedWhileOvertaking) {
getMockCatalog()->expectGrabLock(
[](StringData, const OID&, StringData, StringData, Date_t, StringData) {
// Don't care
@@ -934,7 +958,9 @@ TEST_F(ReplSetDistLockManagerFixture, GetLockDisappearedWhileOvertaking) {
getMockCatalog()->expectGetLockByName([](StringData name) { ASSERT_EQUALS("bar", name); },
{ErrorCodes::LockNotFound, "disappeared!"});
- auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::LockBusy, status.code());
}
@@ -946,7 +972,7 @@ TEST_F(ReplSetDistLockManagerFixture, GetLockDisappearedWhileOvertaking) {
* 3. All of the previous attempts should result in lock busy.
* 4. Try to grab lock again when the ping was not updated and lock expiration has elapsed.
*/
-TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfPingIsActive) {
+TEST_F(DistLockManagerReplSetTest, CannotOvertakeIfPingIsActive) {
getMockCatalog()->expectGrabLock(
[](StringData, const OID&, StringData, StringData, Date_t, StringData) {
// Don't care
@@ -986,7 +1012,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfPingIsActive) {
[&getServerInfoCallCount]() { getServerInfoCallCount++; },
DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
- auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::LockBusy, status.code());
}
@@ -1020,7 +1048,8 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfPingIsActive) {
OID unlockSessionIDPassed;
{
- auto lockStatus = distLock()->lock(operationContext(), "bar", "foo", Milliseconds(0));
+ auto lockStatus = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "foo", Milliseconds(0));
ASSERT_OK(lockStatus.getStatus());
@@ -1044,7 +1073,7 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfPingIsActive) {
* 3. All of the previous attempts should result in lock busy.
* 4. Try to grab lock again when the ping was not updated and lock expiration has elapsed.
*/
-TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfOwnerJustChanged) {
+TEST_F(DistLockManagerReplSetTest, CannotOvertakeIfOwnerJustChanged) {
getMockCatalog()->expectGrabLock(
[](StringData, const OID&, StringData, StringData, Date_t, StringData) {
// Don't care
@@ -1083,7 +1112,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfOwnerJustChanged) {
[&getServerInfoCallCount]() { getServerInfoCallCount++; },
DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
- auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::LockBusy, status.code());
}
@@ -1117,7 +1148,8 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfOwnerJustChanged) {
OID unlockSessionIDPassed;
{
- auto lockStatus = distLock()->lock(operationContext(), "bar", "foo", Milliseconds(0));
+ auto lockStatus = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "foo", Milliseconds(0));
ASSERT_OK(lockStatus.getStatus());
@@ -1141,7 +1173,7 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfOwnerJustChanged) {
* 3. All of the previous attempts should result in lock busy.
* 4. Try to grab lock again when the ping was not updated and lock expiration has elapsed.
*/
-TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfElectionIdChanged) {
+TEST_F(DistLockManagerReplSetTest, CannotOvertakeIfElectionIdChanged) {
getMockCatalog()->expectGrabLock(
[](StringData, const OID&, StringData, StringData, Date_t, StringData) {
// Don't care
@@ -1183,7 +1215,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfElectionIdChanged) {
[&getServerInfoCallCount]() { getServerInfoCallCount++; },
DistLockCatalog::ServerInfo(configServerLocalTime, lastElectionId));
- auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::LockBusy, status.code());
}
@@ -1217,7 +1251,8 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfElectionIdChanged) {
OID unlockSessionIDPassed;
{
- auto lockStatus = distLock()->lock(operationContext(), "bar", "foo", Milliseconds(0));
+ auto lockStatus = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "foo", Milliseconds(0));
ASSERT_OK(lockStatus.getStatus());
@@ -1240,7 +1275,7 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfElectionIdChanged) {
* 3. All of the previous attempts should result in lock busy.
* 4. Try to grab lock again when the ping was not updated and lock expiration has elapsed.
*/
-TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfNoMaster) {
+TEST_F(DistLockManagerReplSetTest, CannotOvertakeIfNoMaster) {
getMockCatalog()->expectGrabLock(
[](StringData, const OID&, StringData, StringData, Date_t, StringData) {
// Don't care
@@ -1288,7 +1323,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfNoMaster) {
{ErrorCodes::NotWritablePrimary, "not master"});
}
- auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::LockBusy, status.code());
}
@@ -1321,7 +1358,8 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfNoMaster) {
OID unlockSessionIDPassed;
{
- auto lockStatus = distLock()->lock(operationContext(), "bar", "foo", Milliseconds(0));
+ auto lockStatus = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "foo", Milliseconds(0));
ASSERT_OK(lockStatus.getStatus());
@@ -1350,7 +1388,7 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfNoMaster) {
* 7. Attempt to overtake resulted in an error.
* 8. Check that unlock was called.
*/
-TEST_F(ReplSetDistLockManagerFixture, LockOvertakingResultsInError) {
+TEST_F(DistLockManagerReplSetTest, LockOvertakingResultsInError) {
getMockCatalog()->expectGrabLock(
[](StringData, const OID&, StringData, StringData, Date_t, StringData) {
// Don't care
@@ -1379,7 +1417,9 @@ TEST_F(ReplSetDistLockManagerFixture, LockOvertakingResultsInError) {
// First attempt will record the ping data.
{
- auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::LockBusy, status.code());
}
@@ -1418,7 +1458,8 @@ TEST_F(ReplSetDistLockManagerFixture, LockOvertakingResultsInError) {
Status::OK());
// Second attempt should overtake lock.
- auto lockStatus = distLock()->lock(operationContext(), "bar", "foo", Milliseconds(0));
+ auto lockStatus = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "foo", Milliseconds(0));
ASSERT_NOT_OK(lockStatus.getStatus());
@@ -1434,7 +1475,7 @@ TEST_F(ReplSetDistLockManagerFixture, LockOvertakingResultsInError) {
// Join the background thread before trying to call asserts. Shutdown calls
// stopPing and we don't care in this test.
getMockCatalog()->expectStopPing([](StringData) {}, Status::OK());
- distLock()->shutDown(operationContext());
+ DistLockManager::get(operationContext())->shutDown(operationContext());
// No assert until shutDown has been called to make sure that the background thread
// won't be trying to access the local variables that were captured by lambdas that
@@ -1456,7 +1497,7 @@ TEST_F(ReplSetDistLockManagerFixture, LockOvertakingResultsInError) {
* 6. But since the ping is not fresh anymore, the dist lock manager should overtake the lock.
* 7. Attempt to overtake failed because someone beat us to it.
*/
-TEST_F(ReplSetDistLockManagerFixture, LockOvertakingFailed) {
+TEST_F(DistLockManagerReplSetTest, LockOvertakingFailed) {
getMockCatalog()->expectGrabLock(
[](StringData, const OID&, StringData, StringData, Date_t, StringData) {
// Don't care
@@ -1485,7 +1526,9 @@ TEST_F(ReplSetDistLockManagerFixture, LockOvertakingFailed) {
// First attempt will record the ping data.
{
- auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::LockBusy, status.code());
}
@@ -1511,8 +1554,9 @@ TEST_F(ReplSetDistLockManagerFixture, LockOvertakingFailed) {
{ErrorCodes::LockStateChangeFailed, "nmod 0"});
{
- auto status =
- distLock()->lock(operationContext(), "bar", "foo", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "foo", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::LockBusy, status.code());
}
@@ -1529,7 +1573,7 @@ TEST_F(ReplSetDistLockManagerFixture, LockOvertakingFailed) {
* 6. But since the ping is not fresh anymore, the dist lock manager should overtake the lock.
* 7. Attempt to overtake failed because someone beat us to it.
*/
-TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfConfigServerClockGoesBackwards) {
+TEST_F(DistLockManagerReplSetTest, CannotOvertakeIfConfigServerClockGoesBackwards) {
getMockCatalog()->expectGrabLock(
[](StringData, const OID&, StringData, StringData, Date_t, StringData) {
// Don't care
@@ -1559,7 +1603,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfConfigServerClockGoesBackw
// First attempt will record the ping data.
{
- auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::LockBusy, status.code());
}
@@ -1571,14 +1617,15 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfConfigServerClockGoesBackw
// Second attempt should not overtake lock.
{
- auto status =
- distLock()->lock(operationContext(), "bar", "foo", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "foo", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::LockBusy, status.code());
}
}
-TEST_F(ReplSetDistLockManagerFixture, LockAcquisitionRetriesOnNetworkErrorSuccess) {
+TEST_F(DistLockManagerReplSetTest, LockAcquisitionRetriesOnNetworkErrorSuccess) {
getMockCatalog()->expectGrabLock(
[&](StringData, const OID&, StringData, StringData, Date_t, StringData) {
// Next acquisition should be successful
@@ -1598,24 +1645,26 @@ TEST_F(ReplSetDistLockManagerFixture, LockAcquisitionRetriesOnNetworkErrorSucces
getMockCatalog()->expectUnLock([&](const OID& lockSessionID) {}, Status::OK());
- auto status = distLock()
+ auto status = DistLockManager::get(operationContext())
->lock(operationContext(), "LockName", "Lock reason", Milliseconds(0))
.getStatus();
ASSERT_OK(status);
}
-TEST_F(ReplSetDistLockManagerFixture, LockAcquisitionRetriesOnInterruptionNeverSucceeds) {
+TEST_F(DistLockManagerReplSetTest, LockAcquisitionRetriesOnInterruptionNeverSucceeds) {
getMockCatalog()->expectGrabLock(
[&](StringData, const OID&, StringData, StringData, Date_t, StringData) {},
{ErrorCodes::Interrupted, "operation interrupted"});
getMockCatalog()->expectUnLock([&](const OID& lockSessionID) {}, Status::OK());
- auto status = distLock()->lock(operationContext(), "bar", "foo", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "foo", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
}
-class RSDistLockMgrWithMockTickSource : public ReplSetDistLockManagerFixture {
+class RSDistLockMgrWithMockTickSource : public DistLockManagerReplSetTest {
protected:
RSDistLockMgrWithMockTickSource() {
getServiceContext()->setTickSource(std::make_unique<TickSourceMock<>>());
@@ -1747,7 +1796,8 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockSuccessAfterRetry) {
OID unlockSessionIDPassed;
{
- auto lockStatus = distLock()->lock(operationContext(), lockName, whyMsg, Milliseconds(10));
+ auto lockStatus = DistLockManager::get(operationContext())
+ ->lock(operationContext(), lockName, whyMsg, Milliseconds(10));
ASSERT_OK(lockStatus.getStatus());
getMockCatalog()->expectNoGrabLock();
@@ -1849,7 +1899,8 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
Status::OK());
{
- auto lockStatus = distLock()->lock(operationContext(), lockName, whyMsg, Milliseconds(10));
+ auto lockStatus = DistLockManager::get(operationContext())
+ ->lock(operationContext(), lockName, whyMsg, Milliseconds(10));
ASSERT_NOT_OK(lockStatus.getStatus());
}
@@ -1865,7 +1916,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
// Join the background thread before trying to call asserts. Shutdown calls
// stopPing and we don't care in this test.
getMockCatalog()->expectStopPing([](StringData) {}, Status::OK());
- distLock()->shutDown(operationContext());
+ DistLockManager::get(operationContext())->shutDown(operationContext());
// No assert until shutDown has been called to make sure that the background thread
// won't be trying to access the local variables that were captured by lambdas that
@@ -1878,7 +1929,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
ASSERT_EQUALS(*lastTS, unlockSessionIDPassed);
}
-TEST_F(ReplSetDistLockManagerFixture, LockBusyNoRetry) {
+TEST_F(DistLockManagerReplSetTest, LockBusyNoRetry) {
getMockCatalog()->expectGrabLock(
[this](StringData, const OID&, StringData, StringData, Date_t, StringData) {
getMockCatalog()->expectNoGrabLock(); // Call only once.
@@ -1889,7 +1940,9 @@ TEST_F(ReplSetDistLockManagerFixture, LockBusyNoRetry) {
getMockCatalog()->expectGetLockByName([](StringData) {},
{ErrorCodes::LockNotFound, "not found!"});
- auto status = distLock()->lock(operationContext(), "", "", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "", "", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::LockBusy, status.code());
}
@@ -1939,8 +1992,9 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockRetryTimeout) {
getMockCatalog()->expectGetLockByName([](StringData) {},
{ErrorCodes::LockNotFound, "not found!"});
- auto lockStatus =
- distLock()->lock(operationContext(), lockName, whyMsg, Milliseconds(5)).getStatus();
+ auto lockStatus = DistLockManager::get(operationContext())
+ ->lock(operationContext(), lockName, whyMsg, Milliseconds(5))
+ .getStatus();
ASSERT_NOT_OK(lockStatus);
ASSERT_EQUALS(ErrorCodes::LockBusy, lockStatus.code());
@@ -1983,7 +2037,9 @@ TEST_F(RSDistLockMgrWithMockTickSource, CanOvertakeIfNoPingDocument) {
// First attempt will record the ping data.
{
- auto status = distLock()->lock(operationContext(), "bar", "", Milliseconds(0)).getStatus();
+ auto status = DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "", Milliseconds(0))
+ .getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::LockBusy, status.code());
}
@@ -2028,10 +2084,14 @@ TEST_F(RSDistLockMgrWithMockTickSource, CanOvertakeIfNoPingDocument) {
Status::OK());
// Second attempt should overtake lock.
- { ASSERT_OK(distLock()->lock(operationContext(), "bar", "foo", Milliseconds(0)).getStatus()); }
+ {
+ ASSERT_OK(DistLockManager::get(operationContext())
+ ->lock(operationContext(), "bar", "foo", Milliseconds(0))
+ .getStatus());
+ }
}
-TEST_F(ReplSetDistLockManagerFixture, TryLockWithLocalWriteConcernBusy) {
+TEST_F(DistLockManagerReplSetTest, TryLockWithLocalWriteConcernBusy) {
std::string lockName("test");
Date_t now(Date_t::now());
std::string whyMsg("because");
@@ -2065,8 +2125,9 @@ TEST_F(ReplSetDistLockManagerFixture, TryLockWithLocalWriteConcernBusy) {
},
{ErrorCodes::LockStateChangeFailed, "Unable to take lock"});
- auto lockStatus = distLock()->tryLockWithLocalWriteConcern(
- operationContext(), lockName, whyMsg, lockSessionIDPassed);
+ auto lockStatus = DistLockManager::get(operationContext())
+ ->tryLockWithLocalWriteConcern(
+ operationContext(), lockName, whyMsg, lockSessionIDPassed);
ASSERT_EQ(ErrorCodes::LockBusy, lockStatus.getStatus());
}
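
Note: the mechanical change running through this test file is the access pattern: the fixture accessor distLock() is replaced by the ServiceContext decoration DistLockManager::get(operationContext()). Condensed from the LockBusyNoRetry and LockOvertakingFailed hunks above (the grab-lock error status shown here is taken from the latter), a test body now reads:

    getMockCatalog()->expectGrabLock(
        [this](StringData, const OID&, StringData, StringData, Date_t, StringData) {
            getMockCatalog()->expectNoGrabLock();  // expect a single attempt
        },
        {ErrorCodes::LockStateChangeFailed, "nmod 0"});
    getMockCatalog()->expectGetLockByName([](StringData) {},
                                          {ErrorCodes::LockNotFound, "not found!"});

    auto status = DistLockManager::get(operationContext())
                      ->lock(operationContext(), "", "", Milliseconds(0))
                      .getStatus();
    ASSERT_EQUALS(ErrorCodes::LockBusy, status.code());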
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 0eb7a19ac65..5bcdcaff9b9 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/field_parser.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/collection_sharding_runtime.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/db/s/shard_filtering_metadata_refresh.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/vector_clock.h"
@@ -80,7 +81,7 @@ void mergeChunks(OperationContext* opCtx,
const std::string whyMessage = str::stream() << "merging chunks in " << nss.ns() << " from "
<< redact(minKey) << " to " << redact(maxKey);
auto scopedDistLock = uassertStatusOKWithContext(
- Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
+ DistLockManager::get(opCtx)->lock(
opCtx, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout),
str::stream() << "could not acquire collection lock for " << nss.ns()
<< " to merge chunks in [" << redact(minKey) << ", " << redact(maxKey)
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
index 80380de60f8..25d1f2b598a 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
@@ -213,11 +213,10 @@ protected:
TxnNumber _txnNumber{0};
private:
- std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) override {
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() override {
class StaticCatalogClient final : public ShardingCatalogClientMock {
public:
- StaticCatalogClient() : ShardingCatalogClientMock(nullptr) {}
+ StaticCatalogClient() = default;
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
OperationContext* opCtx, repl::ReadConcernLevel readConcern) override {
diff --git a/src/mongo/db/s/migration_util_test.cpp b/src/mongo/db/s/migration_util_test.cpp
index 35838eb6953..fbe7cf9a40d 100644
--- a/src/mongo/db/s/migration_util_test.cpp
+++ b/src/mongo/db/s/migration_util_test.cpp
@@ -372,8 +372,7 @@ public:
// and loading all collections when a database is loaded for the first time by the CatalogCache.
class StaticCatalogClient final : public ShardingCatalogClientMock {
public:
- StaticCatalogClient(std::vector<ShardType> shards)
- : ShardingCatalogClientMock(nullptr), _shards(std::move(shards)) {}
+ StaticCatalogClient(std::vector<ShardType> shards) : _shards(std::move(shards)) {}
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
OperationContext* opCtx, repl::ReadConcernLevel readConcern) override {
@@ -404,8 +403,7 @@ public:
return autoColl.getCollection()->uuid();
}
- std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) override {
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() override {
auto mockCatalogClient = std::make_unique<StaticCatalogClient>(kShardList);
// Stash a pointer to the mock so its return values can be set.
_mockCatalogClient = mockCatalogClient.get();
diff --git a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp
index 669da0f7ee0..5dbdba020d1 100644
--- a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp
@@ -37,12 +37,10 @@
#include "mongo/db/s/resharding/resharding_donor_oplog_iterator.h"
#include "mongo/db/s/resharding/resharding_server_parameters_gen.h"
#include "mongo/db/s/resharding_util.h"
-#include "mongo/db/s/sharding_mongod_test_fixture.h"
-#include "mongo/db/service_context_d_test_fixture.h"
+#include "mongo/db/s/shard_server_test_fixture.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
-#include "mongo/unittest/unittest.h"
-
#include "mongo/logv2/log.h"
+#include "mongo/unittest/unittest.h"
namespace mongo {
namespace {
@@ -93,7 +91,7 @@ private:
const int _originalValue;
};
-class ReshardingDonorOplogIterTest : public ShardingMongodTestFixture {
+class ReshardingDonorOplogIterTest : public ShardServerTestFixture {
public:
repl::MutableOplogEntry makeInsertOplog(Timestamp ts, BSONObj doc) {
ReshardingDonorOplogId oplogId(ts, ts);
diff --git a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
index b16c9c44dc6..8fb8f0c69ad 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
@@ -58,6 +58,7 @@
namespace mongo {
namespace {
+
auto reshardingTempNss(const UUID& existingUUID) {
return NamespaceString(fmt::format("db.system.resharding.{}", existingUUID.toString()));
}
@@ -67,9 +68,7 @@ protected:
class ThreeRecipientsCatalogClient final : public ShardingCatalogClientMock {
public:
ThreeRecipientsCatalogClient(UUID existingUUID, std::vector<ShardId> recipients)
- : ShardingCatalogClientMock(nullptr),
- _existingUUID(std::move(existingUUID)),
- _recipients(std::move(recipients)) {}
+ : _existingUUID(std::move(existingUUID)), _recipients(std::move(recipients)) {}
// Makes one chunk object per shard; the actual key ranges are not relevant for the test.
// The output is deterministic since this function is also used to provide data to the
@@ -134,12 +133,9 @@ protected:
NamespaceString(kReshardNs), reshardingTempNss(kExistingUUID), kRecipientShards);
}
-
- std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) {
- auto mockClient = std::make_unique<ThreeRecipientsCatalogClient>(
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() override {
+ return std::make_unique<ThreeRecipientsCatalogClient>(
uassertStatusOK(UUID::parse(kExistingUUID.toString())), kRecipientShards);
- return mockClient;
}
std::shared_ptr<ReshardingDonorService::DonorStateMachine> getStateMachineInstace(
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
index 068519269b2..eca4bdd56b6 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
@@ -47,7 +47,6 @@
#include "mongo/db/s/resharding_util.h"
#include "mongo/db/s/sharding_mongod_test_fixture.h"
#include "mongo/db/s/sharding_state.h"
-#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/db/session_catalog_mongod.h"
#include "mongo/db/transaction_participant.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
diff --git a/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp
index 79b029a9a92..2b9aed54b82 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp
@@ -128,13 +128,11 @@ public:
* ShardRegistry reload is done over DBClient, not the NetworkInterface, and there is no
* DBClientMock analogous to the NetworkInterfaceMock.
*/
- std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) {
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() {
class StaticCatalogClient final : public ShardingCatalogClientMock {
public:
- StaticCatalogClient(std::vector<ShardId> shardIds)
- : ShardingCatalogClientMock(nullptr), _shardIds(std::move(shardIds)) {}
+ StaticCatalogClient(std::vector<ShardId> shardIds) : _shardIds(std::move(shardIds)) {}
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
OperationContext* opCtx, repl::ReadConcernLevel readConcern) override {
diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
index c493e4ce51e..db10b0a6bf6 100644
--- a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
@@ -134,13 +134,11 @@ class ReshardingTxnClonerTest : public ShardServerTestFixture {
* ShardRegistry reload is done over DBClient, not the NetworkInterface, and there is no
* DBClientMock analogous to the NetworkInterfaceMock.
*/
- std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) {
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() {
class StaticCatalogClient final : public ShardingCatalogClientMock {
public:
- StaticCatalogClient(std::vector<ShardId> shardIds)
- : ShardingCatalogClientMock(nullptr), _shardIds(std::move(shardIds)) {}
+ StaticCatalogClient(std::vector<ShardId> shardIds) : _shardIds(std::move(shardIds)) {}
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
OperationContext* opCtx, repl::ReadConcernLevel readConcern) override {
diff --git a/src/mongo/db/s/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding_destined_recipient_test.cpp
index 78108531fbb..d9d51ae12a2 100644
--- a/src/mongo/db/s/resharding_destined_recipient_test.cpp
+++ b/src/mongo/db/s/resharding_destined_recipient_test.cpp
@@ -125,8 +125,7 @@ public:
class StaticCatalogClient final : public ShardingCatalogClientMock {
public:
- StaticCatalogClient(std::vector<ShardType> shards)
- : ShardingCatalogClientMock(nullptr), _shards(std::move(shards)) {}
+ StaticCatalogClient(std::vector<ShardType> shards) : _shards(std::move(shards)) {}
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
OperationContext* opCtx, repl::ReadConcernLevel readConcern) override {
@@ -149,8 +148,7 @@ public:
std::vector<CollectionType> _colls;
};
- std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) override {
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() override {
return std::make_unique<StaticCatalogClient>(kShardList);
}
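
Note: every ShardingCatalogClientMock subclass in this patch drops the ShardingCatalogClientMock(nullptr) base-constructor call for the same reason: the mock no longer owns a DistLockManager, so default construction suffices. The minimal shape after the change:

    class StaticCatalogClient final : public ShardingCatalogClientMock {
    public:
        StaticCatalogClient() = default;  // no DistLockManager to forward anymore
    };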
diff --git a/src/mongo/db/s/resharding_util.cpp b/src/mongo/db/s/resharding_util.cpp
index 467fe1b5654..9cdfd2ea121 100644
--- a/src/mongo/db/s/resharding_util.cpp
+++ b/src/mongo/db/s/resharding_util.cpp
@@ -49,7 +49,6 @@
#include "mongo/db/pipeline/document_source_sort.h"
#include "mongo/db/pipeline/document_source_unwind.h"
#include "mongo/db/s/collection_sharding_state.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/storage/write_unit_of_work.h"
#include "mongo/logv2/log.h"
#include "mongo/rpc/get_status_from_command_result.h"
diff --git a/src/mongo/db/s/session_catalog_migration_destination_test.cpp b/src/mongo/db/s/session_catalog_migration_destination_test.cpp
index 28cac823d42..aba60895ed0 100644
--- a/src/mongo/db/s/session_catalog_migration_destination_test.cpp
+++ b/src/mongo/db/s/session_catalog_migration_destination_test.cpp
@@ -278,11 +278,10 @@ public:
}
private:
- std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) override {
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() override {
class StaticCatalogClient final : public ShardingCatalogClientMock {
public:
- StaticCatalogClient() : ShardingCatalogClientMock(nullptr) {}
+ StaticCatalogClient() = default;
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
OperationContext* opCtx, repl::ReadConcernLevel readConcern) override {
diff --git a/src/mongo/db/s/shard_collection_legacy.cpp b/src/mongo/db/s/shard_collection_legacy.cpp
index 494e8a01771..7952367766a 100644
--- a/src/mongo/db/s/shard_collection_legacy.cpp
+++ b/src/mongo/db/s/shard_collection_legacy.cpp
@@ -47,7 +47,7 @@
#include "mongo/db/s/active_shard_collection_registry.h"
#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/config/initial_split_policy.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/db/s/shard_collection_legacy.h"
#include "mongo/db/s/shard_filtering_metadata_refresh.h"
#include "mongo/db/s/shard_key_util.h"
@@ -589,10 +589,9 @@ CreateCollectionResponse shardCollection(OperationContext* opCtx,
boost::optional<DistLockManager::ScopedDistLock> dbDistLock;
boost::optional<DistLockManager::ScopedDistLock> collDistLock;
if (!mustTakeDistLock) {
- auto const catalogClient = Grid::get(opCtx)->catalogClient();
- dbDistLock.emplace(uassertStatusOK(catalogClient->getDistLockManager()->lock(
+ dbDistLock.emplace(uassertStatusOK(DistLockManager::get(opCtx)->lock(
opCtx, nss.db(), "shardCollection", DistLockManager::kDefaultLockTimeout)));
- collDistLock.emplace(uassertStatusOK(catalogClient->getDistLockManager()->lock(
+ collDistLock.emplace(uassertStatusOK(DistLockManager::get(opCtx)->lock(
opCtx, nss.ns(), "shardCollection", DistLockManager::kDefaultLockTimeout)));
}
diff --git a/src/mongo/db/s/shard_server_test_fixture.cpp b/src/mongo/db/s/shard_server_test_fixture.cpp
index 9677193045a..2c65e268b1b 100644
--- a/src/mongo/db/s/shard_server_test_fixture.cpp
+++ b/src/mongo/db/s/shard_server_test_fixture.cpp
@@ -36,8 +36,6 @@
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/s/shard_server_catalog_cache_loader.h"
#include "mongo/db/s/sharding_state.h"
-#include "mongo/s/catalog/dist_lock_catalog_mock.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/catalog/sharding_catalog_client_impl.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/config_server_catalog_cache_loader.h"
@@ -91,20 +89,8 @@ void ShardServerTestFixture::tearDown() {
ShardingMongodTestFixture::tearDown();
}
-std::unique_ptr<DistLockCatalog> ShardServerTestFixture::makeDistLockCatalog() {
- return std::make_unique<DistLockCatalogMock>();
-}
-
-std::unique_ptr<DistLockManager> ShardServerTestFixture::makeDistLockManager(
- std::unique_ptr<DistLockCatalog> distLockCatalog) {
- invariant(distLockCatalog);
- return std::make_unique<DistLockManagerMock>(std::move(distLockCatalog));
-}
-
-std::unique_ptr<ShardingCatalogClient> ShardServerTestFixture::makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) {
- invariant(distLockManager);
- return std::make_unique<ShardingCatalogClientImpl>(std::move(distLockManager));
+std::unique_ptr<ShardingCatalogClient> ShardServerTestFixture::makeShardingCatalogClient() {
+ return std::make_unique<ShardingCatalogClientImpl>();
}
} // namespace mongo
diff --git a/src/mongo/db/s/shard_server_test_fixture.h b/src/mongo/db/s/shard_server_test_fixture.h
index 27da7d834f2..57adbc030ba 100644
--- a/src/mongo/db/s/shard_server_test_fixture.h
+++ b/src/mongo/db/s/shard_server_test_fixture.h
@@ -57,22 +57,7 @@ protected:
*/
std::shared_ptr<RemoteCommandTargeterMock> configTargeterMock();
- /**
- * Creates a DistLockCatalogMock.
- */
- std::unique_ptr<DistLockCatalog> makeDistLockCatalog() override;
-
- /**
- * Creates a DistLockManagerMock.
- */
- std::unique_ptr<DistLockManager> makeDistLockManager(
- std::unique_ptr<DistLockCatalog> distLockCatalog) override;
-
- /**
- * Creates a real ShardingCatalogClient.
- */
- std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) override;
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() override;
void setCatalogCacheLoader(std::unique_ptr<CatalogCacheLoader> loader);
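
Note: with makeDistLockCatalog() and makeDistLockManager() removed from ShardServerTestFixture, a test that needs a particular manager installs one by overriding makeDistLockManager() itself, as DistLockManagerReplSetTest does earlier in this patch:

    std::unique_ptr<DistLockManager> makeDistLockManager() override {
        auto catalog = std::make_unique<DistLockCatalogMock>();
        _distLockCatalogMock = catalog.get();  // stash the raw pointer before ownership moves
        return std::make_unique<ReplSetDistLockManager>(
            getServiceContext(), _processID, std::move(catalog), kPingInterval, kLockExpiration);
    }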
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index 64f628e315f..a8d9cf60ed9 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -35,7 +35,6 @@
#include "mongo/client/connection_string.h"
#include "mongo/client/global_conn_pool.h"
-#include "mongo/client/remote_command_targeter.h"
#include "mongo/client/remote_command_targeter_factory_impl.h"
#include "mongo/client/replica_set_monitor.h"
#include "mongo/db/catalog_raii.h"
@@ -47,6 +46,8 @@
#include "mongo/db/ops/update.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/chunk_splitter.h"
+#include "mongo/db/s/dist_lock_catalog_replset.h"
+#include "mongo/db/s/dist_lock_manager_replset.h"
#include "mongo/db/s/periodic_balancer_config_refresher.h"
#include "mongo/db/s/read_only_catalog_cache_loader.h"
#include "mongo/db/s/shard_local.h"
@@ -258,42 +259,9 @@ private:
} // namespace
-void ShardingInitializationMongoD::initializeShardingEnvironmentOnShardServer(
- OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
-
- _replicaSetChangeListener =
- ReplicaSetMonitor::getNotifier().makeListener<ShardingReplicaSetChangeListener>(
- opCtx->getServiceContext());
-
- initializeGlobalShardingStateForMongoD(
- opCtx, shardIdentity.getConfigsvrConnectionString(), distLockProcessId);
-
-
- // Determine primary/secondary/standalone state in order to properly initialize sharding
- // components.
- const auto replCoord = repl::ReplicationCoordinator::get(opCtx);
- bool isReplSet = replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet;
- bool isStandaloneOrPrimary =
- !isReplSet || (replCoord->getMemberState() == repl::MemberState::RS_PRIMARY);
-
- CatalogCacheLoader::get(opCtx).initializeReplicaSetRole(isStandaloneOrPrimary);
- ChunkSplitter::get(opCtx).onShardingInitialization(isStandaloneOrPrimary);
- PeriodicBalancerConfigRefresher::get(opCtx).onShardingInitialization(opCtx->getServiceContext(),
- isStandaloneOrPrimary);
-
- // Start the transaction coordinator service only if the node is the primary of a replica set
- TransactionCoordinatorService::get(opCtx)->onShardingInitialization(
- opCtx, isReplSet && isStandaloneOrPrimary);
-
- LOGV2(22071,
- "Finished initializing sharding components for {memberState} node.",
- "Finished initializing sharding components",
- "memberState"_attr = (isStandaloneOrPrimary ? "primary" : "secondary"));
-}
-
ShardingInitializationMongoD::ShardingInitializationMongoD()
: _initFunc([this](auto... args) {
- this->initializeShardingEnvironmentOnShardServer(std::forward<decltype(args)>(args)...);
+ _initializeShardingEnvironmentOnShardServer(std::forward<decltype(args)>(args)...);
}) {}
ShardingInitializationMongoD::~ShardingInitializationMongoD() = default;
@@ -308,13 +276,14 @@ ShardingInitializationMongoD* ShardingInitializationMongoD::get(ServiceContext*
void ShardingInitializationMongoD::shutDown(OperationContext* opCtx) {
auto const shardingState = ShardingState::get(opCtx);
- auto const grid = Grid::get(opCtx);
-
if (!shardingState->enabled())
return;
- grid->catalogClient()->shutDown(opCtx);
+ DistLockManager::get(opCtx)->shutDown(opCtx);
+
+ auto const grid = Grid::get(opCtx);
grid->shardRegistry()->shutdown();
+
_replicaSetChangeListener.reset();
}
@@ -466,6 +435,7 @@ void ShardingInitializationMongoD::initializeFromShardIdentity(
} catch (const DBException& ex) {
shardingState->setInitialized(ex.toStatus());
}
+
Grid::get(opCtx)->setShardingInitialized();
}
@@ -514,36 +484,33 @@ void ShardingInitializationMongoD::updateShardIdentityConfigString(
void initializeGlobalShardingStateForMongoD(OperationContext* opCtx,
const ConnectionString& configCS,
StringData distLockProcessId) {
+ uassert(ErrorCodes::BadValue, "Unrecognized connection string.", configCS);
+
auto targeterFactory = std::make_unique<RemoteCommandTargeterFactoryImpl>();
auto targeterFactoryPtr = targeterFactory.get();
- ShardFactory::BuilderCallable setBuilder = [targeterFactoryPtr](
- const ShardId& shardId,
- const ConnectionString& connStr) {
- return std::make_unique<ShardRemote>(shardId, connStr, targeterFactoryPtr->create(connStr));
- };
-
- ShardFactory::BuilderCallable masterBuilder = [targeterFactoryPtr](
- const ShardId& shardId,
- const ConnectionString& connStr) {
- return std::make_unique<ShardRemote>(shardId, connStr, targeterFactoryPtr->create(connStr));
- };
-
- ShardFactory::BuilderCallable localBuilder = [](const ShardId& shardId,
- const ConnectionString& connStr) {
- return std::make_unique<ShardLocal>(shardId);
- };
-
ShardFactory::BuildersMap buildersMap{
- {ConnectionString::ConnectionType::kReplicaSet, std::move(setBuilder)},
- {ConnectionString::ConnectionType::kStandalone, std::move(masterBuilder)},
- {ConnectionString::ConnectionType::kLocal, std::move(localBuilder)},
+ {ConnectionString::ConnectionType::kReplicaSet,
+ [targeterFactoryPtr](const ShardId& shardId, const ConnectionString& connStr) {
+ return std::make_unique<ShardRemote>(
+ shardId, connStr, targeterFactoryPtr->create(connStr));
+ }},
+ {ConnectionString::ConnectionType::kLocal,
+ [](const ShardId& shardId, const ConnectionString& connStr) {
+ return std::make_unique<ShardLocal>(shardId);
+ }},
+ {ConnectionString::ConnectionType::kStandalone,
+ [targeterFactoryPtr](const ShardId& shardId, const ConnectionString& connStr) {
+ return std::make_unique<ShardRemote>(
+ shardId, connStr, targeterFactoryPtr->create(connStr));
+ }},
};
auto shardFactory =
std::make_unique<ShardFactory>(std::move(buildersMap), std::move(targeterFactory));
auto const service = opCtx->getServiceContext();
+
if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
if (storageGlobalParams.readOnly) {
CatalogCacheLoader::set(service, std::make_unique<ReadOnlyCatalogCacheLoader>());
@@ -552,8 +519,10 @@ void initializeGlobalShardingStateForMongoD(OperationContext* opCtx,
std::make_unique<ShardServerCatalogCacheLoader>(
std::make_unique<ConfigServerCatalogCacheLoader>()));
}
- } else {
+ } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
CatalogCacheLoader::set(service, std::make_unique<ConfigServerCatalogCacheLoader>());
+ } else {
+ MONGO_UNREACHABLE;
}
auto validator = LogicalTimeValidator::get(service);
@@ -575,8 +544,6 @@ void initializeGlobalShardingStateForMongoD(OperationContext* opCtx,
catCache->invalidateEntriesThatReferenceShard(removedShard);
}};
- uassert(ErrorCodes::BadValue, "Unrecognized connection string.", configCS);
-
auto shardRegistry = std::make_unique<ShardRegistry>(
std::move(shardFactory), configCS, std::move(shardRemovalHooks));
@@ -590,6 +557,15 @@ void initializeGlobalShardingStateForMongoD(OperationContext* opCtx,
// executors aren't used for user queries in mongod.
1));
+ DistLockManager::create(
+ service,
+ std::make_unique<ReplSetDistLockManager>(service,
+ distLockProcessId,
+ std::make_unique<DistLockCatalogImpl>(),
+ ReplSetDistLockManager::kDistLockPingInterval,
+ ReplSetDistLockManager::kDistLockExpirationTime));
+ DistLockManager::get(opCtx)->startUp();
+
auto const replCoord = repl::ReplicationCoordinator::get(service);
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
replCoord->getMemberState().primary()) {
@@ -597,4 +573,37 @@ void initializeGlobalShardingStateForMongoD(OperationContext* opCtx,
}
}
+void ShardingInitializationMongoD::_initializeShardingEnvironmentOnShardServer(
+ OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
+
+ _replicaSetChangeListener =
+ ReplicaSetMonitor::getNotifier().makeListener<ShardingReplicaSetChangeListener>(
+ opCtx->getServiceContext());
+
+ initializeGlobalShardingStateForMongoD(
+ opCtx, shardIdentity.getConfigsvrConnectionString(), distLockProcessId);
+
+ // Determine primary/secondary/standalone state in order to properly initialize sharding
+ // components.
+ const auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+ bool isReplSet = replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet;
+ bool isStandaloneOrPrimary =
+ !isReplSet || (replCoord->getMemberState() == repl::MemberState::RS_PRIMARY);
+
+ CatalogCacheLoader::get(opCtx).initializeReplicaSetRole(isStandaloneOrPrimary);
+ ChunkSplitter::get(opCtx).onShardingInitialization(isStandaloneOrPrimary);
+ PeriodicBalancerConfigRefresher::get(opCtx).onShardingInitialization(opCtx->getServiceContext(),
+ isStandaloneOrPrimary);
+
+ // Start the transaction coordinator service only if the node is the primary of a replica set
+ TransactionCoordinatorService::get(opCtx)->onShardingInitialization(
+ opCtx, isReplSet && isStandaloneOrPrimary);
+
+ LOGV2(22071,
+ "Finished initializing sharding components for {memberState} node.",
+ "Finished initializing sharding components",
+ "memberState"_attr = (isStandaloneOrPrimary ? "primary" : "secondary"));
+}
+
} // namespace mongo
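The hunk above captures the commit's core refactor: the DistLockManager is no longer owned by the ShardingCatalogClient but is installed on the ServiceContext via DistLockManager::create() and looked up with DistLockManager::get(), with startUp()/shutDown() driven directly by sharding initialization and shutdown. A minimal self-contained sketch of that install-then-look-up pattern; the types below (DistLock, ServiceCtx) are illustrative stand-ins, not the real MongoDB classes:

#include <cassert>
#include <memory>

// Stand-in for a distributed lock manager with an explicit lifecycle.
struct DistLock {
    void startUp() { started = true; }
    void shutDown() { started = false; }
    bool started = false;
};

// Stand-in for ServiceContext: one per process, owns global services.
struct ServiceCtx {
    std::unique_ptr<DistLock> distLock;
};

// create() installs the singleton exactly once, during initialization.
void createDistLock(ServiceCtx* svc, std::unique_ptr<DistLock> mgr) {
    assert(!svc->distLock);
    svc->distLock = std::move(mgr);
}

// get() hands out a raw pointer; callers no longer reach the lock manager
// through the catalog client.
DistLock* getDistLock(ServiceCtx* svc) {
    assert(svc->distLock);
    return svc->distLock.get();
}

int main() {
    ServiceCtx svc;
    createDistLock(&svc, std::make_unique<DistLock>());
    getDistLock(&svc)->startUp();   // mirrors DistLockManager::get(opCtx)->startUp()
    getDistLock(&svc)->shutDown();  // shutdown is now explicit, before Grid teardown
    return 0;
}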
diff --git a/src/mongo/db/s/sharding_initialization_mongod.h b/src/mongo/db/s/sharding_initialization_mongod.h
index a8e8eec3c72..f02d5d43c69 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.h
+++ b/src/mongo/db/s/sharding_initialization_mongod.h
@@ -38,10 +38,6 @@
namespace mongo {
-class ConnectionString;
-class OperationContext;
-class ServiceContext;
-
/**
* This class serves as a bootstrap and shutdown for the sharding subsystem and also controls the
* persisted cluster identity. The default ShardingEnvironmentInitFunc instantiates all the sharding
@@ -62,10 +58,6 @@ public:
static ShardingInitializationMongoD* get(OperationContext* opCtx);
static ShardingInitializationMongoD* get(ServiceContext* service);
- void initializeShardingEnvironmentOnShardServer(OperationContext* opCtx,
- const ShardIdentity& shardIdentity,
- StringData distLockProcessId);
-
/**
* If started with --shardsvr, initializes sharding awareness from the shardIdentity document on
* disk, if there is one.
@@ -112,6 +104,10 @@ public:
}
private:
+ void _initializeShardingEnvironmentOnShardServer(OperationContext* opCtx,
+ const ShardIdentity& shardIdentity,
+ StringData distLockProcessId);
+
// This mutex ensures that only one thread at a time executes the sharding
// initialization/teardown sequence
Mutex _initSynchronizationMutex =
diff --git a/src/mongo/db/s/sharding_initialization_mongod_test.cpp b/src/mongo/db/s/sharding_initialization_mongod_test.cpp
index 44b90529a89..f1359e75be1 100644
--- a/src/mongo/db/s/sharding_initialization_mongod_test.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod_test.cpp
@@ -44,7 +44,6 @@
#include "mongo/db/s/sharding_initialization_mongod.h"
#include "mongo/db/s/type_shard_identity.h"
#include "mongo/db/server_options.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/catalog/sharding_catalog_client_impl.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/config_server_catalog_cache_loader.h"
@@ -60,9 +59,6 @@ const std::string kShardName("TestShard");
*/
class ShardingInitializationMongoDTest : public ShardingMongodTestFixture {
protected:
- // Used to write to set up local collections before exercising server logic.
- std::unique_ptr<DBDirectClient> _dbDirectClient;
-
void setUp() override {
serverGlobalParams.clusterRole = ClusterRole::None;
ShardingMongodTestFixture::setUp();
@@ -109,15 +105,8 @@ protected:
ShardingMongodTestFixture::tearDown();
}
- std::unique_ptr<DistLockManager> makeDistLockManager(
- std::unique_ptr<DistLockCatalog> distLockCatalog) override {
- return std::make_unique<DistLockManagerMock>(nullptr);
- }
-
- std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) override {
- invariant(distLockManager);
- return std::make_unique<ShardingCatalogClientImpl>(std::move(distLockManager));
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() override {
+ return std::make_unique<ShardingCatalogClientImpl>();
}
auto* shardingInitialization() {
@@ -127,6 +116,9 @@ protected:
auto* shardingState() {
return ShardingState::get(getServiceContext());
}
+
+ // Used to write to set up local collections before exercising server logic.
+ std::unique_ptr<DBDirectClient> _dbDirectClient;
};
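With the lock manager owned by the service context, the fixtures no longer thread a DistLockManager through makeShardingCatalogClient(), so the factory hook shrinks to a zero-argument virtual. A compilable toy version of that template-method pattern; FixtureBase, CatalogClient, and MockCatalogClient are hypothetical names, not the real fixture types:

#include <cassert>
#include <memory>

struct CatalogClient {
    virtual ~CatalogClient() = default;
};

struct MockCatalogClient : CatalogClient {};

struct FixtureBase {
    virtual ~FixtureBase() = default;

    // Zero-argument hook: the catalog client factory no longer needs to
    // receive (or own) a lock manager.
    virtual std::unique_ptr<CatalogClient> makeCatalogClient() {
        return nullptr;  // safe base default, as in the common test fixture
    }
};

struct MyFixture : FixtureBase {
    std::unique_ptr<CatalogClient> makeCatalogClient() override {
        return std::make_unique<MockCatalogClient>();
    }
};

int main() {
    MyFixture fixture;
    auto client = fixture.makeCatalogClient();
    assert(client != nullptr);
    return 0;
}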
/**
diff --git a/src/mongo/db/s/sharding_initialization_op_observer_test.cpp b/src/mongo/db/s/sharding_initialization_op_observer_test.cpp
index 019394f91e9..e029b043c01 100644
--- a/src/mongo/db/s/sharding_initialization_op_observer_test.cpp
+++ b/src/mongo/db/s/sharding_initialization_op_observer_test.cpp
@@ -43,7 +43,6 @@
#include "mongo/db/s/sharding_mongod_test_fixture.h"
#include "mongo/db/s/type_shard_identity.h"
#include "mongo/db/server_options.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/config_server_catalog_cache_loader.h"
diff --git a/src/mongo/db/s/sharding_mongod_test_fixture.cpp b/src/mongo/db/s/sharding_mongod_test_fixture.cpp
index c1e65f887ce..d0881708665 100644
--- a/src/mongo/db/s/sharding_mongod_test_fixture.cpp
+++ b/src/mongo/db/s/sharding_mongod_test_fixture.cpp
@@ -54,6 +54,7 @@
#include "mongo/db/repl/storage_interface_mock.h"
#include "mongo/db/s/collection_sharding_state_factory_shard.h"
#include "mongo/db/s/config_server_op_observer.h"
+#include "mongo/db/s/dist_lock_manager_mock.h"
#include "mongo/db/s/op_observer_sharding_impl.h"
#include "mongo/db/s/shard_local.h"
#include "mongo/db/s/shard_server_op_observer.h"
@@ -61,8 +62,6 @@
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/s/balancer_configuration.h"
-#include "mongo/s/catalog/dist_lock_catalog.h"
-#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_shard.h"
@@ -219,13 +218,8 @@ std::unique_ptr<ShardRegistry> ShardingMongodTestFixture::makeShardRegistry(
return std::make_unique<ShardRegistry>(std::move(shardFactory), configConnStr);
}
-std::unique_ptr<DistLockCatalog> ShardingMongodTestFixture::makeDistLockCatalog() {
- return nullptr;
-}
-
-std::unique_ptr<DistLockManager> ShardingMongodTestFixture::makeDistLockManager(
- std::unique_ptr<DistLockCatalog> distLockCatalog) {
- return nullptr;
+std::unique_ptr<DistLockManager> ShardingMongodTestFixture::makeDistLockManager() {
+ return std::make_unique<DistLockManagerMock>();
}
std::unique_ptr<ClusterCursorManager> ShardingMongodTestFixture::makeClusterCursorManager() {
@@ -249,17 +243,11 @@ Status ShardingMongodTestFixture::initializeGlobalShardingStateForMongodForTest(
executorPoolPtr->startup();
}
- auto distLockCatalogPtr = makeDistLockCatalog();
- _distLockCatalog = distLockCatalogPtr.get();
-
- auto distLockManagerPtr = makeDistLockManager(std::move(distLockCatalogPtr));
- _distLockManager = distLockManagerPtr.get();
-
auto catalogCache = std::make_unique<CatalogCache>(
getServiceContext(), CatalogCacheLoader::get(getServiceContext()));
auto const grid = Grid::get(operationContext());
- grid->init(makeShardingCatalogClient(std::move(distLockManagerPtr)),
+ grid->init(makeShardingCatalogClient(),
std::move(catalogCache),
makeShardRegistry(configConnStr),
makeClusterCursorManager(),
@@ -267,26 +255,28 @@ Status ShardingMongodTestFixture::initializeGlobalShardingStateForMongodForTest(
std::move(executorPoolPtr),
_mockNetwork);
- if (grid->catalogClient()) {
- grid->catalogClient()->startup();
+ DistLockManager::create(getServiceContext(), makeDistLockManager());
+ if (DistLockManager::get(operationContext())) {
+ DistLockManager::get(operationContext())->startUp();
}
return Status::OK();
}
void ShardingMongodTestFixture::setUp() {
+ ServiceContextMongoDTest::setUp();
ShardingTestFixtureCommon::setUp();
}
void ShardingMongodTestFixture::tearDown() {
ReplicaSetMonitor::cleanup();
- if (Grid::get(operationContext())->getExecutorPool() && !_executorPoolShutDown) {
- Grid::get(operationContext())->getExecutorPool()->shutdownAndJoin();
+ if (DistLockManager::get(operationContext())) {
+ DistLockManager::get(operationContext())->shutDown(operationContext());
}
- if (Grid::get(operationContext())->catalogClient()) {
- Grid::get(operationContext())->catalogClient()->shutDown(operationContext());
+ if (Grid::get(operationContext())->getExecutorPool() && !_executorPoolShutDown) {
+ Grid::get(operationContext())->getExecutorPool()->shutdownAndJoin();
}
if (Grid::get(operationContext())->shardRegistry()) {
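Note the teardown ordering the fixture adopts above: the DistLockManager, which may still issue unlock writes, is shut down before the executor pool it depends on. A rough standalone illustration of that reverse-of-startup teardown discipline; the Teardown type and step names are invented for the sketch:

#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct Teardown {
    std::vector<std::pair<std::string, std::function<void()>>> steps;

    void onShutdown(std::string name, std::function<void()> fn) {
        steps.emplace_back(std::move(name), std::move(fn));
    }

    ~Teardown() {
        // Run in reverse registration order: last started, first stopped.
        for (auto it = steps.rbegin(); it != steps.rend(); ++it) {
            std::cout << "shutting down " << it->first << '\n';
            it->second();
        }
    }
};

int main() {
    Teardown t;
    t.onShutdown("executor pool", [] { /* shutdownAndJoin() */ });
    t.onShutdown("dist lock manager", [] { /* shutDown(opCtx) */ });
    // Prints "dist lock manager" first, then "executor pool".
    return 0;
}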
diff --git a/src/mongo/db/s/sharding_mongod_test_fixture.h b/src/mongo/db/s/sharding_mongod_test_fixture.h
index 3940b46245d..7c6d955976e 100644
--- a/src/mongo/db/s/sharding_mongod_test_fixture.h
+++ b/src/mongo/db/s/sharding_mongod_test_fixture.h
@@ -30,17 +30,12 @@
#pragma once
#include "mongo/db/repl/replication_coordinator_mock.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/s/sharding_test_fixture_common.h"
namespace mongo {
-class CatalogCacheLoader;
-
-namespace repl {
-class ReplSettings;
-} // namespace repl
-
/**
* Sets up this fixture as a mongod with a storage engine, OpObserver, and as a member of a replica
* set.
@@ -57,7 +52,6 @@ protected:
~ShardingMongodTestFixture();
void setUp() override;
-
void tearDown() override;
/**
@@ -110,18 +104,9 @@ protected:
virtual std::unique_ptr<ShardRegistry> makeShardRegistry(ConnectionString configConnStr);
/**
- * Base class returns nullptr.
- */
- virtual std::unique_ptr<DistLockCatalog> makeDistLockCatalog();
-
- /**
- * Base class returns nullptr.
- *
- * Note: DistLockManager takes ownership of the DistLockCatalog, so if DistLockCatalog is not
- * nullptr, a real or mock DistLockManager must be supplied.
+     * Allows tests to install a custom DistLockManager; the base class installs a mock.
*/
- virtual std::unique_ptr<DistLockManager> makeDistLockManager(
- std::unique_ptr<DistLockCatalog> distLockCatalog);
+ virtual std::unique_ptr<DistLockManager> makeDistLockManager();
/**
* Base class returns nullptr.
diff --git a/src/mongo/db/s/shardsvr_drop_collection_command.cpp b/src/mongo/db/s/shardsvr_drop_collection_command.cpp
index b838f3e8dcf..1fe52939abb 100644
--- a/src/mongo/db/s/shardsvr_drop_collection_command.cpp
+++ b/src/mongo/db/s/shardsvr_drop_collection_command.cpp
@@ -32,9 +32,7 @@
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/commands.h"
#include "mongo/db/drop_database_gen.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/sharding_logging.h"
-#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/grid.h"
diff --git a/src/mongo/db/s/shardsvr_drop_database_command.cpp b/src/mongo/db/s/shardsvr_drop_database_command.cpp
index 785ded10239..b2938bb3319 100644
--- a/src/mongo/db/s/shardsvr_drop_database_command.cpp
+++ b/src/mongo/db/s/shardsvr_drop_database_command.cpp
@@ -34,7 +34,6 @@
#include "mongo/db/drop_database_gen.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/sharding_logging.h"
-#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/grid.h"
diff --git a/src/mongo/db/s/shardsvr_refine_collection_shard_key_command.cpp b/src/mongo/db/s/shardsvr_refine_collection_shard_key_command.cpp
index 8ecd531f64d..feed625c073 100644
--- a/src/mongo/db/s/shardsvr_refine_collection_shard_key_command.cpp
+++ b/src/mongo/db/s/shardsvr_refine_collection_shard_key_command.cpp
@@ -34,7 +34,6 @@
#include "mongo/db/drop_database_gen.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/sharding_logging.h"
-#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/grid.h"
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index 718aa93c20e..f2b883d423b 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -44,6 +44,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/s/collection_sharding_runtime.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/db/s/shard_filtering_metadata_refresh.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/logv2/log.h"
@@ -135,7 +136,7 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
const std::string whyMessage(str::stream()
<< "splitting chunk " << redact(chunkRange.toString()) << " in "
<< nss.toString());
- auto scopedDistLock = Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
+ auto scopedDistLock = DistLockManager::get(opCtx)->lock(
opCtx, nss.ns(), whyMessage, DistLockManager::kDefaultLockTimeout);
if (!scopedDistLock.isOK()) {
return scopedDistLock.getStatus().withContext(
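The split_chunk.cpp change above shows the new call-site shape: a scoped lock acquired from DistLockManager::get(opCtx) with a why-message and a timeout, released automatically when the returned handle goes out of scope. A rough standalone analogue, using a process-local timed mutex in place of the config server's lock document; tryDistLock, ScopedLock, and the timeout value are illustrative, not the real API:

#include <chrono>
#include <iostream>
#include <mutex>
#include <optional>
#include <string>

std::timed_mutex gDistLock;  // stand-in for the config-server lock document

struct ScopedLock {
    ScopedLock() = default;
    ScopedLock(ScopedLock&& other) noexcept : owns(other.owns) {
        other.owns = false;
    }
    ~ScopedLock() {
        if (owns)
            gDistLock.unlock();  // release on destruction mirrors ScopedDistLock
    }
    bool owns = true;
};

// An engaged optional models a successful StatusWith; empty models LockBusy.
std::optional<ScopedLock> tryDistLock(const std::string& name,
                                      const std::string& whyMessage,
                                      std::chrono::milliseconds waitFor) {
    if (!gDistLock.try_lock_for(waitFor))
        return std::nullopt;
    std::cout << "locked " << name << ": " << whyMessage << '\n';
    return ScopedLock{};
}

int main() {
    auto lock = tryDistLock("TestDB.TestColl", "splitting chunk", std::chrono::seconds(5));
    if (!lock) {
        std::cerr << "could not acquire collection distributed lock\n";
        return 1;  // the caller would add context to the status instead
    }
    // ...perform the chunk metadata change while the lock is held...
    return 0;
}  // lock released here by ~ScopedLock()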
diff --git a/src/mongo/db/s/split_chunk_test.cpp b/src/mongo/db/s/split_chunk_test.cpp
index 3d9147c4130..298059e85cc 100644
--- a/src/mongo/db/s/split_chunk_test.cpp
+++ b/src/mongo/db/s/split_chunk_test.cpp
@@ -34,6 +34,7 @@
#include <boost/optional.hpp>
#include "mongo/db/json.h"
+#include "mongo/db/s/dist_lock_manager_mock.h"
#include "mongo/db/s/shard_server_test_fixture.h"
#include "mongo/db/s/sharding_initialization_mongod.h"
#include "mongo/db/s/split_chunk.h"
@@ -42,7 +43,6 @@
#include "mongo/executor/remote_command_response.h"
#include "mongo/executor/task_executor.h"
#include "mongo/logv2/log.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_database.h"
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp b/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
index ccb1d84d1c8..d3e7e2818e1 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
@@ -292,12 +292,11 @@ protected:
// expected shards. We cannot mock the network responses for the ShardRegistry reload, since the
// ShardRegistry reload is done over DBClient, not the NetworkInterface, and there is no
// DBClientMock analogous to the NetworkInterfaceMock.
- std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) override {
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() override {
class StaticCatalogClient final : public ShardingCatalogClientMock {
public:
- StaticCatalogClient() : ShardingCatalogClientMock(nullptr) {}
+ StaticCatalogClient() = default;
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
OperationContext* opCtx, repl::ReadConcernLevel readConcern) override {
diff --git a/src/mongo/db/s/transaction_coordinator_test_fixture.cpp b/src/mongo/db/s/transaction_coordinator_test_fixture.cpp
index 377a1993213..560dc51524b 100644
--- a/src/mongo/db/s/transaction_coordinator_test_fixture.cpp
+++ b/src/mongo/db/s/transaction_coordinator_test_fixture.cpp
@@ -74,13 +74,12 @@ void TransactionCoordinatorTestFixture::tearDown() {
ShardServerTestFixture::tearDown();
}
-std::unique_ptr<ShardingCatalogClient> TransactionCoordinatorTestFixture::makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) {
+std::unique_ptr<ShardingCatalogClient>
+TransactionCoordinatorTestFixture::makeShardingCatalogClient() {
class StaticCatalogClient final : public ShardingCatalogClientMock {
public:
- StaticCatalogClient(std::vector<ShardId> shardIds)
- : ShardingCatalogClientMock(nullptr), _shardIds(std::move(shardIds)) {}
+ StaticCatalogClient(std::vector<ShardId> shardIds) : _shardIds(std::move(shardIds)) {}
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
OperationContext* opCtx, repl::ReadConcernLevel readConcern) override {
diff --git a/src/mongo/db/s/transaction_coordinator_test_fixture.h b/src/mongo/db/s/transaction_coordinator_test_fixture.h
index 37232f0d340..6a60538a9fa 100644
--- a/src/mongo/db/s/transaction_coordinator_test_fixture.h
+++ b/src/mongo/db/s/transaction_coordinator_test_fixture.h
@@ -53,8 +53,7 @@ protected:
* ShardRegistry reload is done over DBClient, not the NetworkInterface, and there is no
* DBClientMock analogous to the NetworkInterfaceMock.
*/
- std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) override;
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() override;
void assertCommandSentAndRespondWith(const StringData& commandName,
const StatusWith<BSONObj>& response,
diff --git a/src/mongo/db/s/vector_clock_config_server_test.cpp b/src/mongo/db/s/vector_clock_config_server_test.cpp
index 6bc2d618594..81e8138c097 100644
--- a/src/mongo/db/s/vector_clock_config_server_test.cpp
+++ b/src/mongo/db/s/vector_clock_config_server_test.cpp
@@ -33,8 +33,8 @@
#include "mongo/db/keys_collection_manager.h"
#include "mongo/db/logical_time_validator.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
+#include "mongo/db/s/dist_lock_manager_mock.h"
#include "mongo/db/vector_clock_mutable.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/unittest/death_test.h"
#include "mongo/util/clock_source_mock.h"
@@ -71,10 +71,8 @@ protected:
// The VectorClock tests assume nothing else ticks ClusterTime. However,
// ConfigServerTestFixture installs an actual DistLockManager, which does writes (thereby
// ticking ClusterTime). So for these tests, that is overridden to be a mock.
- std::unique_ptr<DistLockManager> makeDistLockManager(
- std::unique_ptr<DistLockCatalog> distLockCatalog) override {
- invariant(distLockCatalog);
- return std::make_unique<DistLockManagerMock>(std::move(distLockCatalog));
+ std::unique_ptr<DistLockManager> makeDistLockManager() override {
+ return std::make_unique<DistLockManagerMock>();
}
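The comment above carries the key reasoning: the VectorClock tests stay deterministic only because the mock lock manager performs no writes, and writes are what tick ClusterTime. A toy model of that interaction; ClusterClock, LockManager, and ping() are invented names for the sketch:

#include <cassert>

struct ClusterClock {
    long ticks = 0;
    void onWrite() { ++ticks; }  // every replicated write advances cluster time
};

struct LockManager {
    virtual ~LockManager() = default;
    virtual void ping(ClusterClock& clock) = 0;
};

struct RealLockManager : LockManager {
    void ping(ClusterClock& clock) override {
        clock.onWrite();  // a real manager persists ping documents
    }
};

struct MockLockManager : LockManager {
    void ping(ClusterClock&) override {}  // no writes, so no ticks
};

int main() {
    ClusterClock clock;

    MockLockManager mock;
    mock.ping(clock);
    assert(clock.ticks == 0);  // the "nothing else ticks ClusterTime" premise holds

    RealLockManager real;
    real.ping(clock);
    assert(clock.ticks == 1);  // a real manager would have invalidated it
    return 0;
}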
/**
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 786ab9bff6d..87e57a49fd6 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -96,8 +96,6 @@ env.Library(
],
LIBDEPS=[
'$BUILD_DIR/mongo/executor/async_multicaster',
- '$BUILD_DIR/mongo/s/catalog/dist_lock_catalog_impl',
- '$BUILD_DIR/mongo/s/catalog/replset_dist_lock_manager',
'$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_impl',
'$BUILD_DIR/mongo/util/periodic_runner_factory',
'common_s',
@@ -123,7 +121,7 @@ env.Library(
'$BUILD_DIR/mongo/executor/task_executor_interface',
'$BUILD_DIR/mongo/s/client/shard_interface',
'$BUILD_DIR/mongo/s/client/sharding_client',
- '$BUILD_DIR/mongo/s/coreshard',
+ 'coreshard',
'mongos_server_parameters',
],
)
@@ -223,12 +221,11 @@ env.Library(
LIBDEPS=[
'$BUILD_DIR/mongo/db/query/collation/collator_factory_mock',
'$BUILD_DIR/mongo/executor/task_executor_pool',
- '$BUILD_DIR/mongo/s/catalog/dist_lock_manager_mock',
- '$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_impl',
- '$BUILD_DIR/mongo/s/coreshard',
'$BUILD_DIR/mongo/transport/transport_layer_mock',
'$BUILD_DIR/mongo/util/clock_source_mock',
+ 'catalog/sharding_catalog_client_impl',
'committed_optime_metadata_hook',
+ 'coreshard',
'sharding_egress_metadata_hook_for_mongos',
'sharding_task_executor',
'sharding_test_fixture_common',
@@ -493,6 +490,7 @@ env.Library(
# library to inject a static or mongo initializer to mongos,
# please add that library as a private libdep of
# mongos_initializers.
+ '$BUILD_DIR/mongo/client/remote_command_targeter',
'$BUILD_DIR/mongo/db/audit',
'$BUILD_DIR/mongo/db/auth/authmongos',
'$BUILD_DIR/mongo/db/commands/servers',
@@ -573,10 +571,10 @@ env.CppUnitTest(
'append_raw_responses_test.cpp',
'balancer_configuration_test.cpp',
'build_versioned_requests_for_targeted_shards_test.cpp',
- 'catalog_cache_test.cpp',
'catalog_cache_refresh_test.cpp',
- 'comparable_chunk_version_test.cpp',
- 'comparable_database_version_test.cpp',
+ 'catalog_cache_test.cpp',
+ 'catalog/sharding_catalog_client_test.cpp',
+ 'catalog/sharding_catalog_write_retry_test.cpp',
'catalog/type_changelog_test.cpp',
'catalog/type_chunk_test.cpp',
'catalog/type_collection_test.cpp',
@@ -596,6 +594,8 @@ env.CppUnitTest(
'client/shard_remote_test.cpp',
'cluster_identity_loader_test.cpp',
'cluster_last_error_info_test.cpp',
+ 'comparable_chunk_version_test.cpp',
+ 'comparable_database_version_test.cpp',
'hedge_options_util_test.cpp',
'mongos_topology_coordinator_test.cpp',
'request_types/add_shard_request_test.cpp',
diff --git a/src/mongo/s/append_raw_responses_test.cpp b/src/mongo/s/append_raw_responses_test.cpp
index f3d904f0215..3ded061ce00 100644
--- a/src/mongo/s/append_raw_responses_test.cpp
+++ b/src/mongo/s/append_raw_responses_test.cpp
@@ -160,15 +160,11 @@ protected:
}
}
- std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) override {
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() override {
class StaticCatalogClient final : public ShardingCatalogClientMock {
public:
- StaticCatalogClient(std::unique_ptr<DistLockManager> distLockManager,
- std::vector<ShardId> shardIds)
- : ShardingCatalogClientMock(std::move(distLockManager)),
- _shardIds(std::move(shardIds)) {}
+ StaticCatalogClient(std::vector<ShardId> shardIds) : _shardIds(std::move(shardIds)) {}
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
OperationContext* opCtx, repl::ReadConcernLevel readConcern) override {
@@ -188,7 +184,7 @@ protected:
const std::vector<ShardId> _shardIds;
};
- return std::make_unique<StaticCatalogClient>(std::move(distLockManager), kShardIdList);
+ return std::make_unique<StaticCatalogClient>(kShardIdList);
}
const ShardId kShard1{"s1"};
diff --git a/src/mongo/s/catalog/SConscript b/src/mongo/s/catalog/SConscript
index 82ddd1e0c72..21828701cf4 100644
--- a/src/mongo/s/catalog/SConscript
+++ b/src/mongo/s/catalog/SConscript
@@ -16,61 +16,6 @@ env.Library(
)
env.Library(
- target='dist_lock_manager',
- source=[
- 'dist_lock_manager.cpp',
- 'dist_lock_ping_info.cpp',
- ],
- LIBDEPS=[
- '$BUILD_DIR/mongo/base',
- ],
-)
-
-env.Library(
- target='dist_lock_catalog_interface',
- source=[
- 'dist_lock_catalog.cpp',
- ],
- LIBDEPS=[
- '$BUILD_DIR/mongo/db/write_concern_options',
- ],
-)
-
-env.Library(
- target='replset_dist_lock_manager',
- source=[
- 'replset_dist_lock_manager.cpp',
- ],
- LIBDEPS=[
- '$BUILD_DIR/mongo/db/service_context',
- '$BUILD_DIR/mongo/s/catalog/dist_lock_catalog_interface',
- '$BUILD_DIR/mongo/s/catalog/dist_lock_manager',
- '$BUILD_DIR/mongo/s/client/sharding_client',
- '$BUILD_DIR/mongo/util/fail_point'
- ],
-)
-
-env.Library(
- target='dist_lock_catalog_impl',
- source=[
- 'dist_lock_catalog_impl.cpp',
- ],
- LIBDEPS=[
- '$BUILD_DIR/mongo/base',
- '$BUILD_DIR/mongo/client/read_preference',
- '$BUILD_DIR/mongo/client/remote_command_targeter',
- '$BUILD_DIR/mongo/db/common',
- '$BUILD_DIR/mongo/db/query/command_request_response',
- '$BUILD_DIR/mongo/db/repl/read_concern_args',
- '$BUILD_DIR/mongo/rpc/command_status',
- '$BUILD_DIR/mongo/s/catalog/dist_lock_catalog_interface',
- '$BUILD_DIR/mongo/s/client/sharding_client',
- '$BUILD_DIR/mongo/s/write_ops/batch_write_types',
- '$BUILD_DIR/mongo/util/net/network',
- ],
-)
-
-env.Library(
target='sharding_catalog_client_impl',
source=[
'sharding_catalog_client_impl.cpp',
@@ -81,7 +26,6 @@ env.Library(
'$BUILD_DIR/mongo/executor/network_interface',
'$BUILD_DIR/mongo/s/client/sharding_client',
'$BUILD_DIR/mongo/s/coreshard',
- 'dist_lock_manager',
'sharding_catalog_client',
],
LIBDEPS_PRIVATE=[
@@ -90,55 +34,11 @@ env.Library(
)
env.Library(
- target='dist_lock_manager_mock',
- source=[
- 'dist_lock_manager_mock.cpp',
- ],
- LIBDEPS=[
- '$BUILD_DIR/mongo/unittest/unittest',
- 'dist_lock_manager',
- ],
-)
-
-env.Library(
- target='dist_lock_catalog_mock',
- source=[
- 'dist_lock_catalog_mock.cpp',
- ],
- LIBDEPS=[
- '$BUILD_DIR/mongo/s/common_s',
- '$BUILD_DIR/mongo/unittest/unittest',
- 'dist_lock_catalog_interface',
- ]
-)
-
-env.Library(
target='sharding_catalog_client_mock',
source=[
'sharding_catalog_client_mock.cpp',
],
LIBDEPS=[
- 'dist_lock_manager_mock',
'sharding_catalog_client',
]
)
-
-env.CppUnitTest(
- target='s_catalog_test',
- source=[
- 'dist_lock_catalog_impl_test.cpp',
- 'replset_dist_lock_manager_test.cpp',
- 'sharding_catalog_client_test.cpp',
- 'sharding_catalog_write_retry_test.cpp',
- ],
- LIBDEPS=[
- '$BUILD_DIR/mongo/db/auth/authmocks',
- '$BUILD_DIR/mongo/db/s/shard_server_test_fixture',
- '$BUILD_DIR/mongo/db/storage/duplicate_key_error_info',
- '$BUILD_DIR/mongo/s/catalog/dist_lock_catalog_mock',
- '$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_mock',
- '$BUILD_DIR/mongo/s/sharding_router_test_fixture',
- 'dist_lock_catalog_impl',
- 'replset_dist_lock_manager',
- ]
-)
diff --git a/src/mongo/s/catalog/dist_lock_ping_info.cpp b/src/mongo/s/catalog/dist_lock_ping_info.cpp
deleted file mode 100644
index 2549e55bb19..00000000000
--- a/src/mongo/s/catalog/dist_lock_ping_info.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/s/catalog/dist_lock_ping_info.h"
-
-namespace mongo {
-
-DistLockPingInfo::DistLockPingInfo() = default;
-
-DistLockPingInfo::DistLockPingInfo(
- StringData idArg, Date_t lastPingArg, Date_t remoteArg, OID tsArg, OID electionIdArg)
- : processId(idArg.toString()),
- lastPing(lastPingArg),
- configLocalTime(remoteArg),
- lockSessionId(std::move(tsArg)),
- electionId(std::move(electionIdArg)) {}
-} // namespace mongo
diff --git a/src/mongo/s/catalog/dist_lock_ping_info.h b/src/mongo/s/catalog/dist_lock_ping_info.h
deleted file mode 100644
index 6e236fb5133..00000000000
--- a/src/mongo/s/catalog/dist_lock_ping_info.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include <string>
-
-#include "mongo/base/string_data.h"
-#include "mongo/bson/oid.h"
-#include "mongo/util/time_support.h"
-
-namespace mongo {
-
-/**
- * Data structure for storing information about distributed lock pings.
- */
-struct DistLockPingInfo {
- DistLockPingInfo();
- DistLockPingInfo(StringData processId,
- Date_t lastPing,
- Date_t configLocalTime,
- OID lockSessionId,
- OID electionId);
-
- // the process processId of the last known owner of the lock.
- std::string processId;
-
- // the ping value from the last owner of the lock.
- Date_t lastPing;
-
- // the config server local time when this object was updated.
- Date_t configLocalTime;
-
- // last known owner of the lock.
- OID lockSessionId;
-
- // the election id of the config server when this object was updated.
- // Note: unused by legacy dist lock.
- OID electionId;
-};
-} // namespace mongo
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index bb9a97c15cf..a8918a2ed36 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -37,7 +37,6 @@
#include "mongo/db/keys_collection_document.h"
#include "mongo/db/repl/optime_with.h"
#include "mongo/db/write_concern_options.h"
-#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/client/shard.h"
namespace mongo {
@@ -98,18 +97,6 @@ public:
virtual ~ShardingCatalogClient() = default;
/**
- * Performs implementation-specific startup tasks. Must be run after the catalog client
- * has been installed into the global 'grid' object. Implementations do not need to guarantee
- * thread safety so callers should employ proper synchronization when calling this method.
- */
- virtual void startup() = 0;
-
- /**
- * Performs necessary cleanup when shutting down cleanly.
- */
- virtual void shutDown(OperationContext* opCtx) = 0;
-
- /**
* Retrieves the metadata for a given database, if it exists.
*
* @param dbName name of the database (case sensitive)
@@ -210,8 +197,8 @@ public:
OperationContext* opCtx, repl::ReadConcernLevel readConcern) = 0;
/**
- * Runs a user management command on the config servers, potentially synchronizing through
- * a distributed lock. Do not use for general write command execution.
+ * Runs a user management command on the config servers. Do not use for general write command
+ * execution.
*
* @param commandName: name of command
* @param dbname: database for which the user management command is invoked
@@ -343,15 +330,6 @@ public:
const BSONObj& query,
const WriteConcernOptions& writeConcern) = 0;
- /**
- * Obtains a reference to the distributed lock manager instance to use for synchronizing
- * system-wide changes.
- *
- * The returned reference is valid only as long as the catalog client is valid and should not
- * be cached.
- */
- virtual DistLockManager* getDistLockManager() = 0;
-
protected:
ShardingCatalogClient() = default;
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 35399985419..a0dadf7de00 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -52,7 +52,6 @@
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/s/catalog/config_server_version.h"
-#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_config_version.h"
@@ -135,33 +134,10 @@ void sendRetryableWriteBatchRequestToConfig(OperationContext* opCtx,
} // namespace
-ShardingCatalogClientImpl::ShardingCatalogClientImpl(
- std::unique_ptr<DistLockManager> distLockManager)
- : _distLockManager(std::move(distLockManager)) {}
+ShardingCatalogClientImpl::ShardingCatalogClientImpl() = default;
ShardingCatalogClientImpl::~ShardingCatalogClientImpl() = default;
-void ShardingCatalogClientImpl::startup() {
- stdx::lock_guard<Latch> lk(_mutex);
- if (_started) {
- return;
- }
-
- _started = true;
- _distLockManager->startUp();
-}
-
-void ShardingCatalogClientImpl::shutDown(OperationContext* opCtx) {
- LOGV2_DEBUG(22673, 1, "ShardingCatalogClientImpl::shutDown() called.");
- {
- stdx::lock_guard<Latch> lk(_mutex);
- _inShutdown = true;
- }
-
- invariant(_distLockManager);
- _distLockManager->shutDown(opCtx);
-}
-
Status ShardingCatalogClientImpl::updateShardingCatalogEntryForCollection(
OperationContext* opCtx,
const NamespaceString& nss,
@@ -707,11 +683,6 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCt
return Status::OK();
}
-DistLockManager* ShardingCatalogClientImpl::getDistLockManager() {
- invariant(_distLockManager);
- return _distLockManager.get();
-}
-
Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& doc,
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index 6e966cf4c8e..66f25e71fb3 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -51,6 +51,9 @@ class TaskExecutor;
class ShardingCatalogClientImpl final : public ShardingCatalogClient {
public:
+ ShardingCatalogClientImpl();
+ virtual ~ShardingCatalogClientImpl();
+
/*
* Updates (or if "upsert" is true, creates) catalog data for the sharded collection "collNs" by
* writing a document to the "config.collections" collection with the catalog information
@@ -61,17 +64,6 @@ public:
const CollectionType& coll,
const bool upsert);
- explicit ShardingCatalogClientImpl(std::unique_ptr<DistLockManager> distLockManager);
- virtual ~ShardingCatalogClientImpl();
-
- /**
- * Safe to call multiple times as long as the calls are externally synchronized to be
- * non-overlapping.
- */
- void startup() override;
-
- void shutDown(OperationContext* opCtx) override;
-
DatabaseType getDatabase(OperationContext* opCtx,
StringData db,
repl::ReadConcernLevel readConcernLevel) override;
@@ -152,8 +144,6 @@ public:
const BSONObj& query,
const WriteConcernOptions& writeConcern) override;
- DistLockManager* getDistLockManager() override;
-
StatusWith<std::vector<KeysCollectionDocument>> getNewKeys(
OperationContext* opCtx,
StringData purpose,
@@ -199,25 +189,6 @@ private:
const std::string& dbName,
const ReadPreferenceSetting& readPref,
repl::ReadConcernLevel readConcernLevel);
-
- //
- // All member variables are labeled with one of the following codes indicating the
- // synchronization rules for accessing them.
- //
- // (M) Must hold _mutex for access.
- // (R) Read only, can only be written during initialization.
- //
-
- Mutex _mutex = MONGO_MAKE_LATCH("ShardingCatalogClientImpl::_mutex");
-
- // Distributed lock manager singleton.
- std::unique_ptr<DistLockManager> _distLockManager; // (R)
-
- // True if shutDown() has been called. False, otherwise.
- bool _inShutdown = false; // (M)
-
- // True if startup() has been called.
- bool _started = false; // (M)
};
} // namespace mongo
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
index 6305f604d0a..7f3c5a90299 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
@@ -31,10 +31,6 @@
#include "mongo/s/catalog/sharding_catalog_client_mock.h"
-#include <memory>
-
-#include "mongo/base/status.h"
-#include "mongo/db/repl/optime.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_config_version.h"
@@ -44,24 +40,10 @@
namespace mongo {
-ShardingCatalogClientMock::ShardingCatalogClientMock(
- std::unique_ptr<DistLockManager> distLockManager)
- : _distLockManager(std::move(distLockManager)) {}
+ShardingCatalogClientMock::ShardingCatalogClientMock() = default;
ShardingCatalogClientMock::~ShardingCatalogClientMock() = default;
-void ShardingCatalogClientMock::startup() {
- if (_distLockManager) {
- _distLockManager->startUp();
- }
-}
-
-void ShardingCatalogClientMock::shutDown(OperationContext* opCtx) {
- if (_distLockManager) {
- _distLockManager->shutDown(opCtx);
- }
-}
-
DatabaseType ShardingCatalogClientMock::getDatabase(OperationContext* opCtx,
StringData db,
repl::ReadConcernLevel readConcernLevel) {
@@ -177,10 +159,6 @@ Status ShardingCatalogClientMock::createDatabase(OperationContext* opCtx,
return {ErrorCodes::InternalError, "Method not implemented"};
}
-DistLockManager* ShardingCatalogClientMock::getDistLockManager() {
- return _distLockManager.get();
-}
-
StatusWith<std::vector<KeysCollectionDocument>> ShardingCatalogClientMock::getNewKeys(
OperationContext* opCtx,
StringData purpose,
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.h b/src/mongo/s/catalog/sharding_catalog_client_mock.h
index 4aaca81b020..ba7ec3ba81e 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.h
@@ -38,13 +38,9 @@ namespace mongo {
*/
class ShardingCatalogClientMock : public ShardingCatalogClient {
public:
- ShardingCatalogClientMock(std::unique_ptr<DistLockManager> distLockManager);
+ ShardingCatalogClientMock();
~ShardingCatalogClientMock();
- void startup() override;
-
- void shutDown(OperationContext* opCtx) override;
-
DatabaseType getDatabase(OperationContext* opCtx,
StringData db,
repl::ReadConcernLevel readConcernLevel) override;
@@ -129,8 +125,6 @@ public:
Status createDatabase(OperationContext* opCtx, StringData dbName, ShardId primaryShard);
- DistLockManager* getDistLockManager() override;
-
StatusWith<std::vector<KeysCollectionDocument>> getNewKeys(
OperationContext* opCtx,
StringData purpose,
@@ -138,8 +132,6 @@ public:
repl::ReadConcernLevel readConcernLevel) override;
private:
- std::unique_ptr<DistLockManager> _distLockManager;
-
StatusWith<repl::OpTimeWith<std::vector<BSONObj>>> _exhaustiveFindOnConfig(
OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
diff --git a/src/mongo/s/catalog/sharding_catalog_client_test.cpp b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
index 13227fcf77c..63fed92f4d9 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
@@ -43,7 +43,6 @@
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/rpc/metadata/tracking_metadata.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
@@ -70,13 +69,6 @@ using rpc::ReplSetMetadata;
using std::vector;
using unittest::assertGet;
-class ShardingCatalogClientTest : public ShardingTestFixture {
-protected:
- DistLockManagerMock* distLock() const {
- return dynamic_cast<DistLockManagerMock*>(ShardingTestFixture::distLock());
- }
-};
-
const int kMaxCommandRetry = 3;
const NamespaceString kNamespace("TestDB", "TestColl");
@@ -87,6 +79,8 @@ BSONObj getReplSecondaryOkMetadata() {
return o.obj();
}
+using ShardingCatalogClientTest = ShardingTestFixture;
+
TEST_F(ShardingCatalogClientTest, GetCollectionExisting) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
@@ -636,13 +630,6 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandRewriteWriteConce
// Tests that if you send a w:1 write concern it gets replaced with w:majority
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
- distLock()->expectLock(
- [](StringData name, StringData whyMessage, Milliseconds waitFor) {
- ASSERT_EQUALS("authorizationData", name);
- ASSERT_EQUALS("dropUser", whyMessage);
- },
- Status::OK());
-
auto future =
launchAsync([this] {
BSONObjBuilder responseBuilder;
diff --git a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
index 5cc2d0707db..f7bae8c4d2b 100644
--- a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
@@ -44,7 +44,6 @@
#include "mongo/db/write_concern.h"
#include "mongo/executor/task_executor.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/catalog/sharding_catalog_client_impl.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
diff --git a/src/mongo/s/mongos_main.cpp b/src/mongo/s/mongos_main.cpp
index 83a135d6f86..7d4c0850d51 100644
--- a/src/mongo/s/mongos_main.cpp
+++ b/src/mongo/s/mongos_main.cpp
@@ -42,7 +42,6 @@
#include "mongo/client/connpool.h"
#include "mongo/client/dbclient_rs.h"
#include "mongo/client/global_conn_pool.h"
-#include "mongo/client/remote_command_targeter.h"
#include "mongo/client/remote_command_targeter_factory_impl.h"
#include "mongo/client/replica_set_monitor.h"
#include "mongo/config.h"
@@ -346,10 +345,6 @@ void cleanupTask(const ShutdownTaskArgs& shutdownArgs) {
pool->shutdownAndJoin();
}
- if (auto catalog = Grid::get(opCtx)->catalogClient()) {
- catalog->shutDown(opCtx);
- }
-
if (auto shardRegistry = Grid::get(opCtx)->shardRegistry()) {
shardRegistry->shutdown();
}
diff --git a/src/mongo/s/sharding_initialization.cpp b/src/mongo/s/sharding_initialization.cpp
index a721115577d..f92a2cf5948 100644
--- a/src/mongo/s/sharding_initialization.cpp
+++ b/src/mongo/s/sharding_initialization.cpp
@@ -57,8 +57,6 @@
#include "mongo/rpc/metadata/config_server_metadata.h"
#include "mongo/rpc/metadata/metadata_hook.h"
#include "mongo/s/balancer_configuration.h"
-#include "mongo/s/catalog/dist_lock_catalog_impl.h"
-#include "mongo/s/catalog/replset_dist_lock_manager.h"
#include "mongo/s/catalog/sharding_catalog_client_impl.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog_cache.h"
@@ -79,7 +77,6 @@
#include "mongo/util/str.h"
namespace mongo {
-
namespace {
using executor::ConnectionPool;
@@ -90,19 +87,6 @@ using executor::ThreadPoolTaskExecutor;
static constexpr auto kRetryInterval = Seconds{2};
-std::unique_ptr<ShardingCatalogClient> makeCatalogClient(ServiceContext* service,
- StringData distLockProcessId) {
- auto distLockCatalog = std::make_unique<DistLockCatalogImpl>();
- auto distLockManager =
- std::make_unique<ReplSetDistLockManager>(service,
- distLockProcessId,
- std::move(distLockCatalog),
- ReplSetDistLockManager::kDistLockPingInterval,
- ReplSetDistLockManager::kDistLockExpirationTime);
-
- return std::make_unique<ShardingCatalogClientImpl>(std::move(distLockManager));
-}
-
std::shared_ptr<executor::TaskExecutor> makeShardingFixedTaskExecutor(
std::unique_ptr<NetworkInterface> net) {
auto executor =
@@ -208,7 +192,7 @@ Status initializeGlobalShardingState(OperationContext* opCtx,
const auto service = opCtx->getServiceContext();
auto const grid = Grid::get(service);
- grid->init(makeCatalogClient(service, distLockProcessId),
+ grid->init(std::make_unique<ShardingCatalogClientImpl>(),
std::move(catalogCache),
std::move(shardRegistry),
std::make_unique<ClusterCursorManager>(service->getPreciseClockSource()),
@@ -219,9 +203,6 @@ Status initializeGlobalShardingState(OperationContext* opCtx,
// The shard registry must be started once the grid is initialized
grid->shardRegistry()->startupPeriodicReloader(opCtx);
- // The catalog client must be started after the shard registry has been started up
- grid->catalogClient()->startup();
-
auto keysCollectionClient =
std::make_unique<KeysCollectionClientSharded>(grid->catalogClient());
auto keyManager =
diff --git a/src/mongo/s/sharding_router_test_fixture.cpp b/src/mongo/s/sharding_router_test_fixture.cpp
index dca4ff42323..04c652dcd8d 100644
--- a/src/mongo/s/sharding_router_test_fixture.cpp
+++ b/src/mongo/s/sharding_router_test_fixture.cpp
@@ -53,7 +53,6 @@
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/rpc/metadata/tracking_metadata.h"
#include "mongo/s/balancer_configuration.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/catalog/sharding_catalog_client_impl.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_shard.h"
@@ -132,9 +131,6 @@ ShardingTestFixture::ShardingTestFixture()
auto executorPool = std::make_unique<executor::TaskExecutorPool>();
executorPool->addExecutors(std::move(executorsForPool), _fixedExecutor);
- auto uniqueDistLockManager = std::make_unique<DistLockManagerMock>(nullptr);
- _distLockManager = uniqueDistLockManager.get();
-
NumHostsTargetedMetrics::get(service).startup();
ConnectionString configCS = ConnectionString::forReplicaSet(
@@ -174,17 +170,13 @@ ShardingTestFixture::ShardingTestFixture()
// until we get rid of it.
auto uniqueOpCtx = makeOperationContext();
auto const grid = Grid::get(uniqueOpCtx.get());
- grid->init(makeShardingCatalogClient(std::move(uniqueDistLockManager)),
+ grid->init(makeShardingCatalogClient(),
std::move(catalogCache),
std::move(shardRegistry),
std::make_unique<ClusterCursorManager>(service->getPreciseClockSource()),
std::make_unique<BalancerConfiguration>(),
std::move(executorPool),
_mockNetwork);
-
- if (grid->catalogClient()) {
- grid->catalogClient()->startup();
- }
}
ShardingTestFixture::~ShardingTestFixture() {
@@ -194,9 +186,6 @@ ShardingTestFixture::~ShardingTestFixture() {
if (grid->getExecutorPool()) {
grid->getExecutorPool()->shutdownAndJoin();
}
- if (grid->catalogClient()) {
- grid->catalogClient()->shutDown(makeOperationContext().get());
- }
if (grid->shardRegistry()) {
grid->shardRegistry()->shutdown();
}
@@ -419,10 +408,8 @@ void ShardingTestFixture::checkReadConcern(const BSONObj& cmdObj,
}
}
-std::unique_ptr<ShardingCatalogClient> ShardingTestFixture::makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) {
- invariant(distLockManager);
- return std::make_unique<ShardingCatalogClientImpl>(std::move(distLockManager));
+std::unique_ptr<ShardingCatalogClient> ShardingTestFixture::makeShardingCatalogClient() {
+ return std::make_unique<ShardingCatalogClientImpl>();
}
} // namespace mongo
diff --git a/src/mongo/s/sharding_router_test_fixture.h b/src/mongo/s/sharding_router_test_fixture.h
index 87956428105..2758c3b58af 100644
--- a/src/mongo/s/sharding_router_test_fixture.h
+++ b/src/mongo/s/sharding_router_test_fixture.h
@@ -37,7 +37,6 @@ class BSONObj;
class ShardingCatalogClient;
struct ChunkVersion;
class CollectionType;
-class DistLockManagerMock;
class ShardRegistry;
class ShardType;
@@ -143,8 +142,7 @@ protected:
}
private:
- std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) override;
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() override;
transport::SessionHandle _transportSession;
diff --git a/src/mongo/s/sharding_test_fixture_common.cpp b/src/mongo/s/sharding_test_fixture_common.cpp
index d5f38c4423b..12db6905098 100644
--- a/src/mongo/s/sharding_test_fixture_common.cpp
+++ b/src/mongo/s/sharding_test_fixture_common.cpp
@@ -165,8 +165,4 @@ void ShardingTestFixtureCommon::expectConfigCollectionInsert(const HostAndPort&
});
}
-std::unique_ptr<ShardingCatalogClient> ShardingTestFixtureCommon::makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) {
- return nullptr;
-}
} // namespace mongo
diff --git a/src/mongo/s/sharding_test_fixture_common.h b/src/mongo/s/sharding_test_fixture_common.h
index b5df4c66ef0..e58624c9db2 100644
--- a/src/mongo/s/sharding_test_fixture_common.h
+++ b/src/mongo/s/sharding_test_fixture_common.h
@@ -38,13 +38,6 @@
namespace mongo {
-class DistLockCatalog;
-class DistLockManager;
-
-namespace executor {
-class TaskExecutor;
-} // namespace executor
-
/**
* Contains common functionality and tools, which apply to both mongos and mongod unit-tests.
*/
@@ -87,16 +80,6 @@ protected:
return _targeterFactory;
}
- DistLockCatalog* distLockCatalog() const {
- invariant(_distLockCatalog);
- return _distLockCatalog;
- }
-
- DistLockManager* distLock() const {
- invariant(_distLockManager);
- return _distLockManager;
- }
-
/**
* Blocking methods, which receive one message from the network and respond using the responses
* returned from the input function. This is a syntactic sugar for simple, single request +
@@ -129,14 +112,9 @@ protected:
const std::string& ns,
const BSONObj& detail);
- /**
- * Base class returns nullptr.
- *
- * Note: ShardingCatalogClient takes ownership of DistLockManager, so if DistLockManager is not
- * nulllptr, a real or mock ShardingCatalogClient must be supplied.
- */
- virtual std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager);
+ virtual std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() {
+ return nullptr;
+ }
// Since a NetworkInterface is a private member of a TaskExecutor, we store a raw pointer to the
// fixed TaskExecutor's NetworkInterface here.
@@ -153,14 +131,6 @@ protected:
// store a raw pointer to it here.
RemoteCommandTargeterFactoryMock* _targeterFactory = nullptr;
- // Since the DistLockCatalog is currently a private member of ReplSetDistLockManager, we store
- // a raw pointer to it here.
- DistLockCatalog* _distLockCatalog = nullptr;
-
- // Since the DistLockManager is currently a private member of ShardingCatalogClient, we
- // store a raw pointer to it here.
- DistLockManager* _distLockManager = nullptr;
-
private:
// Keeps the lifetime of the operation context
ServiceContext::UniqueOperationContext _opCtxHolder;