author     Spencer T Brody <spencer@mongodb.com>  2016-05-31 11:14:24 -0400
committer  Spencer T Brody <spencer@mongodb.com>  2016-06-13 18:26:51 -0400
commit     7899a8e8a62548fb7a149649082bc34bbc001dc2
tree       537c418fcd35755518d2f359401ddc644c925454
parent     710159c9602a6738e6455cfb26bc2d70a0454ae2
download   mongo-7899a8e8a62548fb7a149649082bc34bbc001dc2.tar.gz
SERVER-24323 Rename CatalogManager to ShardingCatalogClient
Renames the relevant classes and libraries, moves files, updates comments referencing the CatalogManager, and renames common methods for getting the catalogManager (like grid.catalogManager()). No functional changes.
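The pattern repeated throughout the diff below is purely a rename at each call site: grid.catalogManager(txn) becomes grid.catalogClient(txn), returning the same catalog interface under its new name. A minimal compilable sketch of that shape (Grid, OperationContext, and ShardingCatalogClient here are simplified stand-ins for the MongoDB internals, not the real declarations):

    #include <memory>
    #include <string>

    // Illustrative stand-ins; the real Grid, OperationContext and
    // ShardingCatalogClient live in the MongoDB source tree.
    struct OperationContext {};

    class ShardingCatalogClient {  // formerly CatalogManager
    public:
        void logChange(OperationContext* txn,
                       const std::string& what,
                       const std::string& ns) {
            // the real client writes an entry to config.changelog
        }
    };

    class Grid {
    public:
        // The renamed accessor: grid.catalogManager(txn) became
        // grid.catalogClient(txn); no behavior change.
        ShardingCatalogClient* catalogClient(OperationContext* txn) {
            return _catalogClient.get();
        }

    private:
        std::unique_ptr<ShardingCatalogClient> _catalogClient{new ShardingCatalogClient()};
    };

    int main() {
        Grid grid;
        OperationContext txn;
        grid.catalogClient(&txn)->logChange(&txn, "split", "test.foo");
        return 0;
    }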
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/SConscript | 2
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_s.cpp | 18
-rw-r--r--  src/mongo/db/auth/user_cache_invalidator_job.cpp | 4
-rw-r--r--  src/mongo/db/cloner.h | 2
-rw-r--r--  src/mongo/db/commands/clone.cpp | 4
-rw-r--r--  src/mongo/db/commands/conn_pool_stats.cpp | 2
-rw-r--r--  src/mongo/db/instance.cpp | 2
-rw-r--r--  src/mongo/db/mongod_options.cpp | 2
-rw-r--r--  src/mongo/db/repl/topology_coordinator_impl.cpp | 1
-rw-r--r--  src/mongo/db/repl/topology_coordinator_impl.h | 1
-rw-r--r--  src/mongo/db/repl/topology_coordinator_impl_test.cpp | 1
-rw-r--r--  src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp | 1
-rw-r--r--  src/mongo/db/s/SConscript | 4
-rw-r--r--  src/mongo/db/s/collection_metadata_test.cpp | 10
-rw-r--r--  src/mongo/db/s/config/configsvr_add_shard_command.cpp | 4
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp | 6
-rw-r--r--  src/mongo/db/s/metadata_loader.cpp | 16
-rw-r--r--  src/mongo/db/s/metadata_loader.h | 8
-rw-r--r--  src/mongo/db/s/metadata_loader_test.cpp | 26
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp | 54
-rw-r--r--  src/mongo/db/s/move_chunk_command.cpp | 3
-rw-r--r--  src/mongo/db/s/move_timing_helper.cpp | 2
-rw-r--r--  src/mongo/db/s/sharding_state.cpp | 6
-rw-r--r--  src/mongo/db/s/sharding_state_recovery.cpp | 8
-rw-r--r--  src/mongo/db/s/sharding_state_test.cpp | 4
-rw-r--r--  src/mongo/db/s/split_chunk_command.cpp | 9
-rw-r--r--  src/mongo/db/server_options.h | 2
-rw-r--r--  src/mongo/dbtests/framework.cpp | 2
-rw-r--r--  src/mongo/executor/network_test_env.h | 2
-rw-r--r--  src/mongo/s/SConscript | 6
-rw-r--r--  src/mongo/s/balancer/balancer.cpp | 10
-rw-r--r--  src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp | 14
-rw-r--r--  src/mongo/s/balancer/balancer_configuration.cpp | 6
-rw-r--r--  src/mongo/s/balancer/balancer_configuration_test.cpp | 2
-rw-r--r--  src/mongo/s/balancer/balancer_policy.cpp | 2
-rw-r--r--  src/mongo/s/balancer/cluster_statistics_impl.cpp | 4
-rw-r--r--  src/mongo/s/catalog/SConscript | 4
-rw-r--r--  src/mongo/s/catalog/catalog_cache.cpp | 4
-rw-r--r--  src/mongo/s/catalog/catalog_cache.h | 4
-rw-r--r--  src/mongo/s/catalog/catalog_manager_mock.cpp | 238
-rw-r--r--  src/mongo/s/catalog/dist_lock_manager.h | 2
-rw-r--r--  src/mongo/s/catalog/replset/SConscript | 6
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp | 82
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_append_db_stats_test.cpp | 10
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp | 40
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_log_change_test.cpp | 6
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp | 14
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp | 12
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp | 204
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_upgrade_test.cpp | 30
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_write_retry_test.cpp | 18
-rw-r--r--  src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp | 6
-rw-r--r--  src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp | 2
-rw-r--r--  src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp (renamed from src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp) | 249
-rw-r--r--  src/mongo/s/catalog/replset/sharding_catalog_client_impl.h (renamed from src/mongo/s/catalog/replset/catalog_manager_replica_set.h) | 12
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client.h (renamed from src/mongo/s/catalog/catalog_manager.h) | 21
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.cpp | 238
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.h (renamed from src/mongo/s/catalog/catalog_manager_mock.h) | 10
-rw-r--r--  src/mongo/s/chunk.cpp | 14
-rw-r--r--  src/mongo/s/chunk_manager.cpp | 8
-rw-r--r--  src/mongo/s/client/shard_registry.cpp | 5
-rw-r--r--  src/mongo/s/client/shard_registry.h | 2
-rw-r--r--  src/mongo/s/client/sharding_network_connection_hook.h | 9
-rw-r--r--  src/mongo/s/client/version_manager.cpp | 2
-rw-r--r--  src/mongo/s/cluster_write.cpp | 4
-rw-r--r--  src/mongo/s/commands/cluster_add_shard_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_enable_sharding_cmd.cpp | 4
-rw-r--r--  src/mongo/s/commands/cluster_list_databases_cmd.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_list_shards_cmd.cpp | 4
-rw-r--r--  src/mongo/s/commands/cluster_map_reduce_cmd.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_merge_chunks_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_move_primary_cmd.cpp | 13
-rw-r--r--  src/mongo/s/commands/cluster_netstat_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_remove_shard_cmd.cpp | 20
-rw-r--r--  src/mongo/s/commands/cluster_shard_collection_cmd.cpp | 4
-rw-r--r--  src/mongo/s/commands/cluster_user_management_commands.cpp | 38
-rw-r--r--  src/mongo/s/commands/commands_public.cpp | 4
-rw-r--r--  src/mongo/s/config.cpp | 32
-rw-r--r--  src/mongo/s/grid.cpp | 10
-rw-r--r--  src/mongo/s/grid.h | 14
-rw-r--r--  src/mongo/s/server.cpp | 8
-rw-r--r--  src/mongo/s/sharding_initialization.cpp | 22
-rw-r--r--  src/mongo/s/sharding_initialization.h | 2
-rw-r--r--  src/mongo/s/sharding_raii.cpp | 4
-rw-r--r--  src/mongo/s/sharding_test_fixture.cpp | 25
-rw-r--r--  src/mongo/s/sharding_test_fixture.h | 15
-rw-r--r--  src/mongo/s/sharding_uptime_reporter.cpp | 4
87 files changed, 870 insertions(+), 877 deletions(-)
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index 9e53d26a224..8f4c088c26b 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -696,7 +696,7 @@ serveronlyLibdeps = [
"$BUILD_DIR/mongo/client/parallel",
"$BUILD_DIR/mongo/db/bson/dotted_path_support",
"$BUILD_DIR/mongo/executor/network_interface_factory",
- "$BUILD_DIR/mongo/s/catalog/replset/catalog_manager_replica_set",
+ "$BUILD_DIR/mongo/s/catalog/replset/sharding_catalog_client_impl",
"$BUILD_DIR/mongo/s/client/sharding_connection_hook",
"$BUILD_DIR/mongo/s/coreshard",
"$BUILD_DIR/mongo/s/serveronly",
diff --git a/src/mongo/db/auth/authz_manager_external_state_s.cpp b/src/mongo/db/auth/authz_manager_external_state_s.cpp
index 4bdb2648688..500cc85c70a 100644
--- a/src/mongo/db/auth/authz_manager_external_state_s.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_s.cpp
@@ -41,7 +41,7 @@
#include "mongo/db/auth/user_name.h"
#include "mongo/db/jsobj.h"
#include "mongo/rpc/get_status_from_command_result.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/grid.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/mongoutils/str.h"
@@ -69,7 +69,7 @@ Status AuthzManagerExternalStateMongos::getStoredAuthorizationVersion(OperationC
// that runs this command
BSONObj getParameterCmd = BSON("getParameter" << 1 << authSchemaVersionServerParameter << 1);
BSONObjBuilder builder;
- const bool ok = grid.catalogManager(txn)->runUserManagementReadCommand(
+ const bool ok = grid.catalogClient(txn)->runUserManagementReadCommand(
txn, "admin", getParameterCmd, &builder);
BSONObj cmdResult = builder.obj();
if (!ok) {
@@ -98,8 +98,8 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* txn
<< "showCredentials"
<< true);
BSONObjBuilder builder;
- const bool ok = grid.catalogManager(txn)->runUserManagementReadCommand(
- txn, "admin", usersInfoCmd, &builder);
+ const bool ok =
+ grid.catalogClient(txn)->runUserManagementReadCommand(txn, "admin", usersInfoCmd, &builder);
BSONObj cmdResult = builder.obj();
if (!ok) {
return getStatusFromCommandResult(cmdResult);
@@ -133,8 +133,8 @@ Status AuthzManagerExternalStateMongos::getRoleDescription(OperationContext* txn
<< "showPrivileges"
<< showPrivileges);
BSONObjBuilder builder;
- const bool ok = grid.catalogManager(txn)->runUserManagementReadCommand(
- txn, "admin", rolesInfoCmd, &builder);
+ const bool ok =
+ grid.catalogClient(txn)->runUserManagementReadCommand(txn, "admin", rolesInfoCmd, &builder);
BSONObj cmdResult = builder.obj();
if (!ok) {
return getStatusFromCommandResult(cmdResult);
@@ -166,7 +166,7 @@ Status AuthzManagerExternalStateMongos::getRoleDescriptionsForDB(OperationContex
<< showBuiltinRoles);
BSONObjBuilder builder;
const bool ok =
- grid.catalogManager(txn)->runUserManagementReadCommand(txn, dbname, rolesInfoCmd, &builder);
+ grid.catalogClient(txn)->runUserManagementReadCommand(txn, dbname, rolesInfoCmd, &builder);
BSONObj cmdResult = builder.obj();
if (!ok) {
return getStatusFromCommandResult(cmdResult);
@@ -180,7 +180,7 @@ Status AuthzManagerExternalStateMongos::getRoleDescriptionsForDB(OperationContex
bool AuthzManagerExternalStateMongos::hasAnyPrivilegeDocuments(OperationContext* txn) {
BSONObj usersInfoCmd = BSON("usersInfo" << 1);
BSONObjBuilder userBuilder;
- bool ok = grid.catalogManager(txn)->runUserManagementReadCommand(
+ bool ok = grid.catalogClient(txn)->runUserManagementReadCommand(
txn, "admin", usersInfoCmd, &userBuilder);
if (!ok) {
// If we were unable to complete the query,
@@ -198,7 +198,7 @@ bool AuthzManagerExternalStateMongos::hasAnyPrivilegeDocuments(OperationContext*
BSONObj rolesInfoCmd = BSON("rolesInfo" << 1);
BSONObjBuilder roleBuilder;
- ok = grid.catalogManager(txn)->runUserManagementReadCommand(
+ ok = grid.catalogClient(txn)->runUserManagementReadCommand(
txn, "admin", rolesInfoCmd, &roleBuilder);
if (!ok) {
return true;
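All of the authorization call sites in the hunks above share one control-flow shape: run a user-management read command into a BSONObjBuilder, and if it fails, convert the command reply into a Status via getStatusFromCommandResult. A self-contained sketch of that shape, with toy Status and reply types standing in for the real mongo ones:

    #include <iostream>
    #include <map>
    #include <string>

    // Simplified stand-in for mongo::Status.
    struct Status {
        bool ok;
        std::string reason;
    };

    using CommandReply = std::map<std::string, std::string>;

    // Mirrors getStatusFromCommandResult(): turn a failed reply into a Status.
    Status getStatusFromCommandResult(const CommandReply& reply) {
        auto it = reply.find("errmsg");
        if (it != reply.end()) {
            return {false, it->second};
        }
        return {true, ""};
    }

    // Shape of every call site above: run the read command, and if it did
    // not succeed, propagate the error embedded in the reply.
    Status getStoredAuthorizationVersion(bool simulateFailure) {
        CommandReply reply;
        bool ok = !simulateFailure;
        if (!ok) {
            reply["errmsg"] = "not authorized on admin";
            return getStatusFromCommandResult(reply);
        }
        // ... otherwise parse the requested fields out of the reply ...
        return {true, ""};
    }

    int main() {
        Status s = getStoredAuthorizationVersion(true);
        std::cout << (s.ok ? "ok" : s.reason) << "\n";
        return 0;
    }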
diff --git a/src/mongo/db/auth/user_cache_invalidator_job.cpp b/src/mongo/db/auth/user_cache_invalidator_job.cpp
index 5f58d956c5c..29283c7ed91 100644
--- a/src/mongo/db/auth/user_cache_invalidator_job.cpp
+++ b/src/mongo/db/auth/user_cache_invalidator_job.cpp
@@ -41,7 +41,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/server_parameters.h"
#include "mongo/rpc/get_status_from_command_result.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/grid.h"
#include "mongo/stdx/mutex.h"
#include "mongo/util/background.h"
@@ -93,7 +93,7 @@ public:
StatusWith<OID> getCurrentCacheGeneration(OperationContext* txn) {
try {
BSONObjBuilder result;
- const bool ok = grid.catalogManager(txn)->runUserManagementReadCommand(
+ const bool ok = grid.catalogClient(txn)->runUserManagementReadCommand(
txn, "admin", BSON("_getUserCacheGeneration" << 1), &result);
if (!ok) {
return getStatusFromCommandResult(result.obj());
diff --git a/src/mongo/db/cloner.h b/src/mongo/db/cloner.h
index 7f1629fafde..f6576575610 100644
--- a/src/mongo/db/cloner.h
+++ b/src/mongo/db/cloner.h
@@ -35,7 +35,6 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/client/dbclientinterface.h"
-#include "mongo/s/catalog/catalog_manager.h"
namespace mongo {
@@ -119,6 +118,7 @@ private:
 * holding a distributed lock (such as movePrimary). Indicates that we need to
 * be periodically checking to see if the catalog manager has swapped and fail
 * if it has so that we don't block the mongos that initiated the command.
+ * TODO: This can be removed now - it was only used during 3.0->3.2 upgrade.
 * createCollections - When 'true', will fetch a list of collections from the remote and create
 * them. When 'false', assumes collections have already been created ahead of time.
 */
diff --git a/src/mongo/db/commands/clone.cpp b/src/mongo/db/commands/clone.cpp
index ddb251bc4c9..5ad425a0780 100644
--- a/src/mongo/db/commands/clone.cpp
+++ b/src/mongo/db/commands/clone.cpp
@@ -107,8 +107,8 @@ public:
opts.checkForCatalogChange = cmdObj["_checkForCatalogChange"].trueValue();
if (opts.checkForCatalogChange) {
- auto catalogManager = grid.catalogManager(txn);
- if (!catalogManager) {
+ auto catalogClient = grid.catalogClient(txn);
+ if (!catalogClient) {
return appendCommandStatus(
result,
Status(ErrorCodes::NotYetInitialized,
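The guard above exists because a plain mongod may have no sharding catalog at all; the clone command fails with NotYetInitialized rather than dereferencing a null client. A toy illustration of the same fail-fast check (catalogClient and CatalogClient here are hypothetical stand-ins for the grid accessor and its return type):

    #include <iostream>

    struct CatalogClient {};

    // Returns null when sharding has not been initialized, mirroring
    // grid.catalogClient(txn) on an unsharded mongod.
    CatalogClient* catalogClient(bool shardingInitialized) {
        static CatalogClient client;
        return shardingInitialized ? &client : nullptr;
    }

    bool runClone(bool checkForCatalogChange, bool shardingInitialized) {
        if (checkForCatalogChange) {
            auto* client = catalogClient(shardingInitialized);
            if (!client) {
                // Matches the NotYetInitialized error path above.
                std::cerr << "NotYetInitialized: sharding catalog client not set up\n";
                return false;
            }
            // ... periodically consult the client while cloning ...
        }
        return true;
    }

    int main() {
        runClone(true, false);  // fails fast instead of dereferencing null
        return 0;
    }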
diff --git a/src/mongo/db/commands/conn_pool_stats.cpp b/src/mongo/db/commands/conn_pool_stats.cpp
index 3444ef1c50a..15bbe103ce1 100644
--- a/src/mongo/db/commands/conn_pool_stats.cpp
+++ b/src/mongo/db/commands/conn_pool_stats.cpp
@@ -90,7 +90,7 @@ public:
auto grid = Grid::get(txn);
if (grid->shardRegistry()) {
grid->getExecutorPool()->appendConnectionStats(&stats);
- grid->catalogManager(txn)->appendConnectionStats(&stats);
+ grid->catalogClient(txn)->appendConnectionStats(&stats);
}
// Output to a BSON object.
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index bae59911976..3961beae740 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -83,8 +83,6 @@
#include "mongo/rpc/legacy_request_builder.h"
#include "mongo/rpc/metadata.h"
#include "mongo/rpc/request_interface.h"
-#include "mongo/s/catalog/catalog_manager.h"
-#include "mongo/s/grid.h"
#include "mongo/s/stale_exception.h" // for SendStaleConfigException
#include "mongo/scripting/engine.h"
#include "mongo/stdx/memory.h"
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index 32f88c57d51..2d2db93a737 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -47,7 +47,7 @@
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
#include "mongo/logger/console_appender.h"
#include "mongo/logger/message_event_utf8_encoder.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/net/ssl_options.h"
diff --git a/src/mongo/db/repl/topology_coordinator_impl.cpp b/src/mongo/db/repl/topology_coordinator_impl.cpp
index 8d1ed0e9992..708ef8de639 100644
--- a/src/mongo/db/repl/topology_coordinator_impl.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl.cpp
@@ -48,7 +48,6 @@
#include "mongo/db/repl/replication_executor.h"
#include "mongo/db/repl/rslog.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
-#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/util/hex.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/repl/topology_coordinator_impl.h b/src/mongo/db/repl/topology_coordinator_impl.h
index fefebd6d9db..76a820aeba0 100644
--- a/src/mongo/db/repl/topology_coordinator_impl.h
+++ b/src/mongo/db/repl/topology_coordinator_impl.h
@@ -40,7 +40,6 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/repl/topology_coordinator.h"
#include "mongo/db/server_options.h"
-#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/util/time_support.h"
namespace mongo {
diff --git a/src/mongo/db/repl/topology_coordinator_impl_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
index 94fbfb96b58..ada5a794c9c 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
@@ -41,7 +41,6 @@
#include "mongo/db/server_options.h"
#include "mongo/logger/logger.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
-#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/net/hostandport.h"
diff --git a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
index 925e8a364ae..4a25ff9d2fc 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
@@ -42,7 +42,6 @@
#include "mongo/db/server_options.h"
#include "mongo/logger/logger.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
-#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/net/hostandport.h"
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index c4342750cef..86ff456bec2 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -158,7 +158,7 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/serveronly',
'$BUILD_DIR/mongo/executor/network_test_env',
'$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
- '$BUILD_DIR/mongo/s/catalog/catalog_manager_mock',
+ '$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_mock',
'$BUILD_DIR/mongo/s/coreshard',
'$BUILD_DIR/mongo/util/clock_source_mock',
'$BUILD_DIR/mongo/util/net/message_port_mock',
@@ -177,7 +177,7 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/serveronly',
'$BUILD_DIR/mongo/executor/network_test_env',
'$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
- '$BUILD_DIR/mongo/s/catalog/catalog_manager_mock',
+ '$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_mock',
'$BUILD_DIR/mongo/s/coreshard',
'$BUILD_DIR/mongo/util/clock_source_mock',
'$BUILD_DIR/mongo/util/net/message_port_mock',
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index b6861350ed8..1b2f604a2dd 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -77,7 +77,7 @@ protected:
auto future = launchAsync([this] {
MetadataLoader loader;
auto status = loader.makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
"test.foo",
"shard0000",
NULL, /* no old metadata */
@@ -345,7 +345,7 @@ protected:
auto future = launchAsync([this] {
MetadataLoader loader;
auto status = loader.makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
"test.foo",
"shard0000",
NULL, /* no old metadata */
@@ -601,7 +601,7 @@ protected:
auto future = launchAsync([this] {
MetadataLoader loader;
auto status = loader.makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
"test.foo",
"shard0000",
NULL, /* no old metadata */
@@ -677,7 +677,7 @@ protected:
auto future = launchAsync([this] {
MetadataLoader loader;
auto status = loader.makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
"test.foo",
"shard0000",
NULL, /* no old metadata */
@@ -891,7 +891,7 @@ protected:
auto future = launchAsync([this] {
MetadataLoader loader;
auto status = loader.makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
"test.foo",
"shard0000",
NULL, /* no old metadata */
diff --git a/src/mongo/db/s/config/configsvr_add_shard_command.cpp b/src/mongo/db/s/config/configsvr_add_shard_command.cpp
index 27031c5a4e9..4378ae22c20 100644
--- a/src/mongo/db/s/config/configsvr_add_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_add_shard_command.cpp
@@ -38,7 +38,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/replica_set_config.h"
#include "mongo/db/repl/replication_coordinator.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/add_shard_request_type.h"
@@ -119,7 +119,7 @@ public:
parsedRequest.hasMaxSize() ? parsedRequest.getMaxSize()
: kMaxSizeMBDefault);
- StatusWith<string> addShardResult = grid.catalogManager(txn)->addShard(
+ StatusWith<string> addShardResult = grid.catalogClient(txn)->addShard(
txn,
parsedRequest.hasName() ? &parsedRequest.getName() : nullptr,
parsedRequest.getConnString(),
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 612bf92616c..4f37d969bf9 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -144,7 +144,7 @@ Status runApplyOpsCmd(OperationContext* txn,
BSONArray preCond = buildOpPrecond(firstChunk.getNS(), firstChunk.getShard(), currShardVersion);
- return grid.catalogManager(txn)->applyChunkOpsDeprecated(
+ return grid.catalogClient(txn)->applyChunkOpsDeprecated(
txn, updatesB.arr(), preCond, firstChunk.getNS(), newMergedVersion);
}
@@ -157,7 +157,7 @@ bool mergeChunks(OperationContext* txn,
// Get the distributed lock
const string whyMessage = stream() << "merging chunks in " << nss.ns() << " from " << minKey
<< " to " << maxKey;
- auto scopedDistLock = grid.catalogManager(txn)->distLock(
+ auto scopedDistLock = grid.catalogClient(txn)->distLock(
txn, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);
if (!scopedDistLock.isOK()) {
@@ -344,7 +344,7 @@ bool mergeChunks(OperationContext* txn,
BSONObj mergeLogEntry = buildMergeLogEntry(chunksToMerge, shardVersion, mergeVersion);
- grid.catalogManager(txn)->logChange(txn, "merge", nss.ns(), mergeLogEntry);
+ grid.catalogClient(txn)->logChange(txn, "merge", nss.ns(), mergeLogEntry);
return true;
}
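The distLock call above returns a StatusWith wrapping an RAII guard: if acquisition succeeds, the chunk operation proceeds under the lock, which is released when the guard leaves scope. A compilable miniature of that pattern (ScopedDistLock and StatusWithDistLock are simplified stand-ins, not the real DistLockManager types):

    #include <iostream>
    #include <optional>
    #include <string>

    struct ScopedDistLock {
        std::string name;
        ~ScopedDistLock() {
            // the real lock is released in config.locks when this leaves scope
        }
    };

    struct StatusWithDistLock {
        std::optional<ScopedDistLock> lock;
        std::string error;
        bool isOK() const { return lock.has_value(); }
    };

    StatusWithDistLock distLock(const std::string& name, const std::string& whyMessage) {
        // A real implementation contends for the named lock on the config
        // servers, recording whyMessage for diagnostics.
        (void)whyMessage;
        return {ScopedDistLock{name}, ""};
    }

    bool mergeChunks(const std::string& ns) {
        auto scopedDistLock = distLock(ns, "merging chunks in " + ns);
        if (!scopedDistLock.isOK()) {
            std::cerr << "could not acquire collection lock for " << ns << "\n";
            return false;
        }
        // ... perform the merge; the lock drops when scopedDistLock dies ...
        return true;
    }

    int main() {
        return mergeChunks("test.foo") ? 0 : 1;
    }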
diff --git a/src/mongo/db/s/metadata_loader.cpp b/src/mongo/db/s/metadata_loader.cpp
index e4d41dad429..f57fc35a2df 100644
--- a/src/mongo/db/s/metadata_loader.cpp
+++ b/src/mongo/db/s/metadata_loader.cpp
@@ -35,7 +35,7 @@
#include <vector>
#include "mongo/db/s/collection_metadata.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/chunk_diff.h"
@@ -93,25 +93,25 @@ MetadataLoader::MetadataLoader() = default;
MetadataLoader::~MetadataLoader() = default;
Status MetadataLoader::makeCollectionMetadata(OperationContext* txn,
- CatalogManager* catalogManager,
+ ShardingCatalogClient* catalogClient,
const string& ns,
const string& shard,
const CollectionMetadata* oldMetadata,
CollectionMetadata* metadata) const {
- Status status = _initCollection(txn, catalogManager, ns, shard, metadata);
+ Status status = _initCollection(txn, catalogClient, ns, shard, metadata);
if (!status.isOK() || metadata->getKeyPattern().isEmpty()) {
return status;
}
- return initChunks(txn, catalogManager, ns, shard, oldMetadata, metadata);
+ return initChunks(txn, catalogClient, ns, shard, oldMetadata, metadata);
}
Status MetadataLoader::_initCollection(OperationContext* txn,
- CatalogManager* catalogManager,
+ ShardingCatalogClient* catalogClient,
const string& ns,
const string& shard,
CollectionMetadata* metadata) const {
- auto coll = catalogManager->getCollection(txn, ns);
+ auto coll = catalogClient->getCollection(txn, ns);
if (!coll.isOK()) {
return coll.getStatus();
}
@@ -132,7 +132,7 @@ Status MetadataLoader::_initCollection(OperationContext* txn,
}
Status MetadataLoader::initChunks(OperationContext* txn,
- CatalogManager* catalogManager,
+ ShardingCatalogClient* catalogClient,
const string& ns,
const string& shard,
const CollectionMetadata* oldMetadata,
@@ -179,7 +179,7 @@ Status MetadataLoader::initChunks(OperationContext* txn,
try {
std::vector<ChunkType> chunks;
const auto diffQuery = differ.configDiffQuery();
- Status status = catalogManager->getChunks(
+ Status status = catalogClient->getChunks(
txn, diffQuery.query, diffQuery.sort, boost::none, &chunks, nullptr);
if (!status.isOK()) {
if (status == ErrorCodes::HostUnreachable) {
diff --git a/src/mongo/db/s/metadata_loader.h b/src/mongo/db/s/metadata_loader.h
index ef7048f2fc6..bcaae5f4b9f 100644
--- a/src/mongo/db/s/metadata_loader.h
+++ b/src/mongo/db/s/metadata_loader.h
@@ -36,7 +36,7 @@
namespace mongo {
-class CatalogManager;
+class ShardingCatalogClient;
class CollectionMetadata;
class CollectionType;
class DBClientCursor;
@@ -91,7 +91,7 @@ public:
* @return RemoteChangeDetected if the data loaded was modified by another operation
*/
Status makeCollectionMetadata(OperationContext* txn,
- CatalogManager* catalogManager,
+ ShardingCatalogClient* catalogClient,
const std::string& ns,
const std::string& shard,
const CollectionMetadata* oldMetadata,
@@ -134,7 +134,7 @@ private:
*
*/
Status _initCollection(OperationContext* txn,
- CatalogManager* catalogManager,
+ ShardingCatalogClient* catalogClient,
const std::string& ns,
const std::string& shard,
CollectionMetadata* metadata) const;
@@ -153,7 +153,7 @@ private:
* TODO: @return FailedToParse
*/
Status initChunks(OperationContext* txn,
- CatalogManager* catalogManager,
+ ShardingCatalogClient* catalogClient,
const std::string& ns,
const std::string& shard,
const CollectionMetadata* oldMetadata,
diff --git a/src/mongo/db/s/metadata_loader_test.cpp b/src/mongo/db/s/metadata_loader_test.cpp
index 67ea0317db0..a3d50228c86 100644
--- a/src/mongo/db/s/metadata_loader_test.cpp
+++ b/src/mongo/db/s/metadata_loader_test.cpp
@@ -115,7 +115,7 @@ protected:
auto future = launchAsync([this, ns, shardName, metadata] {
auto status = loader().makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
ns,
shardName,
NULL, /* no old metadata */
@@ -160,7 +160,7 @@ TEST_F(MetadataLoaderFixture, DroppedColl) {
MetadataLoader loader;
CollectionMetadata metadata;
auto status = loader.makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
"test.foo",
"shard0000",
NULL, /* no old metadata */
@@ -176,7 +176,7 @@ TEST_F(MetadataLoaderFixture, EmptyColl) {
MetadataLoader loader;
CollectionMetadata metadata;
auto status = loader.makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
"test.foo",
"shard0000",
NULL, /* no old metadata */
@@ -193,7 +193,7 @@ TEST_F(MetadataLoaderFixture, BadColl) {
MetadataLoader loader;
CollectionMetadata metadata;
auto status = loader.makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
"test.foo",
"shard0000",
NULL, /* no old metadata */
@@ -221,7 +221,7 @@ TEST_F(MetadataLoaderFixture, BadChunk) {
MetadataLoader loader;
CollectionMetadata metadata;
auto status = loader.makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
"test.foo",
"shard0000",
NULL, /* no old metadata */
@@ -248,7 +248,7 @@ TEST_F(MetadataLoaderFixture, NoChunksIsDropped) {
MetadataLoader loader;
CollectionMetadata metadata;
auto status = loader.makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
"test.foo",
"shard0000",
NULL, /* no old metadata */
@@ -286,7 +286,7 @@ TEST_F(MetadataLoaderFixture, CheckNumChunk) {
MetadataLoader loader;
CollectionMetadata metadata;
auto status = loader.makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
"test.foo",
"shard0000",
NULL, /* no old metadata */
@@ -311,7 +311,7 @@ TEST_F(MetadataLoaderFixture, SingleChunkCheckNumChunk) {
MetadataLoader loader;
CollectionMetadata metadata;
auto status = loader.makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
"test.foo",
"shard0000",
NULL, /* no old metadata */
@@ -331,7 +331,7 @@ TEST_F(MetadataLoaderFixture, SingleChunkGetNext) {
MetadataLoader loader;
CollectionMetadata metadata;
auto status = loader.makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
"test.foo",
"shard0000",
NULL, /* no old metadata */
@@ -351,7 +351,7 @@ TEST_F(MetadataLoaderFixture, SingleChunkGetShardKey) {
MetadataLoader loader;
CollectionMetadata metadata;
auto status = loader.makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
"test.foo",
"shard0000",
NULL, /* no old metadata */
@@ -371,7 +371,7 @@ TEST_F(MetadataLoaderFixture, SingleChunkGetMaxCollVersion) {
MetadataLoader loader;
CollectionMetadata metadata;
auto status = loader.makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
"test.foo",
"shard0000",
NULL, /* no old metadata */
@@ -389,7 +389,7 @@ TEST_F(MetadataLoaderFixture, SingleChunkGetMaxShardVersion) {
MetadataLoader loader;
CollectionMetadata metadata;
auto status = loader.makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
"test.foo",
"shard0000",
NULL, /* no old metadata */
@@ -407,7 +407,7 @@ TEST_F(MetadataLoaderFixture, NoChunks) {
MetadataLoader loader;
CollectionMetadata metadata;
auto status = loader.makeCollectionMetadata(operationContext(),
- catalogManager(),
+ catalogClient(),
"test.foo",
"shard0000",
NULL, /* no old metadata */
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 51af102018f..10cbb678c7f 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -41,7 +41,7 @@
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/sharding_state_recovery.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/grid.h"
#include "mongo/s/shard_key_pattern.h"
@@ -187,14 +187,14 @@ Status MigrationSourceManager::startClone(OperationContext* txn) {
invariant(_state == kCreated);
auto scopedGuard = MakeGuard([&] { cleanupOnError(txn); });
- grid.catalogManager(txn)->logChange(
- txn,
- "moveChunk.start",
- _args.getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()));
+ grid.catalogClient(txn)->logChange(txn,
+ "moveChunk.start",
+ _args.getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey()
+ << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()));
_cloneDriver = stdx::make_unique<MigrationChunkClonerSourceLegacy>(
_args, _committedMetadata->getKeyPattern());
@@ -388,7 +388,7 @@ Status MigrationSourceManager::commitDonateChunk(OperationContext* txn) {
MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangBeforeCommitMigration);
- Status applyOpsStatus = grid.catalogManager(txn)->applyChunkOpsDeprecated(
+ Status applyOpsStatus = grid.catalogClient(txn)->applyChunkOpsDeprecated(
txn, updates.arr(), preCond.arr(), _args.getNss().ns(), nextVersion);
if (MONGO_FAIL_POINT(failCommitMigrationCommand)) {
@@ -419,7 +419,7 @@ Status MigrationSourceManager::commitDonateChunk(OperationContext* txn) {
// Need to get the latest optime in case the refresh request goes to a secondary --
// otherwise the read won't wait for the write that applyChunkOpsDeprecated may have done.
- Status status = grid.catalogManager(txn)->logChange(
+ Status status = grid.catalogClient(txn)->logChange(
txn,
"moveChunk.validating",
_args.getNss().ns(),
@@ -492,14 +492,14 @@ Status MigrationSourceManager::commitDonateChunk(OperationContext* txn) {
scopedGuard.Dismiss();
_cleanup(txn);
- grid.catalogManager(txn)->logChange(
- txn,
- "moveChunk.commit",
- _args.getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()));
+ grid.catalogClient(txn)->logChange(txn,
+ "moveChunk.commit",
+ _args.getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey()
+ << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()));
return Status::OK();
}
@@ -509,14 +509,14 @@ void MigrationSourceManager::cleanupOnError(OperationContext* txn) {
return;
}
- grid.catalogManager(txn)->logChange(
- txn,
- "moveChunk.error",
- _args.getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()));
+ grid.catalogClient(txn)->logChange(txn,
+ "moveChunk.error",
+ _args.getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey()
+ << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()));
_cleanup(txn);
}
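Each migration lifecycle event above (moveChunk.start, moveChunk.commit, moveChunk.error) logs the same four-field detail document built with the BSON stream builder. A small stand-alone sketch of that changelog shape, with a std::map playing the role of the BSON document:

    #include <iostream>
    #include <map>
    #include <string>

    // Models the BSON("min" << ... << "max" << ... << "from" << ... << "to" << ...)
    // document assembled for each moveChunk changelog entry above.
    using Document = std::map<std::string, std::string>;

    Document makeMoveChunkDetail(const std::string& minKey,
                                 const std::string& maxKey,
                                 const std::string& fromShard,
                                 const std::string& toShard) {
        return {{"min", minKey}, {"max", maxKey}, {"from", fromShard}, {"to", toShard}};
    }

    // Stand-in for ShardingCatalogClient::logChange(): one changelog entry
    // per lifecycle event.
    void logChange(const std::string& what, const std::string& ns, const Document& detail) {
        std::cout << what << " " << ns;
        for (const auto& kv : detail) {
            std::cout << " " << kv.first << "=" << kv.second;
        }
        std::cout << "\n";
    }

    int main() {
        logChange("moveChunk.start", "test.foo",
                  makeMoveChunkDetail("{ x: 0 }", "{ x: 100 }", "shard0000", "shard0001"));
        return 0;
    }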
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index 51637a6efee..12ed1cbd6f7 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -66,7 +66,8 @@ DistLockManager::ScopedDistLock acquireCollectionDistLock(OperationContext* txn,
<< ChunkRange(args.getMinKey(), args.getMaxKey()).toString()
<< " in "
<< args.getNss().ns());
- auto distLockStatus = grid.catalogManager(txn)->distLock(txn, args.getNss().ns(), whyMessage);
+ auto distLockStatus =
+ Grid::get(txn)->catalogClient(txn)->distLock(txn, args.getNss().ns(), whyMessage);
if (!distLockStatus.isOK()) {
const string msg = str::stream()
<< "Could not acquire collection lock for " << args.getNss().ns()
diff --git a/src/mongo/db/s/move_timing_helper.cpp b/src/mongo/db/s/move_timing_helper.cpp
index 4ff91d6e658..3e4d1507dbc 100644
--- a/src/mongo/db/s/move_timing_helper.cpp
+++ b/src/mongo/db/s/move_timing_helper.cpp
@@ -82,7 +82,7 @@ MoveTimingHelper::~MoveTimingHelper() {
_b.append("errmsg", *_cmdErrmsg);
}
- grid.catalogManager(_txn)->logChange(
+ grid.catalogClient(_txn)->logChange(
_txn, str::stream() << "moveChunk." << _where, _ns, _b.obj());
} catch (const std::exception& e) {
warning() << "couldn't record timing for moveChunk '" << _where << "': " << e.what();
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index 124629899c4..715fd1ffa02 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -52,7 +52,7 @@
#include "mongo/db/s/type_shard_identity.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/rpc/metadata/config_server_metadata.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_registry.h"
@@ -211,7 +211,7 @@ void ShardingState::shutDown(OperationContext* txn) {
if (_getInitializationState() == InitializationState::kInitialized) {
grid.getExecutorPool()->shutdownAndJoin();
- grid.catalogManager(txn)->shutDown(txn);
+ grid.catalogClient(txn)->shutDown(txn);
}
}
@@ -698,7 +698,7 @@ Status ShardingState::_refreshMetadata(OperationContext* txn,
{
Status status = mdLoader.makeCollectionMetadata(txn,
- grid.catalogManager(txn),
+ grid.catalogClient(txn),
ns,
getShardName(),
fullReload ? nullptr : beforeMetadata.get(),
diff --git a/src/mongo/db/s/sharding_state_recovery.cpp b/src/mongo/db/s/sharding_state_recovery.cpp
index d076b995f53..5980556a2ba 100644
--- a/src/mongo/db/s/sharding_state_recovery.cpp
+++ b/src/mongo/db/s/sharding_state_recovery.cpp
@@ -294,10 +294,10 @@ Status ShardingStateRecovery::recover(OperationContext* txn) {
// Need to fetch the latest uptime from the config server, so do a logging write
Status status =
- grid.catalogManager(txn)->logChange(txn,
- "Sharding minOpTime recovery",
- NamespaceString::kConfigCollectionNamespace.ns(),
- recoveryDocBSON);
+ grid.catalogClient(txn)->logChange(txn,
+ "Sharding minOpTime recovery",
+ NamespaceString::kConfigCollectionNamespace.ns(),
+ recoveryDocBSON);
if (!status.isOK())
return status;
diff --git a/src/mongo/db/s/sharding_state_test.cpp b/src/mongo/db/s/sharding_state_test.cpp
index ba8a456f51e..83b885163cb 100644
--- a/src/mongo/db/s/sharding_state_test.cpp
+++ b/src/mongo/db/s/sharding_state_test.cpp
@@ -46,7 +46,7 @@
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/catalog_manager_mock.h"
+#include "mongo/s/catalog/sharding_catalog_client_mock.h"
#include "mongo/s/client/shard_factory.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/client/shard_remote.h"
@@ -110,7 +110,7 @@ void initGrid(OperationContext* txn, const ConnectionString& configConnString) {
auto shardRegistry(stdx::make_unique<ShardRegistry>(std::move(shardFactory), configConnString));
grid.init(
- stdx::make_unique<CatalogManagerMock>(),
+ stdx::make_unique<ShardingCatalogClientMock>(),
stdx::make_unique<CatalogCache>(),
std::move(shardRegistry),
stdx::make_unique<ClusterCursorManager>(txn->getServiceContext()->getPreciseClockSource()),
diff --git a/src/mongo/db/s/split_chunk_command.cpp b/src/mongo/db/s/split_chunk_command.cpp
index 58018441072..0ff327db442 100644
--- a/src/mongo/db/s/split_chunk_command.cpp
+++ b/src/mongo/db/s/split_chunk_command.cpp
@@ -222,7 +222,7 @@ public:
const string whyMessage(str::stream() << "splitting chunk [" << min << ", " << max
<< ") in "
<< nss.toString());
- auto scopedDistLock = grid.catalogManager(txn)->distLock(
+ auto scopedDistLock = grid.catalogClient(txn)->distLock(
txn, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);
if (!scopedDistLock.isOK()) {
errmsg = str::stream() << "could not acquire collection lock for " << nss.toString()
@@ -401,7 +401,7 @@ public:
// 4. apply the batch of updates to remote and local metadata
//
- Status applyOpsStatus = grid.catalogManager(txn)->applyChunkOpsDeprecated(
+ Status applyOpsStatus = grid.catalogClient(txn)->applyChunkOpsDeprecated(
txn, updates.arr(), preCond.arr(), nss.ns(), nextChunkVersion);
if (!applyOpsStatus.isOK()) {
return appendCommandStatus(result, applyOpsStatus);
@@ -441,7 +441,7 @@ public:
appendShortVersion(logDetail.subobjStart("left"), *newChunks[0]);
appendShortVersion(logDetail.subobjStart("right"), *newChunks[1]);
- grid.catalogManager(txn)->logChange(txn, "split", nss.ns(), logDetail.obj());
+ grid.catalogClient(txn)->logChange(txn, "split", nss.ns(), logDetail.obj());
} else {
BSONObj beforeDetailObj = logDetail.obj();
BSONObj firstDetailObj = beforeDetailObj.getOwned();
@@ -454,8 +454,7 @@ public:
chunkDetail.append("of", newChunksSize);
appendShortVersion(chunkDetail.subobjStart("chunk"), *newChunks[i]);
- grid.catalogManager(txn)->logChange(
- txn, "multi-split", nss.ns(), chunkDetail.obj());
+ grid.catalogClient(txn)->logChange(txn, "multi-split", nss.ns(), chunkDetail.obj());
}
}
diff --git a/src/mongo/db/server_options.h b/src/mongo/db/server_options.h
index ec692649967..886b7bb0dd1 100644
--- a/src/mongo/db/server_options.h
+++ b/src/mongo/db/server_options.h
@@ -29,7 +29,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/platform/process_id.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/util/net/listen.h" // For DEFAULT_MAX_CONN
namespace mongo {
diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp
index b6022383f6b..db8815b1e90 100644
--- a/src/mongo/dbtests/framework.cpp
+++ b/src/mongo/dbtests/framework.cpp
@@ -43,8 +43,6 @@
#include "mongo/db/service_context_d.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/dbtests/framework_options.h"
-#include "mongo/s/catalog/catalog_manager.h"
-#include "mongo/s/grid.h"
#include "mongo/scripting/engine.h"
#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
diff --git a/src/mongo/executor/network_test_env.h b/src/mongo/executor/network_test_env.h
index e83311b7bc4..7e09a18f0fb 100644
--- a/src/mongo/executor/network_test_env.h
+++ b/src/mongo/executor/network_test_env.h
@@ -43,7 +43,7 @@
namespace mongo {
class BSONObj;
-class CatalogManagerReplicaSet;
+class ShardingCatalogClientImpl;
class DistLockManagerMock;
class ShardRegistry;
template <typename T>
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 350e0b14ce5..5c28a1defb4 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -22,7 +22,7 @@ env.Library(
'$BUILD_DIR/mongo/executor/network_interface_factory',
'$BUILD_DIR/mongo/executor/network_interface_thread_pool',
'$BUILD_DIR/mongo/executor/thread_pool_task_executor',
- '$BUILD_DIR/mongo/s/catalog/replset/catalog_manager_replica_set',
+ '$BUILD_DIR/mongo/s/catalog/replset/sharding_catalog_client_impl',
'$BUILD_DIR/mongo/s/catalog/replset/dist_lock_catalog_impl',
'$BUILD_DIR/mongo/s/catalog/replset/replset_dist_lock_manager',
'client/sharding_connection_hook',
@@ -83,7 +83,7 @@ env.Library(
'$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
'$BUILD_DIR/mongo/rpc/metadata',
'$BUILD_DIR/mongo/s/catalog/dist_lock_manager_mock',
- '$BUILD_DIR/mongo/s/catalog/replset/catalog_manager_replica_set',
+ '$BUILD_DIR/mongo/s/catalog/replset/sharding_catalog_client_impl',
'$BUILD_DIR/mongo/s/coreshard',
'$BUILD_DIR/mongo/util/clock_source_mock',
'$BUILD_DIR/mongo/util/net/message_port_mock',
@@ -218,7 +218,7 @@ env.Library(
LIBDEPS=[
'$BUILD_DIR/mongo/executor/task_executor_pool',
'$BUILD_DIR/mongo/s/query/cluster_cursor_manager',
- 'catalog/replset/catalog_manager_replica_set',
+ 'catalog/replset/sharding_catalog_client_impl',
'client/sharding_client',
'common',
],
diff --git a/src/mongo/s/balancer/balancer.cpp b/src/mongo/s/balancer/balancer.cpp
index 4c5db602acd..8f30f468c85 100644
--- a/src/mongo/s/balancer/balancer.cpp
+++ b/src/mongo/s/balancer/balancer.cpp
@@ -46,7 +46,7 @@
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/balancer/cluster_statistics_impl.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/client/shard_registry.h"
@@ -220,7 +220,7 @@ Status executeSingleMigration(OperationContext* txn,
// Send the first moveChunk command with the balancer holding the distlock.
{
StatusWith<DistLockManager::ScopedDistLock> distLockStatus =
- grid.catalogManager(txn)->distLock(txn, nss.ns(), whyMessage);
+ Grid::get(txn)->catalogClient(txn)->distLock(txn, nss.ns(), whyMessage);
if (!distLockStatus.isOK()) {
const std::string msg = str::stream()
<< "Could not acquire collection lock for " << nss.ns() << " to migrate chunk ["
@@ -413,7 +413,7 @@ void Balancer::_mainThread() {
uassert(13258, "oids broken after resetting!", _checkOIDs(txn.get()));
{
- auto scopedDistLock = shardingContext->catalogManager(txn.get())->distLock(
+ auto scopedDistLock = shardingContext->catalogClient(txn.get())->distLock(
txn.get(),
"balancer",
"doing balance round",
@@ -459,7 +459,7 @@ void Balancer::_mainThread() {
roundDetails.setSucceeded(static_cast<int>(candidateChunks.size()),
_balancedLastTime);
- shardingContext->catalogManager(txn.get())->logAction(
+ shardingContext->catalogClient(txn.get())->logAction(
txn.get(), "balancer.round", "", roundDetails.toBSON());
}
@@ -479,7 +479,7 @@ void Balancer::_mainThread() {
// This round failed, tell the world!
roundDetails.setFailed(e.what());
- shardingContext->catalogManager(txn.get())->logAction(
+ shardingContext->catalogClient(txn.get())->logAction(
txn.get(), "balancer.round", "", roundDetails.toBSON());
// Sleep a fair amount before retrying because of the error
diff --git a/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp
index 48e596a871d..e2a00e9034f 100644
--- a/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -37,7 +37,7 @@
#include "mongo/base/status_with.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_tags.h"
@@ -102,7 +102,7 @@ StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToSpli
vector<CollectionType> collections;
Status collsStatus =
- Grid::get(txn)->catalogManager(txn)->getCollections(txn, nullptr, &collections, nullptr);
+ Grid::get(txn)->catalogClient(txn)->getCollections(txn, nullptr, &collections, nullptr);
if (!collsStatus.isOK()) {
return collsStatus;
}
@@ -146,7 +146,7 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToMo
vector<CollectionType> collections;
Status collsStatus =
- Grid::get(txn)->catalogManager(txn)->getCollections(txn, nullptr, &collections, nullptr);
+ Grid::get(txn)->catalogClient(txn)->getCollections(txn, nullptr, &collections, nullptr);
if (!collsStatus.isOK()) {
return collsStatus;
}
@@ -195,7 +195,7 @@ BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* tx
ChunkManager* const cm = scopedCM.cm();
auto tagForChunkStatus =
- Grid::get(txn)->catalogManager(txn)->getTagForChunk(txn, nss.ns(), chunk);
+ Grid::get(txn)->catalogClient(txn)->getTagForChunk(txn, nss.ns(), chunk);
if (!tagForChunkStatus.isOK()) {
return tagForChunkStatus.getStatus();
}
@@ -221,7 +221,7 @@ Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* txn,
const ChunkType& chunk,
const ShardId& newShardId) {
auto tagForChunkStatus =
- Grid::get(txn)->catalogManager(txn)->getTagForChunk(txn, chunk.getNS(), chunk);
+ Grid::get(txn)->catalogClient(txn)->getTagForChunk(txn, chunk.getNS(), chunk);
if (!tagForChunkStatus.isOK()) {
return tagForChunkStatus.getStatus();
}
@@ -261,7 +261,7 @@ StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::_getSplitCandidate
vector<TagsType> collectionTags;
Status tagsStatus =
- Grid::get(txn)->catalogManager(txn)->getTagsForCollection(txn, nss.ns(), &collectionTags);
+ Grid::get(txn)->catalogClient(txn)->getTagsForCollection(txn, nss.ns(), &collectionTags);
if (!tagsStatus.isOK()) {
return {tagsStatus.code(),
str::stream() << "Unable to load tags for collection " << nss.ns() << " due to "
@@ -311,7 +311,7 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandi
DistributionStatus distStatus(shardStats, shardToChunksMap);
{
vector<TagsType> collectionTags;
- Status status = Grid::get(txn)->catalogManager(txn)->getTagsForCollection(
+ Status status = Grid::get(txn)->catalogClient(txn)->getTagsForCollection(
txn, nss.ns(), &collectionTags);
if (!status.isOK()) {
return status;
diff --git a/src/mongo/s/balancer/balancer_configuration.cpp b/src/mongo/s/balancer/balancer_configuration.cpp
index 5381b73ae24..165a18bd212 100644
--- a/src/mongo/s/balancer/balancer_configuration.cpp
+++ b/src/mongo/s/balancer/balancer_configuration.cpp
@@ -36,7 +36,7 @@
#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/util/bson_extract.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/grid.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -105,7 +105,7 @@ Status BalancerConfiguration::_refreshBalancerSettings(OperationContext* txn) {
BalancerSettingsType settings = BalancerSettingsType::createDefault();
auto settingsObjStatus =
- Grid::get(txn)->catalogManager(txn)->getGlobalSettings(txn, BalancerSettingsType::kKey);
+ Grid::get(txn)->catalogClient(txn)->getGlobalSettings(txn, BalancerSettingsType::kKey);
if (settingsObjStatus.isOK()) {
auto settingsStatus = BalancerSettingsType::fromBSON(settingsObjStatus.getValue());
if (!settingsStatus.isOK()) {
@@ -127,7 +127,7 @@ Status BalancerConfiguration::_refreshChunkSizeSettings(OperationContext* txn) {
ChunkSizeSettingsType settings = ChunkSizeSettingsType::createDefault();
auto settingsObjStatus =
- grid.catalogManager(txn)->getGlobalSettings(txn, ChunkSizeSettingsType::kKey);
+ grid.catalogClient(txn)->getGlobalSettings(txn, ChunkSizeSettingsType::kKey);
if (settingsObjStatus.isOK()) {
auto settingsStatus = ChunkSizeSettingsType::fromBSON(settingsObjStatus.getValue());
if (!settingsStatus.isOK()) {
diff --git a/src/mongo/s/balancer/balancer_configuration_test.cpp b/src/mongo/s/balancer/balancer_configuration_test.cpp
index 217df4c5196..32b9f0413a8 100644
--- a/src/mongo/s/balancer/balancer_configuration_test.cpp
+++ b/src/mongo/s/balancer/balancer_configuration_test.cpp
@@ -39,7 +39,7 @@
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
#include "mongo/s/balancer/balancer_configuration.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/sharding_test_fixture.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/net/hostandport.h"
diff --git a/src/mongo/s/balancer/balancer_policy.cpp b/src/mongo/s/balancer/balancer_policy.cpp
index cc7b47693e5..f588f089702 100644
--- a/src/mongo/s/balancer/balancer_policy.cpp
+++ b/src/mongo/s/balancer/balancer_policy.cpp
@@ -36,7 +36,7 @@
#include "mongo/client/read_preference.h"
#include "mongo/client/remote_command_targeter.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog/type_tags.h"
#include "mongo/s/chunk_manager.h"
diff --git a/src/mongo/s/balancer/cluster_statistics_impl.cpp b/src/mongo/s/balancer/cluster_statistics_impl.cpp
index 9c6bea9350e..96014d9dc90 100644
--- a/src/mongo/s/balancer/cluster_statistics_impl.cpp
+++ b/src/mongo/s/balancer/cluster_statistics_impl.cpp
@@ -35,7 +35,7 @@
#include "mongo/base/status_with.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/client/read_preference.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
@@ -121,7 +121,7 @@ void ClusterStatisticsImpl::_refreshShardStats(OperationContext* txn) {
// db.serverStatus() (mem.mapped) to all shards.
//
// TODO: skip unresponsive shards and mark information as stale.
- auto shardsStatus = Grid::get(txn)->catalogManager(txn)->getAllShards(txn);
+ auto shardsStatus = Grid::get(txn)->catalogClient(txn)->getAllShards(txn);
uassertStatusOK(shardsStatus.getStatus());
const vector<ShardType> shards(std::move(shardsStatus.getValue().value));
diff --git a/src/mongo/s/catalog/SConscript b/src/mongo/s/catalog/SConscript
index 0e0edee7419..d9470241010 100644
--- a/src/mongo/s/catalog/SConscript
+++ b/src/mongo/s/catalog/SConscript
@@ -9,9 +9,9 @@ env.SConscript(
)
env.Library(
- target='catalog_manager_mock',
+ target='sharding_catalog_client_mock',
source=[
- 'catalog_manager_mock.cpp',
+ 'sharding_catalog_client_mock.cpp',
],
LIBDEPS=[
'dist_lock_manager_mock',
diff --git a/src/mongo/s/catalog/catalog_cache.cpp b/src/mongo/s/catalog/catalog_cache.cpp
index 50ab6539b80..e5d739abdb6 100644
--- a/src/mongo/s/catalog/catalog_cache.cpp
+++ b/src/mongo/s/catalog/catalog_cache.cpp
@@ -32,7 +32,7 @@
#include "mongo/base/status_with.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
@@ -55,7 +55,7 @@ StatusWith<shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext* txn
}
// Need to load from the store
- auto status = grid.catalogManager(txn)->getDatabase(txn, dbName);
+ auto status = grid.catalogClient(txn)->getDatabase(txn, dbName);
if (!status.isOK()) {
return status.getStatus();
}
diff --git a/src/mongo/s/catalog/catalog_cache.h b/src/mongo/s/catalog/catalog_cache.h
index 022882d4592..4f5a445c67c 100644
--- a/src/mongo/s/catalog/catalog_cache.h
+++ b/src/mongo/s/catalog/catalog_cache.h
@@ -37,7 +37,7 @@
namespace mongo {
-class CatalogManager;
+class ShardingCatalogClient;
class DBConfig;
class OperationContext;
template <typename T>
@@ -47,7 +47,7 @@ class StatusWith;
/**
* This is the root of the "read-only" hierarchy of cached catalog metadata. It is read only
* in the sense that it only reads from the persistent store, but never writes to it. Instead
- * writes happen thorugh the CatalogManager and the cache hierarchy needs to be invalidated.
+ * writes happen through the ShardingCatalogManager and the cache hierarchy needs to be invalidated.
*/
class CatalogCache {
MONGO_DISALLOW_COPYING(CatalogCache);
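The comment corrected above describes a read-through cache: CatalogCache::getDatabase() (shown in the .cpp hunk just before this one) serves from an in-memory map and falls back to the catalog client on a miss, so writers must invalidate entries they change. A simplified, compilable model of that behavior (DBConfig and loadDatabaseFromConfigServers are illustrative stand-ins):

    #include <map>
    #include <memory>
    #include <string>

    struct DBConfig {
        std::string name;
    };

    // Stands in for grid.catalogClient(txn)->getDatabase(txn, dbName).
    std::shared_ptr<DBConfig> loadDatabaseFromConfigServers(const std::string& dbName) {
        return std::make_shared<DBConfig>(DBConfig{dbName});
    }

    class CatalogCache {
    public:
        std::shared_ptr<DBConfig> getDatabase(const std::string& dbName) {
            auto it = _databases.find(dbName);
            if (it != _databases.end()) {
                return it->second;  // cache hit
            }
            auto db = loadDatabaseFromConfigServers(dbName);  // need to load from the store
            _databases.emplace(dbName, db);
            return db;
        }

        // Writes bypass this cache, so stale entries must be invalidated.
        void invalidate(const std::string& dbName) {
            _databases.erase(dbName);
        }

    private:
        std::map<std::string, std::shared_ptr<DBConfig>> _databases;
    };

    int main() {
        CatalogCache cache;
        auto db = cache.getDatabase("test");     // first call loads
        auto again = cache.getDatabase("test");  // second call hits the cache
        return db == again ? 0 : 1;
    }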
diff --git a/src/mongo/s/catalog/catalog_manager_mock.cpp b/src/mongo/s/catalog/catalog_manager_mock.cpp
deleted file mode 100644
index 5e09bc1a7b7..00000000000
--- a/src/mongo/s/catalog/catalog_manager_mock.cpp
+++ /dev/null
@@ -1,238 +0,0 @@
-/**
- * Copyright (C) 2015 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/s/catalog/catalog_manager_mock.h"
-
-#include "mongo/base/status.h"
-#include "mongo/db/repl/optime.h"
-#include "mongo/s/catalog/type_collection.h"
-#include "mongo/s/catalog/type_database.h"
-#include "mongo/s/catalog/type_shard.h"
-#include "mongo/stdx/memory.h"
-
-namespace mongo {
-
-using std::string;
-using std::vector;
-
-CatalogManagerMock::CatalogManagerMock() {
- _mockDistLockMgr = stdx::make_unique<DistLockManagerMock>();
-}
-
-CatalogManagerMock::~CatalogManagerMock() = default;
-
-Status CatalogManagerMock::startup() {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-void CatalogManagerMock::shutDown(OperationContext* txn) {}
-
-Status CatalogManagerMock::enableSharding(OperationContext* txn, const std::string& dbName) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-Status CatalogManagerMock::shardCollection(OperationContext* txn,
- const string& ns,
- const ShardKeyPattern& fieldsAndOrder,
- bool unique,
- const vector<BSONObj>& initPoints,
- const std::set<ShardId>& initShardIds) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-StatusWith<string> CatalogManagerMock::addShard(OperationContext* txn,
- const std::string* shardProposedName,
- const ConnectionString& shardConnectionString,
- const long long maxSize) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-StatusWith<ShardDrainingStatus> CatalogManagerMock::removeShard(OperationContext* txn,
- const string& name) {
- return ShardDrainingStatus::COMPLETED;
-}
-
-Status CatalogManagerMock::updateDatabase(OperationContext* txn,
- const string& dbName,
- const DatabaseType& db) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-StatusWith<repl::OpTimeWith<DatabaseType>> CatalogManagerMock::getDatabase(OperationContext* txn,
- const string& dbName) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-Status CatalogManagerMock::updateCollection(OperationContext* txn,
- const string& collNs,
- const CollectionType& coll) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-StatusWith<repl::OpTimeWith<CollectionType>> CatalogManagerMock::getCollection(
- OperationContext* txn, const string& collNs) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-Status CatalogManagerMock::getCollections(OperationContext* txn,
- const string* dbName,
- vector<CollectionType>* collections,
- repl::OpTime* optime) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-Status CatalogManagerMock::dropCollection(OperationContext* txn, const NamespaceString& ns) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-Status CatalogManagerMock::getDatabasesForShard(OperationContext* txn,
- const string& shardName,
- vector<string>* dbs) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-Status CatalogManagerMock::getChunks(OperationContext* txn,
- const BSONObj& filter,
- const BSONObj& sort,
- boost::optional<int> limit,
- std::vector<ChunkType>* chunks,
- repl::OpTime* opTime) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-Status CatalogManagerMock::getTagsForCollection(OperationContext* txn,
- const string& collectionNs,
- vector<TagsType>* tags) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-StatusWith<string> CatalogManagerMock::getTagForChunk(OperationContext* txn,
- const string& collectionNs,
- const ChunkType& chunk) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-StatusWith<repl::OpTimeWith<std::vector<ShardType>>> CatalogManagerMock::getAllShards(
- OperationContext* txn) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-StatusWith<DistLockManager::ScopedDistLock> CatalogManagerMock::distLock(OperationContext* txn,
- StringData name,
- StringData whyMessage,
- Milliseconds waitFor) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-bool CatalogManagerMock::runUserManagementWriteCommand(OperationContext* txn,
- const string& commandName,
- const string& dbname,
- const BSONObj& cmdObj,
- BSONObjBuilder* result) {
- return true;
-}
-
-bool CatalogManagerMock::runUserManagementReadCommand(OperationContext* txn,
- const string& dbname,
- const BSONObj& cmdObj,
- BSONObjBuilder* result) {
- return true;
-}
-
-Status CatalogManagerMock::applyChunkOpsDeprecated(OperationContext* txn,
- const BSONArray& updateOps,
- const BSONArray& preCondition,
- const std::string& nss,
- const ChunkVersion& lastChunkVersion) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-Status CatalogManagerMock::logAction(OperationContext* txn,
- const std::string& what,
- const std::string& ns,
- const BSONObj& detail) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-Status CatalogManagerMock::logChange(OperationContext* txn,
- const string& what,
- const string& ns,
- const BSONObj& detail) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-StatusWith<BSONObj> CatalogManagerMock::getGlobalSettings(OperationContext* txn, StringData key) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-void CatalogManagerMock::writeConfigServerDirect(OperationContext* txn,
- const BatchedCommandRequest& request,
- BatchedCommandResponse* response) {}
-
-Status CatalogManagerMock::insertConfigDocument(OperationContext* txn,
- const std::string& ns,
- const BSONObj& doc) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-StatusWith<bool> CatalogManagerMock::updateConfigDocument(OperationContext* txn,
- const std::string& ns,
- const BSONObj& query,
- const BSONObj& update,
- bool upsert) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-Status CatalogManagerMock::removeConfigDocuments(OperationContext* txn,
- const std::string& ns,
- const BSONObj& query) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-Status CatalogManagerMock::createDatabase(OperationContext* txn, const std::string& dbName) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-DistLockManager* CatalogManagerMock::getDistLockManager() {
- return _mockDistLockMgr.get();
-}
-
-Status CatalogManagerMock::initConfigVersion(OperationContext* txn) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-Status CatalogManagerMock::appendInfoForConfigServerDatabases(OperationContext* txn,
- BSONArrayBuilder* builder) {
- return Status::OK();
-}
-
-void CatalogManagerMock::appendConnectionStats(executor::ConnectionPoolStats* stats) {}
-
-} // namespace mongo
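
The deleted CatalogManagerMock above stubs nearly every interface method with a uniform {ErrorCodes::InternalError, "Method not implemented"} Status, so any test that strays into an unmocked code path fails loudly instead of silently succeeding. Per the SConscript hunk below, the same skeleton presumably carries over to the renamed sharding_catalog_client_mock. A minimal self-contained sketch of that stub pattern, with Status and ErrorCodes reduced to stand-in types and the method set trimmed for illustration:

#include <cassert>
#include <iostream>
#include <string>

namespace sketch {

enum class ErrorCodes { OK, InternalError };

struct Status {
    ErrorCodes code;
    std::string reason;
    bool isOK() const { return code == ErrorCodes::OK; }
};

// Every method returns the same "not implemented" Status; a test overrides
// (or avoids) exactly the calls it cares about and anything else trips.
class ShardingCatalogClientMock {
public:
    Status startup() { return notImplemented(); }
    Status enableSharding(const std::string& /*dbName*/) { return notImplemented(); }
    Status createDatabase(const std::string& /*dbName*/) { return notImplemented(); }

private:
    static Status notImplemented() {
        return {ErrorCodes::InternalError, "Method not implemented"};
    }
};

}  // namespace sketch

int main() {
    sketch::ShardingCatalogClientMock mock;
    assert(!mock.startup().isOK());
    std::cout << mock.startup().reason << '\n';  // prints "Method not implemented"
    return 0;
}
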
diff --git a/src/mongo/s/catalog/dist_lock_manager.h b/src/mongo/s/catalog/dist_lock_manager.h
index 23f815c63cc..9622fae428e 100644
--- a/src/mongo/s/catalog/dist_lock_manager.h
+++ b/src/mongo/s/catalog/dist_lock_manager.h
@@ -156,8 +156,6 @@ public:
/**
* Makes a best-effort attempt to unlock all locks owned by the given processID.
- * Only implemented for the ReplSetDistLockManager and only used after catalog manager swap
- * during upgrade to CSRS.
*/
virtual void unlockAll(OperationContext* txn, const std::string& processID) = 0;
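
With the CSRS-upgrade caveat dropped, the surviving contract is just the first sentence: unlockAll() is a best-effort release of every lock a process owns. A toy illustration of what best effort means here, skipping over individual failures rather than propagating them; the lock table and process IDs are stand-ins, not mongo types:

#include <cassert>
#include <map>
#include <string>

class ToyDistLockManager {
public:
    void lock(const std::string& name, const std::string& processID) {
        _owners[name] = processID;
    }

    // Best effort: release everything owned by processID, never throw, and
    // leave locks held by other processes untouched.
    void unlockAll(const std::string& processID) {
        for (auto it = _owners.begin(); it != _owners.end();) {
            if (it->second == processID)
                it = _owners.erase(it);
            else
                ++it;
        }
    }

    bool isLocked(const std::string& name) const {
        return _owners.count(name) != 0;
    }

private:
    std::map<std::string, std::string> _owners;  // lock name -> owning process
};

int main() {
    ToyDistLockManager mgr;
    mgr.lock("balancer", "proc-1");
    mgr.lock("test.coll", "proc-2");
    mgr.unlockAll("proc-1");
    assert(!mgr.isLocked("balancer"));  // proc-1's lock released
    assert(mgr.isLocked("test.coll"));  // proc-2's lock untouched
    return 0;
}
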
diff --git a/src/mongo/s/catalog/replset/SConscript b/src/mongo/s/catalog/replset/SConscript
index 3d482b50403..a39cb3a5c27 100644
--- a/src/mongo/s/catalog/replset/SConscript
+++ b/src/mongo/s/catalog/replset/SConscript
@@ -50,8 +50,8 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/service_context_noop_init',
'$BUILD_DIR/mongo/executor/network_test_env',
'$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
- '$BUILD_DIR/mongo/s/catalog/catalog_manager_mock',
'$BUILD_DIR/mongo/s/catalog/dist_lock_catalog_mock',
+ '$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_mock',
'$BUILD_DIR/mongo/s/coreshard',
'$BUILD_DIR/mongo/s/mongoscore',
'$BUILD_DIR/mongo/s/sharding_test_fixture',
@@ -60,9 +60,9 @@ env.CppUnitTest(
)
env.Library(
- target='catalog_manager_replica_set',
+ target='sharding_catalog_client_impl',
source=[
- 'catalog_manager_replica_set.cpp',
+ 'sharding_catalog_client_impl.cpp',
],
LIBDEPS=[
'$BUILD_DIR/mongo/db/repl/read_concern_args',
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
index 5c3e1ccfb06..d400ae0213d 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
@@ -40,8 +40,8 @@
#include "mongo/db/s/type_shard_identity.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
-#include "mongo/s/catalog/replset/catalog_manager_replica_set.h"
#include "mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h"
+#include "mongo/s/catalog/replset/sharding_catalog_client_impl.h"
#include "mongo/s/catalog/type_changelog.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_shard.h"
@@ -300,10 +300,10 @@ TEST_F(AddShardTest, Standalone) {
auto future = launchAsync([this, expectedShardName] {
auto shardName = assertGet(
- catalogManager()->addShard(operationContext(),
- &expectedShardName,
- assertGet(ConnectionString::parse("StandaloneHost:12345")),
- 100));
+ catalogClient()->addShard(operationContext(),
+ &expectedShardName,
+ assertGet(ConnectionString::parse("StandaloneHost:12345")),
+ 100));
ASSERT_EQUALS(expectedShardName, shardName);
});
@@ -381,10 +381,10 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
auto future = launchAsync([this, expectedShardName, shardTarget] {
auto shardName = assertGet(
- catalogManager()->addShard(operationContext(),
- nullptr,
- assertGet(ConnectionString::parse(shardTarget.toString())),
- 100));
+ catalogClient()->addShard(operationContext(),
+ nullptr,
+ assertGet(ConnectionString::parse(shardTarget.toString())),
+ 100));
ASSERT_EQUALS(expectedShardName, shardName);
});
@@ -478,7 +478,7 @@ TEST_F(AddShardTest, AddSCCCConnectionStringAsShard) {
auto future = launchAsync([this, invalidConn] {
const std::string shardName("StandaloneShard");
- auto status = catalogManager()->addShard(operationContext(), &shardName, invalidConn, 100);
+ auto status = catalogClient()->addShard(operationContext(), &shardName, invalidConn, 100);
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(), "Invalid connection string");
});
@@ -493,10 +493,10 @@ TEST_F(AddShardTest, EmptyShardName) {
auto future = launchAsync([this, expectedShardName] {
auto status =
- catalogManager()->addShard(operationContext(),
- &expectedShardName,
- assertGet(ConnectionString::parse("StandaloneHost:12345")),
- 100);
+ catalogClient()->addShard(operationContext(),
+ &expectedShardName,
+ assertGet(ConnectionString::parse("StandaloneHost:12345")),
+ 100);
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_EQUALS("shard name cannot be empty", status.getStatus().reason());
});
@@ -517,10 +517,10 @@ TEST_F(AddShardTest, UnreachableHost) {
auto future = launchAsync([this, expectedShardName] {
auto status =
- catalogManager()->addShard(operationContext(),
- &expectedShardName,
- assertGet(ConnectionString::parse("StandaloneHost:12345")),
- 100);
+ catalogClient()->addShard(operationContext(),
+ &expectedShardName,
+ assertGet(ConnectionString::parse("StandaloneHost:12345")),
+ 100);
ASSERT_EQUALS(ErrorCodes::HostUnreachable, status);
ASSERT_EQUALS("host unreachable", status.getStatus().reason());
});
@@ -544,10 +544,10 @@ TEST_F(AddShardTest, AddMongosAsShard) {
auto future = launchAsync([this, expectedShardName] {
auto status =
- catalogManager()->addShard(operationContext(),
- &expectedShardName,
- assertGet(ConnectionString::parse("StandaloneHost:12345")),
- 100);
+ catalogClient()->addShard(operationContext(),
+ &expectedShardName,
+ assertGet(ConnectionString::parse("StandaloneHost:12345")),
+ 100);
ASSERT_EQUALS(ErrorCodes::RPCProtocolNegotiationFailed, status);
});
@@ -571,10 +571,10 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
auto future = launchAsync([this, expectedShardName, shardTarget] {
auto status =
- catalogManager()->addShard(operationContext(),
- &expectedShardName,
- assertGet(ConnectionString::parse(shardTarget.toString())),
- 100);
+ catalogClient()->addShard(operationContext(),
+ &expectedShardName,
+ assertGet(ConnectionString::parse(shardTarget.toString())),
+ 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(),
"is already a member of the existing shard");
@@ -603,7 +603,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
auto future = launchAsync([this, expectedShardName, connString] {
auto status =
- catalogManager()->addShard(operationContext(), &expectedShardName, connString, 100);
+ catalogClient()->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(),
"is already a member of the existing shard");
@@ -630,10 +630,10 @@ TEST_F(AddShardTest, AddReplicaSetShardAsStandalone) {
auto future = launchAsync([this, expectedShardName, shardTarget] {
auto status =
- catalogManager()->addShard(operationContext(),
- &expectedShardName,
- assertGet(ConnectionString::parse(shardTarget.toString())),
- 100);
+ catalogClient()->addShard(operationContext(),
+ &expectedShardName,
+ assertGet(ConnectionString::parse(shardTarget.toString())),
+ 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(), "use replica set url format");
});
@@ -660,7 +660,7 @@ TEST_F(AddShardTest, AddStandaloneHostShardAsReplicaSet) {
auto future = launchAsync([this, expectedShardName, connString] {
auto status =
- catalogManager()->addShard(operationContext(), &expectedShardName, connString, 100);
+ catalogClient()->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(), "host did not return a set name");
});
@@ -686,7 +686,7 @@ TEST_F(AddShardTest, ReplicaSetMistmatchedReplicaSetName) {
auto future = launchAsync([this, expectedShardName, connString] {
auto status =
- catalogManager()->addShard(operationContext(), &expectedShardName, connString, 100);
+ catalogClient()->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(), "does not match the actual set name");
});
@@ -713,7 +713,7 @@ TEST_F(AddShardTest, ShardIsCSRSConfigServer) {
auto future = launchAsync([this, expectedShardName, connString] {
auto status =
- catalogManager()->addShard(operationContext(), &expectedShardName, connString, 100);
+ catalogClient()->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(),
"as a shard since it is a config server");
@@ -743,7 +743,7 @@ TEST_F(AddShardTest, ReplicaSetMissingHostsProvidedInSeedList) {
auto future = launchAsync([this, expectedShardName, connString] {
auto status =
- catalogManager()->addShard(operationContext(), &expectedShardName, connString, 100);
+ catalogClient()->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(),
"host2:12345 does not belong to replica set");
@@ -775,7 +775,7 @@ TEST_F(AddShardTest, ShardNameIsConfig) {
auto future = launchAsync([this, expectedShardName, connString] {
auto status =
- catalogManager()->addShard(operationContext(), &expectedShardName, connString, 100);
+ catalogClient()->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_EQUALS(status.getStatus().reason(),
"use of shard replica set with name 'config' is not allowed");
@@ -807,7 +807,7 @@ TEST_F(AddShardTest, ShardContainsExistingDatabase) {
auto future = launchAsync([this, expectedShardName, connString] {
auto status =
- catalogManager()->addShard(operationContext(), &expectedShardName, connString, 100);
+ catalogClient()->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(
status.getStatus().reason(),
@@ -851,7 +851,7 @@ TEST_F(AddShardTest, ReAddExistingShard) {
auto future = launchAsync([this, expectedShardName, connString] {
auto status =
- catalogManager()->addShard(operationContext(), &expectedShardName, connString, 100);
+ catalogClient()->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_OK(status);
});
@@ -918,7 +918,7 @@ TEST_F(AddShardTest, SuccessfullyAddReplicaSet) {
auto future = launchAsync([this, expectedShardName, connString] {
auto status =
- catalogManager()->addShard(operationContext(), &expectedShardName, connString, 100);
+ catalogClient()->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_OK(status);
auto shardName = status.getValue();
ASSERT_EQUALS(expectedShardName, shardName);
@@ -979,7 +979,7 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
auto future = launchAsync([this, expectedShardName, connString] {
auto status =
- catalogManager()->addShard(operationContext(), &expectedShardName, connString, 100);
+ catalogClient()->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_OK(status);
auto shardName = status.getValue();
ASSERT_EQUALS(expectedShardName, shardName);
@@ -1064,7 +1064,7 @@ TEST_F(AddShardTest, ReplicaSetExtraHostsDiscovered) {
auto future = launchAsync([this, expectedShardName, seedString] {
auto status =
- catalogManager()->addShard(operationContext(), &expectedShardName, seedString, 100);
+ catalogClient()->addShard(operationContext(), &expectedShardName, seedString, 100);
ASSERT_OK(status);
auto shardName = status.getValue();
ASSERT_EQUALS(expectedShardName, shardName);
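
Every hunk in this test file is the same mechanical swap of catalogManager() for catalogClient(); the surrounding launchAsync/onCommand structure is untouched. That structure, where the call under test blocks on a mocked network response that the test thread supplies afterwards, reduces to the following self-contained analogue. std::future stands in for mongo's executor::NetworkTestEnv, and the response string is purely illustrative:

#include <cassert>
#include <future>
#include <string>

struct MockNetwork {
    std::promise<std::string> response;
    // Code under test: blocks until the test injects a response.
    std::string runCommand() { return response.get_future().get(); }
};

int main() {
    MockNetwork net;

    // Analogue of launchAsync([...] { catalogClient()->addShard(...); });
    auto future = std::async(std::launch::async, [&net] { return net.runCommand(); });

    // Analogue of onCommand([](const RemoteCommandRequest&) { return ...; });
    net.response.set_value("{ ok: 1, shardAdded: \"StandaloneShard\" }");

    // Joining the future is the moment the test's assertions run.
    assert(future.get().find("ok: 1") != std::string::npos);
    return 0;
}
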
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_append_db_stats_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_append_db_stats_test.cpp
index 6542a14a5e6..23f77464afd 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_append_db_stats_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_append_db_stats_test.cpp
@@ -35,8 +35,8 @@
#include "mongo/executor/network_interface_mock.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
-#include "mongo/s/catalog/replset/catalog_manager_replica_set.h"
#include "mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h"
+#include "mongo/s/catalog/replset/sharding_catalog_client_impl.h"
#include "mongo/stdx/future.h"
#include "mongo/util/log.h"
#include "mongo/util/time_support.h"
@@ -63,7 +63,7 @@ TEST_F(CatalogManagerReplSetAppendDbStatsTest, BasicAppendDBStats) {
BSONArrayBuilder builder;
auto future = launchAsync([this, &builder] {
ASSERT_OK(
- catalogManager()->appendInfoForConfigServerDatabases(operationContext(), &builder));
+ catalogClient()->appendInfoForConfigServerDatabases(operationContext(), &builder));
});
onCommand([](const RemoteCommandRequest& request) {
@@ -122,7 +122,7 @@ TEST_F(CatalogManagerReplSetAppendDbStatsTest, ErrorRunningListDatabases) {
BSONArrayBuilder builder;
auto future = launchAsync([this, &builder] {
auto status =
- catalogManager()->appendInfoForConfigServerDatabases(operationContext(), &builder);
+ catalogClient()->appendInfoForConfigServerDatabases(operationContext(), &builder);
ASSERT_NOT_OK(status);
ASSERT_EQ(ErrorCodes::AuthenticationFailed, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -141,7 +141,7 @@ TEST_F(CatalogManagerReplSetAppendDbStatsTest, MalformedListDatabasesResponse) {
BSONArrayBuilder builder;
auto future = launchAsync([this, &builder] {
auto status =
- catalogManager()->appendInfoForConfigServerDatabases(operationContext(), &builder);
+ catalogClient()->appendInfoForConfigServerDatabases(operationContext(), &builder);
ASSERT_NOT_OK(status);
ASSERT_EQ(ErrorCodes::NoSuchKey, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -158,7 +158,7 @@ TEST_F(CatalogManagerReplSetAppendDbStatsTest, MalformedListDatabasesEntryInResp
BSONArrayBuilder builder;
auto future = launchAsync([this, &builder] {
auto status =
- catalogManager()->appendInfoForConfigServerDatabases(operationContext(), &builder);
+ catalogClient()->appendInfoForConfigServerDatabases(operationContext(), &builder);
ASSERT_NOT_OK(status);
ASSERT_EQ(ErrorCodes::NoSuchKey, status.code());
ASSERT_FALSE(status.reason().empty());
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp
index 8f5b49aab7e..dfc4643b560 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp
@@ -36,8 +36,8 @@
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
#include "mongo/s/catalog/dist_lock_manager_mock.h"
-#include "mongo/s/catalog/replset/catalog_manager_replica_set.h"
#include "mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h"
+#include "mongo/s/catalog/replset/sharding_catalog_client_impl.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/chunk_version.h"
@@ -174,7 +174,7 @@ private:
TEST_F(DropColl2ShardTest, Basic) {
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_OK(status);
});
@@ -203,7 +203,7 @@ TEST_F(DropColl2ShardTest, Basic) {
TEST_F(DropColl2ShardTest, NSNotFound) {
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_OK(status);
});
@@ -255,7 +255,7 @@ TEST_F(DropColl2ShardTest, ConfigTargeterError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::HostUnreachable, "bad test network"});
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_EQ(ErrorCodes::HostUnreachable, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -268,7 +268,7 @@ TEST_F(DropColl2ShardTest, DistLockBusy) {
{ErrorCodes::LockBusy, "test lock taken"});
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_EQ(ErrorCodes::LockBusy, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -288,7 +288,7 @@ TEST_F(DropColl2ShardTest, FirstShardTargeterError) {
shard1Targeter->setFindHostReturnValue({ErrorCodes::HostUnreachable, "bad test network"});
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_EQ(ErrorCodes::HostUnreachable, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -304,7 +304,7 @@ TEST_F(DropColl2ShardTest, FirstShardTargeterError) {
TEST_F(DropColl2ShardTest, FirstShardDropError) {
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_EQ(ErrorCodes::CallbackCanceled, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -325,7 +325,7 @@ TEST_F(DropColl2ShardTest, FirstShardDropError) {
TEST_F(DropColl2ShardTest, FirstShardDropCmdError) {
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_EQ(ErrorCodes::OperationFailed, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -352,7 +352,7 @@ TEST_F(DropColl2ShardTest, SecondShardTargeterError) {
shard2Targeter->setFindHostReturnValue({ErrorCodes::HostUnreachable, "bad test network"});
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_EQ(ErrorCodes::HostUnreachable, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -370,7 +370,7 @@ TEST_F(DropColl2ShardTest, SecondShardTargeterError) {
TEST_F(DropColl2ShardTest, SecondShardDropError) {
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_EQ(ErrorCodes::CallbackCanceled, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -393,7 +393,7 @@ TEST_F(DropColl2ShardTest, SecondShardDropError) {
TEST_F(DropColl2ShardTest, SecondShardDropCmdError) {
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_EQ(ErrorCodes::OperationFailed, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -415,7 +415,7 @@ TEST_F(DropColl2ShardTest, SecondShardDropCmdError) {
TEST_F(DropColl2ShardTest, CleanupChunkError) {
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_EQ(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -439,7 +439,7 @@ TEST_F(DropColl2ShardTest, CleanupChunkError) {
TEST_F(DropColl2ShardTest, SSVCmdErrorOnShard1) {
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_EQ(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -465,7 +465,7 @@ TEST_F(DropColl2ShardTest, SSVCmdErrorOnShard1) {
TEST_F(DropColl2ShardTest, SSVErrorOnShard1) {
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_EQ(ErrorCodes::CallbackCanceled, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -491,7 +491,7 @@ TEST_F(DropColl2ShardTest, SSVErrorOnShard1) {
TEST_F(DropColl2ShardTest, UnsetCmdErrorOnShard1) {
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_EQ(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -519,7 +519,7 @@ TEST_F(DropColl2ShardTest, UnsetCmdErrorOnShard1) {
TEST_F(DropColl2ShardTest, UnsetErrorOnShard1) {
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_EQ(ErrorCodes::CallbackCanceled, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -547,7 +547,7 @@ TEST_F(DropColl2ShardTest, UnsetErrorOnShard1) {
TEST_F(DropColl2ShardTest, SSVCmdErrorOnShard2) {
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_EQ(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -576,7 +576,7 @@ TEST_F(DropColl2ShardTest, SSVCmdErrorOnShard2) {
TEST_F(DropColl2ShardTest, SSVErrorOnShard2) {
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_EQ(ErrorCodes::CallbackCanceled, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -605,7 +605,7 @@ TEST_F(DropColl2ShardTest, SSVErrorOnShard2) {
TEST_F(DropColl2ShardTest, UnsetCmdErrorOnShard2) {
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_EQ(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -636,7 +636,7 @@ TEST_F(DropColl2ShardTest, UnsetCmdErrorOnShard2) {
TEST_F(DropColl2ShardTest, UnsetErrorOnShard2) {
auto future = launchAsync([this] {
- auto status = catalogManager()->dropCollection(operationContext(), dropNS());
+ auto status = catalogClient()->dropCollection(operationContext(), dropNS());
ASSERT_EQ(ErrorCodes::CallbackCanceled, status.code());
ASSERT_FALSE(status.reason().empty());
});
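
Taken together, the DropColl2ShardTest failure cases above trace dropCollection()'s stages: take the distributed lock, issue the drop on each shard, clean up the collection's chunk metadata, then run setShardVersion/unset against the shards. A stage outline in plain C++, with booleans standing in for the RPC outcomes the tests mock; the stage names come from the test names, everything else is illustrative:

#include <cassert>
#include <string>

// Each boolean stands in for one mocked round-trip from the tests above.
std::string dropCollectionOutline(bool distLockOk,
                                  bool shardDropsOk,
                                  bool chunkCleanupOk,
                                  bool ssvOk) {
    if (!distLockOk)
        return "LockBusy";                // DistLockBusy
    if (!shardDropsOk)
        return "drop failed";             // First/SecondShardDrop(Cmd)Error
    if (!chunkCleanupOk)
        return "chunk cleanup failed";    // CleanupChunkError
    if (!ssvOk)
        return "setShardVersion failed";  // SSV*/Unset* cases
    return "OK";                          // Basic / NSNotFound
}

int main() {
    assert(dropCollectionOutline(true, true, true, true) == "OK");
    assert(dropCollectionOutline(false, true, true, true) == "LockBusy");
    return 0;
}
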
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_log_change_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_log_change_test.cpp
index ed2a1b7e868..ed1b7eaaac8 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_log_change_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_log_change_test.cpp
@@ -36,8 +36,8 @@
#include "mongo/db/commands.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/s/catalog/replset/catalog_manager_replica_set.h"
#include "mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h"
+#include "mongo/s/catalog/replset/sharding_catalog_client_impl.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h"
@@ -171,9 +171,9 @@ protected:
Status log(const std::string& what, const std::string& ns, const BSONObj& detail) {
if (_configCollType == ChangeLog) {
- return catalogManager()->logChange(operationContext(), what, ns, detail);
+ return catalogClient()->logChange(operationContext(), what, ns, detail);
} else {
- return catalogManager()->logAction(operationContext(), what, ns, detail);
+ return catalogClient()->logAction(operationContext(), what, ns, detail);
}
}
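
The fixture's log() helper above is the one place in this file that branches: ChangeLog configurations go through logChange() and ActionLog ones through logAction(). Those entry points write to the config server's changelog and actionlog collections respectively; a standalone dispatch sketch, where the collection names follow that convention and everything else is illustrative:

#include <iostream>
#include <string>

enum class ConfigCollType { ChangeLog, ActionLog };

std::string targetCollection(ConfigCollType type) {
    // logChange() documents land in config.changelog,
    // logAction() documents in config.actionlog.
    return type == ConfigCollType::ChangeLog ? "config.changelog"
                                             : "config.actionlog";
}

int main() {
    std::cout << targetCollection(ConfigCollType::ChangeLog) << '\n';
    std::cout << targetCollection(ConfigCollType::ActionLog) << '\n';
    return 0;
}
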
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp
index 8d3f9c1f21e..3f6c7a83db7 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp
@@ -39,8 +39,8 @@
#include "mongo/executor/task_executor.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
-#include "mongo/s/catalog/replset/catalog_manager_replica_set.h"
#include "mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h"
+#include "mongo/s/catalog/replset/sharding_catalog_client_impl.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_shard.h"
@@ -87,7 +87,7 @@ TEST_F(RemoveShardTest, RemoveShardAnotherShardDraining) {
auto future = launchAsync([&] {
ASSERT_EQUALS(ErrorCodes::ConflictingOperationInProgress,
- catalogManager()->removeShard(operationContext(), shardName));
+ catalogClient()->removeShard(operationContext(), shardName));
});
expectCount(configHost,
@@ -103,7 +103,7 @@ TEST_F(RemoveShardTest, RemoveShardCantRemoveLastShard) {
auto future = launchAsync([&] {
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
- catalogManager()->removeShard(operationContext(), shardName));
+ catalogClient()->removeShard(operationContext(), shardName));
});
// Report that there are no other draining operations ongoing
@@ -127,7 +127,7 @@ TEST_F(RemoveShardTest, RemoveShardStartDraining) {
getMessagingPort()->setRemote(clientHost);
auto future = launchAsync([&] {
- auto result = assertGet(catalogManager()->removeShard(operationContext(), shardName));
+ auto result = assertGet(catalogClient()->removeShard(operationContext(), shardName));
ASSERT_EQUALS(ShardDrainingStatus::STARTED, result);
});
@@ -209,7 +209,7 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingChunksRemaining) {
string shardName = "shardToRemove";
auto future = launchAsync([&] {
- auto result = assertGet(catalogManager()->removeShard(operationContext(), shardName));
+ auto result = assertGet(catalogClient()->removeShard(operationContext(), shardName));
ASSERT_EQUALS(ShardDrainingStatus::ONGOING, result);
});
@@ -249,7 +249,7 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingDatabasesRemaining) {
string shardName = "shardToRemove";
auto future = launchAsync([&] {
- auto result = assertGet(catalogManager()->removeShard(operationContext(), shardName));
+ auto result = assertGet(catalogClient()->removeShard(operationContext(), shardName));
ASSERT_EQUALS(ShardDrainingStatus::ONGOING, result);
});
@@ -291,7 +291,7 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
getMessagingPort()->setRemote(clientHost);
auto future = launchAsync([&] {
- auto result = assertGet(catalogManager()->removeShard(operationContext(), shardName));
+ auto result = assertGet(catalogClient()->removeShard(operationContext(), shardName));
ASSERT_EQUALS(ShardDrainingStatus::COMPLETED, result);
});
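
Read in order, the RemoveShardTest cases above walk removeShard() through its draining lifecycle: STARTED on the first call against a shard, ONGOING while chunks or databases still live on it, COMPLETED once it is empty. A toy state function under those assumptions, with counts standing in for the config-server queries the real code issues:

#include <cassert>

enum class ShardDrainingStatus { STARTED, ONGOING, COMPLETED };

ShardDrainingStatus removeShard(bool alreadyDraining, int chunksLeft, int dbsLeft) {
    if (!alreadyDraining)
        return ShardDrainingStatus::STARTED;  // first call flips the draining flag
    if (chunksLeft > 0 || dbsLeft > 0)
        return ShardDrainingStatus::ONGOING;  // still migrating data off the shard
    return ShardDrainingStatus::COMPLETED;    // safe to remove the shard document
}

int main() {
    assert(removeShard(false, 10, 2) == ShardDrainingStatus::STARTED);
    assert(removeShard(true, 3, 0) == ShardDrainingStatus::ONGOING);   // chunks remain
    assert(removeShard(true, 0, 1) == ShardDrainingStatus::ONGOING);   // databases remain
    assert(removeShard(true, 0, 0) == ShardDrainingStatus::COMPLETED);
    return 0;
}
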
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp
index cd41e90c3ed..13ccd0c75c0 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp
@@ -43,8 +43,8 @@
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
#include "mongo/s/catalog/dist_lock_manager_mock.h"
-#include "mongo/s/catalog/replset/catalog_manager_replica_set.h"
#include "mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h"
+#include "mongo/s/catalog/replset/sharding_catalog_client_impl.h"
#include "mongo/s/catalog/type_changelog.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
@@ -233,7 +233,7 @@ TEST_F(ShardCollectionTest, distLockFails) {
ShardKeyPattern keyPattern(BSON("_id" << 1));
ASSERT_EQUALS(
ErrorCodes::LockBusy,
- catalogManager()->shardCollection(
+ catalogClient()->shardCollection(
operationContext(), "test.foo", keyPattern, false, vector<BSONObj>{}, set<ShardId>{}));
}
@@ -267,7 +267,7 @@ TEST_F(ShardCollectionTest, anotherMongosSharding) {
Client::initThreadIfNotAlready();
ASSERT_EQUALS(
ErrorCodes::AlreadyInitialized,
- catalogManager()->shardCollection(
+ catalogClient()->shardCollection(
operationContext(), ns, keyPattern, false, vector<BSONObj>{}, set<ShardId>{}));
});
@@ -324,7 +324,7 @@ TEST_F(ShardCollectionTest, noInitialChunksOrData) {
// Now start actually sharding the collection.
auto future = launchAsync([&] {
Client::initThreadIfNotAlready();
- ASSERT_OK(catalogManager()->shardCollection(
+ ASSERT_OK(catalogClient()->shardCollection(
operationContext(), ns, keyPattern, false, vector<BSONObj>{}, set<ShardId>{}));
});
@@ -493,7 +493,7 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
auto future = launchAsync([&] {
Client::initThreadIfNotAlready();
set<ShardId> shards{shard0.getName(), shard1.getName(), shard2.getName()};
- ASSERT_OK(catalogManager()->shardCollection(
+ ASSERT_OK(catalogClient()->shardCollection(
operationContext(),
ns,
keyPattern,
@@ -644,7 +644,7 @@ TEST_F(ShardCollectionTest, withInitialData) {
// Now start actually sharding the collection.
auto future = launchAsync([&] {
Client::initThreadIfNotAlready();
- ASSERT_OK(catalogManager()->shardCollection(
+ ASSERT_OK(catalogClient()->shardCollection(
operationContext(), ns, keyPattern, false, vector<BSONObj>{}, set<ShardId>{}));
});
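
The difference between noInitialChunksOrData and withInitialChunks above is just the split-point vector handed to shardCollection(): an empty vector yields a single chunk spanning the whole key space, while N points yield N+1 contiguous ranges. A sketch of that range arithmetic, with integer keys standing in for shard-key BSON:

#include <cassert>
#include <limits>
#include <utility>
#include <vector>

std::vector<std::pair<int, int>> makeInitialChunks(const std::vector<int>& splitPoints) {
    const int kMinKey = std::numeric_limits<int>::min();  // stand-in for MinKey
    const int kMaxKey = std::numeric_limits<int>::max();  // stand-in for MaxKey
    std::vector<std::pair<int, int>> chunks;
    int lower = kMinKey;
    for (int split : splitPoints) {
        chunks.emplace_back(lower, split);  // half-open range [lower, split)
        lower = split;
    }
    chunks.emplace_back(lower, kMaxKey);  // final chunk always reaches MaxKey
    return chunks;
}

int main() {
    assert(makeInitialChunks({}).size() == 1);        // no split points: one big chunk
    assert(makeInitialChunks({0, 100}).size() == 3);  // N points -> N+1 chunks
    return 0;
}
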
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
index b45914dfeba..3d0e1033999 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
@@ -43,8 +43,8 @@
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
#include "mongo/s/catalog/dist_lock_manager_mock.h"
-#include "mongo/s/catalog/replset/catalog_manager_replica_set.h"
#include "mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h"
+#include "mongo/s/catalog/replset/sharding_catalog_client_impl.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_database.h"
@@ -96,7 +96,7 @@ TEST_F(CatalogManagerReplSetTest, GetCollectionExisting) {
auto future = launchAsync([this, &expectedColl] {
return assertGet(
- catalogManager()->getCollection(operationContext(), expectedColl.getNs().ns()));
+ catalogClient()->getCollection(operationContext(), expectedColl.getNs().ns()));
});
onFindWithMetadataCommand(
@@ -134,7 +134,7 @@ TEST_F(CatalogManagerReplSetTest, GetCollectionNotExisting) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
- auto status = catalogManager()->getCollection(operationContext(), "NonExistent");
+ auto status = catalogClient()->getCollection(operationContext(), "NonExistent");
ASSERT_EQUALS(status.getStatus(), ErrorCodes::NamespaceNotFound);
});
@@ -145,7 +145,7 @@ TEST_F(CatalogManagerReplSetTest, GetCollectionNotExisting) {
}
TEST_F(CatalogManagerReplSetTest, GetDatabaseInvalidName) {
- auto status = catalogManager()->getDatabase(operationContext(), "b.c").getStatus();
+ auto status = catalogClient()->getDatabase(operationContext(), "b.c").getStatus();
ASSERT_EQ(ErrorCodes::InvalidNamespace, status.code());
ASSERT_FALSE(status.reason().empty());
}
@@ -161,7 +161,7 @@ TEST_F(CatalogManagerReplSetTest, GetDatabaseExisting) {
const OpTime newOpTime(Timestamp(7, 6), 5);
auto future = launchAsync([this, &expectedDb] {
- return assertGet(catalogManager()->getDatabase(operationContext(), expectedDb.getName()));
+ return assertGet(catalogClient()->getDatabase(operationContext(), expectedDb.getName()));
});
onFindWithMetadataCommand([this, &expectedDb, newOpTime](const RemoteCommandRequest& request) {
@@ -202,7 +202,7 @@ TEST_F(CatalogManagerReplSetTest, GetDatabaseStaleSecondaryRetrySuccess) {
expectedDb.setSharded(true);
auto future = launchAsync([this, &expectedDb] {
- return assertGet(catalogManager()->getDatabase(operationContext(), expectedDb.getName()));
+ return assertGet(catalogClient()->getDatabase(operationContext(), expectedDb.getName()));
});
// Return empty result set as if the database wasn't found
@@ -227,7 +227,7 @@ TEST_F(CatalogManagerReplSetTest, GetDatabaseStaleSecondaryRetryNoPrimary) {
configTargeter()->setFindHostReturnValue(testHost);
auto future = launchAsync([this] {
- auto dbResult = catalogManager()->getDatabase(operationContext(), "NonExistent");
+ auto dbResult = catalogClient()->getDatabase(operationContext(), "NonExistent");
ASSERT_EQ(dbResult.getStatus(), ErrorCodes::NotMaster);
});
@@ -246,7 +246,7 @@ TEST_F(CatalogManagerReplSetTest, GetDatabaseNotExisting) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
- auto dbResult = catalogManager()->getDatabase(operationContext(), "NonExistent");
+ auto dbResult = catalogClient()->getDatabase(operationContext(), "NonExistent");
ASSERT_EQ(dbResult.getStatus(), ErrorCodes::NamespaceNotFound);
});
@@ -267,7 +267,7 @@ TEST_F(CatalogManagerReplSetTest, UpdateCollection) {
collection.setKeyPattern(KeyPattern(BSON("_id" << 1)));
auto future = launchAsync([this, collection] {
- auto status = catalogManager()->updateCollection(
+ auto status = catalogClient()->updateCollection(
operationContext(), collection.getNs().toString(), collection);
ASSERT_OK(status);
});
@@ -289,7 +289,7 @@ TEST_F(CatalogManagerReplSetTest, UpdateCollectionNotMaster) {
collection.setKeyPattern(KeyPattern(BSON("_id" << 1)));
auto future = launchAsync([this, collection] {
- auto status = catalogManager()->updateCollection(
+ auto status = catalogClient()->updateCollection(
operationContext(), collection.getNs().toString(), collection);
ASSERT_EQUALS(ErrorCodes::NotMaster, status);
});
@@ -320,7 +320,7 @@ TEST_F(CatalogManagerReplSetTest, UpdateCollectionNotMasterFromTargeter) {
collection.setKeyPattern(KeyPattern(BSON("_id" << 1)));
auto future = launchAsync([this, collection] {
- auto status = catalogManager()->updateCollection(
+ auto status = catalogClient()->updateCollection(
operationContext(), collection.getNs().toString(), collection);
ASSERT_EQUALS(ErrorCodes::NotMaster, status);
});
@@ -342,7 +342,7 @@ TEST_F(CatalogManagerReplSetTest, UpdateCollectionNotMasterRetrySuccess) {
collection.setKeyPattern(KeyPattern(BSON("_id" << 1)));
auto future = launchAsync([this, collection] {
- auto status = catalogManager()->updateCollection(
+ auto status = catalogClient()->updateCollection(
operationContext(), collection.getNs().toString(), collection);
ASSERT_OK(status);
});
@@ -389,7 +389,7 @@ TEST_F(CatalogManagerReplSetTest, GetAllShardsValid) {
const vector<ShardType> expectedShardsList = {s1, s2, s3};
auto future = launchAsync([this] {
- auto shards = assertGet(catalogManager()->getAllShards(operationContext()));
+ auto shards = assertGet(catalogClient()->getAllShards(operationContext()));
return shards.value;
});
@@ -423,7 +423,7 @@ TEST_F(CatalogManagerReplSetTest, GetAllShardsWithInvalidShard) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
- auto status = catalogManager()->getAllShards(operationContext());
+ auto status = catalogClient()->getAllShards(operationContext());
ASSERT_EQ(ErrorCodes::FailedToParse, status.getStatus());
});
@@ -475,12 +475,12 @@ TEST_F(CatalogManagerReplSetTest, GetChunksForNSWithSortAndLimit) {
vector<ChunkType> chunks;
OpTime opTime;
- ASSERT_OK(catalogManager()->getChunks(operationContext(),
- chunksQuery,
- BSON(ChunkType::DEPRECATED_lastmod() << -1),
- 1,
- &chunks,
- &opTime));
+ ASSERT_OK(catalogClient()->getChunks(operationContext(),
+ chunksQuery,
+ BSON(ChunkType::DEPRECATED_lastmod() << -1),
+ 1,
+ &chunks,
+ &opTime));
ASSERT_EQ(2U, chunks.size());
ASSERT_EQ(newOpTime, opTime);
@@ -528,7 +528,7 @@ TEST_F(CatalogManagerReplSetTest, GetChunksForNSNoSortNoLimit) {
auto future = launchAsync([this, &chunksQuery] {
vector<ChunkType> chunks;
- ASSERT_OK(catalogManager()->getChunks(
+ ASSERT_OK(catalogClient()->getChunks(
operationContext(), chunksQuery, BSONObj(), boost::none, &chunks, nullptr));
ASSERT_EQ(0U, chunks.size());
@@ -568,7 +568,7 @@ TEST_F(CatalogManagerReplSetTest, GetChunksForNSInvalidChunk) {
auto future = launchAsync([this, &chunksQuery] {
vector<ChunkType> chunks;
- Status status = catalogManager()->getChunks(
+ Status status = catalogClient()->getChunks(
operationContext(), chunksQuery, BSONObj(), boost::none, &chunks, nullptr);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status);
@@ -601,7 +601,7 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementReadCommand) {
auto future = launchAsync([this] {
BSONObjBuilder responseBuilder;
- bool ok = catalogManager()->runUserManagementReadCommand(
+ bool ok = catalogClient()->runUserManagementReadCommand(
operationContext(), "test", BSON("usersInfo" << 1), &responseBuilder);
ASSERT_TRUE(ok);
@@ -628,7 +628,7 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementReadCommandUnsatisfiedReadPre
Status(ErrorCodes::FailedToSatisfyReadPreference, "no nodes up"));
BSONObjBuilder responseBuilder;
- bool ok = catalogManager()->runUserManagementReadCommand(
+ bool ok = catalogClient()->runUserManagementReadCommand(
operationContext(), "test", BSON("usersInfo" << 1), &responseBuilder);
ASSERT_FALSE(ok);
@@ -641,12 +641,12 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandSuccess) {
auto future = launchAsync([this] {
BSONObjBuilder responseBuilder;
- bool ok = catalogManager()->runUserManagementWriteCommand(operationContext(),
- "dropUser",
- "test",
- BSON("dropUser"
- << "test"),
- &responseBuilder);
+ bool ok = catalogClient()->runUserManagementWriteCommand(operationContext(),
+ "dropUser",
+ "test",
+ BSON("dropUser"
+ << "test"),
+ &responseBuilder);
ASSERT_FALSE(ok);
Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
@@ -683,14 +683,14 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandInvalidWriteConce
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
BSONObjBuilder responseBuilder;
- bool ok = catalogManager()->runUserManagementWriteCommand(operationContext(),
- "dropUser",
- "test",
- BSON("dropUser"
- << "test"
- << "writeConcern"
- << BSON("w" << 2)),
- &responseBuilder);
+ bool ok = catalogClient()->runUserManagementWriteCommand(operationContext(),
+ "dropUser",
+ "test",
+ BSON("dropUser"
+ << "test"
+ << "writeConcern"
+ << BSON("w" << 2)),
+ &responseBuilder);
ASSERT_FALSE(ok);
Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
@@ -714,16 +714,15 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandRewriteWriteConce
auto future = launchAsync([this] {
BSONObjBuilder responseBuilder;
- bool ok =
- catalogManager()->runUserManagementWriteCommand(operationContext(),
- "dropUser",
- "test",
- BSON("dropUser"
- << "test"
- << "writeConcern"
- << BSON("w" << 1 << "wtimeout"
- << 30)),
- &responseBuilder);
+ bool ok = catalogClient()->runUserManagementWriteCommand(operationContext(),
+ "dropUser",
+ "test",
+ BSON("dropUser"
+ << "test"
+ << "writeConcern"
+ << BSON("w" << 1 << "wtimeout"
+ << 30)),
+ &responseBuilder);
ASSERT_FALSE(ok);
Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
@@ -760,12 +759,12 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandNotMaster) {
auto future = launchAsync([this] {
BSONObjBuilder responseBuilder;
- bool ok = catalogManager()->runUserManagementWriteCommand(operationContext(),
- "dropUser",
- "test",
- BSON("dropUser"
- << "test"),
- &responseBuilder);
+ bool ok = catalogClient()->runUserManagementWriteCommand(operationContext(),
+ "dropUser",
+ "test",
+ BSON("dropUser"
+ << "test"),
+ &responseBuilder);
ASSERT_FALSE(ok);
Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
@@ -793,12 +792,12 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandNotMasterRetrySuc
auto future = launchAsync([this] {
BSONObjBuilder responseBuilder;
- bool ok = catalogManager()->runUserManagementWriteCommand(operationContext(),
- "dropUser",
- "test",
- BSON("dropUser"
- << "test"),
- &responseBuilder);
+ bool ok = catalogClient()->runUserManagementWriteCommand(operationContext(),
+ "dropUser",
+ "test",
+ BSON("dropUser"
+ << "test"),
+ &responseBuilder);
ASSERT_TRUE(ok);
Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
@@ -875,7 +874,7 @@ TEST_F(CatalogManagerReplSetTest, GetCollectionsValidResultsNoDb) {
OpTime opTime;
const auto status =
- catalogManager()->getCollections(operationContext(), nullptr, &collections, &opTime);
+ catalogClient()->getCollections(operationContext(), nullptr, &collections, &opTime);
ASSERT_OK(status);
ASSERT_EQ(newOpTime, opTime);
@@ -935,7 +934,7 @@ TEST_F(CatalogManagerReplSetTest, GetCollectionsValidResultsWithDb) {
vector<CollectionType> collections;
const auto status =
- catalogManager()->getCollections(operationContext(), &dbName, &collections, nullptr);
+ catalogClient()->getCollections(operationContext(), &dbName, &collections, nullptr);
ASSERT_OK(status);
return collections;
@@ -975,7 +974,7 @@ TEST_F(CatalogManagerReplSetTest, GetCollectionsInvalidCollectionType) {
vector<CollectionType> collections;
const auto status =
- catalogManager()->getCollections(operationContext(), &dbName, &collections, nullptr);
+ catalogClient()->getCollections(operationContext(), &dbName, &collections, nullptr);
ASSERT_EQ(ErrorCodes::FailedToParse, status);
ASSERT_EQ(0U, collections.size());
@@ -1029,7 +1028,7 @@ TEST_F(CatalogManagerReplSetTest, GetDatabasesForShardValid) {
auto future = launchAsync([this] {
vector<string> dbs;
const auto status =
- catalogManager()->getDatabasesForShard(operationContext(), "shard0000", &dbs);
+ catalogClient()->getDatabasesForShard(operationContext(), "shard0000", &dbs);
ASSERT_OK(status);
return dbs;
@@ -1064,7 +1063,7 @@ TEST_F(CatalogManagerReplSetTest, GetDatabasesForShardInvalidDoc) {
auto future = launchAsync([this] {
vector<string> dbs;
const auto status =
- catalogManager()->getDatabasesForShard(operationContext(), "shard0000", &dbs);
+ catalogClient()->getDatabasesForShard(operationContext(), "shard0000", &dbs);
ASSERT_EQ(ErrorCodes::TypeMismatch, status);
ASSERT_EQ(0U, dbs.size());
@@ -1103,7 +1102,7 @@ TEST_F(CatalogManagerReplSetTest, GetTagsForCollection) {
vector<TagsType> tags;
ASSERT_OK(
- catalogManager()->getTagsForCollection(operationContext(), "TestDB.TestColl", &tags));
+ catalogClient()->getTagsForCollection(operationContext(), "TestDB.TestColl", &tags));
ASSERT_EQ(2U, tags.size());
return tags;
@@ -1138,7 +1137,7 @@ TEST_F(CatalogManagerReplSetTest, GetTagsForCollectionNoTags) {
vector<TagsType> tags;
ASSERT_OK(
- catalogManager()->getTagsForCollection(operationContext(), "TestDB.TestColl", &tags));
+ catalogClient()->getTagsForCollection(operationContext(), "TestDB.TestColl", &tags));
ASSERT_EQ(0U, tags.size());
return tags;
@@ -1155,7 +1154,7 @@ TEST_F(CatalogManagerReplSetTest, GetTagsForCollectionInvalidTag) {
auto future = launchAsync([this] {
vector<TagsType> tags;
Status status =
- catalogManager()->getTagsForCollection(operationContext(), "TestDB.TestColl", &tags);
+ catalogClient()->getTagsForCollection(operationContext(), "TestDB.TestColl", &tags);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status);
ASSERT_EQ(0U, tags.size());
@@ -1192,7 +1191,7 @@ TEST_F(CatalogManagerReplSetTest, GetTagForChunkOneTagFound) {
ASSERT_OK(chunk.validate());
auto future = launchAsync([this, chunk] {
- return assertGet(catalogManager()->getTagForChunk(operationContext(), "test.coll", chunk));
+ return assertGet(catalogClient()->getTagForChunk(operationContext(), "test.coll", chunk));
});
onFindCommand([this, chunk](const RemoteCommandRequest& request) {
@@ -1237,7 +1236,7 @@ TEST_F(CatalogManagerReplSetTest, GetTagForChunkNoTagFound) {
ASSERT_OK(chunk.validate());
auto future = launchAsync([this, chunk] {
- return assertGet(catalogManager()->getTagForChunk(operationContext(), "test.coll", chunk));
+ return assertGet(catalogClient()->getTagForChunk(operationContext(), "test.coll", chunk));
});
onFindCommand([this, chunk](const RemoteCommandRequest& request) {
@@ -1277,7 +1276,7 @@ TEST_F(CatalogManagerReplSetTest, GetTagForChunkInvalidTagDoc) {
auto future = launchAsync([this, chunk] {
const auto tagResult =
- catalogManager()->getTagForChunk(operationContext(), "test.coll", chunk);
+ catalogClient()->getTagForChunk(operationContext(), "test.coll", chunk);
ASSERT_EQ(ErrorCodes::FailedToParse, tagResult.getStatus());
});
@@ -1316,7 +1315,7 @@ TEST_F(CatalogManagerReplSetTest, UpdateDatabase) {
dbt.setSharded(true);
auto future = launchAsync([this, dbt] {
- auto status = catalogManager()->updateDatabase(operationContext(), dbt.getName(), dbt);
+ auto status = catalogClient()->updateDatabase(operationContext(), dbt.getName(), dbt);
ASSERT_OK(status);
});
@@ -1359,7 +1358,7 @@ TEST_F(CatalogManagerReplSetTest, UpdateDatabaseExceededTimeLimit) {
dbt.setSharded(false);
auto future = launchAsync([this, dbt] {
- auto status = catalogManager()->updateDatabase(operationContext(), dbt.getName(), dbt);
+ auto status = catalogClient()->updateDatabase(operationContext(), dbt.getName(), dbt);
ASSERT_EQ(ErrorCodes::ExceededTimeLimit, status);
});
@@ -1393,7 +1392,7 @@ TEST_F(CatalogManagerReplSetTest, ApplyChunkOpsDeprecatedSuccessful) {
ChunkVersion lastChunkVersion(0, 0, OID());
auto future = launchAsync([this, updateOps, preCondition, nss, lastChunkVersion] {
- auto status = catalogManager()->applyChunkOpsDeprecated(
+ auto status = catalogClient()->applyChunkOpsDeprecated(
operationContext(), updateOps, preCondition, nss, lastChunkVersion);
ASSERT_OK(status);
});
@@ -1432,7 +1431,7 @@ TEST_F(CatalogManagerReplSetTest, ApplyChunkOpsDeprecatedSuccessfulWithCheck) {
ChunkVersion lastChunkVersion(0, 0, OID());
auto future = launchAsync([this, updateOps, preCondition, nss, lastChunkVersion] {
- auto status = catalogManager()->applyChunkOpsDeprecated(
+ auto status = catalogClient()->applyChunkOpsDeprecated(
operationContext(), updateOps, preCondition, nss, lastChunkVersion);
ASSERT_OK(status);
});
@@ -1474,7 +1473,7 @@ TEST_F(CatalogManagerReplSetTest, ApplyChunkOpsDeprecatedFailedWithCheck) {
ChunkVersion lastChunkVersion(0, 0, OID());
auto future = launchAsync([this, updateOps, preCondition, nss, lastChunkVersion] {
- auto status = catalogManager()->applyChunkOpsDeprecated(
+ auto status = catalogClient()->applyChunkOpsDeprecated(
operationContext(), updateOps, preCondition, nss, lastChunkVersion);
ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, status);
});
@@ -1553,7 +1552,7 @@ TEST_F(CatalogManagerReplSetTest, createDatabaseSuccess) {
future = launchAsync([this, dbname] {
- Status status = catalogManager()->createDatabase(operationContext(), dbname);
+ Status status = catalogClient()->createDatabase(operationContext(), dbname);
ASSERT_OK(status);
});
@@ -1653,7 +1652,7 @@ TEST_F(CatalogManagerReplSetTest, createDatabaseDistLockHeld) {
},
Status(ErrorCodes::LockBusy, "lock already held"));
- Status status = catalogManager()->createDatabase(operationContext(), dbname);
+ Status status = catalogClient()->createDatabase(operationContext(), dbname);
ASSERT_EQUALS(ErrorCodes::LockBusy, status);
}
@@ -1671,7 +1670,7 @@ TEST_F(CatalogManagerReplSetTest, createDatabaseDBExists) {
auto future = launchAsync([this, dbname] {
- Status status = catalogManager()->createDatabase(operationContext(), dbname);
+ Status status = catalogClient()->createDatabase(operationContext(), dbname);
ASSERT_EQUALS(ErrorCodes::NamespaceExists, status);
});
@@ -1710,7 +1709,7 @@ TEST_F(CatalogManagerReplSetTest, createDatabaseDBExistsDifferentCase) {
auto future = launchAsync([this, dbname] {
- Status status = catalogManager()->createDatabase(operationContext(), dbname);
+ Status status = catalogClient()->createDatabase(operationContext(), dbname);
ASSERT_EQUALS(ErrorCodes::DatabaseDifferCase, status);
});
@@ -1748,7 +1747,7 @@ TEST_F(CatalogManagerReplSetTest, createDatabaseNoShards) {
auto future = launchAsync([this, dbname] {
- Status status = catalogManager()->createDatabase(operationContext(), dbname);
+ Status status = catalogClient()->createDatabase(operationContext(), dbname);
ASSERT_EQUALS(ErrorCodes::ShardNotFound, status);
});
@@ -1840,7 +1839,7 @@ TEST_F(CatalogManagerReplSetTest, createDatabaseDuplicateKeyOnInsert) {
future = launchAsync([this, dbname] {
- Status status = catalogManager()->createDatabase(operationContext(), dbname);
+ Status status = catalogClient()->createDatabase(operationContext(), dbname);
ASSERT_EQUALS(ErrorCodes::NamespaceExists, status);
});
@@ -1949,7 +1948,7 @@ TEST_F(CatalogManagerReplSetTest, EnableShardingNoDBExists) {
Status::OK());
auto future = launchAsync([this] {
- auto status = catalogManager()->enableSharding(operationContext(), "test");
+ auto status = catalogClient()->enableSharding(operationContext(), "test");
ASSERT_OK(status);
});
@@ -2029,7 +2028,7 @@ TEST_F(CatalogManagerReplSetTest, EnableShardingLockBusy) {
distLock()->expectLock([](StringData, StringData, Milliseconds, Milliseconds) {},
{ErrorCodes::LockBusy, "lock taken"});
- auto status = catalogManager()->enableSharding(operationContext(), "test");
+ auto status = catalogClient()->enableSharding(operationContext(), "test");
ASSERT_EQ(ErrorCodes::LockBusy, status.code());
}
@@ -2046,7 +2045,7 @@ TEST_F(CatalogManagerReplSetTest, EnableShardingDBExistsWithDifferentCase) {
distLock()->expectLock([](StringData, StringData, Milliseconds, Milliseconds) {}, Status::OK());
auto future = launchAsync([this] {
- auto status = catalogManager()->enableSharding(operationContext(), "test");
+ auto status = catalogClient()->enableSharding(operationContext(), "test");
ASSERT_EQ(ErrorCodes::DatabaseDifferCase, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -2073,7 +2072,7 @@ TEST_F(CatalogManagerReplSetTest, EnableShardingDBExists) {
distLock()->expectLock([](StringData, StringData, Milliseconds, Milliseconds) {}, Status::OK());
auto future = launchAsync([this] {
- auto status = catalogManager()->enableSharding(operationContext(), "test");
+ auto status = catalogClient()->enableSharding(operationContext(), "test");
ASSERT_OK(status);
});
@@ -2129,7 +2128,7 @@ TEST_F(CatalogManagerReplSetTest, EnableShardingFailsWhenTheDatabaseIsAlreadySha
distLock()->expectLock([](StringData, StringData, Milliseconds, Milliseconds) {}, Status::OK());
auto future = launchAsync([this] {
- auto status = catalogManager()->enableSharding(operationContext(), "test");
+ auto status = catalogClient()->enableSharding(operationContext(), "test");
ASSERT_EQ(status.code(), ErrorCodes::AlreadyInitialized);
});
@@ -2155,7 +2154,7 @@ TEST_F(CatalogManagerReplSetTest, EnableShardingDBExistsInvalidFormat) {
distLock()->expectLock([](StringData, StringData, Milliseconds, Milliseconds) {}, Status::OK());
auto future = launchAsync([this] {
- auto status = catalogManager()->enableSharding(operationContext(), "test");
+ auto status = catalogClient()->enableSharding(operationContext(), "test");
ASSERT_EQ(ErrorCodes::TypeMismatch, status.code());
});
@@ -2175,7 +2174,7 @@ TEST_F(CatalogManagerReplSetTest, EnableShardingNoDBExistsNoShards) {
distLock()->expectLock([](StringData, StringData, Milliseconds, Milliseconds) {}, Status::OK());
auto future = launchAsync([this] {
- auto status = catalogManager()->enableSharding(operationContext(), "test");
+ auto status = catalogClient()->enableSharding(operationContext(), "test");
ASSERT_EQ(ErrorCodes::ShardNotFound, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -2196,7 +2195,7 @@ TEST_F(CatalogManagerReplSetTest, BasicReadAfterOpTime) {
for (int x = 0; x < 3; x++) {
auto future = launchAsync([this] {
BSONObjBuilder responseBuilder;
- ASSERT_TRUE(getCatalogManagerReplicaSet()->runReadCommandForTest(
+ ASSERT_TRUE(getCatalogClient()->runReadCommandForTest(
operationContext(), "test", BSON("dummy" << 1), &responseBuilder));
});
@@ -2230,7 +2229,7 @@ TEST_F(CatalogManagerReplSetTest, ReadAfterOpTimeShouldNotGoBack) {
// Initialize the internal config OpTime
auto future1 = launchAsync([this] {
BSONObjBuilder responseBuilder;
- ASSERT_TRUE(getCatalogManagerReplicaSet()->runReadCommandForTest(
+ ASSERT_TRUE(getCatalogClient()->runReadCommandForTest(
operationContext(), "test", BSON("dummy" << 1), &responseBuilder));
});
@@ -2259,7 +2258,7 @@ TEST_F(CatalogManagerReplSetTest, ReadAfterOpTimeShouldNotGoBack) {
// Return an older OpTime
auto future2 = launchAsync([this] {
BSONObjBuilder responseBuilder;
- ASSERT_TRUE(getCatalogManagerReplicaSet()->runReadCommandForTest(
+ ASSERT_TRUE(getCatalogClient()->runReadCommandForTest(
operationContext(), "test", BSON("dummy" << 1), &responseBuilder));
});
@@ -2285,7 +2284,7 @@ TEST_F(CatalogManagerReplSetTest, ReadAfterOpTimeShouldNotGoBack) {
// Check that older OpTime does not override highest OpTime
auto future3 = launchAsync([this] {
BSONObjBuilder responseBuilder;
- ASSERT_TRUE(getCatalogManagerReplicaSet()->runReadCommandForTest(
+ ASSERT_TRUE(getCatalogClient()->runReadCommandForTest(
operationContext(), "test", BSON("dummy" << 1), &responseBuilder));
});
@@ -2311,7 +2310,7 @@ TEST_F(CatalogManagerReplSetTest, ReadAfterOpTimeFindThenCmd) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future1 = launchAsync([this] {
- ASSERT_OK(catalogManager()->getDatabase(operationContext(), "TestDB").getStatus());
+ ASSERT_OK(catalogClient()->getDatabase(operationContext(), "TestDB").getStatus());
});
OpTime highestOpTime;
@@ -2341,7 +2340,7 @@ TEST_F(CatalogManagerReplSetTest, ReadAfterOpTimeFindThenCmd) {
// Return an older OpTime
auto future2 = launchAsync([this] {
BSONObjBuilder responseBuilder;
- ASSERT_TRUE(getCatalogManagerReplicaSet()->runReadCommandForTest(
+ ASSERT_TRUE(getCatalogClient()->runReadCommandForTest(
operationContext(), "test", BSON("dummy" << 1), &responseBuilder));
});
@@ -2367,7 +2366,7 @@ TEST_F(CatalogManagerReplSetTest, ReadAfterOpTimeCmdThenFind) {
// Initialize the internal config OpTime
auto future1 = launchAsync([this] {
BSONObjBuilder responseBuilder;
- ASSERT_TRUE(getCatalogManagerReplicaSet()->runReadCommandForTest(
+ ASSERT_TRUE(getCatalogClient()->runReadCommandForTest(
operationContext(), "test", BSON("dummy" << 1), &responseBuilder));
});
@@ -2395,7 +2394,7 @@ TEST_F(CatalogManagerReplSetTest, ReadAfterOpTimeCmdThenFind) {
// Return an older OpTime
auto future2 = launchAsync([this] {
- ASSERT_OK(catalogManager()->getDatabase(operationContext(), "TestDB").getStatus());
+ ASSERT_OK(catalogClient()->getDatabase(operationContext(), "TestDB").getStatus());
});
const OpTime oldOpTime(Timestamp(3, 10), 5);
@@ -2422,7 +2421,7 @@ TEST_F(CatalogManagerReplSetTest, RetryOnReadCommandNetworkErrorFailsAtMaxRetry)
auto future1 = launchAsync([this] {
BSONObjBuilder responseBuilder;
- auto ok = getCatalogManagerReplicaSet()->runReadCommandForTest(
+ auto ok = getCatalogClient()->runReadCommandForTest(
operationContext(), "test", BSON("dummy" << 1), &responseBuilder);
ASSERT_FALSE(ok);
auto status = getStatusFromCommandResult(responseBuilder.obj());
@@ -2446,7 +2445,7 @@ TEST_F(CatalogManagerReplSetTest, RetryOnReadCommandNetworkErrorSucceedsAtMaxRet
auto future1 = launchAsync([this, expectedResult] {
BSONObjBuilder responseBuilder;
- auto ok = getCatalogManagerReplicaSet()->runReadCommandForTest(
+ auto ok = getCatalogClient()->runReadCommandForTest(
operationContext(), "test", BSON("dummy" << 1), &responseBuilder);
ASSERT_TRUE(ok);
auto response = responseBuilder.obj();
@@ -2468,7 +2467,7 @@ TEST_F(CatalogManagerReplSetTest, RetryOnFindCommandNetworkErrorFailsAtMaxRetry)
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
- auto status = catalogManager()->getDatabase(operationContext(), "TestDB");
+ auto status = catalogClient()->getDatabase(operationContext(), "TestDB");
ASSERT_EQ(ErrorCodes::HostUnreachable, status.getStatus().code());
});
@@ -2484,9 +2483,8 @@ TEST_F(CatalogManagerReplSetTest, RetryOnFindCommandNetworkErrorFailsAtMaxRetry)
TEST_F(CatalogManagerReplSetTest, RetryOnFindCommandNetworkErrorSucceedsAtMaxRetry) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
- auto future = launchAsync([&] {
- ASSERT_OK(catalogManager()->getDatabase(operationContext(), "TestDB").getStatus());
- });
+ auto future = launchAsync(
+ [&] { ASSERT_OK(catalogClient()->getDatabase(operationContext(), "TestDB").getStatus()); });
for (int i = 0; i < kMaxCommandRetry - 1; ++i) {
onFindCommand([](const RemoteCommandRequest&) {
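The hunks above only rename the test accessor; the fixture pattern itself is unchanged. For reference, a minimal sketch of that pattern, assuming the CatalogManagerReplSetTest fixture helpers used throughout this file (launchAsync(), onFindCommand(), operationContext(), future.timed_get() and kFutureTimeout); the response document is illustrative:
auto future = launchAsync([this] {
    // Runs on a separate thread; blocks until the mocked network replies.
    ASSERT_OK(catalogClient()->getDatabase(operationContext(), "TestDB").getStatus());
});
onFindCommand([](const RemoteCommandRequest& request) {
    // Serve the config.databases lookup issued by getDatabase().
    return vector<BSONObj>{BSON("_id"
                                << "TestDB"
                                << "primary"
                                << "shard0000"
                                << "partitioned"
                                << true)};
});
future.timed_get(kFutureTimeout);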
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_upgrade_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_upgrade_test.cpp
index 43946d897c2..c8dc6b939fe 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_upgrade_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_upgrade_test.cpp
@@ -36,8 +36,8 @@
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
#include "mongo/s/catalog/config_server_version.h"
-#include "mongo/s/catalog/replset/catalog_manager_replica_set.h"
#include "mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h"
+#include "mongo/s/catalog/replset/sharding_catalog_client_impl.h"
#include "mongo/s/catalog/type_config_version.h"
#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/s/write_ops/batched_update_request.h"
@@ -63,7 +63,7 @@ TEST_F(CatalogManagerReplSetTestFixture, UpgradeNotNeeded) {
configTargeter()->setFindHostReturnValue(HostAndPort("config:123"));
auto future =
- launchAsync([this] { ASSERT_OK(catalogManager()->initConfigVersion(operationContext())); });
+ launchAsync([this] { ASSERT_OK(catalogClient()->initConfigVersion(operationContext())); });
onFindCommand([this](const RemoteCommandRequest& request) {
ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
@@ -92,7 +92,7 @@ TEST_F(CatalogManagerReplSetTestFixture, InitTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "Bad test network"});
auto future = launchAsync([this] {
- auto status = catalogManager()->initConfigVersion(operationContext());
+ auto status = catalogClient()->initConfigVersion(operationContext());
ASSERT_EQ(ErrorCodes::InternalError, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -104,7 +104,7 @@ TEST_F(CatalogManagerReplSetTestFixture, InitIncompatibleVersion) {
configTargeter()->setFindHostReturnValue(HostAndPort("config:123"));
auto future = launchAsync([this] {
- auto status = catalogManager()->initConfigVersion(operationContext());
+ auto status = catalogClient()->initConfigVersion(operationContext());
ASSERT_EQ(ErrorCodes::IncompatibleShardingConfigVersion, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -127,7 +127,7 @@ TEST_F(CatalogManagerReplSetTestFixture, InitClusterMultiVersion) {
configTargeter()->setFindHostReturnValue(HostAndPort("config:123"));
auto future = launchAsync([this] {
- auto status = catalogManager()->initConfigVersion(operationContext());
+ auto status = catalogClient()->initConfigVersion(operationContext());
ASSERT_EQ(ErrorCodes::RemoteValidationError, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -158,7 +158,7 @@ TEST_F(CatalogManagerReplSetTestFixture, InitInvalidConfigVersionDoc) {
configTargeter()->setFindHostReturnValue(HostAndPort("config:123"));
auto future = launchAsync([this] {
- auto status = catalogManager()->initConfigVersion(operationContext());
+ auto status = catalogClient()->initConfigVersion(operationContext());
ASSERT_EQ(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -181,7 +181,7 @@ TEST_F(CatalogManagerReplSetTestFixture, InitNoVersionDocEmptyConfig) {
configTargeter()->setFindHostReturnValue(HostAndPort("config:123"));
auto future =
- launchAsync([this] { ASSERT_OK(catalogManager()->initConfigVersion(operationContext())); });
+ launchAsync([this] { ASSERT_OK(catalogClient()->initConfigVersion(operationContext())); });
onFindCommand([](const RemoteCommandRequest& request) { return vector<BSONObj>{}; });
@@ -230,7 +230,7 @@ TEST_F(CatalogManagerReplSetTestFixture, InitConfigWriteError) {
configTargeter()->setFindHostReturnValue(HostAndPort("config:123"));
auto future = launchAsync([this] {
- auto status = catalogManager()->initConfigVersion(operationContext());
+ auto status = catalogClient()->initConfigVersion(operationContext());
ASSERT_EQ(ErrorCodes::ExceededTimeLimit, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -257,7 +257,7 @@ TEST_F(CatalogManagerReplSetTestFixture, InitVersionTooOld) {
configTargeter()->setFindHostReturnValue(HostAndPort("config:123"));
auto future = launchAsync([this] {
- auto status = catalogManager()->initConfigVersion(operationContext());
+ auto status = catalogClient()->initConfigVersion(operationContext());
ASSERT_EQ(ErrorCodes::IncompatibleShardingConfigVersion, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -284,7 +284,7 @@ TEST_F(CatalogManagerReplSetTestFixture, InitVersionDuplicateKeyNoOpAfterRetry)
configTargeter()->setFindHostReturnValue(HostAndPort("config:123"));
auto future =
- launchAsync([this] { ASSERT_OK(catalogManager()->initConfigVersion(operationContext())); });
+ launchAsync([this] { ASSERT_OK(catalogClient()->initConfigVersion(operationContext())); });
onFindCommand([](const RemoteCommandRequest& request) { return vector<BSONObj>{}; });
@@ -335,7 +335,7 @@ TEST_F(CatalogManagerReplSetTestFixture, InitVersionDuplicateKeyNoConfigVersionA
configTargeter()->setFindHostReturnValue(HostAndPort("config:123"));
auto future =
- launchAsync([this] { ASSERT_OK(catalogManager()->initConfigVersion(operationContext())); });
+ launchAsync([this] { ASSERT_OK(catalogClient()->initConfigVersion(operationContext())); });
onFindCommand([](const RemoteCommandRequest& request) { return vector<BSONObj>{}; });
@@ -406,7 +406,7 @@ TEST_F(CatalogManagerReplSetTestFixture, InitVersionDuplicateKeyTooNewAfterRetry
configTargeter()->setFindHostReturnValue(HostAndPort("config:123"));
auto future = launchAsync([this] {
- auto status = catalogManager()->initConfigVersion(operationContext());
+ auto status = catalogClient()->initConfigVersion(operationContext());
ASSERT_EQ(ErrorCodes::IncompatibleShardingConfigVersion, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -460,7 +460,7 @@ TEST_F(CatalogManagerReplSetTestFixture, InitVersionDuplicateKeyMaxRetry) {
configTargeter()->setFindHostReturnValue(HostAndPort("config:123"));
auto future = launchAsync([this] {
- auto status = catalogManager()->initConfigVersion(operationContext());
+ auto status = catalogClient()->initConfigVersion(operationContext());
ASSERT_EQ(ErrorCodes::IncompatibleShardingConfigVersion, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -495,7 +495,7 @@ TEST_F(CatalogManagerReplSetTestFixture, InitVersionUpsertNoMatchNoOpAfterRetry)
configTargeter()->setFindHostReturnValue(HostAndPort("config:123"));
auto future =
- launchAsync([this] { ASSERT_OK(catalogManager()->initConfigVersion(operationContext())); });
+ launchAsync([this] { ASSERT_OK(catalogClient()->initConfigVersion(operationContext())); });
onFindCommand([](const RemoteCommandRequest& request) { return vector<BSONObj>{}; });
@@ -541,7 +541,7 @@ TEST_F(CatalogManagerReplSetTestFixture, InitVersionUpsertNoMatchNoConfigVersion
configTargeter()->setFindHostReturnValue(HostAndPort("config:123"));
auto future =
- launchAsync([this] { ASSERT_OK(catalogManager()->initConfigVersion(operationContext())); });
+ launchAsync([this] { ASSERT_OK(catalogClient()->initConfigVersion(operationContext())); });
onFindCommand([](const RemoteCommandRequest& request) { return vector<BSONObj>{}; });
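Every upgrade test above drives initConfigVersion() through the same skeleton; assembled verbatim from the hunks, the empty-config success path is:
auto future =
    launchAsync([this] { ASSERT_OK(catalogClient()->initConfigVersion(operationContext())); });
// An empty find result on config.version means a fresh cluster, so
// initConfigVersion() inserts a new version document and succeeds.
onFindCommand([](const RemoteCommandRequest& request) { return vector<BSONObj>{}; });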
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_write_retry_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_write_retry_test.cpp
index 2f00e9533f6..a5b7c8034c9 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_write_retry_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_write_retry_test.cpp
@@ -43,8 +43,8 @@
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
#include "mongo/s/catalog/dist_lock_manager_mock.h"
-#include "mongo/s/catalog/replset/catalog_manager_replica_set.h"
#include "mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h"
+#include "mongo/s/catalog/replset/sharding_catalog_client_impl.h"
#include "mongo/s/catalog/type_changelog.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
@@ -84,7 +84,7 @@ TEST_F(InsertRetryTest, RetryOnInterruptedAndNetworkErrorSuccess) {
<< "TestValue");
auto future = launchAsync([&] {
- Status status = catalogManager()->insertConfigDocument(
+ Status status = catalogClient()->insertConfigDocument(
operationContext(), kTestNamespace.ns(), objToInsert);
ASSERT_OK(status);
});
@@ -113,7 +113,7 @@ TEST_F(InsertRetryTest, RetryOnNetworkErrorFails) {
<< "TestValue");
auto future = launchAsync([&] {
- Status status = catalogManager()->insertConfigDocument(
+ Status status = catalogClient()->insertConfigDocument(
operationContext(), kTestNamespace.ns(), objToInsert);
ASSERT_EQ(ErrorCodes::NetworkTimeout, status.code());
});
@@ -145,7 +145,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterNetworkErrorMatch) {
<< "TestValue");
auto future = launchAsync([&] {
- Status status = catalogManager()->insertConfigDocument(
+ Status status = catalogClient()->insertConfigDocument(
operationContext(), kTestNamespace.ns(), objToInsert);
ASSERT_OK(status);
});
@@ -180,7 +180,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterNetworkErrorNotFound) {
<< "TestValue");
auto future = launchAsync([&] {
- Status status = catalogManager()->insertConfigDocument(
+ Status status = catalogClient()->insertConfigDocument(
operationContext(), kTestNamespace.ns(), objToInsert);
ASSERT_EQ(ErrorCodes::DuplicateKey, status.code());
});
@@ -215,7 +215,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterNetworkErrorMismatch) {
<< "TestValue");
auto future = launchAsync([&] {
- Status status = catalogManager()->insertConfigDocument(
+ Status status = catalogClient()->insertConfigDocument(
operationContext(), kTestNamespace.ns(), objToInsert);
ASSERT_EQ(ErrorCodes::DuplicateKey, status.code());
});
@@ -251,7 +251,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterWriteConcernFailureMatch) {
<< "TestValue");
auto future = launchAsync([&] {
- Status status = catalogManager()->insertConfigDocument(
+ Status status = catalogClient()->insertConfigDocument(
operationContext(), kTestNamespace.ns(), objToInsert);
ASSERT_OK(status);
});
@@ -308,7 +308,7 @@ TEST_F(UpdateRetryTest, OperationInterruptedDueToPrimaryStepDown) {
<< "NewTestValue"));
auto future = launchAsync([&] {
- auto status = catalogManager()->updateConfigDocument(
+ auto status = catalogClient()->updateConfigDocument(
operationContext(), kTestNamespace.ns(), objToUpdate, updateExpr, false);
ASSERT_OK(status);
});
@@ -355,7 +355,7 @@ TEST_F(UpdateRetryTest, WriteConcernFailure) {
<< "NewTestValue"));
auto future = launchAsync([&] {
- auto status = catalogManager()->updateConfigDocument(
+ auto status = catalogClient()->updateConfigDocument(
operationContext(), kTestNamespace.ns(), objToUpdate, updateExpr, false);
ASSERT_OK(status);
});
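Taken together, the insert-retry tests above pin down the contract the rename must preserve: a DuplicateKey error that follows a network error counts as success only when the document already on the config server matches the one being inserted (the *Match tests ASSERT_OK; the *NotFound and *Mismatch tests expect ErrorCodes::DuplicateKey). A condensed sketch of the success case, using only calls visible in the hunks:
auto future = launchAsync([&] {
    Status status = catalogClient()->insertConfigDocument(
        operationContext(), kTestNamespace.ns(), objToInsert);
    ASSERT_OK(status);  // the retried insert is deduplicated, not failed
});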
diff --git a/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp b/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp
index 5d538ea3407..3b8331495e5 100644
--- a/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp
+++ b/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp
@@ -46,8 +46,8 @@
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/catalog_manager_mock.h"
#include "mongo/s/catalog/replset/dist_lock_catalog_impl.h"
+#include "mongo/s/catalog/sharding_catalog_client_mock.h"
#include "mongo/s/catalog/type_lockpings.h"
#include "mongo/s/catalog/type_locks.h"
#include "mongo/s/client/shard_factory.h"
@@ -160,7 +160,7 @@ private:
_distLockCatalog = stdx::make_unique<DistLockCatalogImpl>(shardRegistry.get());
- grid.init(stdx::make_unique<CatalogManagerMock>(),
+ grid.init(stdx::make_unique<ShardingCatalogClientMock>(),
stdx::make_unique<CatalogCache>(),
std::move(shardRegistry),
std::unique_ptr<ClusterCursorManager>{nullptr},
@@ -180,7 +180,7 @@ private:
std::unique_ptr<executor::NetworkTestEnv> _networkTestEnv;
- CatalogManagerMock _catalogMgr;
+ ShardingCatalogClientMock _catalogMgr;
std::unique_ptr<DistLockCatalogImpl> _distLockCatalog;
OperationContextNoop _txn;
diff --git a/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp b/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp
index ab3e7cb240b..dac97dc4369 100644
--- a/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp
+++ b/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp
@@ -50,8 +50,8 @@
#include "mongo/executor/task_executor_pool.h"
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/s/catalog/dist_lock_catalog_mock.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_lockpings.h"
#include "mongo/s/catalog/type_locks.h"
#include "mongo/s/client/shard_factory.h"
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp b/src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp
index 8237359140e..f78d6a7ba62 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp
@@ -30,7 +30,7 @@
#include "mongo/platform/basic.h"
-#include "mongo/s/catalog/replset/catalog_manager_replica_set.h"
+#include "mongo/s/catalog/replset/sharding_catalog_client_impl.h"
#include <iomanip>
#include <pcrecpp.h>
@@ -162,7 +162,7 @@ Status _processBatchWriteResponse(StatusWith<Shard::CommandResponse> response,
} // namespace
-StatusWith<ShardType> CatalogManagerReplicaSet::_validateHostAsShard(
+StatusWith<ShardType> ShardingCatalogClientImpl::_validateHostAsShard(
OperationContext* txn,
ShardRegistry* shardRegistry,
const ConnectionString& connectionString,
@@ -346,7 +346,7 @@ StatusWith<ShardType> CatalogManagerReplicaSet::_validateHostAsShard(
return shard;
}
-StatusWith<std::vector<std::string>> CatalogManagerReplicaSet::_getDBNamesListFromShard(
+StatusWith<std::vector<std::string>> ShardingCatalogClientImpl::_getDBNamesListFromShard(
OperationContext* txn, ShardRegistry* shardRegistry, const ConnectionString& connectionString) {
// TODO: Don't create a detached Shard object, create a detached RemoteCommandTargeter instead.
const std::shared_ptr<Shard> shardConn{
@@ -379,15 +379,15 @@ StatusWith<std::vector<std::string>> CatalogManagerReplicaSet::_getDBNamesListFr
return dbNames;
}
-CatalogManagerReplicaSet::CatalogManagerReplicaSet(
+ShardingCatalogClientImpl::ShardingCatalogClientImpl(
std::unique_ptr<DistLockManager> distLockManager,
std::unique_ptr<executor::TaskExecutor> addShardExecutor)
: _distLockManager(std::move(distLockManager)),
_executorForAddShard(std::move(addShardExecutor)) {}
-CatalogManagerReplicaSet::~CatalogManagerReplicaSet() = default;
+ShardingCatalogClientImpl::~ShardingCatalogClientImpl() = default;
-Status CatalogManagerReplicaSet::startup() {
+Status ShardingCatalogClientImpl::startup() {
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_started) {
return Status::OK();
@@ -398,8 +398,8 @@ Status CatalogManagerReplicaSet::startup() {
return Status::OK();
}
-void CatalogManagerReplicaSet::shutDown(OperationContext* txn) {
- LOG(1) << "CatalogManagerReplicaSet::shutDown() called.";
+void ShardingCatalogClientImpl::shutDown(OperationContext* txn) {
+ LOG(1) << "ShardingCatalogClientImpl::shutDown() called.";
{
stdx::lock_guard<stdx::mutex> lk(_mutex);
_inShutdown = true;
@@ -411,7 +411,7 @@ void CatalogManagerReplicaSet::shutDown(OperationContext* txn) {
_executorForAddShard->join();
}
-StatusWith<Shard::CommandResponse> CatalogManagerReplicaSet::_runCommandForAddShard(
+StatusWith<Shard::CommandResponse> ShardingCatalogClientImpl::_runCommandForAddShard(
OperationContext* txn,
RemoteCommandTargeter* targeter,
const std::string& dbName,
@@ -456,10 +456,11 @@ StatusWith<Shard::CommandResponse> CatalogManagerReplicaSet::_runCommandForAddSh
std::move(writeConcernStatus));
}
-StatusWith<string> CatalogManagerReplicaSet::addShard(OperationContext* txn,
- const std::string* shardProposedName,
- const ConnectionString& shardConnectionString,
- const long long maxSize) {
+StatusWith<string> ShardingCatalogClientImpl::addShard(
+ OperationContext* txn,
+ const std::string* shardProposedName,
+ const ConnectionString& shardConnectionString,
+ const long long maxSize) {
// Validate that the specified connection string can serve as a shard at all
auto shardStatus =
_validateHostAsShard(txn, grid.shardRegistry(), shardConnectionString, shardProposedName);
@@ -601,9 +602,9 @@ StatusWith<string> CatalogManagerReplicaSet::addShard(OperationContext* txn,
return shardType.getName();
}
-Status CatalogManagerReplicaSet::updateCollection(OperationContext* txn,
- const std::string& collNs,
- const CollectionType& coll) {
+Status ShardingCatalogClientImpl::updateCollection(OperationContext* txn,
+ const std::string& collNs,
+ const CollectionType& coll) {
fassert(28634, coll.validate());
auto status = updateConfigDocument(
@@ -617,9 +618,9 @@ Status CatalogManagerReplicaSet::updateCollection(OperationContext* txn,
return Status::OK();
}
-Status CatalogManagerReplicaSet::updateDatabase(OperationContext* txn,
- const std::string& dbName,
- const DatabaseType& db) {
+Status ShardingCatalogClientImpl::updateDatabase(OperationContext* txn,
+ const std::string& dbName,
+ const DatabaseType& db) {
fassert(28616, db.validate());
auto status = updateConfigDocument(
@@ -633,7 +634,7 @@ Status CatalogManagerReplicaSet::updateDatabase(OperationContext* txn,
return Status::OK();
}
-Status CatalogManagerReplicaSet::createDatabase(OperationContext* txn, const std::string& dbName) {
+Status ShardingCatalogClientImpl::createDatabase(OperationContext* txn, const std::string& dbName) {
invariant(nsIsDbOnly(dbName));
// The admin and config databases should never be explicitly created. They "just exist",
@@ -676,10 +677,10 @@ Status CatalogManagerReplicaSet::createDatabase(OperationContext* txn, const std
return status;
}
-Status CatalogManagerReplicaSet::logAction(OperationContext* txn,
- const std::string& what,
- const std::string& ns,
- const BSONObj& detail) {
+Status ShardingCatalogClientImpl::logAction(OperationContext* txn,
+ const std::string& what,
+ const std::string& ns,
+ const BSONObj& detail) {
if (_actionLogCollectionCreated.load() == 0) {
Status result = _createCappedConfigCollection(
txn, kActionLogCollectionName, kActionLogCollectionSizeMB);
@@ -694,10 +695,10 @@ Status CatalogManagerReplicaSet::logAction(OperationContext* txn,
return _log(txn, kActionLogCollectionName, what, ns, detail);
}
-Status CatalogManagerReplicaSet::logChange(OperationContext* txn,
- const std::string& what,
- const std::string& ns,
- const BSONObj& detail) {
+Status ShardingCatalogClientImpl::logChange(OperationContext* txn,
+ const std::string& what,
+ const std::string& ns,
+ const BSONObj& detail) {
if (_changeLogCollectionCreated.load() == 0) {
Status result = _createCappedConfigCollection(
txn, kChangeLogCollectionName, kChangeLogCollectionSizeMB);
@@ -713,7 +714,7 @@ Status CatalogManagerReplicaSet::logChange(OperationContext* txn,
}
// static
-StatusWith<ShardId> CatalogManagerReplicaSet::_selectShardForNewDatabase(
+StatusWith<ShardId> ShardingCatalogClientImpl::_selectShardForNewDatabase(
OperationContext* txn, ShardRegistry* shardRegistry) {
vector<ShardId> allShardIds;
@@ -751,7 +752,7 @@ StatusWith<ShardId> CatalogManagerReplicaSet::_selectShardForNewDatabase(
return candidateShardId;
}
-Status CatalogManagerReplicaSet::enableSharding(OperationContext* txn, const std::string& dbName) {
+Status ShardingCatalogClientImpl::enableSharding(OperationContext* txn, const std::string& dbName) {
invariant(nsIsDbOnly(dbName));
DatabaseType db;
@@ -796,11 +797,11 @@ Status CatalogManagerReplicaSet::enableSharding(OperationContext* txn, const std
return updateDatabase(txn, dbName, db);
}
-Status CatalogManagerReplicaSet::_log(OperationContext* txn,
- const StringData& logCollName,
- const std::string& what,
- const std::string& operationNS,
- const BSONObj& detail) {
+Status ShardingCatalogClientImpl::_log(OperationContext* txn,
+ const StringData& logCollName,
+ const std::string& what,
+ const std::string& operationNS,
+ const BSONObj& detail) {
Date_t now = Grid::get(txn)->getNetwork()->now();
const std::string hostName = Grid::get(txn)->getNetwork()->getHostName();
const string changeId = str::stream() << hostName << "-" << now.toString() << "-" << OID::gen();
@@ -827,17 +828,17 @@ Status CatalogManagerReplicaSet::_log(OperationContext* txn,
return result;
}
-StatusWith<DistLockManager::ScopedDistLock> CatalogManagerReplicaSet::distLock(
+StatusWith<DistLockManager::ScopedDistLock> ShardingCatalogClientImpl::distLock(
OperationContext* txn, StringData name, StringData whyMessage, Milliseconds waitFor) {
return getDistLockManager()->lock(txn, name, whyMessage, waitFor);
}
-Status CatalogManagerReplicaSet::shardCollection(OperationContext* txn,
- const string& ns,
- const ShardKeyPattern& fieldsAndOrder,
- bool unique,
- const vector<BSONObj>& initPoints,
- const set<ShardId>& initShardIds) {
+Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
+ const string& ns,
+ const ShardKeyPattern& fieldsAndOrder,
+ bool unique,
+ const vector<BSONObj>& initPoints,
+ const set<ShardId>& initShardIds) {
// Lock the collection globally so that no other mongos can try to shard or drop the collection
// at the same time.
auto scopedDistLock = getDistLockManager()->lock(txn, ns, "shardCollection");
@@ -938,8 +939,8 @@ Status CatalogManagerReplicaSet::shardCollection(OperationContext* txn,
return Status::OK();
}
-StatusWith<ShardDrainingStatus> CatalogManagerReplicaSet::removeShard(OperationContext* txn,
- const std::string& name) {
+StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(OperationContext* txn,
+ const std::string& name) {
// Check preconditions for removing the shard
auto countStatus = _runCountCommandOnConfig(
txn,
@@ -1035,7 +1036,7 @@ StatusWith<ShardDrainingStatus> CatalogManagerReplicaSet::removeShard(OperationC
return ShardDrainingStatus::COMPLETED;
}
-StatusWith<repl::OpTimeWith<DatabaseType>> CatalogManagerReplicaSet::getDatabase(
+StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::getDatabase(
OperationContext* txn, const std::string& dbName) {
if (!NamespaceString::validDBName(dbName, NamespaceString::DollarInDbNameBehavior::Allow)) {
return {ErrorCodes::InvalidNamespace, stream() << dbName << " is not a valid db name"};
@@ -1069,7 +1070,7 @@ StatusWith<repl::OpTimeWith<DatabaseType>> CatalogManagerReplicaSet::getDatabase
return result;
}
-StatusWith<repl::OpTimeWith<DatabaseType>> CatalogManagerReplicaSet::_fetchDatabaseMetadata(
+StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::_fetchDatabaseMetadata(
OperationContext* txn, const std::string& dbName, const ReadPreferenceSetting& readPref) {
dassert(dbName != "admin" && dbName != "config");
@@ -1098,7 +1099,7 @@ StatusWith<repl::OpTimeWith<DatabaseType>> CatalogManagerReplicaSet::_fetchDatab
return repl::OpTimeWith<DatabaseType>(parseStatus.getValue(), docsWithOpTime.opTime);
}
-StatusWith<repl::OpTimeWith<CollectionType>> CatalogManagerReplicaSet::getCollection(
+StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientImpl::getCollection(
OperationContext* txn, const std::string& collNs) {
auto statusFind = _exhaustiveFindOnConfig(txn,
kConfigReadSelector,
@@ -1127,10 +1128,10 @@ StatusWith<repl::OpTimeWith<CollectionType>> CatalogManagerReplicaSet::getCollec
return repl::OpTimeWith<CollectionType>(parseStatus.getValue(), retOpTimePair.opTime);
}
-Status CatalogManagerReplicaSet::getCollections(OperationContext* txn,
- const std::string* dbName,
- std::vector<CollectionType>* collections,
- OpTime* opTime) {
+Status ShardingCatalogClientImpl::getCollections(OperationContext* txn,
+ const std::string* dbName,
+ std::vector<CollectionType>* collections,
+ OpTime* opTime) {
BSONObjBuilder b;
if (dbName) {
invariant(!dbName->empty());
@@ -1172,7 +1173,7 @@ Status CatalogManagerReplicaSet::getCollections(OperationContext* txn,
return Status::OK();
}
-Status CatalogManagerReplicaSet::dropCollection(OperationContext* txn, const NamespaceString& ns) {
+Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const NamespaceString& ns) {
logChange(txn, "dropCollection.start", ns.ns(), BSONObj());
auto shardsStatus = getAllShards(txn);
@@ -1329,8 +1330,8 @@ Status CatalogManagerReplicaSet::dropCollection(OperationContext* txn, const Nam
return Status::OK();
}
-StatusWith<BSONObj> CatalogManagerReplicaSet::getGlobalSettings(OperationContext* txn,
- StringData key) {
+StatusWith<BSONObj> ShardingCatalogClientImpl::getGlobalSettings(OperationContext* txn,
+ StringData key) {
auto findStatus = _exhaustiveFindOnConfig(
txn, kConfigReadSelector, kSettingsNamespace, BSON("_id" << key), BSONObj(), 1);
if (!findStatus.isOK()) {
@@ -1347,9 +1348,9 @@ StatusWith<BSONObj> CatalogManagerReplicaSet::getGlobalSettings(OperationContext
return docs.front();
}
-Status CatalogManagerReplicaSet::getDatabasesForShard(OperationContext* txn,
- const string& shardName,
- vector<string>* dbs) {
+Status ShardingCatalogClientImpl::getDatabasesForShard(OperationContext* txn,
+ const string& shardName,
+ vector<string>* dbs) {
auto findStatus = _exhaustiveFindOnConfig(txn,
kConfigReadSelector,
NamespaceString(DatabaseType::ConfigNS),
@@ -1374,12 +1375,12 @@ Status CatalogManagerReplicaSet::getDatabasesForShard(OperationContext* txn,
return Status::OK();
}
-Status CatalogManagerReplicaSet::getChunks(OperationContext* txn,
- const BSONObj& query,
- const BSONObj& sort,
- boost::optional<int> limit,
- vector<ChunkType>* chunks,
- OpTime* opTime) {
+Status ShardingCatalogClientImpl::getChunks(OperationContext* txn,
+ const BSONObj& query,
+ const BSONObj& sort,
+ boost::optional<int> limit,
+ vector<ChunkType>* chunks,
+ OpTime* opTime) {
chunks->clear();
// Convert boost::optional<int> to boost::optional<long long>.
@@ -1412,9 +1413,9 @@ Status CatalogManagerReplicaSet::getChunks(OperationContext* txn,
return Status::OK();
}
-Status CatalogManagerReplicaSet::getTagsForCollection(OperationContext* txn,
- const std::string& collectionNs,
- std::vector<TagsType>* tags) {
+Status ShardingCatalogClientImpl::getTagsForCollection(OperationContext* txn,
+ const std::string& collectionNs,
+ std::vector<TagsType>* tags) {
tags->clear();
auto findStatus = _exhaustiveFindOnConfig(txn,
@@ -1441,9 +1442,9 @@ Status CatalogManagerReplicaSet::getTagsForCollection(OperationContext* txn,
return Status::OK();
}
-StatusWith<string> CatalogManagerReplicaSet::getTagForChunk(OperationContext* txn,
- const std::string& collectionNs,
- const ChunkType& chunk) {
+StatusWith<string> ShardingCatalogClientImpl::getTagForChunk(OperationContext* txn,
+ const std::string& collectionNs,
+ const ChunkType& chunk) {
BSONObj query =
BSON(TagsType::ns(collectionNs) << TagsType::min() << BSON("$lte" << chunk.getMin())
<< TagsType::max()
@@ -1472,7 +1473,7 @@ StatusWith<string> CatalogManagerReplicaSet::getTagForChunk(OperationContext* tx
return tagsResult.getValue().getTag();
}
-StatusWith<repl::OpTimeWith<std::vector<ShardType>>> CatalogManagerReplicaSet::getAllShards(
+StatusWith<repl::OpTimeWith<std::vector<ShardType>>> ShardingCatalogClientImpl::getAllShards(
OperationContext* txn) {
std::vector<ShardType> shards;
auto findStatus = _exhaustiveFindOnConfig(txn,
@@ -1512,11 +1513,11 @@ StatusWith<repl::OpTimeWith<std::vector<ShardType>>> CatalogManagerReplicaSet::g
findStatus.getValue().opTime};
}
-bool CatalogManagerReplicaSet::runUserManagementWriteCommand(OperationContext* txn,
- const std::string& commandName,
- const std::string& dbname,
- const BSONObj& cmdObj,
- BSONObjBuilder* result) {
+bool ShardingCatalogClientImpl::runUserManagementWriteCommand(OperationContext* txn,
+ const std::string& commandName,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* result) {
BSONObj cmdToRun = cmdObj;
{
// Make sure that if the command has a write concern, it is w:1 or w:majority, and
@@ -1582,10 +1583,10 @@ bool CatalogManagerReplicaSet::runUserManagementWriteCommand(OperationContext* t
return true;
}
-bool CatalogManagerReplicaSet::runReadCommandForTest(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- BSONObjBuilder* result) {
+bool ShardingCatalogClientImpl::runReadCommandForTest(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* result) {
BSONObjBuilder cmdBuilder;
cmdBuilder.appendElements(cmdObj);
_appendReadConcern(&cmdBuilder);
@@ -1600,10 +1601,10 @@ bool CatalogManagerReplicaSet::runReadCommandForTest(OperationContext* txn,
return Command::appendCommandStatus(*result, resultStatus.getStatus());
}
-bool CatalogManagerReplicaSet::runUserManagementReadCommand(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- BSONObjBuilder* result) {
+bool ShardingCatalogClientImpl::runUserManagementReadCommand(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* result) {
auto resultStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->runCommand(
txn, kConfigPrimaryPreferredSelector, dbname, cmdObj, Shard::RetryPolicy::kIdempotent);
if (resultStatus.isOK()) {
@@ -1614,11 +1615,11 @@ bool CatalogManagerReplicaSet::runUserManagementReadCommand(OperationContext* tx
return Command::appendCommandStatus(*result, resultStatus.getStatus());
}
-Status CatalogManagerReplicaSet::applyChunkOpsDeprecated(OperationContext* txn,
- const BSONArray& updateOps,
- const BSONArray& preCondition,
- const std::string& nss,
- const ChunkVersion& lastChunkVersion) {
+Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* txn,
+ const BSONArray& updateOps,
+ const BSONArray& preCondition,
+ const std::string& nss,
+ const ChunkVersion& lastChunkVersion) {
BSONObj cmd =
BSON("applyOps" << updateOps << "preCondition" << preCondition << kWriteConcernField
<< kMajorityWriteConcern.toBSON());
@@ -1689,14 +1690,14 @@ Status CatalogManagerReplicaSet::applyChunkOpsDeprecated(OperationContext* txn,
return Status::OK();
}
-DistLockManager* CatalogManagerReplicaSet::getDistLockManager() {
+DistLockManager* ShardingCatalogClientImpl::getDistLockManager() {
invariant(_distLockManager);
return _distLockManager.get();
}
-void CatalogManagerReplicaSet::writeConfigServerDirect(OperationContext* txn,
- const BatchedCommandRequest& batchRequest,
- BatchedCommandResponse* batchResponse) {
+void ShardingCatalogClientImpl::writeConfigServerDirect(OperationContext* txn,
+ const BatchedCommandRequest& batchRequest,
+ BatchedCommandResponse* batchResponse) {
// We only support batch sizes of one for config writes
if (batchRequest.sizeWriteOps() != 1) {
toBatchError(Status(ErrorCodes::InvalidOptions,
@@ -1710,10 +1711,10 @@ void CatalogManagerReplicaSet::writeConfigServerDirect(OperationContext* txn,
_runBatchWriteCommand(txn, batchRequest, batchResponse, Shard::RetryPolicy::kNotIdempotent);
}
-void CatalogManagerReplicaSet::_runBatchWriteCommand(OperationContext* txn,
- const BatchedCommandRequest& batchRequest,
- BatchedCommandResponse* batchResponse,
- Shard::RetryPolicy retryPolicy) {
+void ShardingCatalogClientImpl::_runBatchWriteCommand(OperationContext* txn,
+ const BatchedCommandRequest& batchRequest,
+ BatchedCommandResponse* batchResponse,
+ Shard::RetryPolicy retryPolicy) {
const std::string dbname = batchRequest.getNS().db().toString();
invariant(dbname == "config" || dbname == "admin");
@@ -1744,9 +1745,9 @@ void CatalogManagerReplicaSet::_runBatchWriteCommand(OperationContext* txn,
MONGO_UNREACHABLE;
}
-Status CatalogManagerReplicaSet::insertConfigDocument(OperationContext* txn,
- const std::string& ns,
- const BSONObj& doc) {
+Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* txn,
+ const std::string& ns,
+ const BSONObj& doc) {
const NamespaceString nss(ns);
invariant(nss.db() == "config");
@@ -1817,11 +1818,11 @@ Status CatalogManagerReplicaSet::insertConfigDocument(OperationContext* txn,
MONGO_UNREACHABLE;
}
-StatusWith<bool> CatalogManagerReplicaSet::updateConfigDocument(OperationContext* txn,
- const string& ns,
- const BSONObj& query,
- const BSONObj& update,
- bool upsert) {
+StatusWith<bool> ShardingCatalogClientImpl::updateConfigDocument(OperationContext* txn,
+ const string& ns,
+ const BSONObj& query,
+ const BSONObj& update,
+ bool upsert) {
const NamespaceString nss(ns);
invariant(nss.db() == "config");
@@ -1854,9 +1855,9 @@ StatusWith<bool> CatalogManagerReplicaSet::updateConfigDocument(OperationContext
return (nSelected == 1);
}
-Status CatalogManagerReplicaSet::removeConfigDocuments(OperationContext* txn,
- const string& ns,
- const BSONObj& query) {
+Status ShardingCatalogClientImpl::removeConfigDocuments(OperationContext* txn,
+ const string& ns,
+ const BSONObj& query) {
const NamespaceString nss(ns);
invariant(nss.db() == "config");
@@ -1877,9 +1878,9 @@ Status CatalogManagerReplicaSet::removeConfigDocuments(OperationContext* txn,
return response.toStatus();
}
-Status CatalogManagerReplicaSet::_checkDbDoesNotExist(OperationContext* txn,
- const string& dbName,
- DatabaseType* db) {
+Status ShardingCatalogClientImpl::_checkDbDoesNotExist(OperationContext* txn,
+ const string& dbName,
+ DatabaseType* db) {
BSONObjBuilder queryBuilder;
queryBuilder.appendRegex(
DatabaseType::name(), (string) "^" + pcrecpp::RE::QuoteMeta(dbName) + "$", "i");
@@ -1923,7 +1924,7 @@ Status CatalogManagerReplicaSet::_checkDbDoesNotExist(OperationContext* txn,
<< dbName);
}
-StatusWith<std::string> CatalogManagerReplicaSet::_generateNewShardName(OperationContext* txn) {
+StatusWith<std::string> ShardingCatalogClientImpl::_generateNewShardName(OperationContext* txn) {
BSONObjBuilder shardNameRegex;
shardNameRegex.appendRegex(ShardType::name(), "^shard");
@@ -1961,9 +1962,9 @@ StatusWith<std::string> CatalogManagerReplicaSet::_generateNewShardName(Operatio
return Status(ErrorCodes::OperationFailed, "unable to generate new shard name");
}
-Status CatalogManagerReplicaSet::_createCappedConfigCollection(OperationContext* txn,
- StringData collName,
- int cappedSize) {
+Status ShardingCatalogClientImpl::_createCappedConfigCollection(OperationContext* txn,
+ StringData collName,
+ int cappedSize) {
BSONObj createCmd = BSON("create" << collName << "capped" << true << "size" << cappedSize);
auto result = Grid::get(txn)->shardRegistry()->getConfigShard()->runCommand(
@@ -1992,9 +1993,9 @@ Status CatalogManagerReplicaSet::_createCappedConfigCollection(OperationContext*
return result.getValue().writeConcernStatus;
}
-StatusWith<long long> CatalogManagerReplicaSet::_runCountCommandOnConfig(OperationContext* txn,
- const NamespaceString& ns,
- BSONObj query) {
+StatusWith<long long> ShardingCatalogClientImpl::_runCountCommandOnConfig(OperationContext* txn,
+ const NamespaceString& ns,
+ BSONObj query) {
BSONObjBuilder countBuilder;
countBuilder.append("count", ns.coll());
countBuilder.append("query", query);
@@ -2024,7 +2025,7 @@ StatusWith<long long> CatalogManagerReplicaSet::_runCountCommandOnConfig(Operati
return result;
}
-Status CatalogManagerReplicaSet::initConfigVersion(OperationContext* txn) {
+Status ShardingCatalogClientImpl::initConfigVersion(OperationContext* txn) {
for (int x = 0; x < kMaxConfigVersionInitRetry; x++) {
auto versionStatus = _getConfigVersion(txn);
if (!versionStatus.isOK()) {
@@ -2085,7 +2086,7 @@ Status CatalogManagerReplicaSet::initConfigVersion(OperationContext* txn) {
<< " retries"};
}
-StatusWith<VersionType> CatalogManagerReplicaSet::_getConfigVersion(OperationContext* txn) {
+StatusWith<VersionType> ShardingCatalogClientImpl::_getConfigVersion(OperationContext* txn) {
auto findStatus = _exhaustiveFindOnConfig(txn,
kConfigReadSelector,
NamespaceString(VersionType::ConfigNS),
@@ -2121,7 +2122,7 @@ StatusWith<VersionType> CatalogManagerReplicaSet::_getConfigVersion(OperationCon
return versionTypeResult.getValue();
}
-StatusWith<repl::OpTimeWith<vector<BSONObj>>> CatalogManagerReplicaSet::_exhaustiveFindOnConfig(
+StatusWith<repl::OpTimeWith<vector<BSONObj>>> ShardingCatalogClientImpl::_exhaustiveFindOnConfig(
OperationContext* txn,
const ReadPreferenceSetting& readPref,
const NamespaceString& nss,
@@ -2138,14 +2139,14 @@ StatusWith<repl::OpTimeWith<vector<BSONObj>>> CatalogManagerReplicaSet::_exhaust
response.getValue().opTime);
}
-void CatalogManagerReplicaSet::_appendReadConcern(BSONObjBuilder* builder) {
+void ShardingCatalogClientImpl::_appendReadConcern(BSONObjBuilder* builder) {
repl::ReadConcernArgs readConcern(grid.configOpTime(),
repl::ReadConcernLevel::kMajorityReadConcern);
readConcern.appendInfo(builder);
}
-Status CatalogManagerReplicaSet::appendInfoForConfigServerDatabases(OperationContext* txn,
- BSONArrayBuilder* builder) {
+Status ShardingCatalogClientImpl::appendInfoForConfigServerDatabases(OperationContext* txn,
+ BSONArrayBuilder* builder) {
auto resultStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->runCommand(
txn,
kConfigPrimaryPreferredSelector,
@@ -2186,7 +2187,7 @@ Status CatalogManagerReplicaSet::appendInfoForConfigServerDatabases(OperationCon
return Status::OK();
}
-void CatalogManagerReplicaSet::appendConnectionStats(executor::ConnectionPoolStats* stats) {
+void ShardingCatalogClientImpl::appendConnectionStats(executor::ConnectionPoolStats* stats) {
_executorForAddShard->appendConnectionStats(stats);
}
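Call sites that construct the client change in lock-step with this rename. A minimal sketch of wiring up the renamed implementation, assuming only the constructor shown above (the factory function itself is hypothetical; the DistLockManager and add-shard TaskExecutor come from whatever the caller already uses):
std::unique_ptr<ShardingCatalogClient> makeCatalogClient(
    std::unique_ptr<DistLockManager> distLockManager,
    std::unique_ptr<executor::TaskExecutor> addShardExecutor) {
    auto client = stdx::make_unique<ShardingCatalogClientImpl>(
        std::move(distLockManager), std::move(addShardExecutor));
    // startup() must be called separately, after the client has been installed
    // into the global 'grid' object (see the interface contract below).
    return std::move(client);
}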
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.h b/src/mongo/s/catalog/replset/sharding_catalog_client_impl.h
index a26bf941ab2..9f8201f8d23 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.h
+++ b/src/mongo/s/catalog/replset/sharding_catalog_client_impl.h
@@ -30,7 +30,7 @@
#include "mongo/client/connection_string.h"
#include "mongo/db/repl/optime.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/stdx/mutex.h"
@@ -45,13 +45,13 @@ class TaskExecutor;
} // namespace executor
/**
- * Implements the catalog manager for talking to replica set config servers.
+ * Implements the catalog client for reading from replica set config servers.
*/
-class CatalogManagerReplicaSet final : public CatalogManager {
+class ShardingCatalogClientImpl final : public ShardingCatalogClient {
public:
- CatalogManagerReplicaSet(std::unique_ptr<DistLockManager> distLockManager,
- std::unique_ptr<executor::TaskExecutor> addShardExecutor);
- virtual ~CatalogManagerReplicaSet();
+ ShardingCatalogClientImpl(std::unique_ptr<DistLockManager> distLockManager,
+ std::unique_ptr<executor::TaskExecutor> addShardExecutor);
+ virtual ~ShardingCatalogClientImpl();
/**
* Safe to call multiple times as long as the calls are externally synchronized to be
diff --git a/src/mongo/s/catalog/catalog_manager.h b/src/mongo/s/catalog/sharding_catalog_client.h
index b3f87f56d37..e7d6716b1b0 100644
--- a/src/mongo/s/catalog/catalog_manager.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -77,20 +77,24 @@ enum ShardDrainingStatus {
};
/**
- * Abstracts reads and writes of the sharding catalog metadata.
+ * Abstracts reads of the sharding catalog metadata.
*
* All implementations of this interface should go directly to the persistent backing store
* and should avoid doing any caching of their own. The caching is delegated to a parallel
* read-only view of the catalog, which is maintained by higher-level code.
+ *
+ * TODO: For now this also includes some methods that write the sharding catalog metadata. Those
+ * should eventually all be moved to ShardingCatalogManager as catalog manipulation operations
+ * move to be run on the config server primary.
*/
-class CatalogManager {
- MONGO_DISALLOW_COPYING(CatalogManager);
+class ShardingCatalogClient {
+ MONGO_DISALLOW_COPYING(ShardingCatalogClient);
public:
- virtual ~CatalogManager() = default;
+ virtual ~ShardingCatalogClient() = default;
/**
- * Performs implementation-specific startup tasks. Must be run after the catalog manager
+ * Performs implementation-specific startup tasks. Must be run after the catalog client
* has been installed into the global 'grid' object. Implementations do not need to guarantee
* thread safety so callers should employ proper synchronization when calling this method.
*/
@@ -437,7 +441,7 @@ public:
BSONArrayBuilder* builder) = 0;
/**
- * Append information about the connection pools owned by the CatalogManager.
+ * Append information about the connection pools owned by the ShardingCatalogClient.
*/
virtual void appendConnectionStats(executor::ConnectionPoolStats* stats) = 0;
@@ -452,13 +456,14 @@ public:
* Obtains a reference to the distributed lock manager instance to use for synchronizing
* system-wide changes.
*
- * The returned reference is valid only as long as the catalog manager is valid and should not
+ * The returned reference is valid only as long as the catalog client is valid and should not
* be cached.
*/
virtual DistLockManager* getDistLockManager() = 0;
+
protected:
- CatalogManager() = default;
+ ShardingCatalogClient() = default;
};
} // namespace mongo
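With the grid accessor renamed as well (grid.catalogManager() becomes grid.catalogClient(), per the commit message), a read-only consumer of this interface looks like the sketch below; getCollections() and its signature are taken from the hunks above, while the helper itself and the nullptr opTime argument are assumptions:
Status logShardedCollections(OperationContext* txn, const std::string& dbName) {
    std::vector<CollectionType> collections;
    Status status =
        grid.catalogClient(txn)->getCollections(txn, &dbName, &collections, nullptr);
    if (!status.isOK()) {
        return status;
    }
    for (const auto& coll : collections) {
        log() << "sharded collection: " << coll.getNs().ns();
    }
    return Status::OK();
}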
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
new file mode 100644
index 00000000000..72836e6b4d7
--- /dev/null
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
@@ -0,0 +1,238 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/s/catalog/sharding_catalog_client_mock.h"
+
+#include "mongo/base/status.h"
+#include "mongo/db/repl/optime.h"
+#include "mongo/s/catalog/type_collection.h"
+#include "mongo/s/catalog/type_database.h"
+#include "mongo/s/catalog/type_shard.h"
+#include "mongo/stdx/memory.h"
+
+namespace mongo {
+
+using std::string;
+using std::vector;
+
+ShardingCatalogClientMock::ShardingCatalogClientMock() {
+ _mockDistLockMgr = stdx::make_unique<DistLockManagerMock>();
+}
+
+ShardingCatalogClientMock::~ShardingCatalogClientMock() = default;
+
+Status ShardingCatalogClientMock::startup() {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+void ShardingCatalogClientMock::shutDown(OperationContext* txn) {}
+
+Status ShardingCatalogClientMock::enableSharding(OperationContext* txn, const std::string& dbName) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+Status ShardingCatalogClientMock::shardCollection(OperationContext* txn,
+ const string& ns,
+ const ShardKeyPattern& fieldsAndOrder,
+ bool unique,
+ const vector<BSONObj>& initPoints,
+ const std::set<ShardId>& initShardIds) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+StatusWith<string> ShardingCatalogClientMock::addShard(
+ OperationContext* txn,
+ const std::string* shardProposedName,
+ const ConnectionString& shardConnectionString,
+ const long long maxSize) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+StatusWith<ShardDrainingStatus> ShardingCatalogClientMock::removeShard(OperationContext* txn,
+ const string& name) {
+ return ShardDrainingStatus::COMPLETED;
+}
+
+Status ShardingCatalogClientMock::updateDatabase(OperationContext* txn,
+ const string& dbName,
+ const DatabaseType& db) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientMock::getDatabase(
+ OperationContext* txn, const string& dbName) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+Status ShardingCatalogClientMock::updateCollection(OperationContext* txn,
+ const string& collNs,
+ const CollectionType& coll) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientMock::getCollection(
+ OperationContext* txn, const string& collNs) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+Status ShardingCatalogClientMock::getCollections(OperationContext* txn,
+ const string* dbName,
+ vector<CollectionType>* collections,
+ repl::OpTime* optime) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+Status ShardingCatalogClientMock::dropCollection(OperationContext* txn, const NamespaceString& ns) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+Status ShardingCatalogClientMock::getDatabasesForShard(OperationContext* txn,
+ const string& shardName,
+ vector<string>* dbs) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+Status ShardingCatalogClientMock::getChunks(OperationContext* txn,
+ const BSONObj& filter,
+ const BSONObj& sort,
+ boost::optional<int> limit,
+ std::vector<ChunkType>* chunks,
+ repl::OpTime* opTime) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+Status ShardingCatalogClientMock::getTagsForCollection(OperationContext* txn,
+ const string& collectionNs,
+ vector<TagsType>* tags) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+StatusWith<string> ShardingCatalogClientMock::getTagForChunk(OperationContext* txn,
+ const string& collectionNs,
+ const ChunkType& chunk) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+StatusWith<repl::OpTimeWith<std::vector<ShardType>>> ShardingCatalogClientMock::getAllShards(
+ OperationContext* txn) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+StatusWith<DistLockManager::ScopedDistLock> ShardingCatalogClientMock::distLock(
+ OperationContext* txn, StringData name, StringData whyMessage, Milliseconds waitFor) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+bool ShardingCatalogClientMock::runUserManagementWriteCommand(OperationContext* txn,
+ const string& commandName,
+ const string& dbname,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* result) {
+ return true;
+}
+
+bool ShardingCatalogClientMock::runUserManagementReadCommand(OperationContext* txn,
+ const string& dbname,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* result) {
+ return true;
+}
+
+Status ShardingCatalogClientMock::applyChunkOpsDeprecated(OperationContext* txn,
+ const BSONArray& updateOps,
+ const BSONArray& preCondition,
+ const std::string& nss,
+ const ChunkVersion& lastChunkVersion) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+Status ShardingCatalogClientMock::logAction(OperationContext* txn,
+ const std::string& what,
+ const std::string& ns,
+ const BSONObj& detail) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+Status ShardingCatalogClientMock::logChange(OperationContext* txn,
+ const string& what,
+ const string& ns,
+ const BSONObj& detail) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+StatusWith<BSONObj> ShardingCatalogClientMock::getGlobalSettings(OperationContext* txn,
+ StringData key) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+void ShardingCatalogClientMock::writeConfigServerDirect(OperationContext* txn,
+ const BatchedCommandRequest& request,
+ BatchedCommandResponse* response) {}
+
+Status ShardingCatalogClientMock::insertConfigDocument(OperationContext* txn,
+ const std::string& ns,
+ const BSONObj& doc) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+StatusWith<bool> ShardingCatalogClientMock::updateConfigDocument(OperationContext* txn,
+ const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& update,
+ bool upsert) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+Status ShardingCatalogClientMock::removeConfigDocuments(OperationContext* txn,
+ const std::string& ns,
+ const BSONObj& query) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+Status ShardingCatalogClientMock::createDatabase(OperationContext* txn, const std::string& dbName) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+DistLockManager* ShardingCatalogClientMock::getDistLockManager() {
+ return _mockDistLockMgr.get();
+}
+
+Status ShardingCatalogClientMock::initConfigVersion(OperationContext* txn) {
+ return {ErrorCodes::InternalError, "Method not implemented"};
+}
+
+Status ShardingCatalogClientMock::appendInfoForConfigServerDatabases(OperationContext* txn,
+ BSONArrayBuilder* builder) {
+ return Status::OK();
+}
+
+void ShardingCatalogClientMock::appendConnectionStats(executor::ConnectionPoolStats* stats) {}
+
+} // namespace mongo
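Because almost every method on the mock above either no-ops or returns InternalError, a test that needs real data would subclass it and override just what it uses. A hypothetical example built only from signatures in this file:
class CatalogClientWithShards : public ShardingCatalogClientMock {
public:
    StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
        OperationContext* txn) override {
        ShardType shard;
        shard.setName("shard0000");
        shard.setHost("host1:27017");
        std::vector<ShardType> shards{shard};
        // No config opTime in a unit test; a default-constructed OpTime is fine.
        return repl::OpTimeWith<std::vector<ShardType>>(shards);
    }
};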
diff --git a/src/mongo/s/catalog/catalog_manager_mock.h b/src/mongo/s/catalog/sharding_catalog_client_mock.h
index 51a43f7a1b5..177530a8bdc 100644
--- a/src/mongo/s/catalog/catalog_manager_mock.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.h
@@ -28,18 +28,18 @@
#pragma once
-#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/s/catalog/dist_lock_manager_mock.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
namespace mongo {
/**
- * A dummy implementation of CatalogManager for testing purposes.
+ * A dummy implementation of ShardingCatalogClient for testing purposes.
*/
-class CatalogManagerMock : public CatalogManager {
+class ShardingCatalogClientMock : public ShardingCatalogClient {
public:
- CatalogManagerMock();
- ~CatalogManagerMock();
+ ShardingCatalogClientMock();
+ ~ShardingCatalogClientMock();
Status startup() override;
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index 09addfc588c..24c72267772 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -38,7 +38,7 @@
#include "mongo/platform/random.h"
#include "mongo/s/balancer/balancer.h"
#include "mongo/s/balancer/balancer_configuration.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard_registry.h"
@@ -349,7 +349,7 @@ bool Chunk::splitIfShould(OperationContext* txn, long dataWritten) {
bool shouldBalance = balancerConfig->isBalancerActive();
if (shouldBalance) {
- auto collStatus = grid.catalogManager(txn)->getCollection(txn, _manager->getns());
+ auto collStatus = grid.catalogClient(txn)->getCollection(txn, _manager->getns());
if (!collStatus.isOK()) {
warning() << "Auto-split for " << _manager->getns()
<< " failed to load collection metadata"
@@ -439,11 +439,11 @@ void Chunk::markAsJumbo(OperationContext* txn) const {
const string chunkName = ChunkType::genID(_manager->getns(), _min);
auto status =
- grid.catalogManager(txn)->updateConfigDocument(txn,
- ChunkType::ConfigNS,
- BSON(ChunkType::name(chunkName)),
- BSON("$set" << BSON(ChunkType::jumbo(true))),
- false);
+ grid.catalogClient(txn)->updateConfigDocument(txn,
+ ChunkType::ConfigNS,
+ BSON(ChunkType::name(chunkName)),
+ BSON("$set" << BSON(ChunkType::jumbo(true))),
+ false);
if (!status.isOK()) {
warning() << "couldn't set jumbo for chunk: " << chunkName << causedBy(status.getStatus());
}
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index d1b1545310a..0dcad211c7e 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -47,7 +47,7 @@
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/chunk.h"
#include "mongo/s/chunk_diff.h"
@@ -262,7 +262,7 @@ bool ChunkManager::_load(OperationContext* txn,
repl::OpTime opTime;
std::vector<ChunkType> chunks;
- uassertStatusOK(grid.catalogManager(txn)->getChunks(
+ uassertStatusOK(grid.catalogClient(txn)->getChunks(
txn, diffQuery.query, diffQuery.sort, boost::none, &chunks, &opTime));
invariant(opTime >= _configOpTime);
@@ -427,8 +427,8 @@ Status ChunkManager::createFirstChunks(OperationContext* txn,
chunk.setShard(shardIds[i % shardIds.size()]);
chunk.setVersion(version);
- Status status = grid.catalogManager(txn)->insertConfigDocument(
- txn, ChunkType::ConfigNS, chunk.toBSON());
+ Status status =
+ grid.catalogClient(txn)->insertConfigDocument(txn, ChunkType::ConfigNS, chunk.toBSON());
if (!status.isOK()) {
const string errMsg = str::stream() << "Creating first chunks failed: "
<< status.reason();
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index 11f0e7e7c2c..34307cd1fe2 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -38,8 +38,7 @@
#include "mongo/bson/util/bson_extract.h"
#include "mongo/client/connection_string.h"
#include "mongo/client/replica_set_monitor.h"
-#include "mongo/s/catalog/catalog_manager.h"
-#include "mongo/s/catalog/type_config_version.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/client/shard_connection.h"
@@ -200,7 +199,7 @@ ShardRegistryData::ShardRegistryData(OperationContext* txn, ShardFactory* shardF
}
void ShardRegistryData::_init(OperationContext* txn, ShardFactory* shardFactory) {
- auto shardsStatus = grid.catalogManager(txn)->getAllShards(txn);
+ auto shardsStatus = grid.catalogClient(txn)->getAllShards(txn);
if (!shardsStatus.isOK()) {
uasserted(shardsStatus.getStatus().code(),
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index df6605b5e73..e65987485e9 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -105,7 +105,7 @@ public:
private:
/**
- * Reads shards docs from the catalog manager and fills in maps.
+ * Reads shard documents from the catalog client and fills in the maps.
*/
void _init(OperationContext* txn, ShardFactory* factory);
diff --git a/src/mongo/s/client/sharding_network_connection_hook.h b/src/mongo/s/client/sharding_network_connection_hook.h
index 64132eabc41..4f1934e5075 100644
--- a/src/mongo/s/client/sharding_network_connection_hook.h
+++ b/src/mongo/s/client/sharding_network_connection_hook.h
@@ -34,8 +34,8 @@ namespace mongo {
/**
* An implementation of NetworkConnectionHook for handling sharding-specific operations such
- * as sending sharding initialization information to shards and indicating up the call stack that
- * swapping the active catalog manager is needed during upgrade to CSRS.
+ * as sending sharding initialization information to shards and maintaining this process' notion of
+ * the config server optime.
*/
class ShardingNetworkConnectionHook final : public executor::NetworkConnectionHook {
public:
@@ -43,9 +43,8 @@ public:
virtual ~ShardingNetworkConnectionHook() = default;
/**
- * Looks for the presence of a configsvr field in the ismaster response. If no such field
- * exits, does nothing and returns Status::OK(). If the field is present, asks the grid
- * whether swapping catalog managers is needed and returns its response.
+ * Checks that the given host is valid to be used in this sharded cluster, based on its
+ * isMaster response.
*/
Status validateHost(const HostAndPort& remoteHost,
const executor::RemoteCommandResponse& isMasterReply) override;
diff --git a/src/mongo/s/client/version_manager.cpp b/src/mongo/s/client/version_manager.cpp
index c0df531ec9f..d129ba9853d 100644
--- a/src/mongo/s/client/version_manager.cpp
+++ b/src/mongo/s/client/version_manager.cpp
@@ -35,7 +35,7 @@
#include "mongo/client/dbclient_rs.h"
#include "mongo/db/namespace_string.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_connection.h"
diff --git a/src/mongo/s/cluster_write.cpp b/src/mongo/s/cluster_write.cpp
index 9f999279db0..4714394e7e3 100644
--- a/src/mongo/s/cluster_write.cpp
+++ b/src/mongo/s/cluster_write.cpp
@@ -39,7 +39,7 @@
#include "mongo/db/write_concern_options.h"
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/chunk_manager_targeter.h"
#include "mongo/s/client/dbclient_multi_command.h"
@@ -253,7 +253,7 @@ void ClusterWriter::write(OperationContext* txn,
request = requestWithWriteConcern.get();
}
- grid.catalogManager(txn)->writeConfigServerDirect(txn, *request, response);
+ grid.catalogClient(txn)->writeConfigServerDirect(txn, *request, response);
} else {
TargeterStats targeterStats;
diff --git a/src/mongo/s/commands/cluster_add_shard_cmd.cpp b/src/mongo/s/commands/cluster_add_shard_cmd.cpp
index 9d6cb7629ff..40e861e6b6b 100644
--- a/src/mongo/s/commands/cluster_add_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_add_shard_cmd.cpp
@@ -35,7 +35,7 @@
#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/audit.h"
#include "mongo/db/commands.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
diff --git a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
index 49a07183f1e..ff11011a59d 100644
--- a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
+++ b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
@@ -40,7 +40,7 @@
#include "mongo/db/client_basic.h"
#include "mongo/db/commands.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/util/log.h"
@@ -105,7 +105,7 @@ public:
return false;
}
- Status status = grid.catalogManager(txn)->enableSharding(txn, dbname);
+ Status status = grid.catalogClient(txn)->enableSharding(txn, dbname);
if (status.isOK()) {
audit::logEnableSharding(ClientBasic::getCurrent(), dbname);
}
diff --git a/src/mongo/s/commands/cluster_list_databases_cmd.cpp b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
index 0ad31d0c65d..b9fcc41af54 100644
--- a/src/mongo/s/commands/cluster_list_databases_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
@@ -35,7 +35,7 @@
#include "mongo/client/read_preference.h"
#include "mongo/client/remote_command_targeter.h"
#include "mongo/db/commands.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
@@ -163,8 +163,8 @@ public:
}
// Get information for config and admin dbs from the config servers.
- auto catalogManager = grid.catalogManager(txn);
- auto appendStatus = catalogManager->appendInfoForConfigServerDatabases(txn, &dbListBuilder);
+ auto catalogClient = grid.catalogClient(txn);
+ auto appendStatus = catalogClient->appendInfoForConfigServerDatabases(txn, &dbListBuilder);
if (!appendStatus.isOK()) {
return Command::appendCommandStatus(result, appendStatus);
}
diff --git a/src/mongo/s/commands/cluster_list_shards_cmd.cpp b/src/mongo/s/commands/cluster_list_shards_cmd.cpp
index de6d1ec82b9..9deee999e12 100644
--- a/src/mongo/s/commands/cluster_list_shards_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_shards_cmd.cpp
@@ -32,7 +32,7 @@
#include "mongo/client/connpool.h"
#include "mongo/db/commands.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/grid.h"
@@ -74,7 +74,7 @@ public:
int options,
std::string& errmsg,
BSONObjBuilder& result) {
- auto shardsStatus = grid.catalogManager(txn)->getAllShards(txn);
+ auto shardsStatus = grid.catalogClient(txn)->getAllShards(txn);
if (!shardsStatus.isOK()) {
return appendCommandStatus(result, shardsStatus.getStatus());
}
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index cd4e887a979..b69b5ea4add 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -41,8 +41,8 @@
#include "mongo/db/commands/mr.h"
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/s/catalog/dist_lock_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard_connection.h"
#include "mongo/s/client/shard_registry.h"
@@ -469,7 +469,7 @@ public:
BSONObj sortKey = BSON("_id" << 1);
ShardKeyPattern sortKeyPattern(sortKey);
- Status status = grid.catalogManager(txn)->shardCollection(
+ Status status = grid.catalogClient(txn)->shardCollection(
txn, outputCollNss.ns(), sortKeyPattern, true, sortedSplitPts, outShardIds);
if (!status.isOK()) {
return appendCommandStatus(result, status);
@@ -484,7 +484,7 @@ public:
map<BSONObj, int> chunkSizes;
{
// Take distributed lock to prevent split / migration.
- auto scopedDistLock = grid.catalogManager(txn)->distLock(
+ auto scopedDistLock = grid.catalogClient(txn)->distLock(
txn, outputCollNss.ns(), "mr-post-process", kNoDistLockTimeout);
if (!scopedDistLock.isOK()) {
diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
index be33df99fc5..63969303637 100644
--- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
+++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
@@ -36,7 +36,7 @@
#include "mongo/db/field_parser.h"
#include "mongo/db/namespace_string.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard_connection.h"
#include "mongo/s/client/shard_registry.h"
diff --git a/src/mongo/s/commands/cluster_move_primary_cmd.cpp b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
index bc215790b7c..3ca7a66bb68 100644
--- a/src/mongo/s/commands/cluster_move_primary_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
@@ -44,7 +44,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/commands/sharded_command_processing.h"
#include "mongo/s/config.h"
@@ -151,7 +151,7 @@ public:
<< " to: " << toShard->toString();
string whyMessage(str::stream() << "Moving primary shard of " << dbname);
- auto scopedDistLock = grid.catalogManager(txn)->distLock(
+ auto scopedDistLock = grid.catalogClient(txn)->distLock(
txn, dbname + "-movePrimary", whyMessage, DistLockManager::kSingleLockAttemptTimeout);
if (!scopedDistLock.isOK()) {
@@ -165,8 +165,8 @@ public:
BSONObj moveStartDetails =
_buildMoveEntry(dbname, fromShard->toString(), toShard->toString(), shardedColls);
- auto catalogManager = grid.catalogManager(txn);
- catalogManager->logChange(txn, "movePrimary.start", dbname, moveStartDetails);
+ auto catalogClient = grid.catalogClient(txn);
+ catalogClient->logChange(txn, "movePrimary.start", dbname, moveStartDetails);
BSONArrayBuilder barr;
barr.append(shardedColls);
@@ -174,8 +174,7 @@ public:
ScopedDbConnection toconn(toShard->getConnString());
{
- // Make sure the target node is sharding aware so that it can detect catalog manager
- // swaps.
+ // Make sure the target node is sharding aware.
auto ssvRequest = SetShardVersionRequest::makeForInitNoPersist(
grid.shardRegistry()->getConfigServerConnectionString(),
toShard->getId(),
@@ -290,7 +289,7 @@ public:
BSONObj moveFinishDetails =
_buildMoveEntry(dbname, oldPrimary, toShard->toString(), shardedColls);
- catalogManager->logChange(txn, "movePrimary", dbname, moveFinishDetails);
+ catalogClient->logChange(txn, "movePrimary", dbname, moveFinishDetails);
return true;
}
diff --git a/src/mongo/s/commands/cluster_netstat_cmd.cpp b/src/mongo/s/commands/cluster_netstat_cmd.cpp
index 56db7cc94d8..fd5a2b2e87d 100644
--- a/src/mongo/s/commands/cluster_netstat_cmd.cpp
+++ b/src/mongo/s/commands/cluster_netstat_cmd.cpp
@@ -29,7 +29,7 @@
#include "mongo/platform/basic.h"
#include "mongo/db/commands.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
diff --git a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
index 4443ad6f2e5..a82bba5e38e 100644
--- a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
@@ -36,7 +36,7 @@
#include "mongo/client/connpool.h"
#include "mongo/db/commands.h"
#include "mongo/db/operation_context.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/client/shard_registry.h"
@@ -95,15 +95,15 @@ public:
return appendCommandStatus(result, Status(ErrorCodes::ShardNotFound, msg));
}
- auto catalogManager = grid.catalogManager(txn);
+ auto catalogClient = grid.catalogClient(txn);
StatusWith<ShardDrainingStatus> removeShardResult =
- catalogManager->removeShard(txn, s->getId());
+ catalogClient->removeShard(txn, s->getId());
if (!removeShardResult.isOK()) {
return appendCommandStatus(result, removeShardResult.getStatus());
}
vector<string> databases;
- Status status = catalogManager->getDatabasesForShard(txn, s->getId(), &databases);
+ Status status = catalogClient->getDatabasesForShard(txn, s->getId(), &databases);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -136,12 +136,12 @@ public:
break;
case ShardDrainingStatus::ONGOING: {
vector<ChunkType> chunks;
- Status status = catalogManager->getChunks(txn,
- BSON(ChunkType::shard(s->getId())),
- BSONObj(),
- boost::none, // return all
- &chunks,
- nullptr);
+ Status status = catalogClient->getChunks(txn,
+ BSON(ChunkType::shard(s->getId())),
+ BSONObj(),
+ boost::none, // return all
+ &chunks,
+ nullptr);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
diff --git a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
index faf63593d58..7d36ae20ee5 100644
--- a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
+++ b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
@@ -47,7 +47,7 @@
#include "mongo/s/balancer/balancer.h"
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/cluster_write.h"
@@ -404,7 +404,7 @@ public:
audit::logShardCollection(
ClientBasic::getCurrent(), nss.ns(), proposedKey, careAboutUnique);
- Status status = grid.catalogManager(txn)->shardCollection(
+ Status status = grid.catalogClient(txn)->shardCollection(
txn, nss.ns(), proposedShardKey, careAboutUnique, initSplits, {});
if (!status.isOK()) {
return appendCommandStatus(result, status);
diff --git a/src/mongo/s/commands/cluster_user_management_commands.cpp b/src/mongo/s/commands/cluster_user_management_commands.cpp
index a282cd57d1d..fd425be5691 100644
--- a/src/mongo/s/commands/cluster_user_management_commands.cpp
+++ b/src/mongo/s/commands/cluster_user_management_commands.cpp
@@ -42,7 +42,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/jsobj.h"
#include "mongo/rpc/write_concern_error_detail.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/commands/sharded_command_processing.h"
@@ -85,7 +85,7 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- return grid.catalogManager(txn)->runUserManagementWriteCommand(
+ return grid.catalogClient(txn)->runUserManagementWriteCommand(
txn, getName(), dbname, cmdObj, &result);
}
@@ -129,7 +129,7 @@ public:
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- const bool ok = grid.catalogManager(txn)->runUserManagementWriteCommand(
+ const bool ok = grid.catalogClient(txn)->runUserManagementWriteCommand(
txn, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -179,7 +179,7 @@ public:
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- const bool ok = grid.catalogManager(txn)->runUserManagementWriteCommand(
+ const bool ok = grid.catalogClient(txn)->runUserManagementWriteCommand(
txn, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -220,7 +220,7 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = grid.catalogManager(txn)->runUserManagementWriteCommand(
+ const bool ok = grid.catalogClient(txn)->runUserManagementWriteCommand(
txn, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -268,7 +268,7 @@ public:
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- const bool ok = grid.catalogManager(txn)->runUserManagementWriteCommand(
+ const bool ok = grid.catalogClient(txn)->runUserManagementWriteCommand(
txn, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -316,7 +316,7 @@ public:
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- const bool ok = grid.catalogManager(txn)->runUserManagementWriteCommand(
+ const bool ok = grid.catalogClient(txn)->runUserManagementWriteCommand(
txn, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -361,7 +361,7 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- return grid.catalogManager(txn)->runUserManagementReadCommand(txn, dbname, cmdObj, &result);
+ return grid.catalogClient(txn)->runUserManagementReadCommand(txn, dbname, cmdObj, &result);
}
} cmdUsersInfo;
@@ -395,7 +395,7 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- return grid.catalogManager(txn)->runUserManagementWriteCommand(
+ return grid.catalogClient(txn)->runUserManagementWriteCommand(
txn, getName(), dbname, cmdObj, &result);
}
@@ -430,7 +430,7 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = grid.catalogManager(txn)->runUserManagementWriteCommand(
+ const bool ok = grid.catalogClient(txn)->runUserManagementWriteCommand(
txn, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -471,7 +471,7 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = grid.catalogManager(txn)->runUserManagementWriteCommand(
+ const bool ok = grid.catalogClient(txn)->runUserManagementWriteCommand(
txn, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -512,7 +512,7 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = grid.catalogManager(txn)->runUserManagementWriteCommand(
+ const bool ok = grid.catalogClient(txn)->runUserManagementWriteCommand(
txn, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -553,7 +553,7 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = grid.catalogManager(txn)->runUserManagementWriteCommand(
+ const bool ok = grid.catalogClient(txn)->runUserManagementWriteCommand(
txn, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -594,7 +594,7 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = grid.catalogManager(txn)->runUserManagementWriteCommand(
+ const bool ok = grid.catalogClient(txn)->runUserManagementWriteCommand(
txn, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -638,7 +638,7 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = grid.catalogManager(txn)->runUserManagementWriteCommand(
+ const bool ok = grid.catalogClient(txn)->runUserManagementWriteCommand(
txn, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -683,7 +683,7 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = grid.catalogManager(txn)->runUserManagementWriteCommand(
+ const bool ok = grid.catalogClient(txn)->runUserManagementWriteCommand(
txn, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -728,7 +728,7 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- return grid.catalogManager(txn)->runUserManagementReadCommand(txn, dbname, cmdObj, &result);
+ return grid.catalogClient(txn)->runUserManagementReadCommand(txn, dbname, cmdObj, &result);
}
} cmdRolesInfo;
@@ -817,7 +817,7 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- return grid.catalogManager(txn)->runUserManagementWriteCommand(
+ return grid.catalogClient(txn)->runUserManagementWriteCommand(
txn, getName(), dbname, cmdObj, &result);
}
@@ -916,7 +916,7 @@ public:
string& errmsg,
BSONObjBuilder& result) {
// Run the authSchemaUpgrade command on the config servers
- if (!grid.catalogManager(txn)->runUserManagementWriteCommand(
+ if (!grid.catalogClient(txn)->runUserManagementWriteCommand(
txn, getName(), dbname, cmdObj, &result)) {
return false;
}
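
Every write command in this file delegates identically; the shape distills into one sketch. The helper name is illustrative, and the cache-invalidation step is assumed from the unchanged command bodies surrounding these hunks, not shown in the diff:

// Sketch of the common forwarding shape used by the cluster user-management
// write commands above: run the write on the config servers through the
// catalog client, then invalidate the local user cache. Illustrative helper,
// not a function introduced by this commit.
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_manager_global.h"
#include "mongo/db/jsobj.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/grid.h"

namespace mongo {

bool runForwardedUserManagementWrite(OperationContext* txn,
                                     const std::string& cmdName,
                                     const std::string& dbname,
                                     const BSONObj& cmdObj,
                                     BSONObjBuilder* result) {
    const bool ok = grid.catalogClient(txn)->runUserManagementWriteCommand(
        txn, cmdName, dbname, cmdObj, result);
    // Assumption from the surrounding (unchanged) command bodies: the user
    // cache is invalidated after the config-server write, success or not.
    AuthorizationManager* authzManager = getGlobalAuthorizationManager();
    invariant(authzManager);
    authzManager->invalidateUserCache();
    return ok;
}

}  // namespace mongo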
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index 4e7993a97ee..f3115964c1b 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -45,7 +45,7 @@
#include "mongo/executor/task_executor_pool.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard_connection.h"
#include "mongo/s/client/shard_registry.h"
@@ -561,7 +561,7 @@ public:
return passthrough(txn, db.get(), cmdObj, result);
}
- uassertStatusOK(grid.catalogManager(txn)->dropCollection(txn, fullns));
+ uassertStatusOK(grid.catalogClient(txn)->dropCollection(txn, fullns));
// Force a full reload next time the just dropped namespace is accessed
db->invalidateNs(fullns.ns());
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index 14abfd14398..5773d7a3995 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -39,7 +39,7 @@
#include "mongo/db/write_concern.h"
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_database.h"
@@ -125,7 +125,7 @@ void CollectionInfo::save(OperationContext* txn, const string& ns) {
coll.setUpdatedAt(Date_t::now());
}
- uassertStatusOK(grid.catalogManager(txn)->updateCollection(txn, ns, coll));
+ uassertStatusOK(grid.catalogClient(txn)->updateCollection(txn, ns, coll));
_dirty = false;
}
@@ -311,12 +311,12 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
vector<ChunkType> newestChunk;
if (oldVersion.isSet() && !forceReload) {
uassertStatusOK(
- grid.catalogManager(txn)->getChunks(txn,
- BSON(ChunkType::ns(ns)),
- BSON(ChunkType::DEPRECATED_lastmod() << -1),
- 1,
- &newestChunk,
- nullptr));
+ grid.catalogClient(txn)->getChunks(txn,
+ BSON(ChunkType::ns(ns)),
+ BSON(ChunkType::DEPRECATED_lastmod() << -1),
+ 1,
+ &newestChunk,
+ nullptr));
if (!newestChunk.empty()) {
invariant(newestChunk.size() == 1);
@@ -442,7 +442,7 @@ bool DBConfig::_loadIfNeeded(OperationContext* txn, Counter reloadIteration) {
return true;
}
- auto status = grid.catalogManager(txn)->getDatabase(txn, _name);
+ auto status = grid.catalogClient(txn)->getDatabase(txn, _name);
if (status == ErrorCodes::NamespaceNotFound) {
return false;
}
@@ -462,7 +462,7 @@ bool DBConfig::_loadIfNeeded(OperationContext* txn, Counter reloadIteration) {
// Load all collections
vector<CollectionType> collections;
repl::OpTime configOpTimeWhenLoadingColl;
- uassertStatusOK(grid.catalogManager(txn)->getCollections(
+ uassertStatusOK(grid.catalogClient(txn)->getCollections(
txn, &_name, &collections, &configOpTimeWhenLoadingColl));
int numCollsErased = 0;
@@ -501,7 +501,7 @@ void DBConfig::_save(OperationContext* txn, bool db, bool coll) {
dbt.setPrimary(_primaryId);
dbt.setSharded(_shardingEnabled);
- uassertStatusOK(grid.catalogManager(txn)->updateDatabase(txn, _name, dbt));
+ uassertStatusOK(grid.catalogClient(txn)->updateDatabase(txn, _name, dbt));
}
if (coll) {
@@ -542,12 +542,12 @@ bool DBConfig::dropDatabase(OperationContext* txn, string& errmsg) {
*/
log() << "DBConfig::dropDatabase: " << _name;
- grid.catalogManager(txn)->logChange(txn, "dropDatabase.start", _name, BSONObj());
+ grid.catalogClient(txn)->logChange(txn, "dropDatabase.start", _name, BSONObj());
// 1
grid.catalogCache()->invalidate(_name);
- Status result = grid.catalogManager(txn)->removeConfigDocuments(
+ Status result = grid.catalogClient(txn)->removeConfigDocuments(
txn, DatabaseType::ConfigNS, BSON(DatabaseType::name(_name)));
if (!result.isOK()) {
errmsg = result.reason();
@@ -617,7 +617,7 @@ bool DBConfig::dropDatabase(OperationContext* txn, string& errmsg) {
LOG(1) << "\t dropped primary db for: " << _name;
- grid.catalogManager(txn)->logChange(txn, "dropDatabase", _name, BSONObj());
+ grid.catalogClient(txn)->logChange(txn, "dropDatabase", _name, BSONObj());
return true;
}
@@ -650,7 +650,7 @@ bool DBConfig::_dropShardedCollections(OperationContext* txn,
i->second.getCM()->getAllShardIds(&shardIds);
- uassertStatusOK(grid.catalogManager(txn)->dropCollection(txn, NamespaceString(i->first)));
+ uassertStatusOK(grid.catalogClient(txn)->dropCollection(txn, NamespaceString(i->first)));
// We should warn, but it's not a fatal error if someone else reloaded the db/coll as
// unsharded in the meantime
@@ -721,7 +721,7 @@ void ConfigServer::replicaSetChangeConfigServerUpdateHook(const string& setName,
return;
}
- auto status = grid.catalogManager(txn.get())->updateConfigDocument(
+ auto status = grid.catalogClient(txn.get())->updateConfigDocument(
txn.get(),
ShardType::ConfigNS,
BSON(ShardType::name(s->getId())),
diff --git a/src/mongo/s/grid.cpp b/src/mongo/s/grid.cpp
index 0f102def839..d4a6a827af5 100644
--- a/src/mongo/s/grid.cpp
+++ b/src/mongo/s/grid.cpp
@@ -37,7 +37,7 @@
#include "mongo/executor/task_executor_pool.h"
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/client/shard_factory.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/query/cluster_cursor_manager.h"
@@ -57,14 +57,14 @@ Grid* Grid::get(OperationContext* operationContext) {
return &grid;
}
-void Grid::init(std::unique_ptr<CatalogManager> catalogManager,
+void Grid::init(std::unique_ptr<ShardingCatalogClient> catalogClient,
std::unique_ptr<CatalogCache> catalogCache,
std::unique_ptr<ShardRegistry> shardRegistry,
std::unique_ptr<ClusterCursorManager> cursorManager,
std::unique_ptr<BalancerConfiguration> balancerConfig,
std::unique_ptr<executor::TaskExecutorPool> executorPool,
executor::NetworkInterface* network) {
- invariant(!_catalogManager);
+ invariant(!_catalogClient);
invariant(!_catalogCache);
invariant(!_shardRegistry);
invariant(!_cursorManager);
@@ -72,7 +72,7 @@ void Grid::init(std::unique_ptr<CatalogManager> catalogManager,
invariant(!_executorPool);
invariant(!_network);
- _catalogManager = std::move(catalogManager);
+ _catalogClient = std::move(catalogClient);
_catalogCache = std::move(catalogCache);
_shardRegistry = std::move(shardRegistry);
_cursorManager = std::move(cursorManager);
@@ -108,7 +108,7 @@ void Grid::advanceConfigOpTime(repl::OpTime opTime) {
}
void Grid::clearForUnitTests() {
- _catalogManager.reset();
+ _catalogClient.reset();
_catalogCache.reset();
_shardRegistry.reset();
_cursorManager.reset();
diff --git a/src/mongo/s/grid.h b/src/mongo/s/grid.h
index 02d5798d80d..4cc1cf71970 100644
--- a/src/mongo/s/grid.h
+++ b/src/mongo/s/grid.h
@@ -37,7 +37,7 @@ namespace mongo {
class BalancerConfiguration;
class CatalogCache;
-class CatalogManager;
+class ShardingCatalogClient;
class ClusterCursorManager;
class OperationContext;
class ShardRegistry;
@@ -68,7 +68,7 @@ public:
* NOTE: Unit-tests are allowed to call it more than once, provided they reset the object's
* state using clearForUnitTests.
*/
- void init(std::unique_ptr<CatalogManager> catalogManager,
+ void init(std::unique_ptr<ShardingCatalogClient> catalogClient,
std::unique_ptr<CatalogCache> catalogCache,
std::unique_ptr<ShardRegistry> shardRegistry,
std::unique_ptr<ClusterCursorManager> cursorManager,
@@ -91,11 +91,11 @@ public:
void setAllowLocalHost(bool allow);
/**
- * Returns a pointer to a CatalogManager to use for accessing catalog data stored on the config
- * servers.
+ * Returns a pointer to a ShardingCatalogClient to use for accessing catalog data stored on the
+ * config servers.
*/
- CatalogManager* catalogManager(OperationContext* txn) {
- return _catalogManager.get();
+ ShardingCatalogClient* catalogClient(OperationContext* txn) {
+ return _catalogClient.get();
}
CatalogCache* catalogCache() const {
@@ -148,7 +148,7 @@ public:
void clearForUnitTests();
private:
- std::unique_ptr<CatalogManager> _catalogManager;
+ std::unique_ptr<ShardingCatalogClient> _catalogClient;
std::unique_ptr<CatalogCache> _catalogCache;
std::unique_ptr<ShardRegistry> _shardRegistry;
std::unique_ptr<ClusterCursorManager> _cursorManager;
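
The renamed accessor keeps call sites one-liners. A minimal caller-side sketch of the pattern this commit standardizes, mirroring the call sites updated above (the function and collection-check logic are illustrative, not code from this patch):

// Illustrative consumer of the renamed accessor, modeled on the call sites
// updated in this patch (e.g. Chunk::splitIfShould in chunk.cpp).
#include <string>

#include "mongo/base/status_with.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/grid.h"

namespace mongo {

Status checkCollectionInCatalog(OperationContext* txn, const std::string& ns) {
    // Grid::get(txn) and catalogClient(txn) are the two lookups this commit
    // standardizes; getCollection() reads from the config servers.
    auto collStatus = Grid::get(txn)->catalogClient(txn)->getCollection(txn, ns);
    if (!collStatus.isOK()) {
        return collStatus.getStatus();
    }
    return Status::OK();
}

}  // namespace mongo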
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index 7662de8fba7..4fc1248f778 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -62,7 +62,7 @@
#include "mongo/platform/process_id.h"
#include "mongo/s/balancer/balancer.h"
#include "mongo/s/balancer/balancer_configuration.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_lockpings.h"
#include "mongo/s/catalog/type_locks.h"
@@ -139,7 +139,7 @@ static void cleanupTask() {
auto cursorManager = grid.getCursorManager();
cursorManager->shutdown();
grid.getExecutorPool()->shutdownAndJoin();
- grid.catalogManager(txn)->shutDown(txn);
+ grid.catalogClient(txn)->shutDown(txn);
}
audit::logShutdown(ClientBasic::getCurrent());
@@ -314,8 +314,8 @@ static Status initializeSharding(OperationContext* txn) {
return status;
}
- auto catalogManager = grid.catalogManager(txn);
- status = catalogManager->initConfigVersion(txn);
+ auto catalogClient = grid.catalogClient(txn);
+ status = catalogClient->initConfigVersion(txn);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/s/sharding_initialization.cpp b/src/mongo/s/sharding_initialization.cpp
index 4e6ff7f6688..6138842505c 100644
--- a/src/mongo/s/sharding_initialization.cpp
+++ b/src/mongo/s/sharding_initialization.cpp
@@ -48,9 +48,9 @@
#include "mongo/rpc/metadata/metadata_hook.h"
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/replset/catalog_manager_replica_set.h"
#include "mongo/s/catalog/replset/dist_lock_catalog_impl.h"
#include "mongo/s/catalog/replset/replset_dist_lock_manager.h"
+#include "mongo/s/catalog/replset/sharding_catalog_client_impl.h"
#include "mongo/s/client/shard_factory.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/client/sharding_network_connection_hook.h"
@@ -77,9 +77,9 @@ std::unique_ptr<ThreadPoolTaskExecutor> makeTaskExecutor(std::unique_ptr<Network
stdx::make_unique<NetworkInterfaceThreadPool>(netPtr), std::move(net));
}
-std::unique_ptr<CatalogManager> makeCatalogManager(ServiceContext* service,
- ShardRegistry* shardRegistry,
- const HostAndPort& thisHost) {
+std::unique_ptr<ShardingCatalogClient> makeCatalogClient(ServiceContext* service,
+ ShardRegistry* shardRegistry,
+ const HostAndPort& thisHost) {
std::unique_ptr<SecureRandom> rng(SecureRandom::create());
std::string distLockProcessId = str::stream()
<< thisHost.toString() << ':'
@@ -94,7 +94,7 @@ std::unique_ptr<CatalogManager> makeCatalogManager(ServiceContext* service,
ReplSetDistLockManager::kDistLockPingInterval,
ReplSetDistLockManager::kDistLockExpirationTime);
- return stdx::make_unique<CatalogManagerReplicaSet>(
+ return stdx::make_unique<ShardingCatalogClientImpl>(
std::move(distLockManager),
makeTaskExecutor(
executor::makeNetworkInterface("NetworkInterfaceASIO-AddShard-TaskExecutor")));
@@ -145,13 +145,13 @@ Status initializeGlobalShardingState(const ConnectionString& configCS,
auto shardRegistry(stdx::make_unique<ShardRegistry>(std::move(shardFactory), configCS));
- auto catalogManager = makeCatalogManager(getGlobalServiceContext(),
- shardRegistry.get(),
- HostAndPort(getHostName(), serverGlobalParams.port));
+ auto catalogClient = makeCatalogClient(getGlobalServiceContext(),
+ shardRegistry.get(),
+ HostAndPort(getHostName(), serverGlobalParams.port));
- auto rawCatalogManager = catalogManager.get();
+ auto rawCatalogClient = catalogClient.get();
grid.init(
- std::move(catalogManager),
+ std::move(catalogClient),
stdx::make_unique<CatalogCache>(),
std::move(shardRegistry),
stdx::make_unique<ClusterCursorManager>(getGlobalServiceContext()->getPreciseClockSource()),
@@ -159,7 +159,7 @@ Status initializeGlobalShardingState(const ConnectionString& configCS,
std::move(executorPool),
networkPtr);
- auto status = rawCatalogManager->startup();
+ auto status = rawCatalogClient->startup();
if (!status.isOK()) {
return status;
}
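
Taken together with the mongos shutdown path in server.cpp above, the client's lifecycle reads as below. A condensed sketch with error handling trimmed; the helper names are invented for illustration:

// Condensed lifecycle sketch combining initializeGlobalShardingState() above
// with initializeSharding() and cleanupTask() from src/mongo/s/server.cpp.
// Helper names are illustrative; this is not code added by the commit.
#include "mongo/executor/task_executor_pool.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/grid.h"

namespace mongo {

Status startCatalogClient(OperationContext* txn) {
    // Ownership has already moved into the grid via Grid::init(), so both the
    // startup and shutdown paths go through the renamed accessor.
    Status status = grid.catalogClient(txn)->startup();
    if (!status.isOK()) {
        return status;
    }
    return grid.catalogClient(txn)->initConfigVersion(txn);
}

void stopCatalogClient(OperationContext* txn) {
    grid.getExecutorPool()->shutdownAndJoin();  // drain outstanding work first
    grid.catalogClient(txn)->shutDown(txn);
}

}  // namespace mongo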
diff --git a/src/mongo/s/sharding_initialization.h b/src/mongo/s/sharding_initialization.h
index 962ff9c1789..b7cb2e174f0 100644
--- a/src/mongo/s/sharding_initialization.h
+++ b/src/mongo/s/sharding_initialization.h
@@ -48,7 +48,7 @@ using ShardingEgressMetadataHookBuilder =
/**
* Takes in the connection string for reaching the config servers and initializes the global
- * CatalogManager, ShardingRegistry, and grid objects.
+ * ShardingCatalogClient, ShardingCatalogManager, ShardRegistry, and Grid objects.
*/
Status initializeGlobalShardingState(const ConnectionString& configCS,
std::unique_ptr<ShardFactory> shardFactory,
diff --git a/src/mongo/s/sharding_raii.cpp b/src/mongo/s/sharding_raii.cpp
index caacb4de8a8..a948893d36e 100644
--- a/src/mongo/s/sharding_raii.cpp
+++ b/src/mongo/s/sharding_raii.cpp
@@ -32,7 +32,7 @@
#include "mongo/base/status_with.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/grid.h"
@@ -67,7 +67,7 @@ StatusWith<ScopedShardDatabase> ScopedShardDatabase::getOrCreate(OperationContex
if (dbStatus == ErrorCodes::NamespaceNotFound) {
auto statusCreateDb =
- Grid::get(txn)->catalogManager(txn)->createDatabase(txn, dbName.toString());
+ Grid::get(txn)->catalogClient(txn)->createDatabase(txn, dbName.toString());
if (statusCreateDb.isOK() || statusCreateDb == ErrorCodes::NamespaceExists) {
return getExisting(txn, dbName);
}
diff --git a/src/mongo/s/sharding_test_fixture.cpp b/src/mongo/s/sharding_test_fixture.cpp
index 60347c00b03..d50b2e75115 100644
--- a/src/mongo/s/sharding_test_fixture.cpp
+++ b/src/mongo/s/sharding_test_fixture.cpp
@@ -50,7 +50,7 @@
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/catalog/dist_lock_manager_mock.h"
-#include "mongo/s/catalog/replset/catalog_manager_replica_set.h"
+#include "mongo/s/catalog/replset/sharding_catalog_client_impl.h"
#include "mongo/s/catalog/type_changelog.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_shard.h"
@@ -121,13 +121,14 @@ void ShardingTestFixture::setUp() {
auto uniqueDistLockManager = stdx::make_unique<DistLockManagerMock>();
_distLockManager = uniqueDistLockManager.get();
- std::unique_ptr<CatalogManagerReplicaSet> cm(stdx::make_unique<CatalogManagerReplicaSet>(
- std::move(uniqueDistLockManager), std::move(specialExec)));
- _catalogManagerRS = cm.get();
- cm->startup();
+ std::unique_ptr<ShardingCatalogClientImpl> catalogClient(
+ stdx::make_unique<ShardingCatalogClientImpl>(std::move(uniqueDistLockManager),
+ std::move(specialExec)));
+ _catalogClient = catalogClient.get();
+ catalogClient->startup();
ConnectionString configCS = ConnectionString::forReplicaSet(
- "CatalogManagerReplSetTest", {HostAndPort{"TestHost1"}, HostAndPort{"TestHost2"}});
+ "configRS", {HostAndPort{"TestHost1"}, HostAndPort{"TestHost2"}});
auto targeterFactory(stdx::make_unique<RemoteCommandTargeterFactoryMock>());
auto targeterFactoryPtr = targeterFactory.get();
@@ -169,7 +170,7 @@ void ShardingTestFixture::setUp() {
// For now initialize the global grid object. All sharding objects will be accessible from there
// until we get rid of it.
- grid.init(std::move(cm),
+ grid.init(std::move(catalogClient),
stdx::make_unique<CatalogCache>(),
std::move(shardRegistry),
stdx::make_unique<ClusterCursorManager>(_service->getPreciseClockSource()),
@@ -180,7 +181,7 @@ void ShardingTestFixture::setUp() {
void ShardingTestFixture::tearDown() {
grid.getExecutorPool()->shutdownAndJoin();
- grid.catalogManager(_opCtx.get())->shutDown(_opCtx.get());
+ grid.catalogClient(_opCtx.get())->shutDown(_opCtx.get());
grid.clearForUnitTests();
_opCtx.reset();
@@ -195,12 +196,12 @@ void ShardingTestFixture::shutdownExecutor() {
}
}
-CatalogManager* ShardingTestFixture::catalogManager() const {
- return grid.catalogManager(_opCtx.get());
+ShardingCatalogClient* ShardingTestFixture::catalogClient() const {
+ return grid.catalogClient(_opCtx.get());
}
-CatalogManagerReplicaSet* ShardingTestFixture::getCatalogManagerReplicaSet() const {
- return _catalogManagerRS;
+ShardingCatalogClientImpl* ShardingTestFixture::getCatalogClient() const {
+ return _catalogClient;
}
ShardRegistry* ShardingTestFixture::shardRegistry() const {
diff --git a/src/mongo/s/sharding_test_fixture.h b/src/mongo/s/sharding_test_fixture.h
index 0ed720de482..f1129c06845 100644
--- a/src/mongo/s/sharding_test_fixture.h
+++ b/src/mongo/s/sharding_test_fixture.h
@@ -39,8 +39,8 @@ namespace mongo {
class BSONObj;
class CatalogCache;
-class CatalogManager;
-class CatalogManagerReplicaSet;
+class ShardingCatalogClient;
+class ShardingCatalogClientImpl;
struct ChunkVersion;
class CollectionType;
class DistLockManagerMock;
@@ -59,7 +59,8 @@ class TaskExecutor;
} // namespace executor
/**
- * Sets up the mocked out objects for testing the replica-set backed catalog manager.
+ * Sets up the mocked out objects for testing the replica-set backed catalog manager and catalog
+ * client.
*/
class ShardingTestFixture : public mongo::unittest::Test {
public:
@@ -75,12 +76,12 @@ protected:
return _networkTestEnv->launchAsync(std::forward<Lambda>(func));
}
- CatalogManager* catalogManager() const;
+ ShardingCatalogClient* catalogClient() const;
/**
- * Prefer catalogManager() method over this as much as possible.
+ * Prefer the catalogClient() method over this one whenever possible.
*/
- CatalogManagerReplicaSet* getCatalogManagerReplicaSet() const;
+ ShardingCatalogClientImpl* getCatalogClient() const;
ShardRegistry* shardRegistry() const;
@@ -213,7 +214,7 @@ private:
std::unique_ptr<executor::NetworkTestEnv> _networkTestEnv;
std::unique_ptr<executor::NetworkTestEnv> _addShardNetworkTestEnv;
DistLockManagerMock* _distLockManager = nullptr;
- CatalogManagerReplicaSet* _catalogManagerRS = nullptr;
+ ShardingCatalogClientImpl* _catalogClient = nullptr;
};
} // namespace mongo
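
Downstream tests see only the renamed accessors. A minimal hypothetical test against the fixture, assuming the standard mongo unittest macros; the test names are illustrative:

// Hypothetical test, not part of this commit: exercises the fixture's renamed
// accessors through the usual mongo unittest harness.
#include "mongo/s/sharding_test_fixture.h"
#include "mongo/unittest/unittest.h"

namespace mongo {
namespace {

class CatalogClientAccessorTest : public ShardingTestFixture {};

TEST_F(CatalogClientAccessorTest, AccessorsAgree) {
    // catalogClient() returns the interface type; getCatalogClient() exposes
    // the concrete ShardingCatalogClientImpl and should be used sparingly.
    ASSERT(catalogClient() != nullptr);
    ASSERT(catalogClient() == getCatalogClient());
}

}  // namespace
}  // namespace mongo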
diff --git a/src/mongo/s/sharding_uptime_reporter.cpp b/src/mongo/s/sharding_uptime_reporter.cpp
index e73ebfe64cf..8ee3cca9472 100644
--- a/src/mongo/s/sharding_uptime_reporter.cpp
+++ b/src/mongo/s/sharding_uptime_reporter.cpp
@@ -34,7 +34,7 @@
#include "mongo/db/client.h"
#include "mongo/db/server_options.h"
-#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_mongos.h"
#include "mongo/s/grid.h"
#include "mongo/util/exit.h"
@@ -87,7 +87,7 @@ void ShardingUptimeReporter::reportStatus(OperationContext* txn, bool isBalancer
mType.setMongoVersion(versionString);
try {
- Grid::get(txn)->catalogManager(txn)->updateConfigDocument(
+ Grid::get(txn)->catalogClient(txn)->updateConfigDocument(
txn,
MongosType::ConfigNS,
BSON(MongosType::name(getInstanceId())),