diff options
author | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2017-07-07 13:42:56 -0400 |
---|---|---|
committer | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2017-07-17 09:36:14 -0400 |
commit | 24e418966985b9bbaaa497f92dfbc69a2420e649 (patch) | |
tree | 044847a0caf5bb477b81f8e4dd9d42781aae36c9 /src/mongo/db | |
parent | 66d65846c739326663428aab92eda5c4a8fbad47 (diff) | |
download | mongo-24e418966985b9bbaaa497f92dfbc69a2420e649.tar.gz |
SERVER-30053 Get rid of the ShardingCatalogManager interface
As part of this change, it also moves ShardingCatalogManager to be a
decoration on ServiceContext and decouples the dependency between the
'connPoolStats' command and ShardingCatalogManager.
Diffstat (limited to 'src/mongo/db')
20 files changed, 66 insertions, 102 deletions
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript index 864c8ead925..2588c37deb2 100644 --- a/src/mongo/db/SConscript +++ b/src/mongo/db/SConscript @@ -779,10 +779,11 @@ env.Library( ], LIBDEPS=[ "$BUILD_DIR/mongo/db/bson/dotted_path_support", - '$BUILD_DIR/mongo/db/logical_time_metadata_hook', + "$BUILD_DIR/mongo/db/logical_time_metadata_hook", + "$BUILD_DIR/mongo/db/storage/mmap_v1/file_allocator", + "$BUILD_DIR/mongo/db/ttl_collection_cache", "$BUILD_DIR/mongo/executor/network_interface_factory", "$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_impl", - "$BUILD_DIR/mongo/s/catalog/sharding_catalog_manager_impl", "$BUILD_DIR/mongo/s/client/sharding_connection_hook", "$BUILD_DIR/mongo/s/coreshard", "$BUILD_DIR/mongo/s/shard_id", @@ -790,9 +791,7 @@ env.Library( "$BUILD_DIR/mongo/util/clock_sources", "$BUILD_DIR/mongo/util/elapsed_tracker", "$BUILD_DIR/mongo/util/net/network", - "$BUILD_DIR/mongo/db/storage/mmap_v1/file_allocator", "$BUILD_DIR/third_party/shim_snappy", - '$BUILD_DIR/mongo/db/ttl_collection_cache', "auth/authmongod", "authz_manager_external_state_factory_d", "background", @@ -802,24 +801,24 @@ env.Library( "catalog/index_key_validate", "clientcursor", "cloner", - "commands/killcursors_common", - "commands/dcommands", "collection_index_usage_tracker", + "commands/dcommands", + "commands/killcursors_common", "common", "concurrency/lock_manager", "concurrency/write_conflict_exception", "curop", "curop_metrics", + "db_raii", "dbdirectclient", "dbhelpers", - "db_raii", "diag_log", "exec/exec", "exec/working_set", - "fts/ftsmongod", "ftdc/ftdc_mongod", - "index/index_descriptor", + "fts/ftsmongod", "index/index_access_methods", + "index/index_descriptor", "index_d", "introspect", "matcher/expressions_mongod_only", @@ -832,8 +831,8 @@ env.Library( "query/query", "repair_database", "repl/bgsync", - "repl/oplog_buffer_collection", "repl/oplog_buffer_blocking_queue", + "repl/oplog_buffer_collection", "repl/oplog_buffer_proxy", 
"repl/repl_coordinator_global", "repl/repl_coordinator_impl", @@ -845,8 +844,8 @@ env.Library( "repl/sync_tail", "repl/topology_coordinator_impl", "rw_concern_d", - "s/commands", "s/collection_metadata", + "s/commands", "s/sharding", "service_context_d", "startup_warnings_mongod", diff --git a/src/mongo/db/commands/conn_pool_stats.cpp b/src/mongo/db/commands/conn_pool_stats.cpp index 119712ce811..6eda96aa727 100644 --- a/src/mongo/db/commands/conn_pool_stats.cpp +++ b/src/mongo/db/commands/conn_pool_stats.cpp @@ -37,13 +37,9 @@ #include "mongo/db/commands.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_global.h" -#include "mongo/db/s/sharding_state.h" -#include "mongo/db/server_options.h" #include "mongo/executor/connection_pool_stats.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/task_executor_pool.h" -#include "mongo/s/catalog/sharding_catalog_manager.h" -#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" namespace mongo { @@ -84,18 +80,24 @@ public: result.appendNumber("numClientConnections", DBClientConnection::getNumConnections()); result.appendNumber("numAScopedConnections", AScopedConnection::getNumConnections()); - // Replication connections, if we have them. - auto const replCoord = repl::ReplicationCoordinator::get(opCtx); - if (replCoord && replCoord->isReplEnabled()) { - replCoord->appendConnectionStats(&stats); + // Replication connections, if we have any + { + auto const replCoord = repl::ReplicationCoordinator::get(opCtx); + if (replCoord && replCoord->isReplEnabled()) { + replCoord->appendConnectionStats(&stats); + } } - // Sharding connections, if we have any. 
- auto const grid = Grid::get(opCtx); - if (grid->getExecutorPool()) { - grid->getExecutorPool()->appendConnectionStats(&stats); - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { - grid->catalogManager()->appendConnectionStats(&stats); + // Sharding connections, if we have any + { + auto const grid = Grid::get(opCtx); + if (grid->getExecutorPool()) { + grid->getExecutorPool()->appendConnectionStats(&stats); + } + + auto const customConnPoolStatsFn = grid->getCustomConnectionPoolStatsFn(); + if (customConnPoolStatsFn) { + customConnPoolStatsFn(&stats); } } diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp index 5201792dd3a..b6710f91c58 100644 --- a/src/mongo/db/db.cpp +++ b/src/mongo/db/db.cpp @@ -118,8 +118,11 @@ #include "mongo/db/wire_version.h" #include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_factory.h" +#include "mongo/executor/network_interface_thread_pool.h" +#include "mongo/executor/thread_pool_task_executor.h" #include "mongo/platform/process_id.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include "mongo/s/catalog/sharding_catalog_manager.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/sharding_initialization.h" @@ -672,7 +675,12 @@ ExitCode _initAndListen(int listenPort) { initializeGlobalShardingStateForMongod(startupOpCtx.get(), ConnectionString::forLocal(), kDistLockProcessIdForConfigServer)); + Balancer::create(startupOpCtx->getServiceContext()); + + ShardingCatalogManager::create( + startupOpCtx->getServiceContext(), + makeShardingTaskExecutor(executor::makeNetworkInterface("AddShard-TaskExecutor"))); } repl::getGlobalReplicationCoordinator()->startup(startupOpCtx.get()); diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript index 6e4fb1d466b..1828febd807 100644 --- a/src/mongo/db/repl/SConscript +++ b/src/mongo/db/repl/SConscript @@ -1476,6 +1476,7 @@ env.Library( '$BUILD_DIR/mongo/db/sessions', 
'$BUILD_DIR/mongo/db/stats/counters', '$BUILD_DIR/mongo/rpc/client_metadata', + '$BUILD_DIR/mongo/s/catalog/sharding_catalog_manager', 'bgsync', 'drop_pending_collection_reaper', 'oplog_buffer_collection', diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp index bff3aa40f5e..57f19fe9751 100644 --- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp +++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp @@ -743,7 +743,7 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook fassertStatusOK(40107, status); if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { - status = Grid::get(opCtx)->catalogManager()->initializeConfigDatabaseIfNeeded(opCtx); + status = ShardingCatalogManager::get(opCtx)->initializeConfigDatabaseIfNeeded(opCtx); if (!status.isOK() && status != ErrorCodes::AlreadyInitialized) { if (ErrorCodes::isShutdownError(status.code())) { // Don't fassert if we're mid-shutdown, let the shutdown happen gracefully. 
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript index 32814079adc..0fb3ed0112b 100644 --- a/src/mongo/db/s/SConscript +++ b/src/mongo/db/s/SConscript @@ -113,12 +113,12 @@ env.Library( 'shard_metadata_util', 'sharding_task_executor', 'type_shard_identity', + #'$BUILD_DIR/mongo/db/catalog/catalog', # CYCLE + #'$BUILD_DIR/mongo/db/db_raii', # CYCLE #'$BUILD_DIR/mongo/db/dbhelpers', # CYCLE #'$BUILD_DIR/mongo/db/ops/write_ops', # CYCLE - #'$BUILD_DIR/mongo/s/catalog/sharding_catalog_manager_impl', # CYCLE #'$BUILD_DIR/mongo/db/query/query', # CYCLE - #'$BUILD_DIR/mongo/db/catalog/catalog', # CYCLE - #'$BUILD_DIR/mongo/db/db_raii', # CYCLE + #'$BUILD_DIR/mongo/s/catalog/sharding_catalog_manager', # CYCLE ], LIBDEPS_TAGS=[ # TODO(ADAM, 2017-01-06): See `CYCLE` tags above @@ -204,6 +204,7 @@ env.Library( '$BUILD_DIR/mongo/db/dbhelpers', '$BUILD_DIR/mongo/db/index_d', '$BUILD_DIR/mongo/db/repl/repl_coordinator_global', + '$BUILD_DIR/mongo/s/catalog/sharding_catalog_manager', '$BUILD_DIR/mongo/s/commands/shared_cluster_commands', '$BUILD_DIR/mongo/s/sharding_request_types', 'balancer', @@ -255,8 +256,8 @@ env.CppUnitTest( source=[ 'collection_metadata_test.cpp', 'collection_range_deleter_test.cpp', - 'metadata_manager_test.cpp', 'collection_sharding_state_test.cpp', + 'metadata_manager_test.cpp', ], LIBDEPS=[ '$BUILD_DIR/mongo/client/remote_command_targeter_mock', @@ -264,8 +265,6 @@ env.CppUnitTest( '$BUILD_DIR/mongo/db/serveronly', '$BUILD_DIR/mongo/executor/network_test_env', '$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture', - '$BUILD_DIR/mongo/s/catalog/sharding_catalog_mock', - '$BUILD_DIR/mongo/s/coreshard', '$BUILD_DIR/mongo/s/shard_server_test_fixture', ], ) diff --git a/src/mongo/db/s/collection_range_deleter_test.cpp b/src/mongo/db/s/collection_range_deleter_test.cpp index 90c91ac1418..14528c4267e 100644 --- a/src/mongo/db/s/collection_range_deleter_test.cpp +++ b/src/mongo/db/s/collection_range_deleter_test.cpp @@ -44,9 +44,6 
@@ #include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/s/catalog/dist_lock_catalog_impl.h" -#include "mongo/s/catalog/dist_lock_manager_mock.h" -#include "mongo/s/catalog/sharding_catalog_client_mock.h" #include "mongo/s/chunk_version.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/sharding_mongod_test_fixture.h" @@ -68,13 +65,15 @@ public: using Deletion = CollectionRangeDeleter::Deletion; protected: - auto next(CollectionRangeDeleter& rangeDeleter, int maxToDelete) -> boost::optional<Date_t> { + boost::optional<Date_t> next(CollectionRangeDeleter& rangeDeleter, int maxToDelete) { return CollectionRangeDeleter::cleanUpNextRange( operationContext(), kNss, epoch(), maxToDelete, &rangeDeleter); } + std::shared_ptr<RemoteCommandTargeterMock> configTargeter() { return RemoteCommandTargeterMock::get(shardRegistry()->getConfigShard()->getTargeter()); } + OID const& epoch() { return _epoch; } @@ -83,21 +82,6 @@ private: void setUp() override; void tearDown() override; - std::unique_ptr<DistLockCatalog> makeDistLockCatalog(ShardRegistry* shardRegistry) override { - invariant(shardRegistry); - return stdx::make_unique<DistLockCatalogImpl>(shardRegistry); - } - - std::unique_ptr<DistLockManager> makeDistLockManager( - std::unique_ptr<DistLockCatalog> distLockCatalog) override { - return stdx::make_unique<DistLockManagerMock>(std::move(distLockCatalog)); - } - - std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient( - std::unique_ptr<DistLockManager> distLockManager) override { - return stdx::make_unique<ShardingCatalogClientMock>(std::move(distLockManager)); - } - OID _epoch; }; diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp index ee9848b08b9..c7f410eeda4 100644 --- a/src/mongo/db/s/collection_sharding_state.cpp +++ b/src/mongo/db/s/collection_sharding_state.cpp @@ -351,8 +351,7 @@ void 
CollectionShardingState::onDeleteOp(OperationContext* opCtx, uasserted(40302, "cannot delete config.version document while in --configsvr mode"); } else { // Throw out any cached information related to the cluster ID. - Grid::get(opCtx) - ->catalogManager() + ShardingCatalogManager::get(opCtx) ->discardCachedConfigDatabaseInitializationState(); ClusterIdentityLoader::get(opCtx)->discardCachedClusterId(); } @@ -389,8 +388,7 @@ void CollectionShardingState::onDropCollection(OperationContext* opCtx, uasserted(40303, "cannot drop config.version document while in --configsvr mode"); } else { // Throw out any cached information related to the cluster ID. - Grid::get(opCtx) - ->catalogManager() + ShardingCatalogManager::get(opCtx) ->discardCachedConfigDatabaseInitializationState(); ClusterIdentityLoader::get(opCtx)->discardCachedClusterId(); } diff --git a/src/mongo/db/s/config/configsvr_add_shard_command.cpp b/src/mongo/db/s/config/configsvr_add_shard_command.cpp index e568ccb0a89..2399033d97d 100644 --- a/src/mongo/db/s/config/configsvr_add_shard_command.cpp +++ b/src/mongo/db/s/config/configsvr_add_shard_command.cpp @@ -117,7 +117,7 @@ public: parsedRequest.hasMaxSize() ? parsedRequest.getMaxSize() : kMaxSizeMBDefault); - StatusWith<string> addShardResult = Grid::get(opCtx)->catalogManager()->addShard( + StatusWith<string> addShardResult = ShardingCatalogManager::get(opCtx)->addShard( opCtx, parsedRequest.hasName() ? 
&parsedRequest.getName() : nullptr, parsedRequest.getConnString(), diff --git a/src/mongo/db/s/config/configsvr_add_shard_to_zone_command.cpp b/src/mongo/db/s/config/configsvr_add_shard_to_zone_command.cpp index 50d0779737f..910401f2825 100644 --- a/src/mongo/db/s/config/configsvr_add_shard_to_zone_command.cpp +++ b/src/mongo/db/s/config/configsvr_add_shard_to_zone_command.cpp @@ -98,7 +98,7 @@ public: auto parsedRequest = uassertStatusOK(AddShardToZoneRequest::parseFromConfigCommand(cmdObj)); - uassertStatusOK(Grid::get(opCtx)->catalogManager()->addShardToZone( + uassertStatusOK(ShardingCatalogManager::get(opCtx)->addShardToZone( opCtx, parsedRequest.getShardName(), parsedRequest.getZoneName())); return true; diff --git a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp index 4581f2d9b9e..7be915ca4f4 100644 --- a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp +++ b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp @@ -127,7 +127,7 @@ public: auto commitRequest = uassertStatusOK(CommitChunkMigrationRequest::createFromCommand(nss, cmdObj)); - StatusWith<BSONObj> response = Grid::get(opCtx)->catalogManager()->commitChunkMigration( + StatusWith<BSONObj> response = ShardingCatalogManager::get(opCtx)->commitChunkMigration( opCtx, nss, commitRequest.getMigratedChunk(), diff --git a/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp b/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp index 5dbc9401d58..07d4af626f3 100644 --- a/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp +++ b/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp @@ -110,7 +110,7 @@ public: auto parsedRequest = uassertStatusOK(MergeChunkRequest::parseFromConfigCommand(cmdObj)); Status mergeChunkResult = - Grid::get(opCtx)->catalogManager()->commitChunkMerge(opCtx, + ShardingCatalogManager::get(opCtx)->commitChunkMerge(opCtx, parsedRequest.getNamespace(), 
parsedRequest.getEpoch(), parsedRequest.getChunkBoundaries(), diff --git a/src/mongo/db/s/config/configsvr_remove_shard_from_zone_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_from_zone_command.cpp index 7ca7c14ebbd..f40b1059734 100644 --- a/src/mongo/db/s/config/configsvr_remove_shard_from_zone_command.cpp +++ b/src/mongo/db/s/config/configsvr_remove_shard_from_zone_command.cpp @@ -99,7 +99,7 @@ public: auto parsedRequest = uassertStatusOK(RemoveShardFromZoneRequest::parseFromConfigCommand(cmdObj)); - uassertStatusOK(Grid::get(opCtx)->catalogManager()->removeShardFromZone( + uassertStatusOK(ShardingCatalogManager::get(opCtx)->removeShardFromZone( opCtx, parsedRequest.getShardName(), parsedRequest.getZoneName())); return true; diff --git a/src/mongo/db/s/config/configsvr_set_feature_compatibility_version_command.cpp b/src/mongo/db/s/config/configsvr_set_feature_compatibility_version_command.cpp index 233f5678bca..a303e6df49d 100644 --- a/src/mongo/db/s/config/configsvr_set_feature_compatibility_version_command.cpp +++ b/src/mongo/db/s/config/configsvr_set_feature_compatibility_version_command.cpp @@ -110,7 +110,7 @@ public: } // Forward to all shards. - uassertStatusOK(Grid::get(opCtx)->catalogManager()->setFeatureCompatibilityVersionOnShards( + uassertStatusOK(ShardingCatalogManager::get(opCtx)->setFeatureCompatibilityVersionOnShards( opCtx, version)); // On success, set featureCompatibilityVersion on self. 
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp index 44eb62c753b..c30f77f71b0 100644 --- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp +++ b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp @@ -174,7 +174,7 @@ public: IDLParserErrorContext("ConfigsvrShardCollection"), cmdObj); auto const catalogClient = Grid::get(opCtx)->catalogClient(); - auto const catalogManager = Grid::get(opCtx)->catalogManager(); + auto const catalogManager = ShardingCatalogManager::get(opCtx); auto const shardRegistry = Grid::get(opCtx)->shardRegistry(); auto const catalogCache = Grid::get(opCtx)->catalogCache(); diff --git a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp index 10cc59c6d4c..23e243384dd 100644 --- a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp +++ b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp @@ -108,7 +108,7 @@ public: auto parsedRequest = uassertStatusOK(SplitChunkRequest::parseFromConfigCommand(cmdObj)); Status splitChunkResult = - Grid::get(opCtx)->catalogManager()->commitChunkSplit(opCtx, + ShardingCatalogManager::get(opCtx)->commitChunkSplit(opCtx, parsedRequest.getNamespace(), parsedRequest.getEpoch(), parsedRequest.getChunkRange(), diff --git a/src/mongo/db/s/config/configsvr_update_zone_key_range_command.cpp b/src/mongo/db/s/config/configsvr_update_zone_key_range_command.cpp index f41ce235507..88c6465b5a2 100644 --- a/src/mongo/db/s/config/configsvr_update_zone_key_range_command.cpp +++ b/src/mongo/db/s/config/configsvr_update_zone_key_range_command.cpp @@ -107,10 +107,10 @@ public: } if (parsedRequest.isRemove()) { - uassertStatusOK(Grid::get(opCtx)->catalogManager()->removeKeyRangeFromZone( + uassertStatusOK(ShardingCatalogManager::get(opCtx)->removeKeyRangeFromZone( opCtx, parsedRequest.getNS(), parsedRequest.getRange())); } else { - 
uassertStatusOK(Grid::get(opCtx)->catalogManager()->assignKeyRangeToZone( + uassertStatusOK(ShardingCatalogManager::get(opCtx)->assignKeyRangeToZone( opCtx, parsedRequest.getNS(), parsedRequest.getRange(), zoneName)); } diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp index ee35fe24cce..16cb8a5e122 100644 --- a/src/mongo/db/s/metadata_manager_test.cpp +++ b/src/mongo/db/s/metadata_manager_test.cpp @@ -30,6 +30,8 @@ #include "mongo/db/s/metadata_manager.h" +#include <boost/optional.hpp> + #include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/remote_command_targeter_mock.h" #include "mongo/db/client.h" @@ -46,9 +48,6 @@ #include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/executor/task_executor.h" -#include "mongo/s/catalog/dist_lock_catalog_impl.h" -#include "mongo/s/catalog/dist_lock_manager_mock.h" -#include "mongo/s/catalog/sharding_catalog_client_mock.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/sharding_mongod_test_fixture.h" @@ -57,9 +56,6 @@ #include "mongo/unittest/unittest.h" #include "mongo/util/assert_util.h" - -#include <boost/optional.hpp> - namespace mongo { namespace { @@ -91,21 +87,6 @@ protected: _manager = std::make_shared<MetadataManager>(getServiceContext(), kNss, executor()); } - std::unique_ptr<DistLockCatalog> makeDistLockCatalog(ShardRegistry* shardRegistry) override { - invariant(shardRegistry); - return stdx::make_unique<DistLockCatalogImpl>(shardRegistry); - } - - std::unique_ptr<DistLockManager> makeDistLockManager( - std::unique_ptr<DistLockCatalog> distLockCatalog) override { - return stdx::make_unique<DistLockManagerMock>(std::move(distLockCatalog)); - } - - std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient( - std::unique_ptr<DistLockManager> distLockManager) override { - return stdx::make_unique<ShardingCatalogClientMock>(std::move(distLockManager)); - } 
- static std::unique_ptr<CollectionMetadata> makeEmptyMetadata() { const OID epoch = OID::gen(); diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp index 46dd675fc13..e00d8cbf5d7 100644 --- a/src/mongo/db/s/sharding_initialization_mongod.cpp +++ b/src/mongo/db/s/sharding_initialization_mongod.cpp @@ -43,7 +43,7 @@ #include "mongo/db/server_options.h" #include "mongo/executor/task_executor.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" -#include "mongo/s/catalog/sharding_catalog_manager_impl.h" +#include "mongo/s/catalog/sharding_catalog_manager.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/client/shard_factory.h" #include "mongo/s/client/shard_local.h" @@ -103,15 +103,7 @@ Status initializeGlobalShardingStateForMongod(OperationContext* opCtx, stdx::make_unique<rpc::LogicalTimeMetadataHook>(opCtx->getServiceContext())); hookList->addHook(stdx::make_unique<rpc::ShardingEgressMetadataHookForMongod>()); return hookList; - }, - [](ShardingCatalogClient* catalogClient, std::unique_ptr<executor::TaskExecutor> executor) - -> std::unique_ptr<ShardingCatalogManager> { - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { - return stdx::make_unique<ShardingCatalogManagerImpl>(std::move(executor)); - } else { - return nullptr; // Only config servers get a real ShardingCatalogManager - } - }); + }); } } // namespace mongo diff --git a/src/mongo/db/s/sharding_state_recovery.cpp b/src/mongo/db/s/sharding_state_recovery.cpp index 4e528d26ef3..090b1ba30ed 100644 --- a/src/mongo/db/s/sharding_state_recovery.cpp +++ b/src/mongo/db/s/sharding_state_recovery.cpp @@ -288,12 +288,12 @@ Status ShardingStateRecovery::recover(OperationContext* opCtx) { "to retrieve the most recent opTime."; // Need to fetch the latest uptime from the config server, so do a logging write - Status status = - grid.catalogClient()->logChange(opCtx, - "Sharding minOpTime recovery", - 
NamespaceString::kServerConfigurationNamespace.ns(), - recoveryDocBSON, - ShardingCatalogClient::kMajorityWriteConcern); + Status status = Grid::get(opCtx)->catalogClient()->logChange( + opCtx, + "Sharding minOpTime recovery", + NamespaceString::kServerConfigurationNamespace.ns(), + recoveryDocBSON, + ShardingCatalogClient::kMajorityWriteConcern); if (!status.isOK()) return status; |