commit    fc300ed58b943b93a3164103e4a767e3f81d1c3b (patch)
author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-07-12 15:59:59 -0400
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-07-13 09:19:05 -0400
tree      2ff6a0e61b4b1c40a93e83dd063f6ee3ea9ec8ca /src
parent    98bb1900c5aa0c8f7ede0b9df201f9cd9b7c9da5 (diff)
SERVER-30053 Remove 'opCtx' parameter from Grid::catalogClient()
This method is now just a simple getter and no longer requires an
operation context.
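For illustration, here is a minimal sketch of what call sites look like after this change; exampleCallSite is a hypothetical caller written for this note, not code from the commit, and it assumes the sharding headers from this tree:

#include "mongo/s/grid.h"

namespace mongo {

// Hypothetical caller (not part of this commit), showing the new accessor shape.
void exampleCallSite(OperationContext* opCtx) {
    // Before: Grid::get(opCtx)->catalogClient(opCtx)
    // After:  catalogClient() is a plain getter; the opCtx is passed only to
    //         the individual catalog operations that actually need it.
    auto* catalogClient = Grid::get(opCtx)->catalogClient();
    auto dbStatus = catalogClient->getDatabase(opCtx, "test");
    if (!dbStatus.isOK()) {
        // Handle the lookup failure.
    }
}

}  // namespace mongo

As a related cleanup, the diff below also changes ShardingCatalogClient::startup() from returning Status to void, since both implementations always returned Status::OK().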
Diffstat (limited to 'src')
52 files changed, 170 insertions, 182 deletions
diff --git a/src/mongo/db/auth/authz_manager_external_state_s.cpp b/src/mongo/db/auth/authz_manager_external_state_s.cpp
index b999cc002ef..d941b3f0557 100644
--- a/src/mongo/db/auth/authz_manager_external_state_s.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_s.cpp
@@ -100,7 +100,7 @@ Status AuthzManagerExternalStateMongos::getStoredAuthorizationVersion(OperationC
     // that runs this command
     BSONObj getParameterCmd = BSON("getParameter" << 1 << authSchemaVersionServerParameter << 1);
     BSONObjBuilder builder;
-    const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+    const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
         opCtx, "admin", getParameterCmd, &builder);
     BSONObj cmdResult = builder.obj();
     if (!ok) {
@@ -130,7 +130,7 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* opC
                                      << "showCredentials"
                                      << true);
     BSONObjBuilder builder;
-    const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+    const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
         opCtx, "admin", usersInfoCmd, &builder);
     BSONObj cmdResult = builder.obj();
     if (!ok) {
@@ -169,7 +169,7 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* opC
                                          << "asUserFragment");
         BSONObjBuilder cmdResultBuilder;
-        const bool cmdOk = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+        const bool cmdOk = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
             opCtx, "admin", rolesInfoCmd, &cmdResultBuilder);
         BSONObj cmdResult = cmdResultBuilder.obj();
         if (!cmdOk || !cmdResult["userFragment"].ok()) {
@@ -217,7 +217,7 @@ Status AuthzManagerExternalStateMongos::getRoleDescription(OperationContext* opC
     addShowPrivilegesToBuilder(&rolesInfoCmd, showPrivileges);
     BSONObjBuilder builder;
-    const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+    const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
         opCtx, "admin", rolesInfoCmd.obj(), &builder);
     BSONObj cmdResult = builder.obj();
     if (!ok) {
@@ -257,7 +257,7 @@ Status AuthzManagerExternalStateMongos::getRolesDescription(OperationContext* op
     addShowPrivilegesToBuilder(&rolesInfoCmd, showPrivileges);
     BSONObjBuilder builder;
-    const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+    const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
         opCtx, "admin", rolesInfoCmd.obj(), &builder);
     BSONObj cmdResult = builder.obj();
     if (!ok) {
@@ -283,7 +283,7 @@ Status AuthzManagerExternalStateMongos::getRoleDescriptionsForDB(OperationContex
     addShowPrivilegesToBuilder(&rolesInfoCmd, showPrivileges);
     BSONObjBuilder builder;
-    const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+    const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
         opCtx, dbname, rolesInfoCmd.obj(), &builder);
     BSONObj cmdResult = builder.obj();
     if (!ok) {
@@ -300,7 +300,7 @@ Status AuthzManagerExternalStateMongos::getRoleDescriptionsForDB(OperationContex
 bool AuthzManagerExternalStateMongos::hasAnyPrivilegeDocuments(OperationContext* opCtx) {
     BSONObj usersInfoCmd = BSON("usersInfo" << 1);
     BSONObjBuilder userBuilder;
-    bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+    bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
         opCtx, "admin", usersInfoCmd, &userBuilder);
     if (!ok) {
         // If we were unable to complete the query,
@@ -318,7 +318,7 @@ bool AuthzManagerExternalStateMongos::hasAnyPrivilegeDocuments(OperationContext*
     BSONObj rolesInfoCmd = BSON("rolesInfo" << 1);
     BSONObjBuilder roleBuilder;
-    ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+    ok = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
         opCtx, "admin", rolesInfoCmd, &roleBuilder);
     if (!ok) {
         return true;
diff --git a/src/mongo/db/auth/user_cache_invalidator_job.cpp b/src/mongo/db/auth/user_cache_invalidator_job.cpp
index a86cad229b4..5c9b7c30d8c 100644
--- a/src/mongo/db/auth/user_cache_invalidator_job.cpp
+++ b/src/mongo/db/auth/user_cache_invalidator_job.cpp
@@ -94,7 +94,7 @@ public:
 StatusWith<OID> getCurrentCacheGeneration(OperationContext* opCtx) {
     try {
         BSONObjBuilder result;
-        const bool ok = grid.catalogClient(opCtx)->runUserManagementReadCommand(
+        const bool ok = grid.catalogClient()->runUserManagementReadCommand(
             opCtx, "admin", BSON("_getUserCacheGeneration" << 1), &result);
         if (!ok) {
             return getStatusFromCommandResult(result.obj());
diff --git a/src/mongo/db/commands/conn_pool_stats.cpp b/src/mongo/db/commands/conn_pool_stats.cpp
index 88a74cdbd30..0799ae12c48 100644
--- a/src/mongo/db/commands/conn_pool_stats.cpp
+++ b/src/mongo/db/commands/conn_pool_stats.cpp
@@ -47,6 +47,7 @@
 #include "mongo/s/grid.h"
 
 namespace mongo {
+namespace {
 
 class PoolStats final : public Command {
 public:
@@ -56,8 +57,11 @@ public:
         help << "stats about connections between servers in a replica set or sharded cluster.";
     }
 
+    bool slaveOk() const override {
+        return true;
+    }
 
-    virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
+    bool supportsWriteConcern(const BSONObj& cmd) const override {
         return false;
     }
 
@@ -70,9 +74,9 @@ public:
     }
 
     bool run(OperationContext* opCtx,
-             const std::string&,
-             const mongo::BSONObj&,
-             std::string&,
+             const std::string& db,
+             const mongo::BSONObj& cmdObj,
+             std::string& errmsg,
              mongo::BSONObjBuilder& result) override {
         executor::ConnectionPoolStats stats{};
@@ -82,14 +86,14 @@ public:
         result.appendNumber("numAScopedConnections", AScopedConnection::getNumConnections());
 
         // Replication connections, if we have them.
-        auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+        auto const replCoord = repl::ReplicationCoordinator::get(opCtx);
         if (replCoord && replCoord->isReplEnabled()) {
             replCoord->appendConnectionStats(&stats);
         }
 
         // Sharding connections, if we have any.
-        auto grid = Grid::get(opCtx);
-        if (grid->shardRegistry()) {
+        auto const grid = Grid::get(opCtx);
+        if (grid->getExecutorPool()) {
             grid->getExecutorPool()->appendConnectionStats(&stats);
             if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
                 grid->catalogManager()->appendConnectionStats(&stats);
@@ -107,10 +111,7 @@ public:
         return true;
     }
 
-    bool slaveOk() const override {
-        return true;
-    }
-
 } poolStatsCmd;
 
+}  // namespace
 }  // namespace mongo
diff --git a/src/mongo/db/keys_collection_cache_reader_and_updater_test.cpp b/src/mongo/db/keys_collection_cache_reader_and_updater_test.cpp
index 44ab43d95da..1a12ecc1a2d 100644
--- a/src/mongo/db/keys_collection_cache_reader_and_updater_test.cpp
+++ b/src/mongo/db/keys_collection_cache_reader_and_updater_test.cpp
@@ -69,7 +69,7 @@ protected:
 };
 
 TEST_F(CacheUpdaterTest, ShouldCreate2KeysFromEmpty) {
-    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
+    auto catalogClient = Grid::get(operationContext())->catalogClient();
     KeysCollectionCacheReaderAndUpdater updater("dummy", catalogClient, Seconds(5));
 
     const LogicalTime currentTime(LogicalTime(Timestamp(100, 2)));
@@ -103,7 +103,7 @@ TEST_F(CacheUpdaterTest, ShouldCreate2KeysFromEmpty) {
 }
 
 TEST_F(CacheUpdaterTest, ShouldPropagateWriteError) {
-    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
+    auto catalogClient = Grid::get(operationContext())->catalogClient();
     KeysCollectionCacheReaderAndUpdater updater("dummy", catalogClient, Seconds(5));
 
     const LogicalTime currentTime(LogicalTime(Timestamp(100, 2)));
@@ -116,7 +116,7 @@ TEST_F(CacheUpdaterTest, ShouldPropagateWriteError) {
 }
 
 TEST_F(CacheUpdaterTest, ShouldCreateAnotherKeyIfOnlyOneKeyExists) {
-    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
+    auto catalogClient = Grid::get(operationContext())->catalogClient();
     KeysCollectionCacheReaderAndUpdater updater("dummy", catalogClient, Seconds(5));
 
     LogicalClock::get(operationContext())
@@ -171,7 +171,7 @@ TEST_F(CacheUpdaterTest, ShouldCreateAnotherKeyIfOnlyOneKeyExists) {
 }
 
 TEST_F(CacheUpdaterTest, ShouldCreateAnotherKeyIfNoValidKeyAfterCurrent) {
-    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
+    auto catalogClient = Grid::get(operationContext())->catalogClient();
     KeysCollectionCacheReaderAndUpdater updater("dummy", catalogClient, Seconds(5));
 
     LogicalClock::get(operationContext())
@@ -263,7 +263,7 @@ TEST_F(CacheUpdaterTest, ShouldCreateAnotherKeyIfNoValidKeyAfterCurrent) {
 }
 
 TEST_F(CacheUpdaterTest, ShouldCreate2KeysIfAllKeysAreExpired) {
-    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
+    auto catalogClient = Grid::get(operationContext())->catalogClient();
     KeysCollectionCacheReaderAndUpdater updater("dummy", catalogClient, Seconds(5));
 
     LogicalClock::get(operationContext())
@@ -368,7 +368,7 @@ TEST_F(CacheUpdaterTest, ShouldCreate2KeysIfAllKeysAreExpired) {
 }
 
 TEST_F(CacheUpdaterTest, ShouldNotCreateNewKeyIfThereAre2UnexpiredKeys) {
-    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
+    auto catalogClient = Grid::get(operationContext())->catalogClient();
     KeysCollectionCacheReaderAndUpdater updater("dummy", catalogClient, Seconds(5));
 
     LogicalClock::get(operationContext())
@@ -435,7 +435,7 @@ TEST_F(CacheUpdaterTest, ShouldNotCreateNewKeyIfThereAre2UnexpiredKeys) {
 }
 
 TEST_F(CacheUpdaterTest, ShouldNotCreateKeysWithDisableKeyGenerationFailPoint) {
-    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
+    auto catalogClient = Grid::get(operationContext())->catalogClient();
     KeysCollectionCacheReaderAndUpdater updater("dummy", catalogClient, Seconds(5));
 
     const LogicalTime currentTime(LogicalTime(Timestamp(100, 0)));
@@ -457,7 +457,7 @@ TEST_F(CacheUpdaterTest, ShouldNotCreateNewKeysInFeatureCompatiblityVersion34) {
     serverGlobalParams.featureCompatibility.version.store(
         ServerGlobalParams::FeatureCompatibility::Version::k34);
 
-    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
+    auto catalogClient = Grid::get(operationContext())->catalogClient();
     KeysCollectionCacheReaderAndUpdater updater("dummy", catalogClient, Seconds(5));
 
     const LogicalTime currentTime(LogicalTime(Timestamp(100, 0)));
diff --git a/src/mongo/db/keys_collection_cache_reader_test.cpp b/src/mongo/db/keys_collection_cache_reader_test.cpp
index 013003a3c16..87100bca834 100644
--- a/src/mongo/db/keys_collection_cache_reader_test.cpp
+++ b/src/mongo/db/keys_collection_cache_reader_test.cpp
@@ -41,7 +41,7 @@ namespace mongo {
 using CacheReaderTest = ConfigServerTestFixture;
 
 TEST_F(CacheReaderTest, ErrorsIfCacheIsEmpty) {
-    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
+    auto catalogClient = Grid::get(operationContext())->catalogClient();
     KeysCollectionCacheReader reader("test", catalogClient);
 
     auto status = reader.getKey(LogicalTime(Timestamp(1, 0))).getStatus();
     ASSERT_EQ(ErrorCodes::KeyNotFound, status.code());
@@ -49,7 +49,7 @@ TEST_F(CacheReaderTest, ErrorsIfCacheIsEmpty) {
 }
 
 TEST_F(CacheReaderTest, RefreshErrorsIfCacheIsEmpty) {
-    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
+    auto catalogClient = Grid::get(operationContext())->catalogClient();
     KeysCollectionCacheReader reader("test", catalogClient);
 
     auto status = reader.refresh(operationContext()).getStatus();
     ASSERT_EQ(ErrorCodes::KeyNotFound, status.code());
@@ -57,7 +57,7 @@ TEST_F(CacheReaderTest, RefreshErrorsIfCacheIsEmpty) {
 }
 
 TEST_F(CacheReaderTest, GetKeyShouldReturnCorrectKeyAfterRefresh) {
-    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
+    auto catalogClient = Grid::get(operationContext())->catalogClient();
     KeysCollectionCacheReader reader("test", catalogClient);
 
     KeysCollectionDocument origKey1(
@@ -89,7 +89,7 @@ TEST_F(CacheReaderTest, GetKeyShouldReturnCorrectKeyAfterRefresh) {
 }
 
 TEST_F(CacheReaderTest, GetKeyShouldReturnErrorIfNoKeyIsValidForGivenTime) {
-    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
+    auto catalogClient = Grid::get(operationContext())->catalogClient();
     KeysCollectionCacheReader reader("test", catalogClient);
 
     KeysCollectionDocument origKey1(
@@ -113,7 +113,7 @@ TEST_F(CacheReaderTest, GetKeyShouldReturnErrorIfNoKeyIsValidForGivenTime) {
 }
 
 TEST_F(CacheReaderTest, GetKeyShouldReturnOldestKeyPossible) {
-    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
+    auto catalogClient = Grid::get(operationContext())->catalogClient();
     KeysCollectionCacheReader reader("test", catalogClient);
 
     KeysCollectionDocument origKey0(
@@ -155,7 +155,7 @@ TEST_F(CacheReaderTest, GetKeyShouldReturnOldestKeyPossible) {
 }
 
 TEST_F(CacheReaderTest, RefreshShouldNotGetKeysForOtherPurpose) {
-    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
+    auto catalogClient = Grid::get(operationContext())->catalogClient();
reader("test", catalogClient); KeysCollectionDocument origKey0( @@ -200,7 +200,7 @@ TEST_F(CacheReaderTest, RefreshShouldNotGetKeysForOtherPurpose) { } TEST_F(CacheReaderTest, RefreshCanIncrementallyGetNewKeys) { - auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext()); + auto catalogClient = Grid::get(operationContext())->catalogClient(); KeysCollectionCacheReader reader("test", catalogClient); KeysCollectionDocument origKey0( diff --git a/src/mongo/db/keys_collection_manager_test.cpp b/src/mongo/db/keys_collection_manager_test.cpp index a6e49a51986..e68d9285cc0 100644 --- a/src/mongo/db/keys_collection_manager_test.cpp +++ b/src/mongo/db/keys_collection_manager_test.cpp @@ -64,7 +64,7 @@ protected: auto clockSource = stdx::make_unique<ClockSourceMock>(); operationContext()->getServiceContext()->setFastClockSource(std::move(clockSource)); - auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext()); + auto catalogClient = Grid::get(operationContext())->catalogClient(); _keyManager = stdx::make_unique<KeysCollectionManager>("dummy", catalogClient, Seconds(1)); } diff --git a/src/mongo/db/logical_time_validator_test.cpp b/src/mongo/db/logical_time_validator_test.cpp index dcdc3e410e9..9a4c289d983 100644 --- a/src/mongo/db/logical_time_validator_test.cpp +++ b/src/mongo/db/logical_time_validator_test.cpp @@ -65,7 +65,7 @@ protected: auto clockSource = stdx::make_unique<ClockSourceMock>(); operationContext()->getServiceContext()->setFastClockSource(std::move(clockSource)); - auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext()); + auto catalogClient = Grid::get(operationContext())->catalogClient(); const LogicalTime currentTime(LogicalTime(Timestamp(1, 0))); LogicalClock::get(operationContext())->setClusterTimeFromTrustedSource(currentTime); diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp index eddb2a06ca0..e14878e88f4 100644 --- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp +++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp @@ -778,7 +778,7 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook } // Free any leftover locks from previous instantiations. - auto distLockManager = Grid::get(opCtx)->catalogClient(opCtx)->getDistLockManager(); + auto distLockManager = Grid::get(opCtx)->catalogClient()->getDistLockManager(); distLockManager->unlockAll(opCtx, distLockManager->getProcessID()); // If this is a config server node becoming a primary, start the balancer diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp index b8ab1945627..6815bff130e 100644 --- a/src/mongo/db/s/balancer/balancer.cpp +++ b/src/mongo/db/s/balancer/balancer.cpp @@ -388,7 +388,7 @@ void Balancer::_mainThread() { roundDetails.setSucceeded(static_cast<int>(candidateChunks.size()), _balancedLastTime); - shardingContext->catalogClient(opCtx.get()) + shardingContext->catalogClient() ->logAction(opCtx.get(), "balancer.round", "", roundDetails.toBSON()) .transitional_ignore(); } @@ -408,7 +408,7 @@ void Balancer::_mainThread() { // This round failed, tell the world! 
             roundDetails.setFailed(e.what());
 
-            shardingContext->catalogClient(opCtx.get())
+            shardingContext->catalogClient()
                 ->logAction(opCtx.get(), "balancer.round", "", roundDetails.toBSON())
                 .transitional_ignore();
 
@@ -651,7 +651,7 @@ void Balancer::_splitOrMarkJumbo(OperationContext* opCtx,
 
         const std::string chunkName = ChunkType::genID(nss.ns(), chunk->getMin());
 
-        auto status = Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
+        auto status = Grid::get(opCtx)->catalogClient()->updateConfigDocument(
             opCtx,
             ChunkType::ConfigNS,
             BSON(ChunkType::name(chunkName)),
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index 273dfbb6eb3..1fd2a2ef475 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -84,7 +84,7 @@ StatusWith<DistributionStatus> createCollectionDistributionStatus(
     }
 
     vector<TagsType> collectionTags;
-    Status tagsStatus = Grid::get(opCtx)->catalogClient(opCtx)->getTagsForCollection(
+    Status tagsStatus = Grid::get(opCtx)->catalogClient()->getTagsForCollection(
         opCtx, chunkMgr->getns(), &collectionTags);
     if (!tagsStatus.isOK()) {
         return {tagsStatus.code(),
@@ -193,8 +193,8 @@ StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToSpli
 
     vector<CollectionType> collections;
 
-    Status collsStatus = Grid::get(opCtx)->catalogClient(opCtx)->getCollections(
-        opCtx, nullptr, &collections, nullptr);
+    Status collsStatus =
+        Grid::get(opCtx)->catalogClient()->getCollections(opCtx, nullptr, &collections, nullptr);
     if (!collsStatus.isOK()) {
         return collsStatus;
     }
@@ -245,8 +245,8 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToMo
 
     vector<CollectionType> collections;
 
-    Status collsStatus = Grid::get(opCtx)->catalogClient(opCtx)->getCollections(
-        opCtx, nullptr, &collections, nullptr);
+    Status collsStatus =
+        Grid::get(opCtx)->catalogClient()->getCollections(opCtx, nullptr, &collections, nullptr);
     if (!collsStatus.isOK()) {
         return collsStatus;
     }
diff --git a/src/mongo/db/s/balancer/cluster_statistics_impl.cpp b/src/mongo/db/s/balancer/cluster_statistics_impl.cpp
index 0547cea1124..a0f949d219b 100644
--- a/src/mongo/db/s/balancer/cluster_statistics_impl.cpp
+++ b/src/mongo/db/s/balancer/cluster_statistics_impl.cpp
@@ -106,7 +106,7 @@ StatusWith<vector<ShardStatistics>> ClusterStatisticsImpl::getStats(OperationCon
     // db.serverStatus() (mem.mapped) to all shards.
     //
    // TODO: skip unresponsive shards and mark information as stale.
-    auto shardsStatus = Grid::get(opCtx)->catalogClient(opCtx)->getAllShards(
+    auto shardsStatus = Grid::get(opCtx)->catalogClient()->getAllShards(
         opCtx, repl::ReadConcernLevel::kMajorityReadConcern);
     if (!shardsStatus.isOK()) {
         return shardsStatus.getStatus();
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index a8108a1f540..9552c959323 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -220,7 +220,7 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* opCtx)
         _abandonActiveMigrationsAndEnableManager(opCtx);
     });
 
-    auto distLockManager = Grid::get(opCtx)->catalogClient(opCtx)->getDistLockManager();
+    auto distLockManager = Grid::get(opCtx)->catalogClient()->getDistLockManager();
 
     // Load the active migrations from the config.migrations collection.
     auto statusWithMigrationsQueryResponse =
@@ -358,7 +358,7 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
 
         // If no migrations were scheduled for this namespace, free the dist lock
         if (!scheduledMigrations) {
-            Grid::get(opCtx)->catalogClient(opCtx)->getDistLockManager()->unlock(
+            Grid::get(opCtx)->catalogClient()->getDistLockManager()->unlock(
                 opCtx, _lockSessionID, nss.ns());
         }
     }
@@ -489,7 +489,7 @@ void MigrationManager::_schedule_inlock(OperationContext* opCtx,
 
         // Acquire the collection distributed lock (blocking call)
         auto statusWithDistLockHandle =
-            Grid::get(opCtx)->catalogClient(opCtx)->getDistLockManager()->lockWithSessionID(
+            Grid::get(opCtx)->catalogClient()->getDistLockManager()->lockWithSessionID(
                 opCtx,
                 nss.ns(),
                 whyMessage,
@@ -554,7 +554,7 @@ void MigrationManager::_complete_inlock(OperationContext* opCtx,
     migrations->erase(itMigration);
 
     if (migrations->empty()) {
-        Grid::get(opCtx)->catalogClient(opCtx)->getDistLockManager()->unlock(
+        Grid::get(opCtx)->catalogClient()->getDistLockManager()->unlock(
             opCtx, _lockSessionID, nss.ns());
         _activeMigrations.erase(it);
         _checkDrained_inlock();
@@ -587,7 +587,7 @@ void MigrationManager::_abandonActiveMigrationsAndEnableManager(OperationContext
     }
     invariant(_state == State::kRecovering);
 
-    auto catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
+    auto catalogClient = Grid::get(opCtx)->catalogClient();
 
     // Unlock all balancer distlocks we aren't using anymore.
     auto distLockManager = catalogClient->getDistLockManager();
diff --git a/src/mongo/db/s/balancer/scoped_migration_request.cpp b/src/mongo/db/s/balancer/scoped_migration_request.cpp
index bbbcb0174f0..fda4b5dbbfc 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request.cpp
@@ -64,7 +64,7 @@ ScopedMigrationRequest::~ScopedMigrationRequest() {
     // okay.
     BSONObj migrationDocumentIdentifier =
         BSON(MigrationType::ns(_nss.ns()) << MigrationType::min(_minKey));
-    Status result = grid.catalogClient(_opCtx)->removeConfigDocuments(
+    Status result = Grid::get(_opCtx)->catalogClient()->removeConfigDocuments(
         _opCtx, MigrationType::ConfigNS, migrationDocumentIdentifier, kMajorityWriteConcern);
 
     if (!result.isOK()) {
@@ -98,7 +98,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
     const MigrationType migrationType(migrateInfo, waitForDelete);
 
     for (int retry = 0; retry < kDuplicateKeyErrorMaxRetries; ++retry) {
-        Status result = grid.catalogClient(opCtx)->insertConfigDocument(
+        Status result = grid.catalogClient()->insertConfigDocument(
             opCtx, MigrationType::ConfigNS, migrationType.toBSON(), kMajorityWriteConcern);
 
         if (result == ErrorCodes::DuplicateKey) {
@@ -190,7 +190,7 @@ Status ScopedMigrationRequest::tryToRemoveMigration() {
     invariant(_opCtx);
     BSONObj migrationDocumentIdentifier =
         BSON(MigrationType::ns(_nss.ns()) << MigrationType::min(_minKey));
-    Status status = grid.catalogClient(_opCtx)->removeConfigDocuments(
+    Status status = Grid::get(_opCtx)->catalogClient()->removeConfigDocuments(
         _opCtx, MigrationType::ConfigNS, migrationDocumentIdentifier, kMajorityWriteConcern);
     if (status.isOK()) {
         // Don't try to do a no-op remove in the destructor.
diff --git a/src/mongo/db/s/config/configsvr_move_primary_command.cpp b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
index 043db353644..ca592283df6 100644
--- a/src/mongo/db/s/config/configsvr_move_primary_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
@@ -128,7 +128,7 @@ public:
                            str::stream() << "Can't move primary for " << dbname << " database"});
         }
 
-        auto const catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
+        auto const catalogClient = Grid::get(opCtx)->catalogClient();
         auto const catalogCache = Grid::get(opCtx)->catalogCache();
         auto const shardRegistry = Grid::get(opCtx)->shardRegistry();
 
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
index cdeb15e5953..b582f4f5899 100644
--- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
@@ -174,7 +174,7 @@ public:
         auto shardCollRequest = ConfigsvrShardCollection::parse(
             IDLParserErrorContext("ConfigsvrShardCollection"), cmdObj);
 
-        auto const catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
+        auto const catalogClient = Grid::get(opCtx)->catalogClient();
         auto const catalogManager = Grid::get(opCtx)->catalogManager();
         auto const shardRegistry = Grid::get(opCtx)->shardRegistry();
         auto const catalogCache = Grid::get(opCtx)->catalogCache();
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 6a3db0908ce..37fe8fcd58d 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -86,7 +86,8 @@ Status mergeChunks(OperationContext* opCtx,
     // TODO(SERVER-25086): Remove distLock acquisition from merge chunk
     const string whyMessage = stream() << "merging chunks in " << nss.ns() << " from " << minKey
                                        << " to " << maxKey;
-    auto scopedDistLock = grid.catalogClient(opCtx)->getDistLockManager()->lock(
+
+    auto scopedDistLock = Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
         opCtx, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);
 
     if (!scopedDistLock.isOK()) {
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 75c77e016cc..d7b661e939c 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -164,7 +164,8 @@ Status MigrationSourceManager::startClone(OperationContext* opCtx) {
     invariant(_state == kCreated);
     auto scopedGuard = MakeGuard([&] { cleanupOnError(opCtx); });
 
-    grid.catalogClient(opCtx)
+    Grid::get(opCtx)
+        ->catalogClient()
         ->logChange(opCtx,
                     "moveChunk.start",
                     getNss().ns(),
@@ -320,7 +321,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
     }
 
     auto commitChunkMigrationResponse =
-        grid.shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+        Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
             opCtx,
             ReadPreferenceSetting{ReadPreference::PrimaryOnly},
             "admin",
@@ -344,7 +345,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
                  "against the config server to obtain its latest optime"
               << causedBy(redact(migrationCommitStatus));
 
-        Status status = grid.catalogClient(opCtx)->logChange(
+        Status status = Grid::get(opCtx)->catalogClient()->logChange(
             opCtx,
             "moveChunk.validating",
             getNss().ns(),
@@ -438,7 +439,8 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
     scopedGuard.Dismiss();
     _cleanup(opCtx);
 
-    grid.catalogClient(opCtx)
+    Grid::get(opCtx)
+        ->catalogClient()
         ->logChange(opCtx,
                     "moveChunk.commit",
                     getNss().ns(),
@@ -457,7 +459,8 @@ void MigrationSourceManager::cleanupOnError(OperationContext* opCtx) {
         return;
     }
 
-    grid.catalogClient(opCtx)
+    Grid::get(opCtx)
+        ->catalogClient()
         ->logChange(opCtx,
                     "moveChunk.error",
                     getNss().ns(),
diff --git a/src/mongo/db/s/move_timing_helper.cpp b/src/mongo/db/s/move_timing_helper.cpp
index 32b1a82b3c3..0ea22bf3adf 100644
--- a/src/mongo/db/s/move_timing_helper.cpp
+++ b/src/mongo/db/s/move_timing_helper.cpp
@@ -82,7 +82,8 @@ MoveTimingHelper::~MoveTimingHelper() {
             _b.append("errmsg", *_cmdErrmsg);
         }
 
-        grid.catalogClient(_opCtx)
+        Grid::get(_opCtx)
+            ->catalogClient()
             ->logChange(_opCtx,
                         str::stream() << "moveChunk." << _where,
                         _ns,
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index 91a2fc6f9eb..d9273b5cfc1 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -160,7 +160,7 @@ void ShardingState::shutDown(OperationContext* opCtx) {
     stdx::unique_lock<stdx::mutex> lk(_mutex);
     if (enabled()) {
         grid.getExecutorPool()->shutdownAndJoin();
-        grid.catalogClient(opCtx)->shutDown(opCtx);
+        grid.catalogClient()->shutDown(opCtx);
     }
 }
diff --git a/src/mongo/db/s/sharding_state_recovery.cpp b/src/mongo/db/s/sharding_state_recovery.cpp
index 35f9c4e04a6..4e528d26ef3 100644
--- a/src/mongo/db/s/sharding_state_recovery.cpp
+++ b/src/mongo/db/s/sharding_state_recovery.cpp
@@ -289,11 +289,11 @@ Status ShardingStateRecovery::recover(OperationContext* opCtx) {
     // Need to fetch the latest uptime from the config server, so do a logging write
     Status status =
-        grid.catalogClient(opCtx)->logChange(opCtx,
-                                             "Sharding minOpTime recovery",
-                                             NamespaceString::kServerConfigurationNamespace.ns(),
-                                             recoveryDocBSON,
-                                             ShardingCatalogClient::kMajorityWriteConcern);
+        grid.catalogClient()->logChange(opCtx,
+                                        "Sharding minOpTime recovery",
+                                        NamespaceString::kServerConfigurationNamespace.ns(),
+                                        recoveryDocBSON,
+                                        ShardingCatalogClient::kMajorityWriteConcern);
     if (!status.isOK())
         return status;
 
diff --git a/src/mongo/db/s/split_chunk_command.cpp b/src/mongo/db/s/split_chunk_command.cpp
index 846d6ca4b87..cfc2c04ad55 100644
--- a/src/mongo/db/s/split_chunk_command.cpp
+++ b/src/mongo/db/s/split_chunk_command.cpp
@@ -232,7 +232,7 @@ public:
         const string whyMessage(str::stream() << "splitting chunk [" << min << ", " << max
                                               << ") in " << nss.toString());
-        auto scopedDistLock = grid.catalogClient(opCtx)->getDistLockManager()->lock(
+        auto scopedDistLock = Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
             opCtx, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);
         if (!scopedDistLock.isOK()) {
             errmsg = str::stream() << "could not acquire collection lock for " << nss.toString()
diff --git a/src/mongo/s/balancer_configuration.cpp b/src/mongo/s/balancer_configuration.cpp
index b332444a389..413a39af5b1 100644
--- a/src/mongo/s/balancer_configuration.cpp
+++ b/src/mongo/s/balancer_configuration.cpp
@@ -79,7 +79,7 @@ BalancerSettingsType::BalancerMode BalancerConfiguration::getBalancerMode() cons
 
 Status BalancerConfiguration::setBalancerMode(OperationContext* opCtx,
                                               BalancerSettingsType::BalancerMode mode) {
-    auto updateStatus = Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
+    auto updateStatus = Grid::get(opCtx)->catalogClient()->updateConfigDocument(
         opCtx,
         kSettingsNamespace.ns(),
         BSON("_id" << BalancerSettingsType::kKey),
@@ -162,8 +162,8 @@ Status BalancerConfiguration::refreshAndCheck(OperationContext* opCtx) {
 Status BalancerConfiguration::_refreshBalancerSettings(OperationContext* opCtx) {
     BalancerSettingsType settings = BalancerSettingsType::createDefault();
 
-    auto settingsObjStatus = Grid::get(opCtx)->catalogClient(opCtx)->getGlobalSettings(
-        opCtx, BalancerSettingsType::kKey);
+    auto settingsObjStatus =
+        Grid::get(opCtx)->catalogClient()->getGlobalSettings(opCtx, BalancerSettingsType::kKey);
     if (settingsObjStatus.isOK()) {
         auto settingsStatus = BalancerSettingsType::fromBSON(settingsObjStatus.getValue());
         if (!settingsStatus.isOK()) {
@@ -185,7 +185,7 @@ Status BalancerConfiguration::_refreshChunkSizeSettings(OperationContext* opCtx)
     ChunkSizeSettingsType settings = ChunkSizeSettingsType::createDefault();
 
     auto settingsObjStatus =
-        grid.catalogClient(opCtx)->getGlobalSettings(opCtx, ChunkSizeSettingsType::kKey);
+        grid.catalogClient()->getGlobalSettings(opCtx, ChunkSizeSettingsType::kKey);
     if (settingsObjStatus.isOK()) {
         auto settingsStatus = ChunkSizeSettingsType::fromBSON(settingsObjStatus.getValue());
         if (!settingsStatus.isOK()) {
@@ -211,7 +211,7 @@ Status BalancerConfiguration::_refreshAutoSplitSettings(OperationContext* opCtx)
     AutoSplitSettingsType settings = AutoSplitSettingsType::createDefault();
 
     auto settingsObjStatus =
-        grid.catalogClient(opCtx)->getGlobalSettings(opCtx, AutoSplitSettingsType::kKey);
+        grid.catalogClient()->getGlobalSettings(opCtx, AutoSplitSettingsType::kKey);
     if (settingsObjStatus.isOK()) {
         auto settingsStatus = AutoSplitSettingsType::fromBSON(settingsObjStatus.getValue());
         if (!settingsStatus.isOK()) {
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index 07b357f2c9f..d7045f8821b 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -104,7 +104,7 @@ public:
      * has been installed into the global 'grid' object. Implementations do not need to guarantee
     * thread safety so callers should employ proper synchronization when calling this method.
     */
-    virtual Status startup() = 0;
+    virtual void startup() = 0;
 
    /**
     * Performs necessary cleanup when shutting down cleanly.
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index c71afb38429..327b1409a68 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -123,14 +123,14 @@ ShardingCatalogClientImpl::ShardingCatalogClientImpl(
 
 ShardingCatalogClientImpl::~ShardingCatalogClientImpl() = default;
 
-Status ShardingCatalogClientImpl::startup() {
+void ShardingCatalogClientImpl::startup() {
     stdx::lock_guard<stdx::mutex> lk(_mutex);
     if (_started) {
-        return Status::OK();
+        return;
     }
+
     _started = true;
     _distLockManager->startUp();
-    return Status::OK();
 }
 
 void ShardingCatalogClientImpl::shutDown(OperationContext* opCtx) {
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index b100f025ac4..c55899ec91c 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -66,7 +66,7 @@ public:
     * Safe to call multiple times as long as the calls are externally synchronized to be
     * non-overlapping.
     */
-    Status startup() override;
+    void startup() override;
 
     void shutDown(OperationContext* opCtx) override;
 
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
index bf73fac0fae..b37fdbc5ec2 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
@@ -49,11 +49,10 @@ ShardingCatalogClientMock::ShardingCatalogClientMock(
 
 ShardingCatalogClientMock::~ShardingCatalogClientMock() = default;
 
-Status ShardingCatalogClientMock::startup() {
+void ShardingCatalogClientMock::startup() {
     if (_distLockManager) {
         _distLockManager->startUp();
     }
-    return Status::OK();
 }
 
 void ShardingCatalogClientMock::shutDown(OperationContext* opCtx) {
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.h b/src/mongo/s/catalog/sharding_catalog_client_mock.h
index abe759e3fb4..1965dfb9f2d 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.h
@@ -41,7 +41,7 @@ public:
     ShardingCatalogClientMock(std::unique_ptr<DistLockManager> distLockManager);
     ~ShardingCatalogClientMock();
 
-    Status startup() override;
+    void startup() override;
 
     void shutDown(OperationContext* opCtx) override;
 
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
index 80922b52be9..72826851a96 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
@@ -368,7 +368,7 @@ Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* opCtx,
     }
 
     // apply the batch of updates to remote and local metadata
-    Status applyOpsStatus = Grid::get(opCtx)->catalogClient(opCtx)->applyChunkOpsDeprecated(
+    Status applyOpsStatus = Grid::get(opCtx)->catalogClient()->applyChunkOpsDeprecated(
         opCtx,
         updates.arr(),
         preCond.arr(),
@@ -394,7 +394,7 @@ Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* opCtx,
         appendShortVersion(&logDetail.subobjStart("right"), newChunks[1]);
 
         Grid::get(opCtx)
-            ->catalogClient(opCtx)
+            ->catalogClient()
             ->logChange(opCtx, "split", ns.ns(), logDetail.obj(), WriteConcernOptions())
             .transitional_ignore();
     } else {
@@ -410,7 +410,7 @@ Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* opCtx,
             appendShortVersion(&chunkDetail.subobjStart("chunk"), newChunks[i]);
 
             Grid::get(opCtx)
-                ->catalogClient(opCtx)
+                ->catalogClient()
                 ->logChange(opCtx, "multi-split", ns.ns(), chunkDetail.obj(), WriteConcernOptions())
                 .transitional_ignore();
         }
@@ -496,7 +496,7 @@ Status ShardingCatalogManagerImpl::commitChunkMerge(OperationContext* opCtx,
     auto preCond = buildMergeChunksApplyOpsPrecond(chunksToMerge, collVersion);
 
     // apply the batch of updates to remote and local metadata
-    Status applyOpsStatus = Grid::get(opCtx)->catalogClient(opCtx)->applyChunkOpsDeprecated(
+    Status applyOpsStatus = Grid::get(opCtx)->catalogClient()->applyChunkOpsDeprecated(
         opCtx,
         updates,
         preCond,
@@ -520,7 +520,7 @@ Status ShardingCatalogManagerImpl::commitChunkMerge(OperationContext* opCtx,
     mergeVersion.addToBSON(logDetail, "mergedVersion");
 
     Grid::get(opCtx)
-        ->catalogClient(opCtx)
+        ->catalogClient()
         ->logChange(opCtx, "merge", ns.ns(), logDetail.obj(), WriteConcernOptions())
         .transitional_ignore();
 
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_collection_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_collection_operations_impl.cpp
index d82db079052..63f9de7114f 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_collection_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_collection_operations_impl.cpp
@@ -167,7 +167,7 @@ ChunkVersion createFirstChunks(OperationContext* opCtx,
         chunk.setShard(shardIds[i % shardIds.size()]);
         chunk.setVersion(version);
 
-        uassertStatusOK(Grid::get(opCtx)->catalogClient(opCtx)->insertConfigDocument(
+        uassertStatusOK(Grid::get(opCtx)->catalogClient()->insertConfigDocument(
             opCtx,
             ChunkType::ConfigNS,
             chunk.toConfigBSON(),
@@ -221,7 +221,7 @@ void ShardingCatalogManagerImpl::shardCollection(OperationContext* opCtx,
                                                  bool unique,
                                                  const vector<BSONObj>& initPoints,
                                                  const bool distributeInitialChunks) {
-    const auto catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
+    const auto catalogClient = Grid::get(opCtx)->catalogClient();
     const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
 
     // Lock the collection globally so that no other mongos can try to shard or drop the collection
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_impl.cpp
index 94ca3eb154c..13e99e1851d 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_impl.cpp
@@ -120,7 +120,7 @@ void ShardingCatalogManagerImpl::discardCachedConfigDatabaseInitializationState(
 }
 
 Status ShardingCatalogManagerImpl::_initConfigVersion(OperationContext* opCtx) {
-    const auto catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
+    const auto catalogClient = Grid::get(opCtx)->catalogClient();
 
     auto versionStatus =
         catalogClient->getConfigVersion(opCtx, repl::ReadConcernLevel::kLocalReadConcern);
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
index 12ffa5c278d..71a84600c4d 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
@@ -206,7 +206,7 @@ StatusWith<boost::optional<ShardType>> ShardingCatalogManagerImpl::_checkIfShard
     const std::string* proposedShardName,
     long long proposedShardMaxSize) {
     // Check whether any host in the connection is already part of the cluster.
-    const auto existingShards = Grid::get(opCtx)->catalogClient(opCtx)->getAllShards(
+    const auto existingShards = Grid::get(opCtx)->catalogClient()->getAllShards(
         opCtx, repl::ReadConcernLevel::kLocalReadConcern);
     if (!existingShards.isOK()) {
         return Status(existingShards.getStatus().code(),
@@ -587,7 +587,7 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
     }
 
     for (const auto& dbName : dbNamesStatus.getValue()) {
-        auto dbt = Grid::get(opCtx)->catalogClient(opCtx)->getDatabase(opCtx, dbName);
+        auto dbt = Grid::get(opCtx)->catalogClient()->getDatabase(opCtx, dbName);
         if (dbt.isOK()) {
             const auto& dbDoc = dbt.getValue().value;
             return Status(ErrorCodes::OperationFailed,
@@ -655,7 +655,7 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
 
     log() << "going to insert new entry for shard into config.shards: " << shardType.toString();
 
-    Status result = Grid::get(opCtx)->catalogClient(opCtx)->insertConfigDocument(
+    Status result = Grid::get(opCtx)->catalogClient()->insertConfigDocument(
         opCtx,
         ShardType::ConfigNS,
         shardType.toBSON(),
@@ -672,7 +672,7 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
             dbt.setPrimary(shardType.getName());
             dbt.setSharded(false);
 
-            Status status = Grid::get(opCtx)->catalogClient(opCtx)->updateDatabase(opCtx, dbName, dbt);
+            Status status = Grid::get(opCtx)->catalogClient()->updateDatabase(opCtx, dbName, dbt);
             if (!status.isOK()) {
                 log() << "adding shard " << shardConnectionString.toString()
                       << " even though could not add database " << dbName;
@@ -685,7 +685,7 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
         shardDetails.append("host", shardConnectionString.toString());
 
         Grid::get(opCtx)
-            ->catalogClient(opCtx)
+            ->catalogClient()
            ->logChange(
                 opCtx, "addShard", "", shardDetails.obj(), ShardingCatalogClient::kMajorityWriteConcern)
             .transitional_ignore();
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp
index b3e57ce1a0b..b9717617c46 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp
@@ -177,7 +177,7 @@ Status ShardingCatalogManagerImpl::addShardToZone(OperationContext* opCtx,
                                                   const std::string& zoneName) {
     Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
 
-    auto updateStatus = Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
+    auto updateStatus = Grid::get(opCtx)->catalogClient()->updateConfigDocument(
         opCtx,
         ShardType::ConfigNS,
         BSON(ShardType::name(shardName)),
@@ -287,7 +287,7 @@ Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* opCtx,
 
     // Perform update.
     //
-    auto updateStatus = Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
+    auto updateStatus = Grid::get(opCtx)->catalogClient()->updateConfigDocument(
         opCtx,
         ShardType::ConfigNS,
         BSON(ShardType::name(shardName)),
@@ -362,7 +362,7 @@ Status ShardingCatalogManagerImpl::assignKeyRangeToZone(OperationContext* opCtx,
     updateBuilder.append(TagsType::max(), fullShardKeyRange.getMax());
     updateBuilder.append(TagsType::tag(), zoneName);
 
-    auto updateStatus = Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
+    auto updateStatus = Grid::get(opCtx)->catalogClient()->updateConfigDocument(
         opCtx, TagsType::ConfigNS, updateQuery, updateBuilder.obj(), true, kNoWaitWriteConcern);
 
     if (!updateStatus.isOK()) {
@@ -390,7 +390,7 @@ Status ShardingCatalogManagerImpl::removeKeyRangeFromZone(OperationContext* opCt
     removeBuilder.append("_id", BSON(TagsType::ns(ns.ns()) << TagsType::min(range.getMin())));
     removeBuilder.append(TagsType::max(), range.getMax());
 
-    return Grid::get(opCtx)->catalogClient(opCtx)->removeConfigDocuments(
+    return Grid::get(opCtx)->catalogClient()->removeConfigDocuments(
         opCtx, TagsType::ConfigNS, removeBuilder.obj(), kNoWaitWriteConcern);
 }
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index 831e7c4a264..f9f9b049ede 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -312,7 +312,7 @@ std::shared_ptr<CatalogCache::DatabaseInfoEntry> CatalogCache::_getDatabase(Oper
         return it->second;
     }
 
-    const auto catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
+    const auto catalogClient = Grid::get(opCtx)->catalogClient();
 
     const auto dbNameCopy = dbName.toString();
 
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index dd14458ad95..6fc44deefb2 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -346,7 +346,7 @@ void ShardRegistry::replicaSetChangeConfigServerUpdateHook(const std::string& se
 
     auto status =
         Grid::get(opCtx.get())
-            ->catalogClient(opCtx.get())
+            ->catalogClient()
             ->updateConfigDocument(opCtx.get(),
                                    ShardType::ConfigNS,
                                    BSON(ShardType::name(s->getId().toString())),
@@ -371,8 +371,8 @@ ShardRegistryData::ShardRegistryData(OperationContext* opCtx, ShardFactory* shar
 }
 
 void ShardRegistryData::_init(OperationContext* opCtx, ShardFactory* shardFactory) {
-    auto shardsStatus = grid.catalogClient(opCtx)->getAllShards(
-        opCtx, repl::ReadConcernLevel::kMajorityReadConcern);
+    auto shardsStatus =
+        grid.catalogClient()->getAllShards(opCtx, repl::ReadConcernLevel::kMajorityReadConcern);
 
     if (!shardsStatus.isOK()) {
         uasserted(shardsStatus.getStatus().code(),
diff --git a/src/mongo/s/cluster_identity_loader.cpp b/src/mongo/s/cluster_identity_loader.cpp
index 37a63a5cee2..d3cad97c4d7 100644
--- a/src/mongo/s/cluster_identity_loader.cpp
+++ b/src/mongo/s/cluster_identity_loader.cpp
@@ -95,7 +95,7 @@ Status ClusterIdentityLoader::loadClusterId(OperationContext* opCtx,
 
 StatusWith<OID> ClusterIdentityLoader::_fetchClusterIdFromConfig(
     OperationContext* opCtx, const repl::ReadConcernLevel& readConcernLevel) {
-    auto catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
+    auto catalogClient = Grid::get(opCtx)->catalogClient();
     auto loadResult = catalogClient->getConfigVersion(opCtx, readConcernLevel);
     if (!loadResult.isOK()) {
         return Status(loadResult.getStatus().code(),
diff --git a/src/mongo/s/commands/cluster_commands_helpers.cpp b/src/mongo/s/commands/cluster_commands_helpers.cpp
index 1dfdd40271d..ffda19bfc27 100644
--- a/src/mongo/s/commands/cluster_commands_helpers.cpp
+++ b/src/mongo/s/commands/cluster_commands_helpers.cpp
@@ -409,7 +409,7 @@ std::vector<NamespaceString> getAllShardedCollectionsForDb(OperationContext* opC
     const auto dbNameStr = dbName.toString();
 
     std::vector<CollectionType> collectionsOnConfig;
-    uassertStatusOK(Grid::get(opCtx)->catalogClient(opCtx)->getCollections(
+    uassertStatusOK(Grid::get(opCtx)->catalogClient()->getCollections(
         opCtx, &dbNameStr, &collectionsOnConfig, nullptr));
 
     std::vector<NamespaceString> collectionsToReturn;
@@ -438,7 +438,7 @@ StatusWith<CachedDatabaseInfo> createShardDatabase(OperationContext* opCtx, Stri
     auto dbStatus = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName);
     if (dbStatus == ErrorCodes::NamespaceNotFound) {
         auto createDbStatus =
-            Grid::get(opCtx)->catalogClient(opCtx)->createDatabase(opCtx, dbName.toString());
+            Grid::get(opCtx)->catalogClient()->createDatabase(opCtx, dbName.toString());
         if (createDbStatus.isOK() || createDbStatus == ErrorCodes::NamespaceExists) {
             dbStatus = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName);
         } else {
diff --git a/src/mongo/s/commands/cluster_drop_cmd.cpp b/src/mongo/s/commands/cluster_drop_cmd.cpp
index 159070dce24..fd05eae78dd 100644
--- a/src/mongo/s/commands/cluster_drop_cmd.cpp
+++ b/src/mongo/s/commands/cluster_drop_cmd.cpp
@@ -89,7 +89,7 @@ public:
         if (!routingInfo.cm()) {
             _dropUnshardedCollectionFromShard(opCtx, routingInfo.primaryId(), nss, &result);
         } else {
-            uassertStatusOK(Grid::get(opCtx)->catalogClient(opCtx)->dropCollection(opCtx, nss));
+            uassertStatusOK(Grid::get(opCtx)->catalogClient()->dropCollection(opCtx, nss));
             catalogCache->invalidateShardedCollection(nss);
         }
 
@@ -105,7 +105,7 @@ private:
                                           const ShardId& shardId,
                                           const NamespaceString& nss,
                                           BSONObjBuilder* result) {
-        const auto catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
+        const auto catalogClient = Grid::get(opCtx)->catalogClient();
         const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
 
         auto scopedDistLock = uassertStatusOK(catalogClient->getDistLockManager()->lock(
diff --git a/src/mongo/s/commands/cluster_drop_database_cmd.cpp b/src/mongo/s/commands/cluster_drop_database_cmd.cpp
index 8f7d6bb81c8..dfc71cc62c3 100644
--- a/src/mongo/s/commands/cluster_drop_database_cmd.cpp
+++ b/src/mongo/s/commands/cluster_drop_database_cmd.cpp
@@ -82,7 +82,7 @@ public:
                 "have to pass 1 as db parameter",
                 cmdObj.firstElement().isNumber() && cmdObj.firstElement().number() == 1);
 
-        auto const catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
+        auto const catalogClient = Grid::get(opCtx)->catalogClient();
 
         // Lock the database globally to prevent conflicts with simultaneous database
         // creation/modification.
diff --git a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
index a30686735d8..231f03d3f91 100644
--- a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
+++ b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
@@ -104,7 +104,7 @@ public:
             return false;
         }
 
-        uassertStatusOK(Grid::get(opCtx)->catalogClient(opCtx)->enableSharding(opCtx, dbname));
+        uassertStatusOK(Grid::get(opCtx)->catalogClient()->enableSharding(opCtx, dbname));
         audit::logEnableSharding(Client::getCurrent(), dbname);
 
         // Make sure to force update of any stale metadata
diff --git a/src/mongo/s/commands/cluster_list_databases_cmd.cpp b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
index bc81d7f5b8d..27fc6d35840 100644
--- a/src/mongo/s/commands/cluster_list_databases_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
@@ -166,7 +166,7 @@ public:
         }
 
         // Get information for config and admin dbs from the config servers.
-        auto catalogClient = grid.catalogClient(opCtx);
+        auto catalogClient = grid.catalogClient();
         auto appendStatus = catalogClient->appendInfoForConfigServerDatabases(
             opCtx, filterCommandRequestForPassthrough(cmdObj), &dbListBuilder);
         dbListBuilder.doneFast();
diff --git a/src/mongo/s/commands/cluster_list_shards_cmd.cpp b/src/mongo/s/commands/cluster_list_shards_cmd.cpp
index d9ca47830fc..ba1cecee308 100644
--- a/src/mongo/s/commands/cluster_list_shards_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_shards_cmd.cpp
@@ -73,8 +73,8 @@ public:
              const BSONObj& cmdObj,
              std::string& errmsg,
              BSONObjBuilder& result) {
-        auto shardsStatus = grid.catalogClient(opCtx)->getAllShards(
-            opCtx, repl::ReadConcernLevel::kMajorityReadConcern);
+        auto shardsStatus =
+            grid.catalogClient()->getAllShards(opCtx, repl::ReadConcernLevel::kMajorityReadConcern);
         if (!shardsStatus.isOK()) {
             return appendCommandStatus(result, shardsStatus.getStatus());
         }
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index c8c69c4c69d..10ad111e0ec 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -465,9 +465,8 @@ public:
         auto chunkSizes = SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<int>();
         {
             // Take distributed lock to prevent split / migration.
-            auto scopedDistLock =
-                Grid::get(opCtx)->catalogClient(opCtx)->getDistLockManager()->lock(
-                    opCtx, outputCollNss.ns(), "mr-post-process", kNoDistLockTimeout);
+            auto scopedDistLock = Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
+                opCtx, outputCollNss.ns(), "mr-post-process", kNoDistLockTimeout);
             if (!scopedDistLock.isOK()) {
                 return appendCommandStatus(result, scopedDistLock.getStatus());
             }
@@ -609,7 +608,7 @@ private:
     static CachedCollectionRoutingInfo createShardedOutputCollection(OperationContext* opCtx,
                                                                      const NamespaceString& nss,
                                                                      const BSONObjSet& splitPts) {
-        auto const catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
+        auto const catalogClient = Grid::get(opCtx)->catalogClient();
         auto const catalogCache = Grid::get(opCtx)->catalogCache();
 
         // Enable sharding on the output db
diff --git a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
index 96376ad2702..b6d5ebca214 100644
--- a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
@@ -99,7 +99,7 @@ public:
         }
         const auto s = shardStatus.getValue();
 
-        auto catalogClient = grid.catalogClient(opCtx);
+        auto catalogClient = grid.catalogClient();
         StatusWith<ShardDrainingStatus> removeShardResult =
             catalogClient->removeShard(opCtx, s->getId());
         if (!removeShardResult.isOK()) {
diff --git a/src/mongo/s/commands/cluster_user_management_commands.cpp b/src/mongo/s/commands/cluster_user_management_commands.cpp
index e5474e6ce7b..9b9fc7b0d82 100644
--- a/src/mongo/s/commands/cluster_user_management_commands.cpp
+++ b/src/mongo/s/commands/cluster_user_management_commands.cpp
@@ -91,7 +91,7 @@ public:
             const BSONObj& cmdObj,
             string& errmsg,
             BSONObjBuilder& result) {
-        return Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+        return Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
            opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
    }
 
@@ -134,7 +134,7 @@ public:
        if (!status.isOK()) {
            return appendCommandStatus(result, status);
        }
-        const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+        const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
            opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
 
        AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -183,7 +183,7 @@ public:
        if (!status.isOK()) {
            return appendCommandStatus(result, status);
        }
-        const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+        const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
            opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
 
        AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -223,7 +223,7 @@ public:
            const BSONObj& cmdObj,
            string& errmsg,
            BSONObjBuilder& result) {
-        const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+        const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
            opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
 
        AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -270,7 +270,7 @@ public:
        if (!status.isOK()) {
            return appendCommandStatus(result, status);
        }
-        const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+        const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
            opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
 
        AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -317,7 +317,7 @@ public:
        if (!status.isOK()) {
            return appendCommandStatus(result, status);
        }
-        const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+        const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
            opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
 
        AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -361,7 +361,7 @@ public:
            const BSONObj& cmdObj,
            string& errmsg,
            BSONObjBuilder& result) {
-        return Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+        return Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
            opCtx, dbname, filterCommandRequestForPassthrough(cmdObj), &result);
    }
 
@@ -395,7 +395,7 @@ public:
            const BSONObj& cmdObj,
            string& errmsg,
            BSONObjBuilder& result) {
-        return Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+        return Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
            opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
    }
 
@@ -429,7 +429,7 @@ public:
            const BSONObj& cmdObj,
            string& errmsg,
            BSONObjBuilder& result) {
-        const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+        const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
            opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
 
        AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -469,7 +469,7 @@ public:
            const BSONObj& cmdObj,
            string& errmsg,
            BSONObjBuilder& result) {
-        const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+        const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
            opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
 
        AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -509,7 +509,7 @@ public:
            const BSONObj& cmdObj,
            string& errmsg,
            BSONObjBuilder& result) {
-        const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+        const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
            opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
 
        AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -549,7 +549,7 @@ public:
            const BSONObj& cmdObj,
            string& errmsg,
            BSONObjBuilder& result) {
-        const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+        const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
            opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
 
        AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -589,7 +589,7 @@ public:
            const BSONObj& cmdObj,
            string& errmsg,
            BSONObjBuilder& result) {
-        const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+        const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
            opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
 
        AuthorizationManager* authzManager = getGlobalAuthorizationManager();
@@ -632,7 +632,7 @@ public:
            const BSONObj& cmdObj,
            string& errmsg,
            BSONObjBuilder& result) {
-        const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+        const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
dbname, filterCommandRequestForPassthrough(cmdObj), &result); AuthorizationManager* authzManager = getGlobalAuthorizationManager(); @@ -676,7 +676,7 @@ public: const BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result) { - const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand( + const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand( opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result); AuthorizationManager* authzManager = getGlobalAuthorizationManager(); @@ -720,7 +720,7 @@ public: const BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result) { - return Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand( + return Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand( opCtx, dbname, filterCommandRequestForPassthrough(cmdObj), &result); } @@ -808,7 +808,7 @@ public: const BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result) { - return Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand( + return Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand( opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result); } @@ -898,7 +898,7 @@ public: string& errmsg, BSONObjBuilder& result) { // Run the authSchemaUpgrade command on the config servers - if (!Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand( + if (!Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand( opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result)) { return false; } diff --git a/src/mongo/s/commands/cluster_write.cpp b/src/mongo/s/commands/cluster_write.cpp index ecc68ae8b0c..4a6b88baa8a 100644 --- a/src/mongo/s/commands/cluster_write.cpp +++ b/src/mongo/s/commands/cluster_write.cpp @@ -247,7 +247,7 @@ void ClusterWriter::write(OperationContext* opCtx, // Config writes and shard writes are done differently if (nss.db() == NamespaceString::kConfigDb || nss.db() == NamespaceString::kAdminDb) { - Grid::get(opCtx)->catalogClient(opCtx)->writeConfigServerDirect(opCtx, *request, response); + Grid::get(opCtx)->catalogClient()->writeConfigServerDirect(opCtx, *request, response); } else { TargeterStats targeterStats; @@ -407,7 +407,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* opCtx, return false; auto collStatus = - Grid::get(opCtx)->catalogClient(opCtx)->getCollection(opCtx, manager->getns()); + Grid::get(opCtx)->catalogClient()->getCollection(opCtx, manager->getns()); if (!collStatus.isOK()) { log() << "Auto-split for " << nss << " failed to load collection metadata" << causedBy(redact(collStatus.getStatus())); diff --git a/src/mongo/s/config_server_catalog_cache_loader.cpp b/src/mongo/s/config_server_catalog_cache_loader.cpp index 79cea4d8211..230e2515780 100644 --- a/src/mongo/s/config_server_catalog_cache_loader.cpp +++ b/src/mongo/s/config_server_catalog_cache_loader.cpp @@ -90,7 +90,7 @@ QueryAndSort createConfigDiffQuery(const NamespaceString& nss, ChunkVersion coll CollectionAndChangedChunks getChangedChunks(OperationContext* opCtx, const NamespaceString& nss, ChunkVersion sinceVersion) { - const auto catalogClient = Grid::get(opCtx)->catalogClient(opCtx); + const auto catalogClient = Grid::get(opCtx)->catalogClient(); // Decide whether to do a full or partial load based on the state of the collection const auto coll = uassertStatusOK(catalogClient->getCollection(opCtx, nss.ns())).value; @@ -109,14 +109,14 @@ CollectionAndChangedChunks getChangedChunks(OperationContext* opCtx, // Query 
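Every call site above follows the same post-change shape: the catalog client is obtained with a bare getter, and the OperationContext is threaded only into the command call itself. A minimal sketch of that shape, assuming a hypothetical helper (runPassthroughExample is not in the patch; Grid::get(), catalogClient(), and runUserManagementReadCommand() are the real APIs from the hunks above):

    // Minimal sketch of the new call shape. runPassthroughExample is a
    // hypothetical name; the Grid and catalog client calls come from the
    // hunks above.
    bool runPassthroughExample(OperationContext* opCtx,
                               const std::string& dbname,
                               const BSONObj& cmdObj,
                               BSONObjBuilder* result) {
        // The accessor takes no arguments now; opCtx is passed to the
        // command invocation itself, which is where it is actually used.
        auto* const catalogClient = Grid::get(opCtx)->catalogClient();
        return catalogClient->runUserManagementReadCommand(opCtx, dbname, cmdObj, result);
    }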
diff --git a/src/mongo/s/grid.h b/src/mongo/s/grid.h
index 4170e3ee152..293013f1aec 100644
--- a/src/mongo/s/grid.h
+++ b/src/mongo/s/grid.h
@@ -94,18 +94,10 @@ public:
      */
     void setAllowLocalHost(bool allow);

-    /**
-     * Returns a pointer to a ShardingCatalogClient to use for accessing catalog data stored on the
-     * config servers.
-     */
-    ShardingCatalogClient* catalogClient(OperationContext* opCtx) {
+    ShardingCatalogClient* catalogClient() {
         return _catalogClient.get();
     }

-    /**
-     * Returns a pointer to a ShardingCatalogManager to use for manipulating catalog data stored on
-     * the config servers.
-     */
     ShardingCatalogManager* catalogManager() {
         return _catalogManager.get();
     }
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index 67a6ef16c6e..db45eb41ba1 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -172,7 +172,7 @@ static void cleanupTask() {
         if (auto pool = Grid::get(opCtx)->getExecutorPool()) {
             pool->shutdownAndJoin();
         }
-        if (auto catalog = Grid::get(opCtx)->catalogClient(opCtx)) {
+        if (auto catalog = Grid::get(opCtx)->catalogClient()) {
             catalog->shutDown(opCtx);
         }
diff --git a/src/mongo/s/sharding_initialization.cpp b/src/mongo/s/sharding_initialization.cpp
index 7c1dddb4e2f..3dbaf4f19ea 100644
--- a/src/mongo/s/sharding_initialization.cpp
+++ b/src/mongo/s/sharding_initialization.cpp
@@ -210,7 +210,7 @@ Status initializeGlobalShardingState(OperationContext* opCtx,
         makeTaskExecutor(executor::makeNetworkInterface("AddShard-TaskExecutor")));
     auto rawCatalogManager = catalogManager.get();

-    auto grid = Grid::get(opCtx);
+    auto const grid = Grid::get(opCtx);
     grid->init(
         std::move(catalogClient),
         std::move(catalogManager),
@@ -221,24 +221,21 @@ Status initializeGlobalShardingState(OperationContext* opCtx,
         std::move(executorPool),
         networkPtr);

-    // must be started once the grid is initialized
+    // The shard registry must be started once the grid is initialized
    grid->shardRegistry()->startup(opCtx);

-    auto status = rawCatalogClient->startup();
-    if (!status.isOK()) {
-        return status;
-    }
+    grid->catalogClient()->startup();

     if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
         // Only config servers get a ShardingCatalogManager.
-        status = rawCatalogManager->startup();
+        Status status = rawCatalogManager->startup();
         if (!status.isOK()) {
             return status;
         }
     }

     auto keyManager = stdx::make_unique<KeysCollectionManager>(
-        kKeyManagerPurposeString, grid->catalogClient(opCtx), Seconds(KeysRotationIntervalSec));
+        kKeyManagerPurposeString, grid->catalogClient(), Seconds(KeysRotationIntervalSec));
     keyManager->startMonitoring(opCtx->getServiceContext());

     LogicalTimeValidator::set(opCtx->getServiceContext(),
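The grid.h hunk is the core of the change: catalogClient() is a pure accessor over a cached pointer, so an OperationContext has nothing to contribute there. Condensed, the accessor reduces to the following (a sketch mirroring the hunk above, with all unrelated Grid members elided):

    // Sketch of the simplified accessor; only the members visible in the
    // grid.h hunk are reproduced here.
    class Grid {
    public:
        ShardingCatalogClient* catalogClient() {
            return _catalogClient.get();  // no I/O and no locking, hence no opCtx
        }

    private:
        std::unique_ptr<ShardingCatalogClient> _catalogClient;
    };

The sharding_initialization.cpp hunk also shows that ShardingCatalogClient::startup() is now called without checking a returned Status, matching the removal of the old status-propagation block.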
diff --git a/src/mongo/s/sharding_mongod_test_fixture.cpp b/src/mongo/s/sharding_mongod_test_fixture.cpp
index 6ab6b1cb119..b7c1c0179fe 100644
--- a/src/mongo/s/sharding_mongod_test_fixture.cpp
+++ b/src/mongo/s/sharding_mongod_test_fixture.cpp
@@ -304,11 +304,8 @@ Status ShardingMongodTestFixture::initializeGlobalShardingStateForMongodForTest(
     // Note: ShardRegistry::startup() is not called because it starts a task executor with a self-
     // rescheduling task to reload the ShardRegistry over the network.

-    if (Grid::get(operationContext())->catalogClient(operationContext())) {
-        auto status = Grid::get(operationContext())->catalogClient(operationContext())->startup();
-        if (!status.isOK()) {
-            return status;
-        }
+    if (Grid::get(operationContext())->catalogClient()) {
+        Grid::get(operationContext())->catalogClient()->startup();
     }

     if (Grid::get(operationContext())->catalogManager()) {
@@ -332,10 +329,8 @@ void ShardingMongodTestFixture::tearDown() {
         Grid::get(operationContext())->catalogManager()->shutDown(operationContext());
     }

-    if (Grid::get(operationContext())->catalogClient(operationContext())) {
-        Grid::get(operationContext())
-            ->catalogClient(operationContext())
-            ->shutDown(operationContext());
+    if (Grid::get(operationContext())->catalogClient()) {
+        Grid::get(operationContext())->catalogClient()->shutDown(operationContext());
     }

     Grid::get(operationContext())->clearForUnitTests();
@@ -347,8 +342,8 @@ void ShardingMongodTestFixture::tearDown() {
 }

 ShardingCatalogClient* ShardingMongodTestFixture::catalogClient() const {
-    invariant(Grid::get(operationContext())->catalogClient(operationContext()));
-    return Grid::get(operationContext())->catalogClient(operationContext());
+    invariant(Grid::get(operationContext())->catalogClient());
+    return Grid::get(operationContext())->catalogClient();
 }

 ShardingCatalogManager* ShardingMongodTestFixture::catalogManager() const {
diff --git a/src/mongo/s/sharding_test_fixture.cpp b/src/mongo/s/sharding_test_fixture.cpp
index c0ed6210782..6884cd4475e 100644
--- a/src/mongo/s/sharding_test_fixture.cpp
+++ b/src/mongo/s/sharding_test_fixture.cpp
@@ -148,7 +148,7 @@ void ShardingTestFixture::setUp() {
     std::unique_ptr<ShardingCatalogClientImpl> catalogClient(
         stdx::make_unique<ShardingCatalogClientImpl>(std::move(uniqueDistLockManager)));
     _catalogClient = catalogClient.get();
-    catalogClient->startup().transitional_ignore();
+    catalogClient->startup();

     ConnectionString configCS = ConnectionString::forReplicaSet(
         "configRS", {HostAndPort{"TestHost1"}, HostAndPort{"TestHost2"}});
@@ -199,7 +199,7 @@ void ShardingTestFixture::setUp() {

 void ShardingTestFixture::tearDown() {
     Grid::get(operationContext())->getExecutorPool()->shutdownAndJoin();
-    Grid::get(operationContext())->catalogClient(_opCtx.get())->shutDown(_opCtx.get());
+    Grid::get(operationContext())->catalogClient()->shutDown(_opCtx.get());
     Grid::get(operationContext())->clearForUnitTests();

     _transportSession.reset();
@@ -213,7 +213,7 @@ void ShardingTestFixture::shutdownExecutor() {
 }

 ShardingCatalogClient* ShardingTestFixture::catalogClient() const {
-    return Grid::get(operationContext())->catalogClient(_opCtx.get());
+    return Grid::get(operationContext())->catalogClient();
 }

 ShardingCatalogClientImpl* ShardingTestFixture::getCatalogClient() const {
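Both fixtures keep the guard-then-call pattern, just without the redundant context argument. Condensed into a hypothetical helper (shutdownCatalogClientIfPresent is illustrative, not part of the tree):

    // Illustrative helper mirroring the fixture tearDown logic above. The
    // getter may legitimately return nullptr in unit tests that never
    // initialized sharding, hence the guard before shutDown().
    void shutdownCatalogClientIfPresent(OperationContext* opCtx) {
        if (auto* const client = Grid::get(opCtx)->catalogClient()) {
            client->shutDown(opCtx);
        }
    }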
diff --git a/src/mongo/s/sharding_uptime_reporter.cpp b/src/mongo/s/sharding_uptime_reporter.cpp
index 8f668b34f56..f53db37a2ec 100644
--- a/src/mongo/s/sharding_uptime_reporter.cpp
+++ b/src/mongo/s/sharding_uptime_reporter.cpp
@@ -71,7 +71,7 @@ void reportStatus(OperationContext* opCtx,
     try {
         Grid::get(opCtx)
-            ->catalogClient(opCtx)
+            ->catalogClient()
             ->updateConfigDocument(opCtx,
                                    MongosType::ConfigNS,
                                    BSON(MongosType::name(instanceId)),
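The rewrite at each call site is mechanical, as a final before/after sketch shows (shutDown() stands in for any of the catalog client methods exercised above):

    // Before: Grid::get(opCtx)->catalogClient(opCtx)->shutDown(opCtx);
    // After: only the getter changes; the method call itself is untouched.
    Grid::get(opCtx)->catalogClient()->shutDown(opCtx);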