From 23d351be938149019adb870b1d76f8e3e9fc847d Mon Sep 17 00:00:00 2001
From: Ramon Fernandez
Date: Tue, 29 Mar 2016 18:01:31 -0400
Subject: Revert "SERVER-22937 Retry operations run through the ShardRegistry
 wherever possible"

This reverts commit aa87323a825af440fe431ae82b1fc37adfd20e2c.
---
 src/mongo/s/balance.cpp                            |  6 +-
 src/mongo/s/balancer_policy.cpp                    | 12 ++--
 src/mongo/s/catalog/catalog_manager_common.cpp     | 18 +++---
 .../replset/catalog_manager_replica_set.cpp        | 68 ++++++++++----------
 .../catalog/replset/catalog_manager_replica_set.h  |  9 +++
 .../catalog_manager_replica_set_add_shard_test.cpp | 10 ++-
 .../s/catalog/replset/dist_lock_catalog_impl.cpp   |  2 +-
 src/mongo/s/chunk.cpp                              |  2 +-
 src/mongo/s/chunk_manager.cpp                      |  2 +-
 src/mongo/s/client/shard_registry.cpp              | 72 ++++++++++++++--------
 src/mongo/s/client/shard_registry.h                | 68 ++++++++++++--------
 src/mongo/s/commands/cluster_fsync_cmd.cpp         |  2 +-
 .../s/commands/cluster_list_databases_cmd.cpp      |  2 +-
 .../commands/cluster_user_management_commands.cpp  |  4 +-
 src/mongo/s/shard_util.cpp                         | 12 ++--
 15 files changed, 166 insertions(+), 123 deletions(-)

diff --git a/src/mongo/s/balance.cpp b/src/mongo/s/balance.cpp
index 2f37268af74..a4fa019772c 100644
--- a/src/mongo/s/balance.cpp
+++ b/src/mongo/s/balance.cpp
@@ -278,7 +278,7 @@ bool Balancer::_checkOIDs(OperationContext* txn) {
             continue;
         }
 
-        BSONObj f = uassertStatusOK(grid.shardRegistry()->runIdempotentCommandOnShard(
+        BSONObj f = uassertStatusOK(grid.shardRegistry()->runCommandOnShard(
             txn,
             s,
             ReadPreferenceSetting{ReadPreference::PrimaryOnly},
@@ -292,7 +292,7 @@ bool Balancer::_checkOIDs(OperationContext* txn) {
                 log() << "error: 2 machines have " << x << " as oid machine piece: " << shardId
                       << " and " << oids[x];
 
-                uassertStatusOK(grid.shardRegistry()->runIdempotentCommandOnShard(
+                uassertStatusOK(grid.shardRegistry()->runCommandOnShard(
                     txn,
                     s,
                     ReadPreferenceSetting{ReadPreference::PrimaryOnly},
@@ -301,7 +301,7 @@ bool Balancer::_checkOIDs(OperationContext* txn) {
 
                 const auto otherShard = grid.shardRegistry()->getShard(txn, oids[x]);
                 if (otherShard) {
-                    uassertStatusOK(grid.shardRegistry()->runIdempotentCommandOnShard(
+                    uassertStatusOK(grid.shardRegistry()->runCommandOnShard(
                         txn,
                         otherShard,
                         ReadPreferenceSetting{ReadPreference::PrimaryOnly},
diff --git a/src/mongo/s/balancer_policy.cpp b/src/mongo/s/balancer_policy.cpp
index f8ff4c49f65..473e13ed6c9 100644
--- a/src/mongo/s/balancer_policy.cpp
+++ b/src/mongo/s/balancer_policy.cpp
@@ -69,12 +69,12 @@ namespace {
 std::string retrieveShardMongoDVersion(OperationContext* txn,
                                        ShardId shardId,
                                        ShardRegistry* shardRegistry) {
-    BSONObj serverStatus = uassertStatusOK(shardRegistry->runIdempotentCommandOnShard(
-        txn,
-        shardId,
-        ReadPreferenceSetting{ReadPreference::PrimaryOnly},
-        "admin",
-        BSON("serverStatus" << 1)));
+    BSONObj serverStatus = uassertStatusOK(
+        shardRegistry->runCommandOnShard(txn,
+                                         shardId,
+                                         ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+                                         "admin",
+                                         BSON("serverStatus" << 1)));
     BSONElement versionElement = serverStatus["version"];
     if (versionElement.type() != String) {
         uassertStatusOK({ErrorCodes::NoSuchKey, "version field not found in serverStatus"});
diff --git a/src/mongo/s/catalog/catalog_manager_common.cpp b/src/mongo/s/catalog/catalog_manager_common.cpp
index 82c2a863fa9..d8943f626d3 100644
--- a/src/mongo/s/catalog/catalog_manager_common.cpp
+++ b/src/mongo/s/catalog/catalog_manager_common.cpp
@@ -111,7 +111,7 @@ StatusWith<ShardType> validateHostAsShard(OperationContext* txn,
     const ReadPreferenceSetting readPref{ReadPreference::PrimaryOnly};
 
     // Is it mongos?
-    auto cmdStatus = shardRegistry->runIdempotentCommandForAddShard(
+    auto cmdStatus = shardRegistry->runCommandForAddShard(
         txn, shardConn, readPref, "admin", BSON("isdbgrid" << 1));
     if (!cmdStatus.isOK()) {
         return cmdStatus.getStatus();
@@ -123,7 +123,7 @@ StatusWith<ShardType> validateHostAsShard(OperationContext* txn,
     }
 
     // Is it a replica set?
-    cmdStatus = shardRegistry->runIdempotentCommandForAddShard(
+    cmdStatus = shardRegistry->runCommandForAddShard(
         txn, shardConn, readPref, "admin", BSON("isMaster" << 1));
     if (!cmdStatus.isOK()) {
         return cmdStatus.getStatus();
@@ -157,7 +157,7 @@ StatusWith<ShardType> validateHostAsShard(OperationContext* txn,
     }
 
     // Is it a mongos config server?
-    cmdStatus = shardRegistry->runIdempotentCommandForAddShard(
+    cmdStatus = shardRegistry->runCommandForAddShard(
         txn, shardConn, readPref, "admin", BSON("replSetGetStatus" << 1));
     if (!cmdStatus.isOK()) {
         return cmdStatus.getStatus();
@@ -260,12 +260,12 @@ StatusWith<std::vector<std::string>> getDBNamesListFromShard(
         shardRegistry->createConnection(connectionString).release()};
     invariant(shardConn);
 
-    auto cmdStatus = shardRegistry->runIdempotentCommandForAddShard(
-        txn,
-        shardConn,
-        ReadPreferenceSetting{ReadPreference::PrimaryOnly},
-        "admin",
-        BSON("listDatabases" << 1));
+    auto cmdStatus =
+        shardRegistry->runCommandForAddShard(txn,
+                                             shardConn,
+                                             ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+                                             "admin",
+                                             BSON("listDatabases" << 1));
     if (!cmdStatus.isOK()) {
         return cmdStatus.getStatus();
     }
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
index ccefd01d7f0..9179b0f3c0a 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
@@ -220,12 +220,8 @@ Status CatalogManagerReplicaSet::shardCollection(OperationContext* txn,
                                              manager->getVersion(),
                                              true);
 
-    auto ssvStatus = grid.shardRegistry()->runIdempotentCommandOnShard(
-        txn,
-        dbPrimaryShardId,
-        ReadPreferenceSetting{ReadPreference::PrimaryOnly},
-        "admin",
-        ssv.toBSON());
+    auto ssvStatus = grid.shardRegistry()->runCommandWithNotMasterRetries(
+        txn, dbPrimaryShardId, "admin", ssv.toBSON());
     if (!ssvStatus.isOK()) {
         warning() << "could not update initial version of " << ns << " on shard primary "
                   << dbPrimaryShardId << ssvStatus.getStatus();
@@ -495,12 +491,8 @@ Status CatalogManagerReplicaSet::dropCollection(OperationContext* txn, const Nam
     auto* shardRegistry = grid.shardRegistry();
 
     for (const auto& shardEntry : allShards) {
-        auto dropResult = shardRegistry->runIdempotentCommandOnShard(
-            txn,
-            shardEntry.getName(),
-            ReadPreferenceSetting{ReadPreference::PrimaryOnly},
-            ns.db().toString(),
-            BSON("drop" << ns.coll()));
+        auto dropResult = shardRegistry->runCommandWithNotMasterRetries(
+            txn, shardEntry.getName(), ns.db().toString(), BSON("drop" << ns.coll()));
 
         if (!dropResult.isOK()) {
             return dropResult.getStatus();
@@ -564,12 +556,8 @@ Status CatalogManagerReplicaSet::dropCollection(OperationContext* txn, const Nam
                                  ChunkVersion::DROPPED(),
                                  true);
 
-        auto ssvResult = shardRegistry->runIdempotentCommandOnShard(
-            txn,
-            shardEntry.getName(),
-            ReadPreferenceSetting{ReadPreference::PrimaryOnly},
-            "admin",
-            ssv.toBSON());
+        auto ssvResult = shardRegistry->runCommandWithNotMasterRetries(
+            txn, shardEntry.getName(), "admin", ssv.toBSON());
 
         if (!ssvResult.isOK()) {
             return ssvResult.getStatus();
@@ -580,12 +568,8 @@ Status CatalogManagerReplicaSet::dropCollection(OperationContext* txn, const Nam
             return ssvStatus;
         }
 
-        auto unsetShardingStatus = shardRegistry->runIdempotentCommandOnShard(
-            txn,
-            shardEntry.getName(),
-            ReadPreferenceSetting{ReadPreference::PrimaryOnly},
-            "admin",
-            BSON("unsetSharding" << 1));
+        auto unsetShardingStatus = shardRegistry->runCommandWithNotMasterRetries(
+            txn, shardEntry.getName(), "admin", BSON("unsetSharding" << 1));
 
         if (!unsetShardingStatus.isOK()) {
             return unsetShardingStatus.getStatus();
@@ -867,8 +851,7 @@ bool CatalogManagerReplicaSet::runReadCommandForTest(OperationContext* txn,
     cmdBuilder.appendElements(cmdObj);
     _appendReadConcern(&cmdBuilder);
 
-    auto resultStatus = grid.shardRegistry()->runIdempotentCommandOnConfig(
-        txn, kConfigReadSelector, dbname, cmdBuilder.done());
+    auto resultStatus = _runReadCommand(txn, dbname, cmdBuilder.done(), kConfigReadSelector);
     if (resultStatus.isOK()) {
         result->appendElements(resultStatus.getValue());
         return Command::getStatusFromCommandResult(resultStatus.getValue()).isOK();
@@ -881,8 +864,7 @@ bool CatalogManagerReplicaSet::runUserManagementReadCommand(OperationContext* tx
                                                             const std::string& dbname,
                                                             const BSONObj& cmdObj,
                                                             BSONObjBuilder* result) {
-    auto resultStatus = grid.shardRegistry()->runIdempotentCommandOnConfig(
-        txn, kConfigPrimaryPreferredSelector, dbname, cmdObj);
+    auto resultStatus = _runReadCommand(txn, dbname, cmdObj, kConfigPrimaryPreferredSelector);
     if (resultStatus.isOK()) {
         result->appendElements(resultStatus.getValue());
         return Command::getStatusFromCommandResult(resultStatus.getValue()).isOK();
@@ -1253,8 +1235,8 @@ StatusWith<long long> CatalogManagerReplicaSet::_runCountCommandOnConfig(Operati
     countBuilder.append("query", query);
    _appendReadConcern(&countBuilder);
 
-    auto resultStatus = grid.shardRegistry()->runIdempotentCommandOnConfig(
-        txn, kConfigReadSelector, ns.db().toString(), countBuilder.done());
+    auto resultStatus =
+        _runReadCommand(txn, ns.db().toString(), countBuilder.done(), kConfigReadSelector);
     if (!resultStatus.isOK()) {
         return resultStatus.getStatus();
     }
@@ -1405,10 +1387,32 @@ void CatalogManagerReplicaSet::_appendReadConcern(BSONObjBuilder* builder) {
     readConcern.appendInfo(builder);
 }
 
+StatusWith<BSONObj> CatalogManagerReplicaSet::_runReadCommand(
+    OperationContext* txn,
+    const std::string& dbname,
+    const BSONObj& cmdObj,
+    const ReadPreferenceSetting& readPref) {
+    for (int retry = 1; retry <= kMaxReadRetry; ++retry) {
+        auto response = grid.shardRegistry()->runCommandOnConfig(txn, readPref, dbname, cmdObj);
+        if (response.isOK()) {
+            return response;
+        }
+
+        if (ShardRegistry::kAllRetriableErrors.count(response.getStatus().code()) &&
+            retry < kMaxReadRetry) {
+            continue;
+        }
+
+        return response.getStatus();
+    }
+
+    MONGO_UNREACHABLE;
+}
+
 Status CatalogManagerReplicaSet::appendInfoForConfigServerDatabases(OperationContext* txn,
                                                                     BSONArrayBuilder* builder) {
-    auto resultStatus = grid.shardRegistry()->runIdempotentCommandOnConfig(
-        txn, kConfigPrimaryPreferredSelector, "admin", BSON("listDatabases" << 1));
+    auto resultStatus =
+        _runReadCommand(txn, "admin", BSON("listDatabases" << 1), kConfigPrimaryPreferredSelector);
 
     if (!resultStatus.isOK()) {
         return resultStatus.getStatus();
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.h b/src/mongo/s/catalog/replset/catalog_manager_replica_set.h
index 29a592724c7..52feade2277 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.h
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set.h
@@ -167,6 +167,15 @@ private:
                                     StringData collName,
                                     int cappedSize) override;
 
+    /**
+     * Helper method for running a read command against the config server. Automatically retries
+     * on NotMaster and network errors, so these will never be returned.
+     */
+    StatusWith<BSONObj> _runReadCommand(OperationContext* txn,
+                                        const std::string& dbname,
+                                        const BSONObj& cmdObj,
+                                        const ReadPreferenceSetting& readPref);
+
     /**
      * Executes the specified batch write command on the current config server's primary and
      * retries on the specified set of errors using the default retry policy.
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
index 0d8cbfa7aae..cdfede01e91 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
@@ -485,12 +485,10 @@ TEST_F(AddShardTest, UnreachableHost) {
         ASSERT_EQUALS("host unreachable", status.getStatus().reason());
     });
 
-    for (int i = 0; i < 3; i++) {  // ShardRegistry will retry 3 times
-        onCommandForAddShard([](const RemoteCommandRequest& request) {
-            ASSERT_EQ(request.target, HostAndPort("StandaloneHost:12345"));
-            return StatusWith{ErrorCodes::HostUnreachable, "host unreachable"};
-        });
-    }
+    onCommandForAddShard([](const RemoteCommandRequest& request) {
+        ASSERT_EQ(request.target, HostAndPort("StandaloneHost:12345"));
+        return StatusWith{ErrorCodes::HostUnreachable, "host unreachable"};
+    });
 
     future.timed_get(kFutureTimeout);
 }
diff --git a/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp b/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp
index e1eebf80f26..a617ef1a670 100644
--- a/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp
+++ b/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp
@@ -342,7 +342,7 @@ Status DistLockCatalogImpl::unlockAll(OperationContext* txn, const std::string&
 
 StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogImpl::getServerInfo(OperationContext* txn) {
     auto resultStatus =
-        _client->runIdempotentCommandOnConfig(txn, kReadPref, "admin", BSON("serverStatus" << 1));
+        _client->runCommandOnConfig(txn, kReadPref, "admin", BSON("serverStatus" << 1));
 
     if (!resultStatus.isOK()) {
         return resultStatus.getStatus();
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index b0f7ac78da6..3c0c72f750c 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -310,7 +310,7 @@ void Chunk::pickSplitVector(OperationContext* txn,
 
     BSONObj cmdObj = cmd.obj();
 
-    auto result = grid.shardRegistry()->runIdempotentCommandOnShard(
+    auto result = grid.shardRegistry()->runCommandOnShard(
         txn,
         getShardId(),
         ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index fcb21b5002c..3bca3bfe94c 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -358,7 +358,7 @@ void ChunkManager::calcInitSplitsAndShards(OperationContext* txn,
         // discover split points
         const auto primaryShard = grid.shardRegistry()->getShard(txn, primaryShardId);
         const NamespaceString nss{getns()};
-        auto result = grid.shardRegistry()->runIdempotentCommandOnShard(
+        auto result = grid.shardRegistry()->runCommandOnShard(
            txn,
            primaryShard,
            ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index 486e91cdbf4..9ebacebfcf0 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -645,12 +645,11 @@ StatusWith<ShardRegistry::QueryResponse> ShardRegistry::exhaustiveFindOnConfig(
 
     MONGO_UNREACHABLE;
 }
 
-StatusWith<BSONObj> ShardRegistry::runIdempotentCommandOnShard(
-    OperationContext* txn,
-    const std::shared_ptr<Shard>& shard,
-    const ReadPreferenceSetting& readPref,
-    const std::string& dbName,
-    const BSONObj& cmdObj) {
+StatusWith<BSONObj> ShardRegistry::runCommandOnShard(OperationContext* txn,
+                                                     const std::shared_ptr<Shard>& shard,
+                                                     const ReadPreferenceSetting& readPref,
+                                                     const std::string& dbName,
+                                                     const BSONObj& cmdObj) {
     auto response = _runCommandWithRetries(txn,
                                            _executorPool->getFixedExecutor(),
                                            shard,
@@ -660,7 +659,7 @@ StatusWith<BSONObj> ShardRegistry::runIdempotentCommandOnShard(
                                            readPref.pref == ReadPreference::PrimaryOnly
                                                ? rpc::makeEmptyMetadata()
                                                : kSecondaryOkMetadata,
-                                           kAllRetriableErrors);
+                                           kNotMasterErrors);
     if (!response.isOK()) {
         return response.getStatus();
     }
@@ -668,26 +667,24 @@ StatusWith<BSONObj> ShardRegistry::runIdempotentCommandOnShard(
 
     return response.getValue().response;
 }
 
-StatusWith<BSONObj> ShardRegistry::runIdempotentCommandOnShard(
-    OperationContext* txn,
-    ShardId shardId,
-    const ReadPreferenceSetting& readPref,
-    const std::string& dbName,
-    const BSONObj& cmdObj) {
+StatusWith<BSONObj> ShardRegistry::runCommandOnShard(OperationContext* txn,
+                                                     ShardId shardId,
+                                                     const ReadPreferenceSetting& readPref,
+                                                     const std::string& dbName,
+                                                     const BSONObj& cmdObj) {
     auto shard = getShard(txn, shardId);
     if (!shard) {
         return {ErrorCodes::ShardNotFound, str::stream() << "shard " << shardId << " not found"};
     }
 
-    return runIdempotentCommandOnShard(txn, shard, readPref, dbName, cmdObj);
+    return runCommandOnShard(txn, shard, readPref, dbName, cmdObj);
 }
 
-StatusWith<BSONObj> ShardRegistry::runIdempotentCommandForAddShard(
-    OperationContext* txn,
-    const std::shared_ptr<Shard>& shard,
-    const ReadPreferenceSetting& readPref,
-    const std::string& dbName,
-    const BSONObj& cmdObj) {
+StatusWith<BSONObj> ShardRegistry::runCommandForAddShard(OperationContext* txn,
+                                                         const std::shared_ptr<Shard>& shard,
+                                                         const ReadPreferenceSetting& readPref,
+                                                         const std::string& dbName,
+                                                         const BSONObj& cmdObj) {
     auto status = _runCommandWithRetries(txn,
                                          _executorForAddShard.get(),
                                          shard,
@@ -697,7 +694,7 @@ StatusWith<BSONObj> ShardRegistry::runIdempotentCommandForAddShard(
                                          readPref.pref == ReadPreference::PrimaryOnly
                                              ? rpc::makeEmptyMetadata()
                                              : kSecondaryOkMetadata,
-                                         kAllRetriableErrors);
+                                         kNotMasterErrors);
     if (!status.isOK()) {
         return status.getStatus();
     }
@@ -705,11 +702,10 @@ StatusWith<BSONObj> ShardRegistry::runIdempotentCommandForAddShard(
 
     return status.getValue().response;
 }
 
-StatusWith<BSONObj> ShardRegistry::runIdempotentCommandOnConfig(
-    OperationContext* txn,
-    const ReadPreferenceSetting& readPref,
-    const std::string& dbName,
-    const BSONObj& cmdObj) {
+StatusWith<BSONObj> ShardRegistry::runCommandOnConfig(OperationContext* txn,
+                                                      const ReadPreferenceSetting& readPref,
+                                                      const std::string& dbName,
+                                                      const BSONObj& cmdObj) {
     auto response = _runCommandWithRetries(
         txn,
         _executorPool->getFixedExecutor(),
@@ -718,7 +714,7 @@ StatusWith<BSONObj> ShardRegistry::runIdempotentCommandOnConfig(
         dbName,
         cmdObj,
         readPref.pref == ReadPreference::PrimaryOnly ? kReplMetadata : kReplSecondaryOkMetadata,
-        kAllRetriableErrors);
+        kNotMasterErrors);
 
     if (!response.isOK()) {
         return response.getStatus();
@@ -727,6 +723,28 @@ StatusWith<BSONObj> ShardRegistry::runIdempotentCommandOnConfig(
     return response.getValue().response;
 }
 
+StatusWith<BSONObj> ShardRegistry::runCommandWithNotMasterRetries(OperationContext* txn,
+                                                                  const ShardId& shardId,
+                                                                  const std::string& dbname,
+                                                                  const BSONObj& cmdObj) {
+    auto shard = getShard(txn, shardId);
+    invariant(!shard->isConfig());
+
+    auto response = _runCommandWithRetries(txn,
+                                           _executorPool->getFixedExecutor(),
+                                           shard,
+                                           ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+                                           dbname,
+                                           cmdObj,
+                                           rpc::makeEmptyMetadata(),
+                                           kNotMasterErrors);
+    if (!response.isOK()) {
+        return response.getStatus();
+    }
+
+    return response.getValue().response;
+}
+
 StatusWith<BSONObj> ShardRegistry::runCommandOnConfigWithRetries(
     OperationContext* txn,
     const std::string& dbname,
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index 3cfcfc0dac6..73fa8feee35 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -243,41 +243,55 @@ public:
     /**
      * Runs a command against a host belonging to the specified shard and matching the given
      * readPref, and returns the result. It is the responsibility of the caller to check the
-     * returned BSON for command-specific failures. It is also important that the command is safe
-     * to be retried in case we cannot verify whether or not it ran successfully.
+     * returned BSON for command-specific failures.
      */
-    StatusWith<BSONObj> runIdempotentCommandOnShard(OperationContext* txn,
-                                                    const std::shared_ptr<Shard>& shard,
-                                                    const ReadPreferenceSetting& readPref,
-                                                    const std::string& dbName,
-                                                    const BSONObj& cmdObj);
-    StatusWith<BSONObj> runIdempotentCommandOnShard(OperationContext* txn,
-                                                    ShardId shardId,
-                                                    const ReadPreferenceSetting& readPref,
-                                                    const std::string& dbName,
-                                                    const BSONObj& cmdObj);
+    StatusWith<BSONObj> runCommandOnShard(OperationContext* txn,
+                                          const std::shared_ptr<Shard>& shard,
+                                          const ReadPreferenceSetting& readPref,
+                                          const std::string& dbName,
+                                          const BSONObj& cmdObj);
+    StatusWith<BSONObj> runCommandOnShard(OperationContext* txn,
+                                          ShardId shardId,
+                                          const ReadPreferenceSetting& readPref,
+                                          const std::string& dbName,
+                                          const BSONObj& cmdObj);
 
     /**
-     * Same as runIdempotentCommandOnShard above but used for talking to nodes that are not yet in
-     * the ShardRegistry.
+     * Same as runCommandOnShard above but used for talking to nodes that are not yet in the
+     * ShardRegistry.
     */
-    StatusWith<BSONObj> runIdempotentCommandForAddShard(OperationContext* txn,
-                                                        const std::shared_ptr<Shard>& shard,
-                                                        const ReadPreferenceSetting& readPref,
-                                                        const std::string& dbName,
-                                                        const BSONObj& cmdObj);
+    StatusWith<BSONObj> runCommandForAddShard(OperationContext* txn,
+                                              const std::shared_ptr<Shard>& shard,
+                                              const ReadPreferenceSetting& readPref,
+                                              const std::string& dbName,
+                                              const BSONObj& cmdObj);
 
     /**
-     * Runs command against a config server that matches the given read preference, and returns
-     * the result. It is the responsibility of the caller to check the returned BSON
-     * for command-specific failures. It is also important that the command is safe to be retried
-     * in case we cannot verify whether or not it ran successfully.
+     * Runs a command against a config server that matches the given read preference, and returns
+     * the result. It is the responsibility of the caller to check the returned BSON for
+     * command-specific failures.
      */
-    StatusWith<BSONObj> runIdempotentCommandOnConfig(OperationContext* txn,
-                                                     const ReadPreferenceSetting& readPref,
-                                                     const std::string& dbname,
-                                                     const BSONObj& cmdObj);
+    StatusWith<BSONObj> runCommandOnConfig(OperationContext* txn,
+                                           const ReadPreferenceSetting& readPref,
+                                           const std::string& dbname,
+                                           const BSONObj& cmdObj);
+
+    /**
+     * Helpers for running commands against a given shard with logic for retargeting and
+     * retrying the command in the event of a NotMaster response.
+     * Returns ErrorCodes::NotMaster if after the max number of retries we still haven't
+     * successfully delivered the command to a primary. Can also return a non-ok status in the
+     * event of a network error communicating with the shard. If we are able to get
+     * a valid response from running the command then we will return it, even if the command
+     * response indicates failure. Thus the caller is responsible for checking the command
+     * response object for any kind of command-specific failure. The only exception is
+     * NotMaster errors, which we intercept and follow the rules described above for handling.
+     */
+    StatusWith<BSONObj> runCommandWithNotMasterRetries(OperationContext* txn,
+                                                       const ShardId& shard,
+                                                       const std::string& dbname,
+                                                       const BSONObj& cmdObj);
 
     class ErrorCodesHash {
     public:
diff --git a/src/mongo/s/commands/cluster_fsync_cmd.cpp b/src/mongo/s/commands/cluster_fsync_cmd.cpp
index 732c75c2866..0f4cea8c754 100644
--- a/src/mongo/s/commands/cluster_fsync_cmd.cpp
+++ b/src/mongo/s/commands/cluster_fsync_cmd.cpp
@@ -91,7 +91,7 @@ public:
                 continue;
             }
 
-            BSONObj x = uassertStatusOK(grid.shardRegistry()->runIdempotentCommandOnShard(
+            BSONObj x = uassertStatusOK(grid.shardRegistry()->runCommandOnShard(
                 txn,
                 s,
                 ReadPreferenceSetting{ReadPreference::PrimaryOnly},
diff --git a/src/mongo/s/commands/cluster_list_databases_cmd.cpp b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
index 23dad76f0a3..98dcbe593dc 100644
--- a/src/mongo/s/commands/cluster_list_databases_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
@@ -99,7 +99,7 @@ public:
                 continue;
             }
 
-            BSONObj x = uassertStatusOK(grid.shardRegistry()->runIdempotentCommandOnShard(
+            BSONObj x = uassertStatusOK(grid.shardRegistry()->runCommandOnShard(
                 txn,
                 s,
                 ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
diff --git a/src/mongo/s/commands/cluster_user_management_commands.cpp b/src/mongo/s/commands/cluster_user_management_commands.cpp
index a3d98a0fc0a..cedb28b3c87 100644
--- a/src/mongo/s/commands/cluster_user_management_commands.cpp
+++ b/src/mongo/s/commands/cluster_user_management_commands.cpp
@@ -829,8 +829,8 @@ Status runUpgradeOnAllShards(OperationContext* txn, int maxSteps, const BSONObj&
     shardRegistry->getAllShardIds(&shardIds);
 
     for (const auto& shardId : shardIds) {
-        auto cmdResult = shardRegistry->runIdempotentCommandOnShard(
-            txn, shardId, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, "admin", cmdObj);
+        auto cmdResult =
+            shardRegistry->runCommandWithNotMasterRetries(txn, shardId, "admin", cmdObj);
 
         if (!cmdResult.isOK()) {
             return Status(cmdResult.getStatus().code(),
diff --git a/src/mongo/s/shard_util.cpp b/src/mongo/s/shard_util.cpp
index 777c33db427..8b7ced7748e 100644
--- a/src/mongo/s/shard_util.cpp
+++ b/src/mongo/s/shard_util.cpp
@@ -42,12 +42,12 @@ namespace shardutil {
 StatusWith<long long> retrieveTotalShardSize(OperationContext* txn,
                                              ShardId shardId,
                                              ShardRegistry* shardRegistry) {
-    auto listDatabasesStatus = shardRegistry->runIdempotentCommandOnShard(
-        txn,
-        shardId,
-        ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
-        "admin",
-        BSON("listDatabases" << 1));
+    auto listDatabasesStatus =
+        shardRegistry->runCommandOnShard(txn,
+                                         shardId,
+                                         ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
+                                         "admin",
+                                         BSON("listDatabases" << 1));
    if (!listDatabasesStatus.isOK()) {
         return listDatabasesStatus.getStatus();
     }
-- 
cgit v1.2.1