summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSpencer T Brody <spencer@mongodb.com>2016-08-26 17:47:07 -0400
committerSpencer T Brody <spencer@mongodb.com>2016-08-26 18:23:45 -0400
commit7614c0eb2449eb4ec22d21b677177124d61f1888 (patch)
tree270f40e904bec17cbb30f696450e3401cfe03172
parentff52022e5cd7ff7c75daf24c8760f0377785ba1e (diff)
downloadmongo-7614c0eb2449eb4ec22d21b677177124d61f1888.tar.gz
SERVER-25832 Rename Shard::runCommand to Shard::runCommandWithFixedRetryAttempts
-rw-r--r--src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp2
-rw-r--r--src/mongo/db/s/migration_source_manager.cpp13
-rw-r--r--src/mongo/db/s/split_chunk_command.cpp13
-rw-r--r--src/mongo/s/balancer/balancer.cpp37
-rw-r--r--src/mongo/s/balancer/cluster_statistics_impl.cpp11
-rw-r--r--src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp85
-rw-r--r--src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp123
-rw-r--r--src/mongo/s/chunk_manager.cpp12
-rw-r--r--src/mongo/s/client/shard.cpp22
-rw-r--r--src/mongo/s/client/shard.h25
-rw-r--r--src/mongo/s/client/shard_local_test.cpp22
-rw-r--r--src/mongo/s/commands/cluster_add_shard_cmd.cpp12
-rw-r--r--src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp12
-rw-r--r--src/mongo/s/commands/cluster_control_balancer_cmd.cpp12
-rw-r--r--src/mongo/s/commands/cluster_fsync_cmd.cpp12
-rw-r--r--src/mongo/s/commands/cluster_list_databases_cmd.cpp12
-rw-r--r--src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp12
-rw-r--r--src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp12
-rw-r--r--src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp12
-rw-r--r--src/mongo/s/commands/cluster_user_management_commands.cpp12
-rw-r--r--src/mongo/s/config_server_client.cpp14
-rw-r--r--src/mongo/s/config_server_test_fixture.cpp24
-rw-r--r--src/mongo/s/shard_util.cpp48
23 files changed, 311 insertions, 248 deletions
diff --git a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
index 59b234f5eae..ece43e1b676 100644
--- a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
+++ b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
@@ -273,7 +273,7 @@ public:
}
auto applyOpsCommandResponse =
- Grid::get(txn)->shardRegistry()->getConfigShard()->runCommand(
+ Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
txn,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
nss.db().toString(),
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index f1bb4701e34..9fdd4f04a83 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -320,12 +320,13 @@ Status MigrationSourceManager::commitDonateChunk(OperationContext* txn) {
MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangBeforeCommitMigration);
- auto commitChunkMigrationResponse = grid.shardRegistry()->getConfigShard()->runCommand(
- txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- builder.obj(),
- Shard::RetryPolicy::kIdempotent);
+ auto commitChunkMigrationResponse =
+ grid.shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ builder.obj(),
+ Shard::RetryPolicy::kIdempotent);
if (MONGO_FAIL_POINT(failCommitMigrationCommand)) {
commitChunkMigrationResponse = Status(
diff --git a/src/mongo/db/s/split_chunk_command.cpp b/src/mongo/db/s/split_chunk_command.cpp
index 8f40c2fcf94..f4ceaf19855 100644
--- a/src/mongo/db/s/split_chunk_command.cpp
+++ b/src/mongo/db/s/split_chunk_command.cpp
@@ -354,12 +354,13 @@ public:
auto configCmdObj = request.toConfigCommandBSON(
BSON(WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority));
- auto cmdResponseStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->runCommand(
- txn,
- kPrimaryOnlyReadPreference,
- "admin",
- configCmdObj,
- Shard::RetryPolicy::kIdempotent);
+ auto cmdResponseStatus =
+ Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ txn,
+ kPrimaryOnlyReadPreference,
+ "admin",
+ configCmdObj,
+ Shard::RetryPolicy::kIdempotent);
//
// Refresh chunk metadata regardless of whether or not the split succeeded
diff --git a/src/mongo/s/balancer/balancer.cpp b/src/mongo/s/balancer/balancer.cpp
index 6296d42cb88..209cea777b3 100644
--- a/src/mongo/s/balancer/balancer.cpp
+++ b/src/mongo/s/balancer/balancer.cpp
@@ -450,12 +450,12 @@ bool Balancer::_checkOIDs(OperationContext* txn) {
}
const auto s = shardStatus.getValue();
- auto result =
- uassertStatusOK(s->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- BSON("features" << 1),
- Shard::RetryPolicy::kIdempotent));
+ auto result = uassertStatusOK(
+ s->runCommandWithFixedRetryAttempts(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ BSON("features" << 1),
+ Shard::RetryPolicy::kIdempotent));
uassertStatusOK(result.commandStatus);
BSONObj f = std::move(result.response);
@@ -467,22 +467,23 @@ bool Balancer::_checkOIDs(OperationContext* txn) {
log() << "error: 2 machines have " << x << " as oid machine piece: " << shardId
<< " and " << oids[x];
- result = uassertStatusOK(
- s->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- BSON("features" << 1 << "oidReset" << 1),
- Shard::RetryPolicy::kIdempotent));
+ result = uassertStatusOK(s->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ BSON("features" << 1 << "oidReset" << 1),
+ Shard::RetryPolicy::kIdempotent));
uassertStatusOK(result.commandStatus);
auto otherShardStatus = shardingContext->shardRegistry()->getShard(txn, oids[x]);
if (otherShardStatus.isOK()) {
- result = uassertStatusOK(otherShardStatus.getValue()->runCommand(
- txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- BSON("features" << 1 << "oidReset" << 1),
- Shard::RetryPolicy::kIdempotent));
+ result = uassertStatusOK(
+ otherShardStatus.getValue()->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ BSON("features" << 1 << "oidReset" << 1),
+ Shard::RetryPolicy::kIdempotent));
uassertStatusOK(result.commandStatus);
}
diff --git a/src/mongo/s/balancer/cluster_statistics_impl.cpp b/src/mongo/s/balancer/cluster_statistics_impl.cpp
index 8f229997cb2..b6e734c6fc2 100644
--- a/src/mongo/s/balancer/cluster_statistics_impl.cpp
+++ b/src/mongo/s/balancer/cluster_statistics_impl.cpp
@@ -68,11 +68,12 @@ StatusWith<string> retrieveShardMongoDVersion(OperationContext* txn, ShardId sha
}
auto shard = shardStatus.getValue();
- auto commandResponse = shard->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- BSON("serverStatus" << 1),
- Shard::RetryPolicy::kIdempotent);
+ auto commandResponse =
+ shard->runCommandWithFixedRetryAttempts(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ BSON("serverStatus" << 1),
+ Shard::RetryPolicy::kIdempotent);
if (!commandResponse.isOK()) {
return commandResponse.getStatus();
}
diff --git a/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp b/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp
index b88ca069558..0d45188c3de 100644
--- a/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp
+++ b/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp
@@ -178,13 +178,13 @@ Status DistLockCatalogImpl::ping(OperationContext* txn, StringData processID, Da
request.setUpsert(true);
request.setWriteConcern(kMajorityWriteConcern);
- auto resultStatus =
- _client->getConfigShard()->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- _locksNS.db().toString(),
- request.toBSON(),
- Shard::kDefaultConfigCommandTimeout,
- Shard::RetryPolicy::kNotIdempotent);
+ auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ _locksNS.db().toString(),
+ request.toBSON(),
+ Shard::kDefaultConfigCommandTimeout,
+ Shard::RetryPolicy::kNotIdempotent);
auto findAndModifyStatus = extractFindAndModifyNewObj(std::move(resultStatus));
return findAndModifyStatus.getStatus();
@@ -214,7 +214,7 @@ StatusWith<LocksType> DistLockCatalogImpl::grabLock(OperationContext* txn,
request.setShouldReturnNew(true);
request.setWriteConcern(kMajorityWriteConcern);
- auto resultStatus = _client->getConfigShard()->runCommand(
+ auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
txn,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
@@ -271,13 +271,13 @@ StatusWith<LocksType> DistLockCatalogImpl::overtakeLock(OperationContext* txn,
request.setShouldReturnNew(true);
request.setWriteConcern(kMajorityWriteConcern);
- auto resultStatus =
- _client->getConfigShard()->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- _locksNS.db().toString(),
- request.toBSON(),
- Shard::kDefaultConfigCommandTimeout,
- Shard::RetryPolicy::kNotIdempotent);
+ auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ _locksNS.db().toString(),
+ request.toBSON(),
+ Shard::kDefaultConfigCommandTimeout,
+ Shard::RetryPolicy::kNotIdempotent);
auto findAndModifyStatus = extractFindAndModifyNewObj(std::move(resultStatus));
if (!findAndModifyStatus.isOK()) {
@@ -316,13 +316,13 @@ Status DistLockCatalogImpl::unlock(OperationContext* txn,
}
Status DistLockCatalogImpl::_unlock(OperationContext* txn, const FindAndModifyRequest& request) {
- auto resultStatus =
- _client->getConfigShard()->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- _locksNS.db().toString(),
- request.toBSON(),
- Shard::kDefaultConfigCommandTimeout,
- Shard::RetryPolicy::kIdempotent);
+ auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ _locksNS.db().toString(),
+ request.toBSON(),
+ Shard::kDefaultConfigCommandTimeout,
+ Shard::RetryPolicy::kIdempotent);
auto findAndModifyStatus = extractFindAndModifyNewObj(std::move(resultStatus));
if (findAndModifyStatus == ErrorCodes::LockStateChangeFailed) {
@@ -351,13 +351,13 @@ Status DistLockCatalogImpl::unlockAll(OperationContext* txn, const std::string&
BSONObj cmdObj = request.toBSON();
- auto response =
- _client->getConfigShard()->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- _locksNS.db().toString(),
- cmdObj,
- Shard::kDefaultConfigCommandTimeout,
- Shard::RetryPolicy::kIdempotent);
+ auto response = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ _locksNS.db().toString(),
+ cmdObj,
+ Shard::kDefaultConfigCommandTimeout,
+ Shard::RetryPolicy::kIdempotent);
if (!response.isOK()) {
return response.getStatus();
@@ -382,12 +382,13 @@ Status DistLockCatalogImpl::unlockAll(OperationContext* txn, const std::string&
}
StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogImpl::getServerInfo(OperationContext* txn) {
- auto resultStatus = _client->getConfigShard()->runCommand(txn,
- kReadPref,
- "admin",
- BSON("serverStatus" << 1),
- Shard::kDefaultConfigCommandTimeout,
- Shard::RetryPolicy::kIdempotent);
+ auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
+ txn,
+ kReadPref,
+ "admin",
+ BSON("serverStatus" << 1),
+ Shard::kDefaultConfigCommandTimeout,
+ Shard::RetryPolicy::kIdempotent);
if (!resultStatus.isOK()) {
return resultStatus.getStatus();
@@ -473,13 +474,13 @@ Status DistLockCatalogImpl::stopPing(OperationContext* txn, StringData processId
FindAndModifyRequest::makeRemove(_lockPingNS, BSON(LockpingsType::process() << processId));
request.setWriteConcern(kMajorityWriteConcern);
- auto resultStatus =
- _client->getConfigShard()->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- _locksNS.db().toString(),
- request.toBSON(),
- Shard::kDefaultConfigCommandTimeout,
- Shard::RetryPolicy::kNotIdempotent);
+ auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ _locksNS.db().toString(),
+ request.toBSON(),
+ Shard::kDefaultConfigCommandTimeout,
+ Shard::RetryPolicy::kNotIdempotent);
auto findAndModifyStatus = extractFindAndModifyNewObj(std::move(resultStatus));
return findAndModifyStatus.getStatus();
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp
index 6f7315272ce..bd037972976 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp
@@ -508,11 +508,12 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
}
auto shard = shardStatus.getValue();
- auto ssvResponse = shard->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- ssv.toBSON(),
- Shard::RetryPolicy::kIdempotent);
+ auto ssvResponse =
+ shard->runCommandWithFixedRetryAttempts(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ ssv.toBSON(),
+ Shard::RetryPolicy::kIdempotent);
auto status = ssvResponse.isOK() ? std::move(ssvResponse.getValue().commandStatus)
: std::move(ssvResponse.getStatus());
if (!status.isOK()) {
@@ -815,7 +816,7 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
- auto dropResult = shardStatus.getValue()->runCommand(
+ auto dropResult = shardStatus.getValue()->runCommandWithFixedRetryAttempts(
txn,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
ns.db().toString(),
@@ -903,11 +904,12 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
}
auto shard = shardStatus.getValue();
- auto ssvResult = shard->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- ssv.toBSON(),
- Shard::RetryPolicy::kIdempotent);
+ auto ssvResult = shard->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ ssv.toBSON(),
+ Shard::RetryPolicy::kIdempotent);
if (!ssvResult.isOK()) {
return ssvResult.getStatus();
@@ -918,12 +920,12 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
return ssvStatus;
}
- auto unsetShardingStatus =
- shard->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- BSON("unsetSharding" << 1),
- Shard::RetryPolicy::kIdempotent);
+ auto unsetShardingStatus = shard->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ BSON("unsetSharding" << 1),
+ Shard::RetryPolicy::kIdempotent);
if (!unsetShardingStatus.isOK()) {
return unsetShardingStatus.getStatus();
@@ -1241,13 +1243,14 @@ bool ShardingCatalogClientImpl::runUserManagementWriteCommand(OperationContext*
cmdToRun = modifiedCmd.obj();
}
- auto response = Grid::get(txn)->shardRegistry()->getConfigShard()->runCommand(
- txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- dbname,
- cmdToRun,
- Shard::kDefaultConfigCommandTimeout,
- Shard::RetryPolicy::kNotIdempotent);
+ auto response =
+ Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ dbname,
+ cmdToRun,
+ Shard::kDefaultConfigCommandTimeout,
+ Shard::RetryPolicy::kNotIdempotent);
if (!response.isOK()) {
return Command::appendCommandStatus(*result, response.getStatus());
@@ -1271,8 +1274,9 @@ bool ShardingCatalogClientImpl::runReadCommandForTest(OperationContext* txn,
cmdBuilder.appendElements(cmdObj);
_appendReadConcern(&cmdBuilder);
- auto resultStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->runCommand(
- txn, kConfigReadSelector, dbname, cmdBuilder.done(), Shard::RetryPolicy::kIdempotent);
+ auto resultStatus =
+ Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ txn, kConfigReadSelector, dbname, cmdBuilder.done(), Shard::RetryPolicy::kIdempotent);
if (resultStatus.isOK()) {
result->appendElements(resultStatus.getValue().response);
return resultStatus.getValue().commandStatus.isOK();
@@ -1285,13 +1289,14 @@ bool ShardingCatalogClientImpl::runUserManagementReadCommand(OperationContext* t
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) {
- auto resultStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->runCommand(
- txn,
- kConfigPrimaryPreferredSelector,
- dbname,
- cmdObj,
- Shard::kDefaultConfigCommandTimeout,
- Shard::RetryPolicy::kIdempotent);
+ auto resultStatus =
+ Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ txn,
+ kConfigPrimaryPreferredSelector,
+ dbname,
+ cmdObj,
+ Shard::kDefaultConfigCommandTimeout,
+ Shard::RetryPolicy::kIdempotent);
if (resultStatus.isOK()) {
result->appendElements(resultStatus.getValue().response);
return resultStatus.getValue().commandStatus.isOK();
@@ -1314,12 +1319,13 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* txn,
<< WriteConcernOptions::kWriteConcernField
<< writeConcern.toBSON());
- auto response = Grid::get(txn)->shardRegistry()->getConfigShard()->runCommand(
- txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "config",
- cmd,
- Shard::RetryPolicy::kIdempotent);
+ auto response =
+ Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "config",
+ cmd,
+ Shard::RetryPolicy::kIdempotent);
if (!response.isOK()) {
return response.getStatus();
@@ -1601,13 +1607,14 @@ Status ShardingCatalogClientImpl::_createCappedConfigCollection(
<< WriteConcernOptions::kWriteConcernField
<< writeConcern.toBSON());
- auto result = Grid::get(txn)->shardRegistry()->getConfigShard()->runCommand(
- txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "config",
- createCmd,
- Shard::kDefaultConfigCommandTimeout,
- Shard::RetryPolicy::kIdempotent);
+ auto result =
+ Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "config",
+ createCmd,
+ Shard::kDefaultConfigCommandTimeout,
+ Shard::RetryPolicy::kIdempotent);
if (!result.isOK()) {
return result.getStatus();
@@ -1637,12 +1644,13 @@ StatusWith<long long> ShardingCatalogClientImpl::_runCountCommandOnConfig(Operat
_appendReadConcern(&countBuilder);
auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
- auto resultStatus = configShard->runCommand(txn,
- kConfigReadSelector,
- ns.db().toString(),
- countBuilder.done(),
- Shard::kDefaultConfigCommandTimeout,
- Shard::RetryPolicy::kIdempotent);
+ auto resultStatus =
+ configShard->runCommandWithFixedRetryAttempts(txn,
+ kConfigReadSelector,
+ ns.db().toString(),
+ countBuilder.done(),
+ Shard::kDefaultConfigCommandTimeout,
+ Shard::RetryPolicy::kIdempotent);
if (!resultStatus.isOK()) {
return resultStatus.getStatus();
}
@@ -1688,11 +1696,12 @@ void ShardingCatalogClientImpl::_appendReadConcern(BSONObjBuilder* builder) {
Status ShardingCatalogClientImpl::appendInfoForConfigServerDatabases(OperationContext* txn,
BSONArrayBuilder* builder) {
auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
- auto resultStatus = configShard->runCommand(txn,
- kConfigPrimaryPreferredSelector,
- "admin",
- BSON("listDatabases" << 1),
- Shard::RetryPolicy::kIdempotent);
+ auto resultStatus =
+ configShard->runCommandWithFixedRetryAttempts(txn,
+ kConfigPrimaryPreferredSelector,
+ "admin",
+ BSON("listDatabases" << 1),
+ Shard::RetryPolicy::kIdempotent);
if (!resultStatus.isOK()) {
return resultStatus.getStatus();
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 43bd27ee42f..bf9998a48a5 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -384,12 +384,12 @@ void ChunkManager::calcInitSplitsAndShards(OperationContext* txn,
auto primaryShard = uassertStatusOK(grid.shardRegistry()->getShard(txn, primaryShardId));
const NamespaceString nss{getns()};
- auto result = uassertStatusOK(
- primaryShard->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
- nss.db().toString(),
- BSON("count" << nss.coll()),
- Shard::RetryPolicy::kIdempotent));
+ auto result = uassertStatusOK(primaryShard->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
+ nss.db().toString(),
+ BSON("count" << nss.coll()),
+ Shard::RetryPolicy::kIdempotent));
long long numObjects = 0;
uassertStatusOK(result.commandStatus);
diff --git a/src/mongo/s/client/shard.cpp b/src/mongo/s/client/shard.cpp
index 445b63b3dbe..622c6ea7c50 100644
--- a/src/mongo/s/client/shard.cpp
+++ b/src/mongo/s/client/shard.cpp
@@ -106,7 +106,7 @@ StatusWith<Shard::CommandResponse> Shard::runCommand(OperationContext* txn,
const std::string& dbName,
const BSONObj& cmdObj,
RetryPolicy retryPolicy) {
- return runCommand(txn, readPref, dbName, cmdObj, Milliseconds::max(), retryPolicy);
+ MONGO_UNREACHABLE;
}
StatusWith<Shard::CommandResponse> Shard::runCommand(OperationContext* txn,
@@ -115,6 +115,26 @@ StatusWith<Shard::CommandResponse> Shard::runCommand(OperationContext* txn,
const BSONObj& cmdObj,
Milliseconds maxTimeMSOverride,
RetryPolicy retryPolicy) {
+ MONGO_UNREACHABLE;
+}
+
+StatusWith<Shard::CommandResponse> Shard::runCommandWithFixedRetryAttempts(
+ OperationContext* txn,
+ const ReadPreferenceSetting& readPref,
+ const std::string& dbName,
+ const BSONObj& cmdObj,
+ RetryPolicy retryPolicy) {
+ return runCommandWithFixedRetryAttempts(
+ txn, readPref, dbName, cmdObj, Milliseconds::max(), retryPolicy);
+}
+
+StatusWith<Shard::CommandResponse> Shard::runCommandWithFixedRetryAttempts(
+ OperationContext* txn,
+ const ReadPreferenceSetting& readPref,
+ const std::string& dbName,
+ const BSONObj& cmdObj,
+ Milliseconds maxTimeMSOverride,
+ RetryPolicy retryPolicy) {
for (int retry = 1; retry <= kOnErrorNumRetries; ++retry) {
auto hostWithResponse = _runCommand(txn, readPref, dbName, maxTimeMSOverride, cmdObj);
auto swCmdResponse = std::move(hostWithResponse.commandResponse);
diff --git a/src/mongo/s/client/shard.h b/src/mongo/s/client/shard.h
index 96dfecd204c..1f3866e84e3 100644
--- a/src/mongo/s/client/shard.h
+++ b/src/mongo/s/client/shard.h
@@ -160,6 +160,31 @@ public:
RetryPolicy retryPolicy);
/**
+ * Same as runCommand, but will only retry failed operations up to 3 times, regardless of
+ * the retryPolicy or the remaining maxTimeMs.
+ * Wherever possible this method should be avoided in favor of runCommand.
+ */
+ StatusWith<CommandResponse> runCommandWithFixedRetryAttempts(
+ OperationContext* txn,
+ const ReadPreferenceSetting& readPref,
+ const std::string& dbName,
+ const BSONObj& cmdObj,
+ RetryPolicy retryPolicy);
+
+ /**
+ * Same as runCommand, but will only retry failed operations up to 3 times, regardless of
+ * the retryPolicy or the remaining maxTimeMs.
+ * Wherever possible this method should be avoided in favor of runCommand.
+ */
+ StatusWith<CommandResponse> runCommandWithFixedRetryAttempts(
+ OperationContext* txn,
+ const ReadPreferenceSetting& readPref,
+ const std::string& dbName,
+ const BSONObj& cmdObj,
+ Milliseconds maxTimeMSOverride,
+ RetryPolicy retryPolicy);
+
+ /**
* Expects a single-entry batch write command and runs it on the config server's primary using
* the specified retry policy.
*/
diff --git a/src/mongo/s/client/shard_local_test.cpp b/src/mongo/s/client/shard_local_test.cpp
index 409d8dba86d..3bd026db9ed 100644
--- a/src/mongo/s/client/shard_local_test.cpp
+++ b/src/mongo/s/client/shard_local_test.cpp
@@ -103,19 +103,21 @@ StatusWith<Shard::CommandResponse> ShardLocalTest::runFindAndModifyRunCommand(Na
findAndModifyRequest.setWriteConcern(WriteConcernOptions(
WriteConcernOptions::kMajority, WriteConcernOptions::SyncMode::UNSET, Seconds(15)));
- return _shardLocal->runCommand(_txn.get(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- nss.db().toString(),
- findAndModifyRequest.toBSON(),
- Shard::RetryPolicy::kNoRetry);
+ return _shardLocal->runCommandWithFixedRetryAttempts(
+ _txn.get(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ nss.db().toString(),
+ findAndModifyRequest.toBSON(),
+ Shard::RetryPolicy::kNoRetry);
}
StatusWith<std::vector<BSONObj>> ShardLocalTest::getIndexes(NamespaceString nss) {
- auto response = _shardLocal->runCommand(_txn.get(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- nss.db().toString(),
- BSON("listIndexes" << nss.coll().toString()),
- Shard::RetryPolicy::kIdempotent);
+ auto response = _shardLocal->runCommandWithFixedRetryAttempts(
+ _txn.get(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ nss.db().toString(),
+ BSON("listIndexes" << nss.coll().toString()),
+ Shard::RetryPolicy::kIdempotent);
if (!response.isOK()) {
return response.getStatus();
}
diff --git a/src/mongo/s/commands/cluster_add_shard_cmd.cpp b/src/mongo/s/commands/cluster_add_shard_cmd.cpp
index d6825da3c93..e2cb0e4f117 100644
--- a/src/mongo/s/commands/cluster_add_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_add_shard_cmd.cpp
@@ -89,12 +89,12 @@ public:
auto parsedRequest = uassertStatusOK(AddShardRequest::parseFromMongosCommand(cmdObj));
auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
- auto cmdResponseStatus =
- uassertStatusOK(configShard->runCommand(txn,
- kPrimaryOnlyReadPreference,
- "admin",
- parsedRequest.toCommandForConfig(),
- Shard::RetryPolicy::kIdempotent));
+ auto cmdResponseStatus = uassertStatusOK(
+ configShard->runCommandWithFixedRetryAttempts(txn,
+ kPrimaryOnlyReadPreference,
+ "admin",
+ parsedRequest.toCommandForConfig(),
+ Shard::RetryPolicy::kIdempotent));
uassertStatusOK(cmdResponseStatus.commandStatus);
string shardAdded;
diff --git a/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp b/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp
index 36d504c276d..1a5559e52ec 100644
--- a/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp
+++ b/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp
@@ -109,12 +109,12 @@ public:
cmdBuilder.append("writeConcern", kMajorityWriteConcern.toBSON());
auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
- auto cmdResponseStatus =
- uassertStatusOK(configShard->runCommand(txn,
- kPrimaryOnlyReadPreference,
- "admin",
- cmdBuilder.obj(),
- Shard::RetryPolicy::kIdempotent));
+ auto cmdResponseStatus = uassertStatusOK(
+ configShard->runCommandWithFixedRetryAttempts(txn,
+ kPrimaryOnlyReadPreference,
+ "admin",
+ cmdBuilder.obj(),
+ Shard::RetryPolicy::kIdempotent));
uassertStatusOK(cmdResponseStatus.commandStatus);
return true;
}
diff --git a/src/mongo/s/commands/cluster_control_balancer_cmd.cpp b/src/mongo/s/commands/cluster_control_balancer_cmd.cpp
index d7a1337aed0..39da91dd362 100644
--- a/src/mongo/s/commands/cluster_control_balancer_cmd.cpp
+++ b/src/mongo/s/commands/cluster_control_balancer_cmd.cpp
@@ -84,12 +84,12 @@ public:
std::string& errmsg,
BSONObjBuilder& result) override {
auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
- auto cmdResponse =
- uassertStatusOK(configShard->runCommand(txn,
- kPrimaryOnlyReadPreference,
- "admin",
- BSON(_configsvrCommandName << 1),
- Shard::RetryPolicy::kIdempotent));
+ auto cmdResponse = uassertStatusOK(
+ configShard->runCommandWithFixedRetryAttempts(txn,
+ kPrimaryOnlyReadPreference,
+ "admin",
+ BSON(_configsvrCommandName << 1),
+ Shard::RetryPolicy::kIdempotent));
uassertStatusOK(cmdResponse.commandStatus);
// Append any return value from the response, which the config server returned
diff --git a/src/mongo/s/commands/cluster_fsync_cmd.cpp b/src/mongo/s/commands/cluster_fsync_cmd.cpp
index 15e20e2c436..6de6d08021f 100644
--- a/src/mongo/s/commands/cluster_fsync_cmd.cpp
+++ b/src/mongo/s/commands/cluster_fsync_cmd.cpp
@@ -93,12 +93,12 @@ public:
}
const auto s = shardStatus.getValue();
- auto response =
- uassertStatusOK(s->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- BSON("fsync" << 1),
- Shard::RetryPolicy::kIdempotent));
+ auto response = uassertStatusOK(s->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ BSON("fsync" << 1),
+ Shard::RetryPolicy::kIdempotent));
uassertStatusOK(response.commandStatus);
BSONObj x = std::move(response.response);
diff --git a/src/mongo/s/commands/cluster_list_databases_cmd.cpp b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
index 7ea4cd7e8fc..9151d244bfe 100644
--- a/src/mongo/s/commands/cluster_list_databases_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
@@ -101,12 +101,12 @@ public:
}
const auto s = shardStatus.getValue();
- auto response = uassertStatusOK(
- s->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
- "admin",
- BSON("listDatabases" << 1),
- Shard::RetryPolicy::kIdempotent));
+ auto response = uassertStatusOK(s->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
+ "admin",
+ BSON("listDatabases" << 1),
+ Shard::RetryPolicy::kIdempotent));
uassertStatusOK(response.commandStatus);
BSONObj x = std::move(response.response);
diff --git a/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp b/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp
index a24c7a63e3b..89902d3bde0 100644
--- a/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp
+++ b/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp
@@ -117,12 +117,12 @@ public:
cmdBuilder.append("writeConcern", kMajorityWriteConcern.toBSON());
auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
- auto cmdResponseStatus =
- uassertStatusOK(configShard->runCommand(txn,
- kPrimaryOnlyReadPreference,
- "admin",
- cmdBuilder.obj(),
- Shard::RetryPolicy::kIdempotent));
+ auto cmdResponseStatus = uassertStatusOK(
+ configShard->runCommandWithFixedRetryAttempts(txn,
+ kPrimaryOnlyReadPreference,
+ "admin",
+ cmdBuilder.obj(),
+ Shard::RetryPolicy::kIdempotent));
uassertStatusOK(cmdResponseStatus.commandStatus);
return true;
}
diff --git a/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp b/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp
index 0bcc8ad6e2f..0ff6eacabf6 100644
--- a/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp
+++ b/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp
@@ -112,12 +112,12 @@ public:
// Forward to config shard, which will forward to all shards.
auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
- auto response =
- configShard->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- dbname,
- BSON("_configsvrSetFeatureCompatibilityVersion" << version),
- Shard::RetryPolicy::kIdempotent);
+ auto response = configShard->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ dbname,
+ BSON("_configsvrSetFeatureCompatibilityVersion" << version),
+ Shard::RetryPolicy::kIdempotent);
uassertStatusOK(response);
uassertStatusOK(response.getValue().commandStatus);
diff --git a/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp b/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp
index a08e0d32677..7c1e55eaf5d 100644
--- a/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp
+++ b/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp
@@ -132,12 +132,12 @@ public:
cmdBuilder.append("writeConcern", kMajorityWriteConcern.toBSON());
auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
- auto cmdResponseStatus =
- uassertStatusOK(configShard->runCommand(txn,
- kPrimaryOnlyReadPreference,
- "admin",
- cmdBuilder.obj(),
- Shard::RetryPolicy::kIdempotent));
+ auto cmdResponseStatus = uassertStatusOK(
+ configShard->runCommandWithFixedRetryAttempts(txn,
+ kPrimaryOnlyReadPreference,
+ "admin",
+ cmdBuilder.obj(),
+ Shard::RetryPolicy::kIdempotent));
uassertStatusOK(cmdResponseStatus.commandStatus);
return true;
}
diff --git a/src/mongo/s/commands/cluster_user_management_commands.cpp b/src/mongo/s/commands/cluster_user_management_commands.cpp
index 177855c50f1..69aee76c4d3 100644
--- a/src/mongo/s/commands/cluster_user_management_commands.cpp
+++ b/src/mongo/s/commands/cluster_user_management_commands.cpp
@@ -858,12 +858,12 @@ Status runUpgradeOnAllShards(OperationContext* txn, int maxSteps, BSONObjBuilder
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
- auto cmdResult =
- shardStatus.getValue()->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- cmdObj,
- Shard::RetryPolicy::kIdempotent);
+ auto cmdResult = shardStatus.getValue()->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ cmdObj,
+ Shard::RetryPolicy::kIdempotent);
auto status = cmdResult.isOK() ? std::move(cmdResult.getValue().commandStatus)
: std::move(cmdResult.getStatus());
if (!status.isOK()) {
diff --git a/src/mongo/s/config_server_client.cpp b/src/mongo/s/config_server_client.cpp
index 22dcff95810..ba3e09f3292 100644
--- a/src/mongo/s/config_server_client.cpp
+++ b/src/mongo/s/config_server_client.cpp
@@ -51,7 +51,7 @@ Status moveChunk(OperationContext* txn,
bool waitForDelete) {
auto shardRegistry = Grid::get(txn)->shardRegistry();
auto shard = shardRegistry->getConfigShard();
- auto cmdResponseStatus = shard->runCommand(
+ auto cmdResponseStatus = shard->runCommandWithFixedRetryAttempts(
txn,
kPrimaryOnlyReadPreference,
"admin",
@@ -68,12 +68,12 @@ Status moveChunk(OperationContext* txn,
Status rebalanceChunk(OperationContext* txn, const ChunkType& chunk) {
auto shardRegistry = Grid::get(txn)->shardRegistry();
auto shard = shardRegistry->getConfigShard();
- auto cmdResponseStatus =
- shard->runCommand(txn,
- kPrimaryOnlyReadPreference,
- "admin",
- BalanceChunkRequest::serializeToRebalanceCommandForConfig(chunk),
- Shard::RetryPolicy::kNotIdempotent);
+ auto cmdResponseStatus = shard->runCommandWithFixedRetryAttempts(
+ txn,
+ kPrimaryOnlyReadPreference,
+ "admin",
+ BalanceChunkRequest::serializeToRebalanceCommandForConfig(chunk),
+ Shard::RetryPolicy::kNotIdempotent);
if (!cmdResponseStatus.isOK()) {
return cmdResponseStatus.getStatus();
}
diff --git a/src/mongo/s/config_server_test_fixture.cpp b/src/mongo/s/config_server_test_fixture.cpp
index 70b391e7077..f984886e004 100644
--- a/src/mongo/s/config_server_test_fixture.cpp
+++ b/src/mongo/s/config_server_test_fixture.cpp
@@ -348,12 +348,13 @@ Status ConfigServerTestFixture::insertToConfigCollection(OperationContext* txn,
auto config = getConfigShard();
invariant(config);
- auto insertResponse = config->runCommand(txn,
- kReadPref,
- ns.db().toString(),
- request.toBSON(),
- Shard::kDefaultConfigCommandTimeout,
- Shard::RetryPolicy::kNoRetry);
+ auto insertResponse =
+ config->runCommandWithFixedRetryAttempts(txn,
+ kReadPref,
+ ns.db().toString(),
+ request.toBSON(),
+ Shard::kDefaultConfigCommandTimeout,
+ Shard::RetryPolicy::kNoRetry);
BatchedCommandResponse batchResponse;
auto status = Shard::CommandResponse::processBatchWriteResponse(insertResponse, &batchResponse);
@@ -433,11 +434,12 @@ StatusWith<std::vector<BSONObj>> ConfigServerTestFixture::getIndexes(OperationCo
const NamespaceString& ns) {
auto configShard = getConfigShard();
- auto response = configShard->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- ns.db().toString(),
- BSON("listIndexes" << ns.coll().toString()),
- Shard::RetryPolicy::kIdempotent);
+ auto response = configShard->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ ns.db().toString(),
+ BSON("listIndexes" << ns.coll().toString()),
+ Shard::RetryPolicy::kIdempotent);
if (!response.isOK()) {
return response.getStatus();
}
diff --git a/src/mongo/s/shard_util.cpp b/src/mongo/s/shard_util.cpp
index 4b1b2b2fb51..e834f77e743 100644
--- a/src/mongo/s/shard_util.cpp
+++ b/src/mongo/s/shard_util.cpp
@@ -60,12 +60,12 @@ StatusWith<long long> retrieveTotalShardSize(OperationContext* txn, const ShardI
return shardStatus.getStatus();
}
- auto listDatabasesStatus =
- shardStatus.getValue()->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
- "admin",
- BSON("listDatabases" << 1),
- Shard::RetryPolicy::kIdempotent);
+ auto listDatabasesStatus = shardStatus.getValue()->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
+ "admin",
+ BSON("listDatabases" << 1),
+ Shard::RetryPolicy::kIdempotent);
if (!listDatabasesStatus.isOK()) {
return std::move(listDatabasesStatus.getStatus());
}
@@ -99,12 +99,12 @@ StatusWith<BSONObj> selectMedianKey(OperationContext* txn,
return shardStatus.getStatus();
}
- auto cmdStatus =
- shardStatus.getValue()->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
- "admin",
- cmd.obj(),
- Shard::RetryPolicy::kIdempotent);
+ auto cmdStatus = shardStatus.getValue()->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
+ "admin",
+ cmd.obj(),
+ Shard::RetryPolicy::kIdempotent);
if (!cmdStatus.isOK()) {
return std::move(cmdStatus.getStatus());
}
@@ -145,12 +145,12 @@ StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* txn,
return shardStatus.getStatus();
}
- auto cmdStatus =
- shardStatus.getValue()->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
- "admin",
- cmd.obj(),
- Shard::RetryPolicy::kIdempotent);
+ auto cmdStatus = shardStatus.getValue()->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
+ "admin",
+ cmd.obj(),
+ Shard::RetryPolicy::kIdempotent);
if (!cmdStatus.isOK()) {
return std::move(cmdStatus.getStatus());
}
@@ -212,12 +212,12 @@ StatusWith<boost::optional<ChunkRange>> splitChunkAtMultiplePoints(
if (!shardStatus.isOK()) {
status = shardStatus.getStatus();
} else {
- auto cmdStatus =
- shardStatus.getValue()->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- cmdObj,
- Shard::RetryPolicy::kNotIdempotent);
+ auto cmdStatus = shardStatus.getValue()->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ cmdObj,
+ Shard::RetryPolicy::kNotIdempotent);
if (!cmdStatus.isOK()) {
status = std::move(cmdStatus.getStatus());
} else {