author    Spencer T Brody <spencer@mongodb.com>  2016-04-27 17:38:50 -0400
committer Spencer T Brody <spencer@mongodb.com>  2016-05-02 18:45:07 -0400
commit    52d9ecc1106023bb3e749ac7368391d780d3b3a2
tree      9ab26e92490727787c0ef7a7278d7d6862d89f21 /src/mongo
parent    070f32758b680425b0a6995b881fcb843a7db6cf
SERVER-23211 Remove all callers of ShardRegistry::runIdempotentCommandOnShard
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/s/balance.cpp                                       41
-rw-r--r--  src/mongo/s/balancer/cluster_statistics_impl.cpp              25
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp  91
-rw-r--r--  src/mongo/s/chunk.cpp                                         29
-rw-r--r--  src/mongo/s/chunk_manager.cpp                                 18
-rw-r--r--  src/mongo/s/client/shard.h                                     2
-rw-r--r--  src/mongo/s/client/shard_remote.cpp                            5
-rw-r--r--  src/mongo/s/commands/cluster_fsync_cmd.cpp                    14
-rw-r--r--  src/mongo/s/commands/cluster_list_databases_cmd.cpp           14
-rw-r--r--  src/mongo/s/commands/cluster_user_management_commands.cpp    21
-rw-r--r--  src/mongo/s/shard_util.cpp                                    98
11 files changed, 222 insertions(+), 136 deletions(-)
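
Every file below follows the same migration: where ShardRegistry::runIdempotentCommandOnShard used to resolve the shard and collapse all failure modes into a single StatusWith<BSONObj>, callers now look up the Shard themselves, pass an explicit Shard::RetryPolicy to Shard::runCommand, and unpack the outer Status, the embedded commandStatus, and (where relevant) writeConcernStatus separately. A minimal sketch of the new shape, modeled on retrieveShardMongoDVersion in cluster_statistics_impl.cpp below (txn and shardId are assumed to be in scope):

    auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
    if (!shard) {
        // Shard resolution is no longer done inside the registry call, so
        // every caller handles a missing shard explicitly.
        return {ErrorCodes::ShardNotFound, str::stream() << "shard " << shardId << " not found"};
    }
    auto swResponse = shard->runCommand(txn,
                                        ReadPreferenceSetting{ReadPreference::PrimaryOnly},
                                        "admin",
                                        BSON("serverStatus" << 1),
                                        Shard::RetryPolicy::kIdempotent);
    if (!swResponse.isOK()) {
        return swResponse.getStatus();  // the command never reached the shard
    }
    if (!swResponse.getValue().commandStatus.isOK()) {
        return swResponse.getValue().commandStatus;  // the command itself failed
    }
    BSONObj serverStatus = std::move(swResponse.getValue().response);
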
diff --git a/src/mongo/s/balance.cpp b/src/mongo/s/balance.cpp
index 5ad83e841ab..afbfac59728 100644
--- a/src/mongo/s/balance.cpp
+++ b/src/mongo/s/balance.cpp
@@ -354,12 +354,15 @@ bool Balancer::_checkOIDs(OperationContext* txn) {
continue;
}
- BSONObj f = uassertStatusOK(grid.shardRegistry()->runIdempotentCommandOnShard(
- txn,
- s,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- BSON("features" << 1)));
+ auto result =
+ uassertStatusOK(s->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ BSON("features" << 1),
+ Shard::RetryPolicy::kIdempotent));
+ uassertStatusOK(result.commandStatus);
+ BSONObj f = std::move(result.response);
+
if (f["oidMachine"].isNumber()) {
int x = f["oidMachine"].numberInt();
if (oids.count(x) == 0) {
@@ -368,21 +371,23 @@ bool Balancer::_checkOIDs(OperationContext* txn) {
log() << "error: 2 machines have " << x << " as oid machine piece: " << shardId
<< " and " << oids[x];
- uassertStatusOK(grid.shardRegistry()->runIdempotentCommandOnShard(
- txn,
- s,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- BSON("features" << 1 << "oidReset" << 1)));
+ result = uassertStatusOK(
+ s->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ BSON("features" << 1 << "oidReset" << 1),
+ Shard::RetryPolicy::kIdempotent));
+ uassertStatusOK(result.commandStatus);
const auto otherShard = grid.shardRegistry()->getShard(txn, oids[x]);
if (otherShard) {
- uassertStatusOK(grid.shardRegistry()->runIdempotentCommandOnShard(
- txn,
- otherShard,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- BSON("features" << 1 << "oidReset" << 1)));
+ result = uassertStatusOK(
+ otherShard->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ BSON("features" << 1 << "oidReset" << 1),
+ Shard::RetryPolicy::kIdempotent));
+ uassertStatusOK(result.commandStatus);
}
return false;
diff --git a/src/mongo/s/balancer/cluster_statistics_impl.cpp b/src/mongo/s/balancer/cluster_statistics_impl.cpp
index a6e0b3dcf60..9c6bea9350e 100644
--- a/src/mongo/s/balancer/cluster_statistics_impl.cpp
+++ b/src/mongo/s/balancer/cluster_statistics_impl.cpp
@@ -62,17 +62,24 @@ const char kVersionField[] = "version";
*/
StatusWith<string> retrieveShardMongoDVersion(OperationContext* txn, ShardId shardId) {
auto shardRegistry = Grid::get(txn)->shardRegistry();
- auto commandStatus = shardRegistry->runIdempotentCommandOnShard(
- txn,
- shardId,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- BSON("serverStatus" << 1));
- if (!commandStatus.isOK()) {
- return commandStatus.getStatus();
+ auto shard = shardRegistry->getShard(txn, shardId);
+ if (!shard) {
+ return {ErrorCodes::ShardNotFound, str::stream() << "shard " << shardId << " not found"};
}
- BSONObj serverStatus = std::move(commandStatus.getValue());
+ auto commandResponse = shard->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ BSON("serverStatus" << 1),
+ Shard::RetryPolicy::kIdempotent);
+ if (!commandResponse.isOK()) {
+ return commandResponse.getStatus();
+ }
+ if (!commandResponse.getValue().commandStatus.isOK()) {
+ return commandResponse.getValue().commandStatus;
+ }
+
+ BSONObj serverStatus = std::move(commandResponse.getValue().response);
string version;
Status status = bsonExtractStringField(serverStatus, kVersionField, &version);
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
index fdb3ebb2bb5..2363d2c9d45 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
@@ -720,12 +720,12 @@ Status CatalogManagerReplicaSet::shardCollection(OperationContext* txn,
return scopedDistLock.getStatus();
}
- auto status = getDatabase(txn, nsToDatabase(ns));
- if (!status.isOK()) {
- return status.getStatus();
+ auto getDBStatus = getDatabase(txn, nsToDatabase(ns));
+ if (!getDBStatus.isOK()) {
+ return getDBStatus.getStatus();
}
- ShardId dbPrimaryShardId = status.getValue().value.getPrimary();
+ ShardId dbPrimaryShardId = getDBStatus.getValue().value.getPrimary();
const auto primaryShard = grid.shardRegistry()->getShard(txn, dbPrimaryShardId);
{
@@ -789,15 +789,22 @@ Status CatalogManagerReplicaSet::shardCollection(OperationContext* txn,
manager->getVersion(),
true);
- auto ssvStatus = grid.shardRegistry()->runIdempotentCommandOnShard(
- txn,
- dbPrimaryShardId,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- ssv.toBSON());
- if (!ssvStatus.isOK()) {
+ auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, dbPrimaryShardId);
+ if (!shard) {
+ return {ErrorCodes::ShardNotFound,
+ str::stream() << "shard " << dbPrimaryShardId << " not found"};
+ }
+
+ auto ssvResponse = shard->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ ssv.toBSON(),
+ Shard::RetryPolicy::kIdempotent);
+ auto status = ssvResponse.isOK() ? std::move(ssvResponse.getValue().commandStatus)
+ : std::move(ssvResponse.getStatus());
+ if (!status.isOK()) {
warning() << "could not update initial version of " << ns << " on shard primary "
- << dbPrimaryShardId << ssvStatus.getStatus();
+ << dbPrimaryShardId << causedBy(status);
}
logChange(txn, "shardCollection.end", ns, BSON("version" << manager->getVersion().toString()));
@@ -1064,25 +1071,38 @@ Status CatalogManagerReplicaSet::dropCollection(OperationContext* txn, const Nam
auto* shardRegistry = grid.shardRegistry();
for (const auto& shardEntry : allShards) {
- auto dropResult = shardRegistry->runIdempotentCommandOnShard(
+ auto shard = shardRegistry->getShard(txn, shardEntry.getName());
+ if (!shard) {
+ return {ErrorCodes::ShardNotFound,
+ str::stream() << "shard " << shardEntry.getName() << " not found"};
+ }
+ auto dropResult = shard->runCommand(
txn,
- shardEntry.getName(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
ns.db().toString(),
- BSON("drop" << ns.coll() << "writeConcern" << txn->getWriteConcern().toBSON()));
+ BSON("drop" << ns.coll() << "writeConcern" << txn->getWriteConcern().toBSON()),
+ Shard::RetryPolicy::kIdempotent);
if (!dropResult.isOK()) {
return Status(dropResult.getStatus().code(),
dropResult.getStatus().reason() + " at " + shardEntry.getName());
}
- auto dropStatus = getStatusFromCommandResult(dropResult.getValue());
- if (!dropStatus.isOK()) {
- if (dropStatus.code() == ErrorCodes::NamespaceNotFound) {
+ auto dropStatus = std::move(dropResult.getValue().commandStatus);
+ auto wcStatus = std::move(dropResult.getValue().writeConcernStatus);
+ if (!dropStatus.isOK() || !wcStatus.isOK()) {
+ if (dropStatus.code() == ErrorCodes::NamespaceNotFound && wcStatus.isOK()) {
+ // Generally getting NamespaceNotFound is okay to ignore as it simply means that
+ // the collection has already been dropped or doesn't exist on this shard.
+ // If, however, we get NamespaceNotFound but also have a write concern error then we
+ // can't confirm whether the fact that the namespace doesn't exist is actually
+ // committed. Thus we must still fail on NamespaceNotFound if there is also a write
+ // concern error. This can happen if we call drop, it succeeds but with a write
+ // concern error, then we retry the drop.
continue;
}
- errors.emplace(shardEntry.getHost(), dropResult.getValue());
+ errors.emplace(shardEntry.getHost(), std::move(dropResult.getValue().response));
}
}
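
The comment above captures the one subtle case in this hunk: NamespaceNotFound alone means the collection is already gone, but NamespaceNotFound combined with a failed write concern may mean an earlier drop was never committed. Condensed into a hypothetical helper (not part of this diff) for illustration:

    // Hypothetical helper summarizing the branch added above.
    bool isAlreadyDropped(const Status& dropStatus, const Status& wcStatus) {
        // Safe to skip only when the write concern was also satisfied;
        // otherwise a previous, uncommitted drop may be why the namespace
        // appears to be missing, and the result cannot be trusted.
        return dropStatus.code() == ErrorCodes::NamespaceNotFound && wcStatus.isOK();
    }
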
@@ -1134,34 +1154,39 @@ Status CatalogManagerReplicaSet::dropCollection(OperationContext* txn, const Nam
ChunkVersion::DROPPED(),
true);
- auto ssvResult = shardRegistry->runIdempotentCommandOnShard(
- txn,
- shardEntry.getName(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- ssv.toBSON());
+ auto shard = shardRegistry->getShard(txn, shardEntry.getName());
+ if (!shard) {
+ return {ErrorCodes::ShardNotFound,
+ str::stream() << "shard " << shardEntry.getName() << " not found"};
+ }
+
+ auto ssvResult = shard->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ ssv.toBSON(),
+ Shard::RetryPolicy::kIdempotent);
if (!ssvResult.isOK()) {
return ssvResult.getStatus();
}
- auto ssvStatus = getStatusFromCommandResult(ssvResult.getValue());
+ auto ssvStatus = std::move(ssvResult.getValue().commandStatus);
if (!ssvStatus.isOK()) {
return ssvStatus;
}
- auto unsetShardingStatus = shardRegistry->runIdempotentCommandOnShard(
- txn,
- shardEntry.getName(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- BSON("unsetSharding" << 1));
+ auto unsetShardingStatus =
+ shard->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ BSON("unsetSharding" << 1),
+ Shard::RetryPolicy::kIdempotent);
if (!unsetShardingStatus.isOK()) {
return unsetShardingStatus.getStatus();
}
- auto unsetShardingResult = getStatusFromCommandResult(unsetShardingStatus.getValue());
+ auto unsetShardingResult = std::move(unsetShardingStatus.getValue().commandStatus);
if (!unsetShardingResult.isOK()) {
return unsetShardingResult;
}
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index a35dea17787..9dc814af604 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -419,15 +419,28 @@ bool Chunk::moveAndCommit(OperationContext* txn,
Status status{ErrorCodes::NotYetInitialized, "Uninitialized"};
- auto cmdStatus = Grid::get(txn)->shardRegistry()->runIdempotentCommandOnShard(
- txn, _shardId, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, "admin", cmdObj);
- if (!cmdStatus.isOK()) {
- warning() << "Move chunk failed" << causedBy(cmdStatus.getStatus());
- status = std::move(cmdStatus.getStatus());
+ auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, _shardId);
+ if (!shard) {
+ status = Status(ErrorCodes::ShardNotFound,
+ str::stream() << "shard " << _shardId << " not found");
} else {
- res = std::move(cmdStatus.getValue());
- status = getStatusFromCommandResult(res);
- LOG(status.isOK() ? 1 : 0) << "moveChunk result: " << res;
+ auto response = shard->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ cmdObj,
+ Shard::RetryPolicy::kNotIdempotent);
+ if (!response.isOK()) {
+ status = std::move(response.getStatus());
+ } else {
+ status = std::move(response.getValue().commandStatus);
+ res = std::move(response.getValue().response);
+ }
+ }
+
+ if (status.isOK()) {
+ LOG(1) << "moveChunk result: " << res;
+ } else {
+ warning() << "Move chunk failed" << causedBy(status);
}
// If it succeeded, we need to reload the chunk manager in order to pick up the new location. If
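
Note the retry policy at this call site: unlike the read-only probes converted elsewhere in this commit, moveChunk is not safe to re-send after an ambiguous failure, so it passes Shard::RetryPolicy::kNotIdempotent. Schematically:

    // Retry policies as chosen across this commit:
    //   chunk.cpp      -> moveChunk   (Shard::RetryPolicy::kNotIdempotent)
    //   shard_util.cpp -> splitChunk  (Shard::RetryPolicy::kNotIdempotent)
    // Every other converted call, including drop and setShardVersion,
    // retries under Shard::RetryPolicy::kIdempotent.
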
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 99ecefa4d31..cb2e0055e31 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -357,17 +357,17 @@ void ChunkManager::calcInitSplitsAndShards(OperationContext* txn,
// discover split points
const auto primaryShard = grid.shardRegistry()->getShard(txn, primaryShardId);
const NamespaceString nss{getns()};
- auto result = grid.shardRegistry()->runIdempotentCommandOnShard(
- txn,
- primaryShard,
- ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
- nss.db().toString(),
- BSON("count" << nss.coll()));
+
+ auto result = uassertStatusOK(
+ primaryShard->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
+ nss.db().toString(),
+ BSON("count" << nss.coll()),
+ Shard::RetryPolicy::kIdempotent));
long long numObjects = 0;
- uassertStatusOK(result.getStatus());
- uassertStatusOK(getStatusFromCommandResult(result.getValue()));
- uassertStatusOK(bsonExtractIntegerField(result.getValue(), "n", &numObjects));
+ uassertStatusOK(result.commandStatus);
+ uassertStatusOK(bsonExtractIntegerField(result.response, "n", &numObjects));
if (numObjects > 0) {
*splitPoints = uassertStatusOK(shardutil::selectChunkSplitPoints(
diff --git a/src/mongo/s/client/shard.h b/src/mongo/s/client/shard.h
index 9624ff8555a..68ff1ba7f5f 100644
--- a/src/mongo/s/client/shard.h
+++ b/src/mongo/s/client/shard.h
@@ -59,7 +59,7 @@ public:
BSONObj response;
BSONObj metadata;
Status commandStatus;
- Status writeConcernStatus; // Only valid to check when commandStatus is OK.
+ Status writeConcernStatus;
};
struct QueryResponse {
diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp
index c47a7607d59..cccc6daf38f 100644
--- a/src/mongo/s/client/shard_remote.cpp
+++ b/src/mongo/s/client/shard_remote.cpp
@@ -266,10 +266,7 @@ StatusWith<Shard::CommandResponse> ShardRemote::_runCommand(OperationContext* tx
BSONObj responseObj = swResponse.getValue().data.getOwned();
BSONObj responseMetadata = swResponse.getValue().metadata.getOwned();
Status commandStatus = getStatusFromCommandResult(responseObj);
- Status writeConcernStatus = kInternalErrorStatus;
- if (commandStatus.isOK()) {
- writeConcernStatus = getWriteConcernStatusFromCommandResult(responseObj);
- }
+ Status writeConcernStatus = getWriteConcernStatusFromCommandResult(responseObj);
updateReplSetMonitor(host.getValue(), commandStatus);
return CommandResponse(std::move(responseObj),
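
With this change writeConcernStatus is extracted from every response rather than only when commandStatus is OK, which is what allows dropCollection above to inspect both statuses independently; the now-stale "Only valid to check when commandStatus is OK" comment is dropped from shard.h to match. A caller-side sketch (swResponse here is an assumed StatusWith<Shard::CommandResponse>):

    const auto& value = swResponse.getValue();
    // commandStatus and writeConcernStatus can now be checked in either
    // order, independently of one another.
    if (!value.commandStatus.isOK() || !value.writeConcernStatus.isOK()) {
        // handle command and write-concern failures separately
    }
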
diff --git a/src/mongo/s/commands/cluster_fsync_cmd.cpp b/src/mongo/s/commands/cluster_fsync_cmd.cpp
index 4a8b20df062..85fc026ce9a 100644
--- a/src/mongo/s/commands/cluster_fsync_cmd.cpp
+++ b/src/mongo/s/commands/cluster_fsync_cmd.cpp
@@ -92,12 +92,14 @@ public:
continue;
}
- BSONObj x = uassertStatusOK(grid.shardRegistry()->runIdempotentCommandOnShard(
- txn,
- s,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- BSON("fsync" << 1)));
+ auto result =
+ uassertStatusOK(s->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ BSON("fsync" << 1),
+ Shard::RetryPolicy::kIdempotent));
+ uassertStatusOK(result.commandStatus);
+ BSONObj x = std::move(result.response);
sub.append(s->getId(), x);
diff --git a/src/mongo/s/commands/cluster_list_databases_cmd.cpp b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
index a8fb599871f..6c40889a086 100644
--- a/src/mongo/s/commands/cluster_list_databases_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
@@ -100,12 +100,14 @@ public:
continue;
}
- BSONObj x = uassertStatusOK(grid.shardRegistry()->runIdempotentCommandOnShard(
- txn,
- s,
- ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
- "admin",
- BSON("listDatabases" << 1)));
+ auto result = uassertStatusOK(
+ s->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
+ "admin",
+ BSON("listDatabases" << 1),
+ Shard::RetryPolicy::kIdempotent));
+ uassertStatusOK(result.commandStatus);
+ BSONObj x = std::move(result.response);
BSONObjIterator j(x["databases"].Obj());
while (j.more()) {
diff --git a/src/mongo/s/commands/cluster_user_management_commands.cpp b/src/mongo/s/commands/cluster_user_management_commands.cpp
index 0b2a79f793a..1e448a4689e 100644
--- a/src/mongo/s/commands/cluster_user_management_commands.cpp
+++ b/src/mongo/s/commands/cluster_user_management_commands.cpp
@@ -851,18 +851,27 @@ Status runUpgradeOnAllShards(OperationContext* txn,
bool hasWCError = false;
for (const auto& shardId : shardIds) {
- auto cmdResult = shardRegistry->runIdempotentCommandOnShard(
- txn, shardId, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, "admin", cmdObj);
-
- if (!cmdResult.isOK()) {
- return Status(cmdResult.getStatus().code(),
+ auto shard = shardRegistry->getShard(txn, shardId);
+ if (!shard) {
+ return {ErrorCodes::ShardNotFound,
+ str::stream() << "shard " << shardId << " not found"};
+ }
+ auto cmdResult = shard->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ cmdObj,
+ Shard::RetryPolicy::kIdempotent);
+ auto status = cmdResult.isOK() ? std::move(cmdResult.getValue().commandStatus)
+ : std::move(cmdResult.getStatus());
+ if (!status.isOK()) {
+ return Status(status.code(),
str::stream() << "Failed to run authSchemaUpgrade on shard " << shardId
<< causedBy(status));
}
// If the result has a writeConcernError, append it.
if (!hasWCError) {
- if (auto wcErrorElem = cmdResult.getValue()["writeConcernError"]) {
+ if (auto wcErrorElem = cmdResult.getValue().response["writeConcernError"]) {
appendWriteConcernErrorToCmdResponse(shardId, wcErrorElem, result);
hasWCError = true;
}
diff --git a/src/mongo/s/shard_util.cpp b/src/mongo/s/shard_util.cpp
index beb84eee609..3083b8f251c 100644
--- a/src/mongo/s/shard_util.cpp
+++ b/src/mongo/s/shard_util.cpp
@@ -52,18 +52,25 @@ const char kShouldMigrate[] = "shouldMigrate";
}
StatusWith<long long> retrieveTotalShardSize(OperationContext* txn, const ShardId& shardId) {
- auto shardRegistry = Grid::get(txn)->shardRegistry();
- auto listDatabasesStatus = shardRegistry->runIdempotentCommandOnShard(
- txn,
- shardId,
- ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
- "admin",
- BSON("listDatabases" << 1));
+ auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ if (!shard) {
+ return Status(ErrorCodes::ShardNotFound,
+ str::stream() << "shard " << shardId << " not found");
+ }
+ auto listDatabasesStatus =
+ shard->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
+ "admin",
+ BSON("listDatabases" << 1),
+ Shard::RetryPolicy::kIdempotent);
if (!listDatabasesStatus.isOK()) {
- return listDatabasesStatus.getStatus();
+ return std::move(listDatabasesStatus.getStatus());
+ }
+ if (!listDatabasesStatus.getValue().commandStatus.isOK()) {
+ return std::move(listDatabasesStatus.getValue().commandStatus);
}
- BSONElement totalSizeElem = listDatabasesStatus.getValue()["totalSize"];
+ BSONElement totalSizeElem = listDatabasesStatus.getValue().response["totalSize"];
if (!totalSizeElem.isNumber()) {
return {ErrorCodes::NoSuchKey, "totalSize field not found in listDatabases"};
}
@@ -84,20 +91,25 @@ StatusWith<BSONObj> selectMedianKey(OperationContext* txn,
cmd.append("max", maxKey);
cmd.appendBool("force", true);
- auto shardRegistry = Grid::get(txn)->shardRegistry();
- auto cmdStatus = shardRegistry->runIdempotentCommandOnShard(
- txn, shardId, ReadPreferenceSetting{ReadPreference::PrimaryPreferred}, "admin", cmd.obj());
+ auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ if (!shard) {
+ return Status(ErrorCodes::ShardNotFound,
+ str::stream() << "shard " << shardId << " not found");
+ }
+ auto cmdStatus = shard->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
+ "admin",
+ cmd.obj(),
+ Shard::RetryPolicy::kIdempotent);
if (!cmdStatus.isOK()) {
- return cmdStatus.getStatus();
+ return std::move(cmdStatus.getStatus());
}
-
- const auto response = std::move(cmdStatus.getValue());
-
- Status status = getStatusFromCommandResult(response);
- if (!status.isOK()) {
- return status;
+ if (!cmdStatus.getValue().commandStatus.isOK()) {
+ return std::move(cmdStatus.getValue().commandStatus);
}
+ const auto response = std::move(cmdStatus.getValue().response);
+
BSONObjIterator it(response.getObjectField("splitKeys"));
if (it.more()) {
return it.next().Obj().getOwned();
@@ -124,20 +136,25 @@ StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* txn,
cmd.append("maxSplitPoints", maxPoints);
cmd.append("maxChunkObjects", maxObjs);
- auto shardRegistry = Grid::get(txn)->shardRegistry();
- auto cmdStatus = shardRegistry->runIdempotentCommandOnShard(
- txn, shardId, ReadPreferenceSetting{ReadPreference::PrimaryPreferred}, "admin", cmd.obj());
+ auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ if (!shard) {
+ return Status(ErrorCodes::ShardNotFound,
+ str::stream() << "shard " << shardId << " not found");
+ }
+ auto cmdStatus = shard->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
+ "admin",
+ cmd.obj(),
+ Shard::RetryPolicy::kIdempotent);
if (!cmdStatus.isOK()) {
- return cmdStatus.getStatus();
+ return std::move(cmdStatus.getStatus());
}
-
- const auto response = std::move(cmdStatus.getValue());
-
- Status status = getStatusFromCommandResult(response);
- if (!status.isOK()) {
- return status;
+ if (!cmdStatus.getValue().commandStatus.isOK()) {
+ return std::move(cmdStatus.getValue().commandStatus);
}
+ const auto response = std::move(cmdStatus.getValue().response);
+
std::vector<BSONObj> splitPoints;
BSONObjIterator it(response.getObjectField("splitKeys"));
@@ -181,13 +198,24 @@ StatusWith<boost::optional<std::pair<BSONObj, BSONObj>>> splitChunkAtMultiplePoi
BSONObj cmdObj = cmd.obj();
Status status{ErrorCodes::InternalError, "Uninitialized value"};
+ BSONObj cmdResponse;
- auto cmdStatus = Grid::get(txn)->shardRegistry()->runIdempotentCommandOnShard(
- txn, shardId, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, "admin", cmdObj);
- if (!cmdStatus.isOK()) {
- status = std::move(cmdStatus.getStatus());
+ auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ if (!shard) {
+ status =
+ Status(ErrorCodes::ShardNotFound, str::stream() << "shard " << shardId << " not found");
} else {
- status = getStatusFromCommandResult(cmdStatus.getValue());
+ auto cmdStatus = shard->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ cmdObj,
+ Shard::RetryPolicy::kNotIdempotent);
+ if (!cmdStatus.isOK()) {
+ status = std::move(cmdStatus.getStatus());
+ } else {
+ status = std::move(cmdStatus.getValue().commandStatus);
+ cmdResponse = std::move(cmdStatus.getValue().response);
+ }
}
if (!status.isOK()) {
@@ -195,8 +223,6 @@ StatusWith<boost::optional<std::pair<BSONObj, BSONObj>>> splitChunkAtMultiplePoi
return {status.code(), str::stream() << "split failed due to " << status.toString()};
}
- BSONObj cmdResponse = std::move(cmdStatus.getValue());
-
BSONElement shouldMigrateElement;
status = bsonExtractTypedField(cmdResponse, kShouldMigrate, Object, &shouldMigrateElement);
if (status.isOK()) {