author    Misha Tyulenev <misha@mongodb.com>    2016-08-24 18:05:09 -0400
committer Misha Tyulenev <misha@mongodb.com>    2016-08-25 11:57:21 -0400
commit    46b33e042de75d801e5fd9f20b74a1c9a249b0c2 (patch)
tree      b339c718ee0282cfd04190725369a3b9f154d111
parent    e589562b858061cf82dd430115c82033203db018 (diff)
download  mongo-46b33e042de75d801e5fd9f20b74a1c9a249b0c2.tar.gz
SERVER-23996 ShardRegistry::getShard should return a StatusWith<shared_ptr<Shard>>
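The diff below converts every call site from the old raw shared_ptr return to the new StatusWith contract. As an illustrative sketch only (not part of this commit), the two caller patterns used throughout the change look roughly like this, assuming a grid, txn, and shardId already in scope:

    // Pattern 1: check the StatusWith and propagate the error (typically ShardNotFound).
    auto shardStatus = grid.shardRegistry()->getShard(txn, shardId);
    if (!shardStatus.isOK()) {
        return shardStatus.getStatus();
    }
    auto shard = shardStatus.getValue();

    // Pattern 2: throw via uassert where an exception is the appropriate failure mode.
    const auto shard2 = uassertStatusOK(grid.shardRegistry()->getShard(txn, shardId));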
-rw-r--r--  src/mongo/client/parallel.cpp | 4
-rw-r--r--  src/mongo/db/commands/mr.cpp | 8
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp | 14
-rw-r--r--  src/mongo/s/balancer/balancer.cpp | 21
-rw-r--r--  src/mongo/s/balancer/cluster_statistics_impl.cpp | 7
-rw-r--r--  src/mongo/s/balancer/migration_manager.cpp | 7
-rw-r--r--  src/mongo/s/balancer/migration_manager_test.cpp | 3
-rw-r--r--  src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp | 46
-rw-r--r--  src/mongo/s/catalog/replset/sharding_catalog_drop_coll_test.cpp | 12
-rw-r--r--  src/mongo/s/catalog/replset/sharding_catalog_manager_impl.cpp | 7
-rw-r--r--  src/mongo/s/catalog/replset/sharding_catalog_test.cpp | 17
-rw-r--r--  src/mongo/s/chunk.cpp | 4
-rw-r--r--  src/mongo/s/chunk_manager.cpp | 9
-rw-r--r--  src/mongo/s/client/shard_connection.cpp | 6
-rw-r--r--  src/mongo/s/client/shard_registry.cpp | 22
-rw-r--r--  src/mongo/s/client/shard_registry.h | 6
-rw-r--r--  src/mongo/s/commands/cluster_add_shard_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_explain.cpp | 3
-rw-r--r--  src/mongo/s/commands/cluster_find_and_modify_cmd.cpp | 16
-rw-r--r--  src/mongo/s/commands/cluster_fsync_cmd.cpp | 5
-rw-r--r--  src/mongo/s/commands/cluster_get_last_error_cmd.cpp | 9
-rw-r--r--  src/mongo/s/commands/cluster_kill_op.cpp | 10
-rw-r--r--  src/mongo/s/commands/cluster_list_databases_cmd.cpp | 5
-rw-r--r--  src/mongo/s/commands/cluster_map_reduce_cmd.cpp | 15
-rw-r--r--  src/mongo/s/commands/cluster_merge_chunks_cmd.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_move_chunk_cmd.cpp | 5
-rw-r--r--  src/mongo/s/commands/cluster_move_primary_cmd.cpp | 9
-rw-r--r--  src/mongo/s/commands/cluster_pipeline_cmd.cpp | 14
-rw-r--r--  src/mongo/s/commands/cluster_remove_shard_cmd.cpp | 5
-rw-r--r--  src/mongo/s/commands/cluster_shard_collection_cmd.cpp | 8
-rw-r--r--  src/mongo/s/commands/cluster_user_management_commands.cpp | 18
-rw-r--r--  src/mongo/s/commands/cluster_write_cmd.cpp | 16
-rw-r--r--  src/mongo/s/commands/commands_public.cpp | 56
-rw-r--r--  src/mongo/s/commands/run_on_all_shards_cmd.cpp | 12
-rw-r--r--  src/mongo/s/config.cpp | 23
-rw-r--r--  src/mongo/s/query/cluster_find.cpp | 9
-rw-r--r--  src/mongo/s/shard_util.cpp | 74
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec.cpp | 47
38 files changed, 325 insertions, 235 deletions
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index 8fdacb5efaa..3fca978c745 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -397,7 +397,7 @@ void ParallelSortClusteredCursor::setupVersionAndHandleSlaveOk(
// Setup conn
if (!state->conn) {
- const auto shard = grid.shardRegistry()->getShard(txn, shardId);
+ const auto shard = uassertStatusOK(grid.shardRegistry()->getShard(txn, shardId));
state->conn.reset(new ShardConnection(shard->getConnString(), ns.ns(), manager));
}
@@ -945,7 +945,7 @@ void ParallelSortClusteredCursor::finishInit(OperationContext* txn) {
_cursors[index].reset(mdata.pcState->cursor.get(), &mdata);
{
- const auto shard = grid.shardRegistry()->getShard(txn, i->first);
+ const auto shard = uassertStatusOK(grid.shardRegistry()->getShard(txn, i->first));
_servers.insert(shard->getConnString().toString());
}
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 9e61b381969..f25289baecd 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -1720,11 +1720,9 @@ public:
std::string server = e.fieldName();
servers.insert(server);
- if (!grid.shardRegistry()->getShard(txn, server)) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::ShardNotFound,
- str::stream() << "Shard not found for server: " << server));
+ auto shardStatus = grid.shardRegistry()->getShard(txn, server);
+ if (!shardStatus.isOK()) {
+ return appendCommandStatus(result, shardStatus.getStatus());
}
}
}
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 2a2d2e77d3e..a4ed3c77a7c 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -184,12 +184,20 @@ Status MigrationChunkClonerSourceLegacy::startClone(OperationContext* txn) {
// Resolve the donor and recipient shards and their connection string
{
- auto donorShard = grid.shardRegistry()->getShard(txn, _args.getFromShardId());
- _donorCS = donorShard->getConnString();
+ auto donorShardStatus = grid.shardRegistry()->getShard(txn, _args.getFromShardId());
+ if (!donorShardStatus.isOK()) {
+ return donorShardStatus.getStatus();
+ }
+ _donorCS = donorShardStatus.getValue()->getConnString();
}
{
- auto recipientShard = grid.shardRegistry()->getShard(txn, _args.getToShardId());
+ auto recipientShardStatus = grid.shardRegistry()->getShard(txn, _args.getToShardId());
+ if (!recipientShardStatus.isOK()) {
+ return recipientShardStatus.getStatus();
+ }
+ auto recipientShard = recipientShardStatus.getValue();
+
auto shardHostStatus = recipientShard->getTargeter()->findHost(
ReadPreferenceSetting{ReadPreference::PrimaryOnly});
if (!shardHostStatus.isOK()) {
diff --git a/src/mongo/s/balancer/balancer.cpp b/src/mongo/s/balancer/balancer.cpp
index 77402222b2f..6296d42cb88 100644
--- a/src/mongo/s/balancer/balancer.cpp
+++ b/src/mongo/s/balancer/balancer.cpp
@@ -444,10 +444,11 @@ bool Balancer::_checkOIDs(OperationContext* txn) {
return false;
}
- const auto s = shardingContext->shardRegistry()->getShard(txn, shardId);
- if (!s) {
+ auto shardStatus = shardingContext->shardRegistry()->getShard(txn, shardId);
+ if (!shardStatus.isOK()) {
continue;
}
+ const auto s = shardStatus.getValue();
auto result =
uassertStatusOK(s->runCommand(txn,
@@ -474,14 +475,14 @@ bool Balancer::_checkOIDs(OperationContext* txn) {
Shard::RetryPolicy::kIdempotent));
uassertStatusOK(result.commandStatus);
- const auto otherShard = shardingContext->shardRegistry()->getShard(txn, oids[x]);
- if (otherShard) {
- result = uassertStatusOK(
- otherShard->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- BSON("features" << 1 << "oidReset" << 1),
- Shard::RetryPolicy::kIdempotent));
+ auto otherShardStatus = shardingContext->shardRegistry()->getShard(txn, oids[x]);
+ if (otherShardStatus.isOK()) {
+ result = uassertStatusOK(otherShardStatus.getValue()->runCommand(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ BSON("features" << 1 << "oidReset" << 1),
+ Shard::RetryPolicy::kIdempotent));
uassertStatusOK(result.commandStatus);
}
diff --git a/src/mongo/s/balancer/cluster_statistics_impl.cpp b/src/mongo/s/balancer/cluster_statistics_impl.cpp
index 01993f1a938..8f229997cb2 100644
--- a/src/mongo/s/balancer/cluster_statistics_impl.cpp
+++ b/src/mongo/s/balancer/cluster_statistics_impl.cpp
@@ -62,10 +62,11 @@ const char kVersionField[] = "version";
*/
StatusWith<string> retrieveShardMongoDVersion(OperationContext* txn, ShardId shardId) {
auto shardRegistry = Grid::get(txn)->shardRegistry();
- auto shard = shardRegistry->getShard(txn, shardId);
- if (!shard) {
- return {ErrorCodes::ShardNotFound, str::stream() << "shard " << shardId << " not found"};
+ auto shardStatus = shardRegistry->getShard(txn, shardId);
+ if (!shardStatus.isOK()) {
+ return shardStatus.getStatus();
}
+ auto shard = shardStatus.getValue();
auto commandResponse = shard->runCommand(txn,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
diff --git a/src/mongo/s/balancer/migration_manager.cpp b/src/mongo/s/balancer/migration_manager.cpp
index 07d01481160..b73199ec848 100644
--- a/src/mongo/s/balancer/migration_manager.cpp
+++ b/src/mongo/s/balancer/migration_manager.cpp
@@ -320,7 +320,12 @@ shared_ptr<Notification<Status>> MigrationManager::_schedule(
<< " does not exist."));
}
- const auto fromShard = Grid::get(txn)->shardRegistry()->getShard(txn, migrateInfo.from);
+ const auto fromShardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, migrateInfo.from);
+ if (!fromShardStatus.isOK()) {
+ return std::make_shared<Notification<Status>>(std::move(fromShardStatus.getStatus()));
+ }
+
+ const auto fromShard = fromShardStatus.getValue();
auto fromHostStatus =
fromShard->getTargeter()->findHost(ReadPreferenceSetting{ReadPreference::PrimaryOnly},
RemoteCommandTargeter::selectFindHostMaxWaitTime(txn));
diff --git a/src/mongo/s/balancer/migration_manager_test.cpp b/src/mongo/s/balancer/migration_manager_test.cpp
index fd06fdbc352..b4e9ab8e23c 100644
--- a/src/mongo/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/s/balancer/migration_manager_test.cpp
@@ -159,7 +159,8 @@ void MigrationManagerTest::tearDown() {
std::shared_ptr<RemoteCommandTargeterMock> MigrationManagerTest::shardTargeterMock(
OperationContext* txn, ShardId shardId) {
- return RemoteCommandTargeterMock::get(shardRegistry()->getShard(txn, shardId)->getTargeter());
+ return RemoteCommandTargeterMock::get(
+ uassertStatusOK(shardRegistry()->getShard(txn, shardId))->getTargeter());
}
void MigrationManagerTest::setUpDatabase(const std::string& dbName, const ShardId primaryShard) {
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp
index 13379820db3..6f7315272ce 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp
@@ -419,7 +419,10 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
}
ShardId dbPrimaryShardId = getDBStatus.getValue().value.getPrimary();
- const auto primaryShard = grid.shardRegistry()->getShard(txn, dbPrimaryShardId);
+ const auto primaryShardStatus = grid.shardRegistry()->getShard(txn, dbPrimaryShardId);
+ if (!primaryShardStatus.isOK()) {
+ return primaryShardStatus.getStatus();
+ }
{
// In 3.0 and prior we include this extra safety check that the collection is not getting
@@ -446,7 +449,7 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
BSONObjBuilder collectionDetail;
collectionDetail.append("shardKey", fieldsAndOrder.toBSON());
collectionDetail.append("collection", ns);
- collectionDetail.append("primary", primaryShard->toString());
+ collectionDetail.append("primary", primaryShardStatus.getValue()->toString());
{
BSONArrayBuilder initialShards(collectionDetail.subarrayStart("initShards"));
@@ -494,16 +497,16 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
SetShardVersionRequest ssv = SetShardVersionRequest::makeForVersioningNoPersist(
grid.shardRegistry()->getConfigServerConnectionString(),
dbPrimaryShardId,
- primaryShard->getConnString(),
+ primaryShardStatus.getValue()->getConnString(),
NamespaceString(ns),
manager->getVersion(),
true);
- auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, dbPrimaryShardId);
- if (!shard) {
- return {ErrorCodes::ShardNotFound,
- str::stream() << "shard " << dbPrimaryShardId << " not found"};
+ auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, dbPrimaryShardId);
+ if (!shardStatus.isOK()) {
+ return shardStatus.getStatus();
}
+ auto shard = shardStatus.getValue();
auto ssvResponse = shard->runCommand(txn,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
@@ -808,18 +811,17 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
auto* shardRegistry = grid.shardRegistry();
for (const auto& shardEntry : allShards) {
- auto shard = shardRegistry->getShard(txn, shardEntry.getName());
- if (!shard) {
- return {ErrorCodes::ShardNotFound,
- str::stream() << "shard " << shardEntry.getName() << " not found"};
+ auto shardStatus = shardRegistry->getShard(txn, shardEntry.getName());
+ if (!shardStatus.isOK()) {
+ return shardStatus.getStatus();
}
- auto dropResult =
- shard->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- ns.db().toString(),
- BSON("drop" << ns.coll() << WriteConcernOptions::kWriteConcernField
- << txn->getWriteConcern().toBSON()),
- Shard::RetryPolicy::kIdempotent);
+ auto dropResult = shardStatus.getValue()->runCommand(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ ns.db().toString(),
+ BSON("drop" << ns.coll() << WriteConcernOptions::kWriteConcernField
+ << txn->getWriteConcern().toBSON()),
+ Shard::RetryPolicy::kIdempotent);
if (!dropResult.isOK()) {
return Status(dropResult.getStatus().code(),
@@ -895,11 +897,11 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
ChunkVersion::DROPPED(),
true);
- auto shard = shardRegistry->getShard(txn, shardEntry.getName());
- if (!shard) {
- return {ErrorCodes::ShardNotFound,
- str::stream() << "shard " << shardEntry.getName() << " not found"};
+ auto shardStatus = shardRegistry->getShard(txn, shardEntry.getName());
+ if (!shardStatus.isOK()) {
+ return shardStatus.getStatus();
}
+ auto shard = shardStatus.getValue();
auto ssvResult = shard->runCommand(txn,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_drop_coll_test.cpp b/src/mongo/s/catalog/replset/sharding_catalog_drop_coll_test.cpp
index b62499cc355..b556bcb0672 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_drop_coll_test.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_drop_coll_test.cpp
@@ -79,11 +79,13 @@ public:
setupShards({_shard1, _shard2});
auto shard1Targeter = RemoteCommandTargeterMock::get(
- shardRegistry()->getShard(operationContext(), _shard1.getName())->getTargeter());
+ uassertStatusOK(shardRegistry()->getShard(operationContext(), _shard1.getName()))
+ ->getTargeter());
shard1Targeter->setFindHostReturnValue(HostAndPort(_shard1.getHost()));
auto shard2Targeter = RemoteCommandTargeterMock::get(
- shardRegistry()->getShard(operationContext(), _shard2.getName())->getTargeter());
+ uassertStatusOK(shardRegistry()->getShard(operationContext(), _shard2.getName()))
+ ->getTargeter());
shard2Targeter->setFindHostReturnValue(HostAndPort(_shard2.getHost()));
}
@@ -284,7 +286,8 @@ TEST_F(DropColl2ShardTest, DistLockBusy) {
TEST_F(DropColl2ShardTest, FirstShardTargeterError) {
auto shard1Targeter = RemoteCommandTargeterMock::get(
- shardRegistry()->getShard(operationContext(), shard1().getName())->getTargeter());
+ uassertStatusOK(shardRegistry()->getShard(operationContext(), shard1().getName()))
+ ->getTargeter());
shard1Targeter->setFindHostReturnValue({ErrorCodes::HostUnreachable, "bad test network"});
auto future = launchAsync([this] {
@@ -348,7 +351,8 @@ TEST_F(DropColl2ShardTest, FirstShardDropCmdError) {
TEST_F(DropColl2ShardTest, SecondShardTargeterError) {
auto shard2Targeter = RemoteCommandTargeterMock::get(
- shardRegistry()->getShard(operationContext(), shard2().getName())->getTargeter());
+ uassertStatusOK(shardRegistry()->getShard(operationContext(), shard2().getName()))
+ ->getTargeter());
shard2Targeter->setFindHostReturnValue({ErrorCodes::HostUnreachable, "bad test network"});
auto future = launchAsync([this] {
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_manager_impl.cpp b/src/mongo/s/catalog/replset/sharding_catalog_manager_impl.cpp
index 77dc96fd1fc..aac7bed4935 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_manager_impl.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_manager_impl.cpp
@@ -818,7 +818,7 @@ StatusWith<string> ShardingCatalogManagerImpl::addShard(
// Ensure the added shard is visible to this process.
auto shardRegistry = Grid::get(txn)->shardRegistry();
- if (!shardRegistry->getShard(txn, shardType.getName())) {
+ if (!shardRegistry->getShard(txn, shardType.getName()).isOK()) {
return {ErrorCodes::OperationFailed,
"Could not find shard metadata for shard after adding it. This most likely "
"indicates that the shard was removed immediately after it was added."};
@@ -1925,10 +1925,11 @@ Status ShardingCatalogManagerImpl::setFeatureCompatibilityVersionOnShards(
std::vector<ShardId> shardIds;
grid.shardRegistry()->getAllShardIds(&shardIds);
for (const ShardId& shardId : shardIds) {
- const auto shard = grid.shardRegistry()->getShard(txn, shardId);
- if (!shard) {
+ const auto shardStatus = grid.shardRegistry()->getShard(txn, shardId);
+ if (!shardStatus.isOK()) {
continue;
}
+ const auto shard = shardStatus.getValue();
auto response =
shard->runCommand(txn,
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_test.cpp b/src/mongo/s/catalog/replset/sharding_catalog_test.cpp
index fab9b690c13..da4e375465f 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_test.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_test.cpp
@@ -1564,16 +1564,15 @@ TEST_F(ShardingCatalogClientTest, createDatabaseSuccess) {
// Set up all the target mocks return values.
RemoteCommandTargeterMock::get(
- shardRegistry()->getShard(operationContext(), s0.getName())->getTargeter())
+ uassertStatusOK(shardRegistry()->getShard(operationContext(), s0.getName()))->getTargeter())
->setFindHostReturnValue(HostAndPort(s0.getHost()));
RemoteCommandTargeterMock::get(
- shardRegistry()->getShard(operationContext(), s1.getName())->getTargeter())
+ uassertStatusOK(shardRegistry()->getShard(operationContext(), s1.getName()))->getTargeter())
->setFindHostReturnValue(HostAndPort(s1.getHost()));
RemoteCommandTargeterMock::get(
- shardRegistry()->getShard(operationContext(), s2.getName())->getTargeter())
+ uassertStatusOK(shardRegistry()->getShard(operationContext(), s2.getName()))->getTargeter())
->setFindHostReturnValue(HostAndPort(s2.getHost()));
-
// Now actually start the createDatabase work.
distLock()->expectLock([dbname](StringData name,
@@ -1855,16 +1854,15 @@ TEST_F(ShardingCatalogClientTest, createDatabaseDuplicateKeyOnInsert) {
// Set up all the target mocks return values.
RemoteCommandTargeterMock::get(
- shardRegistry()->getShard(operationContext(), s0.getName())->getTargeter())
+ uassertStatusOK(shardRegistry()->getShard(operationContext(), s0.getName()))->getTargeter())
->setFindHostReturnValue(HostAndPort(s0.getHost()));
RemoteCommandTargeterMock::get(
- shardRegistry()->getShard(operationContext(), s1.getName())->getTargeter())
+ uassertStatusOK(shardRegistry()->getShard(operationContext(), s1.getName()))->getTargeter())
->setFindHostReturnValue(HostAndPort(s1.getHost()));
RemoteCommandTargeterMock::get(
- shardRegistry()->getShard(operationContext(), s2.getName())->getTargeter())
+ uassertStatusOK(shardRegistry()->getShard(operationContext(), s2.getName()))->getTargeter())
->setFindHostReturnValue(HostAndPort(s2.getHost()));
-
// Now actually start the createDatabase work.
distLock()->expectLock([dbname](StringData name,
@@ -1977,7 +1975,8 @@ TEST_F(ShardingCatalogClientTest, EnableShardingNoDBExists) {
setupShards(vector<ShardType>{shard});
auto shardTargeter = RemoteCommandTargeterMock::get(
- shardRegistry()->getShard(operationContext(), ShardId("shard0"))->getTargeter());
+ uassertStatusOK(shardRegistry()->getShard(operationContext(), ShardId("shard0")))
+ ->getTargeter());
shardTargeter->setFindHostReturnValue(HostAndPort("shard0:12"));
distLock()->expectLock(
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index c38dee72dd5..bcdd2dd09c7 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -81,7 +81,7 @@ Chunk::Chunk(OperationContext* txn, ChunkManager* manager, const ChunkType& from
uassert(13327, "Chunk ns must match server ns", ns == _manager->getns());
uassert(10172, "Chunk needs a min", !_min.isEmpty());
uassert(10173, "Chunk needs a max", !_max.isEmpty());
- uassert(10171, "Chunk needs a server", grid.shardRegistry()->getShard(txn, _shardId));
+ uassert(10171, "Chunk needs a server", grid.shardRegistry()->getShard(txn, _shardId).isOK());
}
Chunk::Chunk(ChunkManager* info,
@@ -415,7 +415,7 @@ bool Chunk::splitIfShould(OperationContext* txn, long dataWritten) {
}
ConnectionString Chunk::_getShardConnectionString(OperationContext* txn) const {
- const auto shard = grid.shardRegistry()->getShard(txn, getShardId());
+ const auto shard = uassertStatusOK(grid.shardRegistry()->getShard(txn, getShardId()));
return shard->getConnString();
}
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index cda67a18e06..43bd27ee42f 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -102,7 +102,7 @@ public:
}
ShardId shardFor(OperationContext* txn, const ShardId& shardId) const final {
- const auto shard = grid.shardRegistry()->getShard(txn, shardId);
+ const auto shard = uassertStatusOK(grid.shardRegistry()->getShard(txn, shardId));
return shard->getId();
}
@@ -308,11 +308,12 @@ bool ChunkManager::_load(OperationContext* txn,
// Add all existing shards we find to the shards set
for (ShardVersionMap::iterator it = shardVersions->begin(); it != shardVersions->end();) {
- shared_ptr<Shard> shard = grid.shardRegistry()->getShard(txn, it->first);
- if (shard) {
+ auto shardStatus = grid.shardRegistry()->getShard(txn, it->first);
+ if (shardStatus.isOK()) {
shardIds.insert(it->first);
++it;
} else {
+ invariant(shardStatus == ErrorCodes::ShardNotFound);
shardVersions->erase(it++);
}
}
@@ -380,7 +381,7 @@ void ChunkManager::calcInitSplitsAndShards(OperationContext* txn,
if (!initPoints || initPoints->empty()) {
// discover split points
- const auto primaryShard = grid.shardRegistry()->getShard(txn, primaryShardId);
+ auto primaryShard = uassertStatusOK(grid.shardRegistry()->getShard(txn, primaryShardId));
const NamespaceString nss{getns()};
auto result = uassertStatusOK(
diff --git a/src/mongo/s/client/shard_connection.cpp b/src/mongo/s/client/shard_connection.cpp
index a4faaa34377..7e38ed59b9e 100644
--- a/src/mongo/s/client/shard_connection.cpp
+++ b/src/mongo/s/client/shard_connection.cpp
@@ -281,10 +281,12 @@ public:
// Now only check top-level shard connections
for (const ShardId& shardId : all) {
try {
- const auto shard = grid.shardRegistry()->getShard(txn, shardId);
- if (!shard) {
+ auto shardStatus = grid.shardRegistry()->getShard(txn, shardId);
+ if (!shardStatus.isOK()) {
+ invariant(shardStatus == ErrorCodes::ShardNotFound);
continue;
}
+ const auto shard = shardStatus.getValue();
string sconnString = shard->getConnString().toString();
Status* s = _getStatus(sconnString);
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index 7846cc8b560..3b98f4f7334 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -106,7 +106,8 @@ void ShardRegistry::rebuildConfigShard() {
invariant(_data.getConfigShard());
}
-shared_ptr<Shard> ShardRegistry::getShard(OperationContext* txn, const ShardId& shardId) {
+StatusWith<shared_ptr<Shard>> ShardRegistry::getShard(OperationContext* txn,
+ const ShardId& shardId) {
// If we know about the shard, return it.
auto shard = _data.findByShardId(shardId);
if (shard) {
@@ -117,16 +118,27 @@ shared_ptr<Shard> ShardRegistry::getShard(OperationContext* txn, const ShardId&
bool didReload = reload(txn);
shard = _data.findByShardId(shardId);
- // If we found the shard, return it. If we did not find the shard but performed the reload
- // ourselves, return, because it means the shard does not exist.
- if (shard || didReload) {
+ // If we found the shard, return it.
+ if (shard) {
return shard;
}
+ // If we did not find the shard but performed the reload
+ // ourselves, return, because it means the shard does not exist.
+ if (didReload) {
+ return {ErrorCodes::ShardNotFound, str::stream() << "Shard " << shardId << " not found"};
+ }
+
// If we did not perform the reload ourselves (because there was a concurrent reload), force a
// reload again to ensure that we have seen data at least as up to date as our first reload.
reload(txn);
- return _data.findByShardId(shardId);
+ shard = _data.findByShardId(shardId);
+
+ if (shard) {
+ return shard;
+ }
+
+ return {ErrorCodes::ShardNotFound, str::stream() << "Shard " << shardId << " not found"};
}
shared_ptr<Shard> ShardRegistry::getShardNoReload(const ShardId& shardId) {
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index 61adaa20bac..edd78e8af02 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -185,12 +185,14 @@ public:
void updateReplSetHosts(const ConnectionString& newConnString);
/**
- * Returns a shared pointer to the shard object with the given shard id.
+ * Returns a shared pointer to the shard object with the given shard id, or ShardNotFound error
+ * otherwise.
+ *
* May refresh the shard registry if there's no cached information about the shard. The shardId
* parameter can actually be the shard name or the HostAndPort for any
* server in the shard.
*/
- std::shared_ptr<Shard> getShard(OperationContext* txn, const ShardId& shardId);
+ StatusWith<std::shared_ptr<Shard>> getShard(OperationContext* txn, const ShardId& shardId);
/**
* Returns a shared pointer to the shard object with the given shard id. The shardId parameter
diff --git a/src/mongo/s/commands/cluster_add_shard_cmd.cpp b/src/mongo/s/commands/cluster_add_shard_cmd.cpp
index 40e861e6b6b..d6825da3c93 100644
--- a/src/mongo/s/commands/cluster_add_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_add_shard_cmd.cpp
@@ -104,7 +104,7 @@ public:
// Ensure the added shard is visible to this process.
auto shardRegistry = Grid::get(txn)->shardRegistry();
- if (!shardRegistry->getShard(txn, shardAdded)) {
+ if (!shardRegistry->getShard(txn, shardAdded).isOK()) {
return appendCommandStatus(result,
{ErrorCodes::OperationFailed,
"Could not find shard metadata for shard after adding it. "
diff --git a/src/mongo/s/commands/cluster_explain.cpp b/src/mongo/s/commands/cluster_explain.cpp
index fa4b231741a..96b78d23f0a 100644
--- a/src/mongo/s/commands/cluster_explain.cpp
+++ b/src/mongo/s/commands/cluster_explain.cpp
@@ -220,7 +220,8 @@ void ClusterExplain::buildPlannerInfo(OperationContext* txn,
singleShardBob.append("shardName", shardResults[i].shardTargetId.toString());
{
- const auto shard = grid.shardRegistry()->getShard(txn, shardResults[i].shardTargetId);
+ const auto shard =
+ uassertStatusOK(grid.shardRegistry()->getShard(txn, shardResults[i].shardTargetId));
singleShardBob.append("connectionString", shard->getConnString().toString());
}
appendIfRoom(&singleShardBob, serverInfo, "serverInfo");
diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
index bea947aedda..73f1860118d 100644
--- a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
@@ -98,7 +98,11 @@ public:
shared_ptr<Shard> shard;
if (!conf->isShardingEnabled() || !conf->isSharded(nss.ns())) {
- shard = Grid::get(txn)->shardRegistry()->getShard(txn, conf->getPrimaryId());
+ auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, conf->getPrimaryId());
+ if (!shardStatus.isOK()) {
+ return shardStatus.getStatus();
+ }
+ shard = shardStatus.getValue();
} else {
chunkMgr = _getChunkManager(txn, conf, nss);
@@ -128,7 +132,12 @@ public:
"to non-simple collation");
}
- shard = Grid::get(txn)->shardRegistry()->getShard(txn, chunk.getValue()->getShardId());
+ auto shardStatus =
+ Grid::get(txn)->shardRegistry()->getShard(txn, chunk.getValue()->getShardId());
+ if (!shardStatus.isOK()) {
+ return shardStatus.getStatus();
+ }
+ shard = shardStatus.getValue();
}
BSONObjBuilder explainCmd;
@@ -257,7 +266,8 @@ private:
BSONObjBuilder& result) const {
BSONObj res;
- const auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ const auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
+
ShardConnection conn(shard->getConnString(), nss.ns(), chunkManager);
bool ok = conn->runCommand(conf->name(), cmdObj, res);
conn.done();
diff --git a/src/mongo/s/commands/cluster_fsync_cmd.cpp b/src/mongo/s/commands/cluster_fsync_cmd.cpp
index 8a1ce646c2e..15e20e2c436 100644
--- a/src/mongo/s/commands/cluster_fsync_cmd.cpp
+++ b/src/mongo/s/commands/cluster_fsync_cmd.cpp
@@ -87,10 +87,11 @@ public:
grid.shardRegistry()->getAllShardIds(&shardIds);
for (const ShardId& shardId : shardIds) {
- const auto s = grid.shardRegistry()->getShard(txn, shardId);
- if (!s) {
+ auto shardStatus = grid.shardRegistry()->getShard(txn, shardId);
+ if (!shardStatus.isOK()) {
continue;
}
+ const auto s = shardStatus.getValue();
auto response =
uassertStatusOK(s->runCommand(txn,
diff --git a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
index 78d7e892391..56bfa66b369 100644
--- a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
@@ -111,13 +111,12 @@ public:
const HostOpTime& hot = it->second;
const ReadPreferenceSetting readPref(ReadPreference::PrimaryOnly, TagSet());
- auto shard = grid.shardRegistry()->getShard(txn, shardEndpoint.toString());
- if (!shard) {
- status =
- Status(ErrorCodes::ShardNotFound,
- str::stream() << "shard " << shardEndpoint.toString() << " not found");
+ auto shardStatus = grid.shardRegistry()->getShard(txn, shardEndpoint.toString());
+ if (!shardStatus.isOK()) {
+ status = shardStatus.getStatus();
break;
}
+ auto shard = shardStatus.getValue();
auto swHostAndPort = shard->getTargeter()->findHost(readPref);
if (!swHostAndPort.isOK()) {
status = swHostAndPort.getStatus();
diff --git a/src/mongo/s/commands/cluster_kill_op.cpp b/src/mongo/s/commands/cluster_kill_op.cpp
index ba698426fcc..bfbc556b36b 100644
--- a/src/mongo/s/commands/cluster_kill_op.cpp
+++ b/src/mongo/s/commands/cluster_kill_op.cpp
@@ -103,13 +103,11 @@ public:
log() << "want to kill op: " << redact(opToKill);
// Will throw if shard id is not found
- auto shard = grid.shardRegistry()->getShard(txn, shardIdent);
- if (!shard) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::ShardNotFound,
- str::stream() << "shard " << shardIdent << " does not exist"));
+ auto shardStatus = grid.shardRegistry()->getShard(txn, shardIdent);
+ if (!shardStatus.isOK()) {
+ return appendCommandStatus(result, shardStatus.getStatus());
}
+ auto shard = shardStatus.getValue();
int opId;
uassertStatusOK(parseNumberFromStringWithBase(opToKill.substr(opSepPos + 1), 10, &opId));
diff --git a/src/mongo/s/commands/cluster_list_databases_cmd.cpp b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
index 3b911f97588..7ea4cd7e8fc 100644
--- a/src/mongo/s/commands/cluster_list_databases_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
@@ -95,10 +95,11 @@ public:
grid.shardRegistry()->getAllShardIds(&shardIds);
for (const ShardId& shardId : shardIds) {
- const auto s = grid.shardRegistry()->getShard(txn, shardId);
- if (!s) {
+ const auto shardStatus = grid.shardRegistry()->getShard(txn, shardId);
+ if (!shardStatus.isOK()) {
continue;
}
+ const auto s = shardStatus.getValue();
auto response = uassertStatusOK(
s->runCommand(txn,
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index e398609c9e9..9978e6c142d 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -290,7 +290,9 @@ public:
if (!shardedInput && !shardedOutput && !customOutDB) {
LOG(1) << "simple MR, just passthrough";
- const auto shard = grid.shardRegistry()->getShard(txn, confIn->getPrimaryId());
+ const auto shard =
+ uassertStatusOK(grid.shardRegistry()->getShard(txn, confIn->getPrimaryId()));
+
ShardConnection conn(shard->getConnString(), "");
BSONObj res;
@@ -348,7 +350,8 @@ public:
// Need to gather list of all servers even if an error happened
string server;
{
- const auto shard = grid.shardRegistry()->getShard(txn, mrResult.shardTargetId);
+ const auto shard = uassertStatusOK(
+ grid.shardRegistry()->getShard(txn, mrResult.shardTargetId));
server = shard->getConnString().toString();
}
servers.insert(server);
@@ -441,7 +444,9 @@ public:
bool hasWCError = false;
if (!shardedOutput) {
- const auto shard = grid.shardRegistry()->getShard(txn, confOut->getPrimaryId());
+ const auto shard =
+ uassertStatusOK(grid.shardRegistry()->getShard(txn, confOut->getPrimaryId()));
+
LOG(1) << "MR with single shard output, NS=" << outputCollNss.ns()
<< " primary=" << shard->toString();
@@ -545,8 +550,8 @@ public:
for (const auto& mrResult : mrCommandResults) {
string server;
{
- const auto shard =
- grid.shardRegistry()->getShard(txn, mrResult.shardTargetId);
+ const auto shard = uassertStatusOK(
+ grid.shardRegistry()->getShard(txn, mrResult.shardTargetId));
server = shard->getConnString().toString();
}
singleResult = mrResult.result;
diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
index d534c29aadd..44f8e74ccf2 100644
--- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
+++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
@@ -185,15 +185,15 @@ public:
// Throws, but handled at level above. Don't want to rewrap to preserve exception
// formatting.
- const auto shard = grid.shardRegistry()->getShard(txn, firstChunk->getShardId());
- if (!shard) {
+ const auto shardStatus = grid.shardRegistry()->getShard(txn, firstChunk->getShardId());
+ if (!shardStatus.isOK()) {
return appendCommandStatus(
result,
Status(ErrorCodes::ShardNotFound,
str::stream() << "Can't find shard for chunk: " << firstChunk->toString()));
}
- ShardConnection conn(shard->getConnString(), "");
+ ShardConnection conn(shardStatus.getValue()->getConnString(), "");
bool ok = conn->runCommand("admin", remoteCmdObjB.obj(), remoteResult);
conn.done();
diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
index 282667dd6eb..4236beb6121 100644
--- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
@@ -135,14 +135,15 @@ public:
return false;
}
- const auto to = grid.shardRegistry()->getShard(txn, toString);
- if (!to) {
+ const auto toStatus = grid.shardRegistry()->getShard(txn, toString);
+ if (!toStatus.isOK()) {
string msg(str::stream() << "Could not move chunk in '" << nss.ns() << "' to shard '"
<< toString
<< "' because that shard does not exist");
log() << msg;
return appendCommandStatus(result, Status(ErrorCodes::ShardNotFound, msg));
}
+ const auto to = toStatus.getValue();
// so far, chunk size serves test purposes; it may or may not become a supported parameter
long long maxChunkSizeBytes = cmdObj["maxChunkSizeBytes"].numberLong();
diff --git a/src/mongo/s/commands/cluster_move_primary_cmd.cpp b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
index b4e6ce5e003..418ed3b25a0 100644
--- a/src/mongo/s/commands/cluster_move_primary_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
@@ -130,17 +130,18 @@ public:
return false;
}
- shared_ptr<Shard> toShard = grid.shardRegistry()->getShard(txn, to);
- if (!toShard) {
+ auto toShardStatus = grid.shardRegistry()->getShard(txn, to);
+ if (!toShardStatus.isOK()) {
string msg(str::stream() << "Could not move database '" << dbname << "' to shard '"
<< to
<< "' because the shard does not exist");
log() << msg;
return appendCommandStatus(result, Status(ErrorCodes::ShardNotFound, msg));
}
+ auto toShard = toShardStatus.getValue();
- shared_ptr<Shard> fromShard = grid.shardRegistry()->getShard(txn, config->getPrimaryId());
- invariant(fromShard);
+ auto fromShard =
+ uassertStatusOK(grid.shardRegistry()->getShard(txn, config->getPrimaryId()));
if (fromShard->getId() == toShard->getId()) {
errmsg = "it is already the primary";
diff --git a/src/mongo/s/commands/cluster_pipeline_cmd.cpp b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
index 8d14feaceec..19df5847a85 100644
--- a/src/mongo/s/commands/cluster_pipeline_cmd.cpp
+++ b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
@@ -304,7 +304,9 @@ public:
const auto& mergingShardId = needPrimaryShardMerger
? conf->getPrimaryId()
: shardResults[prng.nextInt32(shardResults.size())].shardTargetId;
- const auto mergingShard = grid.shardRegistry()->getShard(txn, mergingShardId);
+ const auto mergingShard =
+ uassertStatusOK(grid.shardRegistry()->getShard(txn, mergingShardId));
+
ShardConnection conn(mergingShard->getConnString(), outputNsOrEmpty);
BSONObj mergedResults =
aggRunCommand(conn.get(), dbname, mergeCmd.freeze().toBson(), options);
@@ -490,15 +492,19 @@ bool PipelineCommand::aggPassthrough(OperationContext* txn,
int queryOptions,
std::string& errmsg) {
// Temporary hack. See comment on declaration for details.
- const auto shard = grid.shardRegistry()->getShard(txn, conf->getPrimaryId());
- ShardConnection conn(shard->getConnString(), "");
+ auto shardStatus = grid.shardRegistry()->getShard(txn, conf->getPrimaryId());
+ if (!shardStatus.isOK()) {
+ return appendCommandStatus(out, shardStatus.getStatus());
+ }
+
+ ShardConnection conn(shardStatus.getValue()->getConnString(), "");
BSONObj result = aggRunCommand(conn.get(), conf->name(), cmdObj, queryOptions);
conn.done();
// First append the properly constructed writeConcernError. It will then be skipped
// in appendElementsUnique.
if (auto wcErrorElem = result["writeConcernError"]) {
- appendWriteConcernErrorToCmdResponse(shard->getId(), wcErrorElem, out);
+ appendWriteConcernErrorToCmdResponse(shardStatus.getValue()->getId(), wcErrorElem, out);
}
out.appendElementsUnique(result);
diff --git a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
index 03dd30ae787..42acb26fd11 100644
--- a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
@@ -87,13 +87,14 @@ public:
BSONObjBuilder& result) {
const string target = cmdObj.firstElement().valuestrsafe();
- const auto s = grid.shardRegistry()->getShard(txn, ShardId(target));
- if (!s) {
+ const auto shardStatus = grid.shardRegistry()->getShard(txn, ShardId(target));
+ if (!shardStatus.isOK()) {
string msg(str::stream() << "Could not drop shard '" << target
<< "' because it does not exist");
log() << msg;
return appendCommandStatus(result, Status(ErrorCodes::ShardNotFound, msg));
}
+ const auto s = shardStatus.getValue();
auto catalogClient = grid.catalogClient(txn);
StatusWith<ShardDrainingStatus> removeShardResult =
diff --git a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
index 5471371e0e8..1f580f97493 100644
--- a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
+++ b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
@@ -206,7 +206,8 @@ public:
// The rest of the checks require a connection to the primary db
ConnectionString shardConnString;
{
- const auto shard = grid.shardRegistry()->getShard(txn, config->getPrimaryId());
+ const auto shard =
+ uassertStatusOK(grid.shardRegistry()->getShard(txn, config->getPrimaryId()));
shardConnString = shard->getConnString();
}
@@ -532,10 +533,11 @@ public:
int i = 0;
for (ChunkMap::const_iterator c = chunkMap.begin(); c != chunkMap.end(); ++c, ++i) {
const ShardId& shardId = shardIds[i % numShards];
- const auto to = grid.shardRegistry()->getShard(txn, shardId);
- if (!to) {
+ const auto toStatus = grid.shardRegistry()->getShard(txn, shardId);
+ if (!toStatus.isOK()) {
continue;
}
+ const auto to = toStatus.getValue();
shared_ptr<Chunk> chunk = c->second;
diff --git a/src/mongo/s/commands/cluster_user_management_commands.cpp b/src/mongo/s/commands/cluster_user_management_commands.cpp
index 28c750617d7..177855c50f1 100644
--- a/src/mongo/s/commands/cluster_user_management_commands.cpp
+++ b/src/mongo/s/commands/cluster_user_management_commands.cpp
@@ -854,16 +854,16 @@ Status runUpgradeOnAllShards(OperationContext* txn, int maxSteps, BSONObjBuilder
bool hasWCError = false;
for (const auto& shardId : shardIds) {
- auto shard = shardRegistry->getShard(txn, shardId);
- if (!shard) {
- return {ErrorCodes::ShardNotFound,
- str::stream() << "shard " << shardId << " not found"};
+ auto shardStatus = shardRegistry->getShard(txn, shardId);
+ if (!shardStatus.isOK()) {
+ return shardStatus.getStatus();
}
- auto cmdResult = shard->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- cmdObj,
- Shard::RetryPolicy::kIdempotent);
+ auto cmdResult =
+ shardStatus.getValue()->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ cmdObj,
+ Shard::RetryPolicy::kIdempotent);
auto status = cmdResult.isOK() ? std::move(cmdResult.getValue().commandStatus)
: std::move(cmdResult.getStatus());
if (!status.isOK()) {
diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp
index 317019abb01..ea63a7d020a 100644
--- a/src/mongo/s/commands/cluster_write_cmd.cpp
+++ b/src/mongo/s/commands/cluster_write_cmd.cpp
@@ -267,12 +267,11 @@ private:
const ShardEndpoint* endpoint = *it;
const ReadPreferenceSetting readPref(ReadPreference::PrimaryOnly, TagSet());
- auto shard = grid.shardRegistry()->getShard(txn, endpoint->shardName);
- if (!shard) {
- return Status(ErrorCodes::ShardNotFound,
- "Could not find shard with id " + endpoint->shardName.toString());
+ auto shardStatus = grid.shardRegistry()->getShard(txn, endpoint->shardName);
+ if (!shardStatus.isOK()) {
+ return shardStatus.getStatus();
}
- auto swHostAndPort = shard->getTargeter()->findHost(readPref);
+ auto swHostAndPort = shardStatus.getValue()->getTargeter()->findHost(readPref);
if (!swHostAndPort.isOK()) {
return swHostAndPort.getStatus();
}
@@ -300,8 +299,11 @@ private:
Strategy::CommandResult result;
result.target = host;
{
- const auto shard = grid.shardRegistry()->getShard(txn, host.toString());
- result.shardTargetId = shard->getId();
+ auto shardStatus = grid.shardRegistry()->getShard(txn, host.toString());
+ if (!shardStatus.isOK()) {
+ return shardStatus.getStatus();
+ }
+ result.shardTargetId = shardStatus.getValue()->getId();
}
result.result = response.toBSON();
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index 4610b4cca67..67d2ba99338 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -91,11 +91,12 @@ bool cursorCommandPassthrough(OperationContext* txn,
const BSONObj& cmdObj,
int options,
BSONObjBuilder* out) {
- const auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, conf->getPrimaryId());
- if (!shard) {
- return Command::appendCommandStatus(
- *out, {ErrorCodes::ShardNotFound, "failed to find a valid shard"});
+ const auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, conf->getPrimaryId());
+ if (!shardStatus.isOK()) {
+ invariant(shardStatus.getStatus() == ErrorCodes::ShardNotFound);
+ return Command::appendCommandStatus(*out, shardStatus.getStatus());
}
+ const auto shard = shardStatus.getValue();
ScopedDbConnection conn(shard->getConnString());
auto cursor = conn->query(str::stream() << conf->name() << ".$cmd",
cmdObj,
@@ -198,7 +199,10 @@ private:
const BSONObj& cmdObj,
int options,
BSONObjBuilder& result) {
- const auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, conf->getPrimaryId());
+ const auto shardStatus =
+ Grid::get(txn)->shardRegistry()->getShard(txn, conf->getPrimaryId());
+ const auto shard = uassertStatusOK(shardStatus);
+
ShardConnection conn(shard->getConnString(), "");
BSONObj res;
@@ -673,8 +677,8 @@ public:
}
{
- const auto& shard =
- Grid::get(txn)->shardRegistry()->getShard(txn, confFrom->getPrimaryId());
+ const auto shard = uassertStatusOK(
+ Grid::get(txn)->shardRegistry()->getShard(txn, confFrom->getPrimaryId()));
b.append("fromhost", shard->getConnString().toString());
}
BSONObj fixed = b.obj();
@@ -732,10 +736,12 @@ public:
set<ShardId> shardIds;
cm->getAllShardIds(&shardIds);
for (const ShardId& shardId : shardIds) {
- const auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
- if (!shard) {
+ const auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ if (!shardStatus.isOK()) {
+ invariant(shardStatus.getStatus() == ErrorCodes::ShardNotFound);
continue;
}
+ const auto shard = shardStatus.getValue();
BSONObj res;
{
@@ -918,12 +924,13 @@ public:
set<ShardId> shardIds;
cm->getShardIdsForRange(shardIds, min, max);
for (const ShardId& shardId : shardIds) {
- const auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
- if (!shard) {
+ const auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ if (!shardStatus.isOK()) {
+ invariant(shardStatus.getStatus() == ErrorCodes::ShardNotFound);
continue;
}
- ScopedDbConnection conn(shard->getConnString());
+ ScopedDbConnection conn(shardStatus.getValue()->getConnString());
BSONObj res;
bool ok = conn->runCommand(conf->name(), cmdObj, res);
conn.done();
@@ -1028,12 +1035,15 @@ public:
<< ". Cannot run on sharded namespace.");
}
- const auto primaryShard =
+ const auto primaryShardStatus =
Grid::get(txn)->shardRegistry()->getShard(txn, conf->getPrimaryId());
+ if (!primaryShardStatus.isOK()) {
+ return primaryShardStatus.getStatus();
+ }
BSONObj shardResult;
try {
- ShardConnection conn(primaryShard->getConnString(), "");
+ ShardConnection conn(primaryShardStatus.getValue()->getConnString(), "");
// TODO: this can throw a stale config when mongos is not up-to-date -- fix.
if (!conn->runCommand(nss.db().toString(), command, shardResult, options)) {
@@ -1054,7 +1064,7 @@ public:
Strategy::CommandResult cmdResult;
cmdResult.shardTargetId = conf->getPrimaryId();
cmdResult.result = shardResult;
- cmdResult.target = primaryShard->getConnString();
+ cmdResult.target = primaryShardStatus.getValue()->getConnString();
return ClusterExplain::buildExplainResult(
txn, {cmdResult}, ClusterExplain::kSingleShard, timer.millis(), out);
@@ -1206,12 +1216,13 @@ public:
BSONObjSet all = bsonCmp.makeBSONObjSet();
for (const ShardId& shardId : shardIds) {
- const auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
- if (!shard) {
+ const auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ if (!shardStatus.isOK()) {
+ invariant(shardStatus.getStatus() == ErrorCodes::ShardNotFound);
continue;
}
- ShardConnection conn(shard->getConnString(), fullns);
+ ShardConnection conn(shardStatus.getValue()->getConnString(), fullns);
BSONObj res;
bool ok = conn->runCommand(conf->name(), cmdObj, res, options);
conn.done();
@@ -1524,13 +1535,14 @@ public:
list<shared_ptr<Future::CommandResult>> futures;
BSONArrayBuilder shardArray;
for (const ShardId& shardId : shardIds) {
- const auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
- if (!shard) {
+ const auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ if (!shardStatus.isOK()) {
+ invariant(shardStatus.getStatus() == ErrorCodes::ShardNotFound);
continue;
}
- futures.push_back(
- Future::spawnCommand(shard->getConnString().toString(), dbName, cmdObj, options));
+ futures.push_back(Future::spawnCommand(
+ shardStatus.getValue()->getConnString().toString(), dbName, cmdObj, options));
shardArray.append(shardId.toString());
}
diff --git a/src/mongo/s/commands/run_on_all_shards_cmd.cpp b/src/mongo/s/commands/run_on_all_shards_cmd.cpp
index 44858bb249f..9b0b26cf14b 100644
--- a/src/mongo/s/commands/run_on_all_shards_cmd.cpp
+++ b/src/mongo/s/commands/run_on_all_shards_cmd.cpp
@@ -88,13 +88,17 @@ bool RunOnAllShardsCommand::run(OperationContext* txn,
std::list<std::shared_ptr<Future::CommandResult>> futures;
for (const ShardId& shardId : shardIds) {
- const auto shard = grid.shardRegistry()->getShard(txn, shardId);
- if (!shard) {
+ const auto shardStatus = grid.shardRegistry()->getShard(txn, shardId);
+ if (!shardStatus.isOK()) {
continue;
}
- futures.push_back(Future::spawnCommand(
- shard->getConnString().toString(), dbName, cmdObj, 0, NULL, _useShardConn));
+ futures.push_back(Future::spawnCommand(shardStatus.getValue()->getConnString().toString(),
+ dbName,
+ cmdObj,
+ 0,
+ NULL,
+ _useShardConn));
}
std::vector<ShardAndReply> results;
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index 6459d758beb..e830f9db187 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -225,7 +225,10 @@ void DBConfig::getChunkManagerOrPrimary(OperationContext* txn,
// No namespace
if (i == _collections.end()) {
// If we don't know about this namespace, it's unsharded by default
- primary = grid.shardRegistry()->getShard(txn, _primaryId);
+ auto primaryStatus = grid.shardRegistry()->getShard(txn, _primaryId);
+ if (primaryStatus.isOK()) {
+ primary = primaryStatus.getValue();
+ }
} else {
CollectionInfo& cInfo = i->second;
@@ -236,13 +239,16 @@ void DBConfig::getChunkManagerOrPrimary(OperationContext* txn,
if (_shardingEnabled && cInfo.isSharded()) {
manager = cInfo.getCM();
} else {
- primary = grid.shardRegistry()->getShard(txn, _primaryId);
+ auto primaryStatus = grid.shardRegistry()->getShard(txn, _primaryId);
+ if (primaryStatus.isOK()) {
+ primary = primaryStatus.getValue();
+ }
}
}
}
- verify(manager || primary);
- verify(!manager || !primary);
+ invariant(manager || primary);
+ invariant(!manager || !primary);
}
@@ -578,7 +584,8 @@ bool DBConfig::dropDatabase(OperationContext* txn, string& errmsg) {
// 3
{
- const auto shard = grid.shardRegistry()->getShard(txn, _primaryId);
+ const auto shard = uassertStatusOK(grid.shardRegistry()->getShard(txn, _primaryId));
+
ScopedDbConnection conn(shard->getConnString(), 30.0);
BSONObj res;
if (!conn->dropDatabase(_name, txn->getWriteConcern(), &res)) {
@@ -597,12 +604,12 @@ bool DBConfig::dropDatabase(OperationContext* txn, string& errmsg) {
// 4
for (const ShardId& shardId : shardIds) {
- const auto shard = grid.shardRegistry()->getShard(txn, shardId);
- if (!shard) {
+ const auto shardStatus = grid.shardRegistry()->getShard(txn, shardId);
+ if (!shardStatus.isOK()) {
continue;
}
- ScopedDbConnection conn(shard->getConnString(), 30.0);
+ ScopedDbConnection conn(shardStatus.getValue()->getConnString(), 30.0);
BSONObj res;
if (!conn->dropDatabase(_name, txn->getWriteConcern(), &res)) {
errmsg = res.toString() + " at " + shardId.toString();
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index 7d8b12260a7..0ad02215ed2 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -168,12 +168,11 @@ StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
&shardIds);
for (auto id : shardIds) {
- auto shard = shardRegistry->getShard(txn, id);
- if (!shard) {
- return {ErrorCodes::ShardNotFound,
- str::stream() << "Shard with id: " << id << " is not found."};
+ auto shardStatus = shardRegistry->getShard(txn, id);
+ if (!shardStatus.isOK()) {
+ return shardStatus.getStatus();
}
- shards.emplace_back(shard);
+ shards.emplace_back(shardStatus.getValue());
}
}
diff --git a/src/mongo/s/shard_util.cpp b/src/mongo/s/shard_util.cpp
index 8e690c93188..4b1b2b2fb51 100644
--- a/src/mongo/s/shard_util.cpp
+++ b/src/mongo/s/shard_util.cpp
@@ -55,17 +55,17 @@ const char kShouldMigrate[] = "shouldMigrate";
} // namespace
StatusWith<long long> retrieveTotalShardSize(OperationContext* txn, const ShardId& shardId) {
- auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
- if (!shard) {
- return Status(ErrorCodes::ShardNotFound,
- str::stream() << "shard " << shardId << " not found");
+ auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ if (!shardStatus.isOK()) {
+ return shardStatus.getStatus();
}
+
auto listDatabasesStatus =
- shard->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
- "admin",
- BSON("listDatabases" << 1),
- Shard::RetryPolicy::kIdempotent);
+ shardStatus.getValue()->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
+ "admin",
+ BSON("listDatabases" << 1),
+ Shard::RetryPolicy::kIdempotent);
if (!listDatabasesStatus.isOK()) {
return std::move(listDatabasesStatus.getStatus());
}
@@ -94,16 +94,17 @@ StatusWith<BSONObj> selectMedianKey(OperationContext* txn,
cmd.append(kMaxKey, maxKey);
cmd.appendBool("force", true);
- auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
- if (!shard) {
- return Status(ErrorCodes::ShardNotFound,
- str::stream() << "shard " << shardId << " not found");
+ auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ if (!shardStatus.isOK()) {
+ return shardStatus.getStatus();
}
- auto cmdStatus = shard->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
- "admin",
- cmd.obj(),
- Shard::RetryPolicy::kIdempotent);
+
+ auto cmdStatus =
+ shardStatus.getValue()->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
+ "admin",
+ cmd.obj(),
+ Shard::RetryPolicy::kIdempotent);
if (!cmdStatus.isOK()) {
return std::move(cmdStatus.getStatus());
}
@@ -139,16 +140,17 @@ StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* txn,
cmd.append("maxSplitPoints", maxPoints);
cmd.append("maxChunkObjects", maxObjs);
- auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
- if (!shard) {
- return Status(ErrorCodes::ShardNotFound,
- str::stream() << "shard " << shardId << " not found");
+ auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ if (!shardStatus.isOK()) {
+ return shardStatus.getStatus();
}
- auto cmdStatus = shard->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
- "admin",
- cmd.obj(),
- Shard::RetryPolicy::kIdempotent);
+
+ auto cmdStatus =
+ shardStatus.getValue()->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
+ "admin",
+ cmd.obj(),
+ Shard::RetryPolicy::kIdempotent);
if (!cmdStatus.isOK()) {
return std::move(cmdStatus.getStatus());
}
@@ -206,16 +208,16 @@ StatusWith<boost::optional<ChunkRange>> splitChunkAtMultiplePoints(
Status status{ErrorCodes::InternalError, "Uninitialized value"};
BSONObj cmdResponse;
- auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
- if (!shard) {
- status =
- Status(ErrorCodes::ShardNotFound, str::stream() << "shard " << shardId << " not found");
+ auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ if (!shardStatus.isOK()) {
+ status = shardStatus.getStatus();
} else {
- auto cmdStatus = shard->runCommand(txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- cmdObj,
- Shard::RetryPolicy::kNotIdempotent);
+ auto cmdStatus =
+ shardStatus.getValue()->runCommand(txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ cmdObj,
+ Shard::RetryPolicy::kNotIdempotent);
if (!cmdStatus.isOK()) {
status = std::move(cmdStatus.getStatus());
} else {
diff --git a/src/mongo/s/write_ops/batch_write_exec.cpp b/src/mongo/s/write_ops/batch_write_exec.cpp
index 1c1b0869933..34f399267f4 100644
--- a/src/mongo/s/write_ops/batch_write_exec.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec.cpp
@@ -170,16 +170,14 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
continue;
// Figure out what host we need to dispatch our targeted batch
- bool resolvedHost = true;
const ReadPreferenceSetting readPref(ReadPreference::PrimaryOnly, TagSet());
- auto shard = Grid::get(txn)->shardRegistry()->getShard(
+ auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(
txn, nextBatch->getEndpoint().shardName);
- if (!shard) {
- Status status = Status(ErrorCodes::ShardNotFound,
- str::stream() << "unknown shard name "
- << nextBatch->getEndpoint().shardName);
- resolvedHost = false;
+ bool resolvedHost = false;
+ ConnectionString shardHost;
+ if (!shardStatus.isOK()) {
+ Status status(std::move(shardStatus.getStatus()));
// Record a resolve failure
// TODO: It may be necessary to refresh the cache if stale, or maybe just
@@ -189,20 +187,25 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
LOG(4) << "unable to send write batch to " << nextBatch->getEndpoint().shardName
<< causedBy(status);
batchOp.noteBatchError(*nextBatch, error);
- }
-
- auto swHostAndPort = shard->getTargeter()->findHost(readPref);
- if (!swHostAndPort.isOK()) {
- resolvedHost = false;
-
- // Record a resolve failure
- // TODO: It may be necessary to refresh the cache if stale, or maybe just
- // cancel and retarget the batch
- WriteErrorDetail error;
- buildErrorFrom(swHostAndPort.getStatus(), &error);
- LOG(4) << "unable to send write batch to " << nextBatch->getEndpoint().shardName
- << causedBy(swHostAndPort.getStatus());
- batchOp.noteBatchError(*nextBatch, error);
+ } else {
+ auto shard = shardStatus.getValue();
+
+ auto swHostAndPort = shard->getTargeter()->findHost(readPref);
+ if (!swHostAndPort.isOK()) {
+
+ // Record a resolve failure
+ // TODO: It may be necessary to refresh the cache if stale, or maybe just
+ // cancel and retarget the batch
+ WriteErrorDetail error;
+ buildErrorFrom(swHostAndPort.getStatus(), &error);
+ LOG(4) << "unable to send write batch to "
+ << nextBatch->getEndpoint().shardName
+ << causedBy(swHostAndPort.getStatus());
+ batchOp.noteBatchError(*nextBatch, error);
+ } else {
+ shardHost = ConnectionString(std::move(swHostAndPort.getValue()));
+ resolvedHost = true;
+ }
}
if (!resolvedHost) {
@@ -216,8 +219,6 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
continue;
}
- ConnectionString shardHost(swHostAndPort.getValue());
-
// If we already have a batch for this host, wait until the next time
OwnedHostBatchMap::MapType::iterator pendingIt = pendingBatches.find(shardHost);
if (pendingIt != pendingBatches.end())