Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/s/balancer/balancer.cpp                     4
-rw-r--r--  src/mongo/db/s/config/initial_split_policy.cpp           2
-rw-r--r--  src/mongo/db/s/create_collection_coordinator.cpp         2
-rw-r--r--  src/mongo/db/s/sessions_collection_config_server.cpp     2
-rw-r--r--  src/mongo/s/chunk_manager_targeter.cpp                   2
-rw-r--r--  src/mongo/s/client/shard_registry.cpp                   26
-rw-r--r--  src/mongo/s/client/shard_registry.h                     19
-rw-r--r--  src/mongo/s/client/shard_remote_test.cpp                 2
-rw-r--r--  src/mongo/s/cluster_commands_helpers.cpp                 2
-rw-r--r--  src/mongo/s/commands/cluster_fsync_cmd.cpp               4
-rw-r--r--  src/mongo/s/commands/cluster_list_databases_cmd.cpp      6
-rw-r--r--  src/mongo/s/sharding_task_executor_pool_controller.cpp   7
12 files changed, 27 insertions, 51 deletions
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index 071420b5ce0..6822de95f8a 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -894,7 +894,7 @@ void Balancer::_sleepFor(OperationContext* opCtx, Milliseconds waitTimeout) {
bool Balancer::_checkOIDs(OperationContext* opCtx) {
auto shardingContext = Grid::get(opCtx);
- const auto all = shardingContext->shardRegistry()->getAllShardIdsNoReload();
+ const auto all = shardingContext->shardRegistry()->getAllShardIds(opCtx);
// map of OID machine ID => shardId
map<int, ShardId> oids;
@@ -908,7 +908,7 @@ bool Balancer::_checkOIDs(OperationContext* opCtx) {
if (!shardStatus.isOK()) {
continue;
}
- const auto s = shardStatus.getValue();
+ const auto s = std::move(shardStatus.getValue());
auto result = uassertStatusOK(
s->runCommandWithFixedRetryAttempts(opCtx,
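
Why the added std::move matters: StatusWith's getValue() returns an lvalue reference, so the old copy-initialization of s performed an atomic shared_ptr refcount increment, while the move simply steals the pointer since shardStatus is dead after this line. A minimal standalone sketch (StatusWithLike is a stand-in, not MongoDB's StatusWith):

#include <memory>
#include <utility>

template <typename T>
struct StatusWithLike {
    T value;
    T& getValue() { return value; }
};

int main() {
    StatusWithLike<std::shared_ptr<int>> sw{std::make_shared<int>(42)};
    // const auto s = sw.getValue();          // copy: atomic refcount 1 -> 2
    const auto s = std::move(sw.getValue());  // move: pointer transferred, sw.value left null
    return *s == 42 ? 0 : 1;
}
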
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 0b2ab1b0474..c340e017443 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -56,7 +56,7 @@ using ZoneShardMap = StringMap<std::vector<ShardId>>;
std::vector<ShardId> getAllShardIdsSorted(OperationContext* opCtx) {
// Many tests assume that chunks will be placed on shards
// according to their IDs in ascending lexical order.
- auto shardIds = Grid::get(opCtx)->shardRegistry()->getAllShardIdsNoReload();
+ auto shardIds = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx);
std::sort(shardIds.begin(), shardIds.end());
return shardIds;
}
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index ccbad667d35..216d88f5ddf 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -570,7 +570,7 @@ void CreateCollectionCoordinator::_checkCommandArguments(OperationContext* opCtx
// less danger of an OOM error.
const int maxNumInitialChunksForShards =
- Grid::get(opCtx)->shardRegistry()->getNumShardsNoReload() * shardutil::kMaxSplitPoints;
+ Grid::get(opCtx)->shardRegistry()->getNumShards(opCtx) * shardutil::kMaxSplitPoints;
const int maxNumInitialChunksTotal = 1000 * 1000; // Arbitrary limit to memory consumption
int numChunks = _request.getNumInitialChunks().value();
uassert(ErrorCodes::InvalidOptions,
diff --git a/src/mongo/db/s/sessions_collection_config_server.cpp b/src/mongo/db/s/sessions_collection_config_server.cpp
index 4376166a365..995b75aa452 100644
--- a/src/mongo/db/s/sessions_collection_config_server.cpp
+++ b/src/mongo/db/s/sessions_collection_config_server.cpp
@@ -56,7 +56,7 @@ void SessionsCollectionConfigServer::_shardCollectionIfNeeded(OperationContext*
uassert(ErrorCodes::ShardNotFound,
str::stream() << "Failed to create " << NamespaceString::kLogicalSessionsNamespace
<< ": cannot create the collection until there are shards",
- Grid::get(opCtx)->shardRegistry()->getNumShardsNoReload() != 0);
+ Grid::get(opCtx)->shardRegistry()->getNumShards(opCtx) != 0);
ShardsvrCreateCollection shardsvrCollRequest(NamespaceString::kLogicalSessionsNamespace);
CreateCollectionRequest requestParamsObj;
diff --git a/src/mongo/s/chunk_manager_targeter.cpp b/src/mongo/s/chunk_manager_targeter.cpp
index bec32dfd8c4..a446334712a 100644
--- a/src/mongo/s/chunk_manager_targeter.cpp
+++ b/src/mongo/s/chunk_manager_targeter.cpp
@@ -616,7 +616,7 @@ std::vector<ShardEndpoint> ChunkManagerTargeter::targetAllShards(OperationContex
// implies the collection is sharded, so we should always have a chunk manager.
invariant(_cm.isSharded());
- auto shardIds = Grid::get(opCtx)->shardRegistry()->getAllShardIdsNoReload();
+ auto shardIds = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx);
std::vector<ShardEndpoint> endpoints;
for (auto&& shardId : shardIds) {
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index ff011591319..73d806e7f08 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -548,6 +548,12 @@ ShardRegistry::Cache::ValueHandle ShardRegistry::_getData(OperationContext* opCt
return _getDataAsync().get(opCtx);
}
+bool ShardRegistry::isConfigServer(const HostAndPort& host) const {
+ const auto configsvrConnString = getConfigServerConnectionString();
+ const auto& configsvrHosts = configsvrConnString.getServers();
+ return std::find(configsvrHosts.begin(), configsvrHosts.end(), host) != configsvrHosts.end();
+}
+
// TODO SERVER-50206: Remove usage of these non-causally consistent accessors.
ShardRegistry::Cache::ValueHandle ShardRegistry::_getCachedData() const {
@@ -555,18 +561,6 @@ ShardRegistry::Cache::ValueHandle ShardRegistry::_getCachedData() const {
return _cache->peekLatestCached(_kSingleton);
}
-std::shared_ptr<Shard> ShardRegistry::getShardNoReload(const ShardId& shardId) const {
- // First check if this is a config shard lookup.
- {
- stdx::lock_guard<Latch> lk(_mutex);
- if (auto shard = _configShardData.findShard(shardId)) {
- return shard;
- }
- }
- auto data = _getCachedData();
- return data->findShard(shardId);
-}
-
std::shared_ptr<Shard> ShardRegistry::getShardForHostNoReload(const HostAndPort& host) const {
// First check if this is a config shard lookup.
{
@@ -579,14 +573,6 @@ std::shared_ptr<Shard> ShardRegistry::getShardForHostNoReload(const HostAndPort&
return data->findByHostAndPort(host);
}
-std::vector<ShardId> ShardRegistry::getAllShardIdsNoReload() const {
- return _getCachedData()->getAllShardIds();
-}
-
-int ShardRegistry::getNumShardsNoReload() const {
- return _getCachedData()->getAllShardIds().size();
-}
-
std::shared_ptr<Shard> ShardRegistry::_getShardForRSNameNoReload(const std::string& name) const {
// First check if this is a config shard lookup.
{
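
The new ShardRegistry::isConfigServer() answers CSRS membership with a linear std::find over the config connection string's host list, which is cheap for the handful of members a config replica set has; it replaces the previous pattern of looking up a Shard for the host and then asking shard->isConfig(). A self-contained sketch of the same check (hosts simplified to strings; the real code compares mongo::HostAndPort values from getConfigServerConnectionString().getServers()):

#include <algorithm>
#include <string>
#include <vector>

// Stand-in for the CSRS host-membership test.
bool isConfigHost(const std::vector<std::string>& configsvrHosts, const std::string& host) {
    return std::find(configsvrHosts.begin(), configsvrHosts.end(), host) != configsvrHosts.end();
}

int main() {
    const std::vector<std::string> csrs{"cfg1:27019", "cfg2:27019", "cfg3:27019"};
    return (isConfigHost(csrs, "cfg2:27019") && !isConfigHost(csrs, "shard0:27018")) ? 0 : 1;
}
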
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index 641f7195a23..263c7dfa434 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -292,6 +292,13 @@ public:
static void updateReplicaSetOnConfigServer(ServiceContext* serviceContex,
const ConnectionString& connStr) noexcept;
+ /*
+ * Returns true if the given host is part of the config server replica set.
+ *
+ * This method relies on the RSM to have pushed the correct CSRS membership information.
+ */
+ bool isConfigServer(const HostAndPort& host) const;
+
// TODO SERVER-50206: Remove usage of these non-causally consistent accessors.
//
// Their most important current users are dispatching requests to hosts, and processing
@@ -301,23 +308,11 @@ public:
// refreshed via _lookup()).
/**
- * Returns a shared pointer to the shard object with the given shard id. The shardId parameter
- * can actually be the shard name or the HostAndPort for any server in the shard. Will not
- * refresh the shard registry or otherwise perform any network traffic. This means that if the
- * shard was recently added it may not be found. USE WITH CAUTION.
- */
- std::shared_ptr<Shard> getShardNoReload(const ShardId& shardId) const;
-
- /**
* Finds the Shard that the mongod listening at this HostAndPort is a member of. Will not
* refresh the shard registry or otherwise perform any network traffic.
*/
std::shared_ptr<Shard> getShardForHostNoReload(const HostAndPort& shardHost) const;
- std::vector<ShardId> getAllShardIdsNoReload() const;
-
- int getNumShardsNoReload() const;
-
private:
/**
* The ShardRegistry uses the ReadThroughCache to handle refreshing itself. The cache stores
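
The header change removes the last list-valued NoReload accessors; callers migrate to the opCtx-taking overloads, which read through the registry's cache rather than peeking at the latest cached snapshot. A self-contained stand-in (not MongoDB's ReadThroughCache) for the semantic difference:

#include <functional>
#include <string>
#include <vector>

struct ShardCache {
    std::vector<std::string> cached;
    std::function<std::vector<std::string>()> lookup;

    // Analogue of peekLatestCached(): never refreshes, may be stale.
    std::vector<std::string> peek() const { return cached; }

    // Analogue of the causally consistent read: refreshes via lookup.
    std::vector<std::string> get() {
        cached = lookup();  // in the real cache this can block on a network round trip
        return cached;
    }
};

int main() {
    ShardCache cache{{"shard0"}, [] { return std::vector<std::string>{"shard0", "shard1"}; }};
    const bool staleMissesNewShard = cache.peek().size() == 1;
    const bool freshSeesNewShard = cache.get().size() == 2;
    return (staleMissesNewShard && freshSeesNewShard) ? 0 : 1;
}
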
diff --git a/src/mongo/s/client/shard_remote_test.cpp b/src/mongo/s/client/shard_remote_test.cpp
index 9db1c62ec2e..e9d597856be 100644
--- a/src/mongo/s/client/shard_remote_test.cpp
+++ b/src/mongo/s/client/shard_remote_test.cpp
@@ -80,7 +80,7 @@ protected:
}
void runDummyCommandOnShard(ShardId shardId) {
- auto shard = shardRegistry()->getShardNoReload(shardId);
+ auto shard = unittest::assertGet(shardRegistry()->getShard(operationContext(), shardId));
uassertStatusOK(shard->runCommand(operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"unusedDb",
diff --git a/src/mongo/s/cluster_commands_helpers.cpp b/src/mongo/s/cluster_commands_helpers.cpp
index 7b4a24a03f6..495b7886869 100644
--- a/src/mongo/s/cluster_commands_helpers.cpp
+++ b/src/mongo/s/cluster_commands_helpers.cpp
@@ -388,7 +388,7 @@ std::vector<AsyncRequestsSender::Response> scatterGatherUnversionedTargetAllShar
const ReadPreferenceSetting& readPref,
Shard::RetryPolicy retryPolicy) {
std::vector<AsyncRequestsSender::Request> requests;
- for (auto shardId : Grid::get(opCtx)->shardRegistry()->getAllShardIdsNoReload())
+ for (auto&& shardId : Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx))
requests.emplace_back(std::move(shardId), cmdObj);
return gatherResponses(opCtx, dbName, readPref, retryPolicy, requests);
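
The loop tweak pairs with the accessor change: getAllShardIds(opCtx) returns its vector by value, so auto&& binds to each element of that temporary and the std::move in emplace_back can steal each ShardId, instead of first copying it into a by-value loop variable as the old for (auto shardId : ...) did. A minimal sketch with std::string standing in for ShardId:

#include <string>
#include <utility>
#include <vector>

std::vector<std::string> makeIds() { return {"shard0", "shard1"}; }

int main() {
    std::vector<std::string> requests;
    for (auto&& id : makeIds())
        requests.emplace_back(std::move(id));  // moves: the temporary vector's elements are expiring anyway
    return requests.size() == 2 ? 0 : 1;
}
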
diff --git a/src/mongo/s/commands/cluster_fsync_cmd.cpp b/src/mongo/s/commands/cluster_fsync_cmd.cpp
index e82469acee3..00972e17815 100644
--- a/src/mongo/s/commands/cluster_fsync_cmd.cpp
+++ b/src/mongo/s/commands/cluster_fsync_cmd.cpp
@@ -82,14 +82,14 @@ public:
bool ok = true;
auto const shardRegistry = Grid::get(opCtx)->shardRegistry();
- const auto shardIds = shardRegistry->getAllShardIdsNoReload();
+ const auto shardIds = shardRegistry->getAllShardIds(opCtx);
for (const ShardId& shardId : shardIds) {
auto shardStatus = shardRegistry->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
continue;
}
- const auto s = shardStatus.getValue();
+ const auto s = std::move(shardStatus.getValue());
auto response = uassertStatusOK(s->runCommandWithFixedRetryAttempts(
opCtx,
diff --git a/src/mongo/s/commands/cluster_list_databases_cmd.cpp b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
index 21ea0a14f2b..7aca53965d5 100644
--- a/src/mongo/s/commands/cluster_list_databases_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
@@ -104,7 +104,7 @@ public:
std::map<std::string, long long> sizes;
std::map<std::string, std::unique_ptr<BSONObjBuilder>> dbShardInfo;
- auto shardIds = shardRegistry->getAllShardIdsNoReload();
+ auto shardIds = shardRegistry->getAllShardIds(opCtx);
shardIds.emplace_back(ShardId::kConfigServerId);
// { filter: matchExpression }.
@@ -112,11 +112,11 @@ public:
opCtx, this, CommandHelpers::filterCommandRequestForPassthrough(cmd.toBSON({})));
for (const ShardId& shardId : shardIds) {
- const auto shardStatus = shardRegistry->getShard(opCtx, shardId);
+ auto shardStatus = shardRegistry->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
continue;
}
- const auto s = shardStatus.getValue();
+ const auto s = std::move(shardStatus.getValue());
auto response = uassertStatusOK(
s->runCommandWithFixedRetryAttempts(opCtx,
diff --git a/src/mongo/s/sharding_task_executor_pool_controller.cpp b/src/mongo/s/sharding_task_executor_pool_controller.cpp
index 4d8f536d61f..bd2313125a8 100644
--- a/src/mongo/s/sharding_task_executor_pool_controller.cpp
+++ b/src/mongo/s/sharding_task_executor_pool_controller.cpp
@@ -59,12 +59,7 @@ void emplaceOrInvariant(Map&& map, Args&&... args) noexcept {
}
bool isConfigServer(const ShardRegistry* sr, const HostAndPort& peer) {
- if (!sr)
- return false;
- auto shard = sr->getShardForHostNoReload(peer);
- if (!shard)
- return false;
- return shard->isConfig();
+ return sr && sr->isConfigServer(peer);
}
} // namespace
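
The rewritten helper is behavior-preserving: short-circuit && keeps the old nullptr guard, and ShardRegistry::isConfigServer(peer) subsumes the lookup-then-isConfig() chain. A compact standalone sketch (Registry is a stand-in for ShardRegistry, hosts simplified to strings):

#include <algorithm>
#include <string>
#include <vector>

struct Registry {
    std::vector<std::string> csrsHosts;
    bool isConfigServer(const std::string& host) const {
        return std::find(csrsHosts.begin(), csrsHosts.end(), host) != csrsHosts.end();
    }
};

bool isConfigServer(const Registry* sr, const std::string& peer) {
    return sr && sr->isConfigServer(peer);  // nullptr registry => false, as before
}

int main() {
    Registry reg{{"cfg1:27019", "cfg2:27019"}};
    return (isConfigServer(&reg, "cfg1:27019") && !isConfigServer(nullptr, "cfg1:27019")) ? 0 : 1;
}
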