author     Ben Caimano <ben.caimano@mongodb.com>    2020-01-17 16:51:39 +0000
committer  evergreen <evergreen@mongodb.com>        2020-01-17 16:51:39 +0000
commit     c8714ef5051fb1fd1d094b7cc505db0099f7dcaa (patch)
tree       4237f4d173f48abbadfb2777ad60cc14fe396bb8 /src/mongo/s/sharding_task_executor_pool_controller.cpp
parent     5eea91ee17a4e4cbf339768d3494675142a18978 (diff)
download   mongo-c8714ef5051fb1fd1d094b7cc505db0099f7dcaa.tar.gz
SERVER-45546 Do not create HostPools for passive members
Diffstat (limited to 'src/mongo/s/sharding_task_executor_pool_controller.cpp')
-rw-r--r--  src/mongo/s/sharding_task_executor_pool_controller.cpp | 20
1 file changed, 14 insertions, 6 deletions
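
The core of the change, shown in the diff below, is the member filter introduced in _addGroup(): the controller now records only a replica set's active hosts and skips any host that appears in the set's passives list (priority-0 members that cannot become primary). The snippet that follows is a minimal, self-contained sketch of that filtering, using std::string in place of HostAndPort and plain standard-library containers; it is an illustration under those assumptions, not the MongoDB implementation.

    // Standalone sketch of the filtering _addGroup() now performs:
    // keep only the hosts that are not listed as passive.
    #include <algorithm>
    #include <iostream>
    #include <iterator>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
        // All servers advertised in the replica set connection string.
        const std::vector<std::string> servers{"rs0a:27017", "rs0b:27017", "rs0c:27017"};
        // Hosts the topology reports as passive.
        const std::set<std::string> passives{"rs0c:27017"};

        // Active members only, mirroring the loop added to _addGroup().
        std::vector<std::string> members;
        std::copy_if(servers.begin(), servers.end(), std::back_inserter(members),
                     [&](const std::string& host) { return passives.count(host) == 0; });

        for (const auto& host : members) {
            std::cout << host << '\n';  // prints rs0a:27017 and rs0b:27017
        }
        return 0;
    }
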
diff --git a/src/mongo/s/sharding_task_executor_pool_controller.cpp b/src/mongo/s/sharding_task_executor_pool_controller.cpp
index 7e78093c22c..e20a59e128b 100644
--- a/src/mongo/s/sharding_task_executor_pool_controller.cpp
+++ b/src/mongo/s/sharding_task_executor_pool_controller.cpp
@@ -99,10 +99,18 @@ Status ShardingTaskExecutorPoolController::onUpdateMatchingStrategy(const std::s
 void ShardingTaskExecutorPoolController::_addGroup(WithLock,
                                                    const ReplicaSetChangeNotifier::State& state) {
-    auto groupData = std::make_shared<GroupData>(state);
+    auto groupData = std::make_shared<GroupData>();
+    groupData->primary = state.primary;
 
-    // Mark each host with this groupData
+    // Find each active member
     for (auto& host : state.connStr.getServers()) {
+        if (!state.passives.count(host)) {
+            groupData->members.push_back(host);
+        }
+    }
+
+    // Mark each host with this groupData
+    for (auto& host : groupData->members) {
         auto& groupAndId = _groupAndIds[host];
         invariant(!groupAndId.groupData);
@@ -127,7 +135,7 @@ void ShardingTaskExecutorPoolController::_removeGroup(WithLock, const std::strin
     }
 
     auto& groupData = it->second;
-    for (auto& host : groupData->state.connStr.getServers()) {
+    for (auto& host : groupData->members) {
         auto& groupAndId = getOrInvariant(_groupAndIds, host);
         groupAndId.groupData.reset();
         if (groupAndId.maybeId) {
@@ -221,13 +229,13 @@ auto ShardingTaskExecutorPoolController::updateHost(PoolId id, const HostState&
     // If the pool isn't in a groupData, we can return now
     auto groupData = poolData.groupData.lock();
-    if (!groupData || groupData->state.passives.count(poolData.host)) {
+    if (!groupData) {
         return {{poolData.host}, poolData.isAbleToShutdown};
     }
 
     switch (gParameters.matchingStrategy.load()) {
         case MatchingStrategy::kMatchPrimaryNode: {
-            if (groupData->state.primary == poolData.host) {
+            if (groupData->primary == poolData.host) {
                 groupData->target = poolData.target;
             }
         } break;
@@ -254,7 +262,7 @@ auto ShardingTaskExecutorPoolController::updateHost(PoolId id, const HostState&
         std::all_of(groupData->poolIds.begin(), groupData->poolIds.end(), [&](auto otherId) {
             return getOrInvariant(_poolDatas, otherId).isAbleToShutdown;
         });
-    return {groupData->state.connStr.getServers(), shouldShutdown};
+    return {groupData->members, shouldShutdown};
 }
 
 void ShardingTaskExecutorPoolController::removeHost(PoolId id) {
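
Taken together, the hunks above stop storing the full ReplicaSetChangeNotifier::State on the group and keep only the fields the controller still consults: the primary and the list of active (non-passive) members. Below is a simplified sketch of the resulting shape, using only the field names visible in this diff and a std::string stand-in for HostAndPort; the real GroupData also tracks poolIds, target, and other bookkeeping not shown here.

    #include <string>
    #include <vector>

    // Stand-in for mongo::HostAndPort so this sketch compiles on its own.
    using HostAndPort = std::string;

    // Simplified sketch only; field names are the ones visible in the diff above.
    struct GroupData {
        HostAndPort primary;               // copied from state.primary in _addGroup()
        std::vector<HostAndPort> members;  // active hosts only; passives are never added
    };

    int main() {
        GroupData group;
        group.primary = "rs0a:27017";
        group.members = {"rs0a:27017", "rs0b:27017"};  // a passive rs0c:27017 would be omitted
        return 0;
    }

Because passive hosts never enter members, updateHost() no longer needs the per-call groupData->state.passives check removed in the third hunk, and both _removeGroup() and the group-wide shutdown decision iterate over active members only.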