author     Ben Caimano <ben.caimano@mongodb.com>             2020-01-17 16:51:39 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-03-02 19:46:59 +0000
commit     22f254f0c19e2737bbe15104cb1a8bdc8affae24 (patch)
tree       9f39536514a2da37a840b0e88aa4acb77e7faeae
parent     0f89b28128bce591388b20e38fd6c675eaac2b15 (diff)
download   mongo-22f254f0c19e2737bbe15104cb1a8bdc8affae24.tar.gz
SERVER-45546 Do not create HostPools for passive members
(cherry picked from commit c8714ef5051fb1fd1d094b7cc505db0099f7dcaa)
-rw-r--r--   src/mongo/s/sharding_task_executor_pool_controller.cpp   20
-rw-r--r--   src/mongo/s/sharding_task_executor_pool_controller.h       7
2 files changed, 18 insertions(+), 9 deletions(-)
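At a glance: _addGroup previously grouped every host in the replica set's connection string, and updateHost special-cased passive members afterwards; with this patch, passives are filtered out when the group is built, so no HostPool is ever created or matched for them. Below is a minimal sketch of that filtering, using hypothetical simplified types (SetState, Group, plain std::string hosts) in place of the real ReplicaSetChangeNotifier::State and HostAndPort:

#include <iostream>
#include <set>
#include <string>
#include <vector>

// Simplified stand-ins for ReplicaSetChangeNotifier::State and GroupData;
// these hypothetical types use plain strings instead of HostAndPort.
struct SetState {
    std::vector<std::string> servers;  // every host in the connection string
    std::set<std::string> passives;    // priority-0 (passive) members
    std::string primary;
};

struct Group {
    std::string primary;
    std::vector<std::string> members;  // active members only
};

// Mirrors the new _addGroup filtering: passive members never make it into
// the group's member list, so no pool bookkeeping happens for them.
Group makeGroup(const SetState& state) {
    Group group;
    group.primary = state.primary;
    for (const auto& host : state.servers) {
        if (!state.passives.count(host)) {
            group.members.push_back(host);
        }
    }
    return group;
}

int main() {
    SetState state{{"a:27017", "b:27017", "c:27017"}, {"c:27017"}, "a:27017"};
    for (const auto& host : makeGroup(state).members) {
        std::cout << host << "\n";  // prints a:27017 and b:27017; c:27017 is passive
    }
}

A side effect visible in the header change further down: GroupData now keeps only the primary and the active member list rather than retaining the whole ReplicaSetChangeNotifier::State.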
diff --git a/src/mongo/s/sharding_task_executor_pool_controller.cpp b/src/mongo/s/sharding_task_executor_pool_controller.cpp
index 871293699ea..cc7b2197f48 100644
--- a/src/mongo/s/sharding_task_executor_pool_controller.cpp
+++ b/src/mongo/s/sharding_task_executor_pool_controller.cpp
@@ -99,10 +99,18 @@ Status ShardingTaskExecutorPoolController::onUpdateMatchingStrategy(const std::s
 void ShardingTaskExecutorPoolController::_addGroup(WithLock,
                                                    const ReplicaSetChangeNotifier::State& state) {
-    auto groupData = std::make_shared<GroupData>(state);
+    auto groupData = std::make_shared<GroupData>();
+    groupData->primary = state.primary;
 
-    // Mark each host with this groupData
+    // Find each active member
     for (auto& host : state.connStr.getServers()) {
+        if (!state.passives.count(host)) {
+            groupData->members.push_back(host);
+        }
+    }
+
+    // Mark each host with this groupData
+    for (auto& host : groupData->members) {
         auto& groupAndId = _groupAndIds[host];
         invariant(!groupAndId.groupData);
@@ -127,7 +135,7 @@ void ShardingTaskExecutorPoolController::_removeGroup(WithLock, const std::strin
     }
 
     auto& groupData = it->second;
-    for (auto& host : groupData->state.connStr.getServers()) {
+    for (auto& host : groupData->members) {
         auto& groupAndId = getOrInvariant(_groupAndIds, host);
         groupAndId.groupData.reset();
         if (groupAndId.maybeId) {
@@ -221,13 +229,13 @@ auto ShardingTaskExecutorPoolController::updateHost(PoolId id, const HostState&
     // If the pool isn't in a groupData, we can return now
     auto groupData = poolData.groupData.lock();
-    if (!groupData || groupData->state.passives.count(poolData.host)) {
+    if (!groupData) {
         return {{poolData.host}, poolData.isAbleToShutdown};
     }
 
     switch (gParameters.matchingStrategy.load()) {
         case MatchingStrategy::kMatchPrimaryNode: {
-            if (groupData->state.primary == poolData.host) {
+            if (groupData->primary == poolData.host) {
                 groupData->target = poolData.target;
             }
         } break;
@@ -254,7 +262,7 @@ auto ShardingTaskExecutorPoolController::updateHost(PoolId id, const HostState&
         std::all_of(groupData->poolIds.begin(), groupData->poolIds.end(), [&](auto otherId) {
             return getOrInvariant(_poolDatas, otherId).isAbleToShutdown;
         });
-    return {groupData->state.connStr.getServers(), shouldShutdown};
+    return {groupData->members, shouldShutdown};
 }
 
 void ShardingTaskExecutorPoolController::removeHost(PoolId id) {
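The updateHost hunks above follow from the filtering in _addGroup: a passive host's pool is never registered with a GroupData, so its weak_ptr never locks and the old groupData->state.passives.count(poolData.host) special case becomes redundant. A rough, self-contained sketch of that control flow, again with hypothetical simplified types:

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Group {
    std::string primary;
    std::vector<std::string> members;
};

struct PoolData {
    std::string host;
    std::weak_ptr<Group> group;  // never set for passive hosts after this change
};

// Which hosts' pools are considered together when this pool is updated.
std::vector<std::string> relatedHosts(const PoolData& pool) {
    auto group = pool.group.lock();
    if (!group) {
        return {pool.host};  // ungrouped (including passive) pools stand alone
    }
    return group->members;   // grouped pools are matched against active members
}

int main() {
    auto group = std::make_shared<Group>(Group{"a:27017", {"a:27017", "b:27017"}});
    PoolData grouped{"b:27017", group};
    PoolData passive{"c:27017", {}};  // a passive host was never given a group
    std::cout << relatedHosts(grouped).size() << " vs "
              << relatedHosts(passive).size() << "\n";  // 2 vs 1
}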
diff --git a/src/mongo/s/sharding_task_executor_pool_controller.h b/src/mongo/s/sharding_task_executor_pool_controller.h
index d9b82233974..187fbba55de 100644
--- a/src/mongo/s/sharding_task_executor_pool_controller.h
+++ b/src/mongo/s/sharding_task_executor_pool_controller.h
@@ -147,10 +147,11 @@ private:
      * Note that a PoolData can find itself orphaned from its GroupData during a reconfig.
      */
     struct GroupData {
-        explicit GroupData(const ReplicaSetChangeNotifier::State& state_) : state{state_} {}
+        // The members for this group
+        std::vector<HostAndPort> members;
 
-        // The ReplicaSet state for this set
-        ReplicaSetChangeNotifier::State state;
+        // The primary member for this group
+        HostAndPort primary;
 
         // Id for each pool in the set
         stdx::unordered_set<PoolId> poolIds;