author    Ben Caimano <ben.caimano@mongodb.com>  2020-01-17 16:51:39 +0000
committer A. Jesse Jiryu Davis <jesse@mongodb.com>  2020-01-27 15:40:40 -0500
commit    445127511dc485763b609a7bb4a9542bc3309f28 (patch)
tree      074062860e2b72a2fea522d67282f13b624d53ea
parent    8096dda89619b3643234b62a48efd35bb335ee43 (diff)
download  mongo-445127511dc485763b609a7bb4a9542bc3309f28.tar.gz
SERVER-45546 Do not create HostPools for passive members
-rw-r--r--  src/mongo/s/sharding_task_executor_pool_controller.cpp  |  20
-rw-r--r--  src/mongo/s/sharding_task_executor_pool_controller.h    |   7
2 files changed, 18 insertions(+), 9 deletions(-)
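The core of this change, in isolation: when a replica-set group is built, passive members are filtered out up front, so no HostPool is ever created for them. A minimal sketch of that idea follows, using placeholder types (std::string stands in for HostAndPort, and the notifier State is reduced to the fields this commit reads); it is not the actual MongoDB code.

#include <iostream>
#include <set>
#include <string>
#include <vector>

struct State {
    std::string primary;
    std::vector<std::string> servers;  // stand-in for state.connStr.getServers()
    std::set<std::string> passives;    // passive (non-electable) members
};

struct GroupData {
    std::vector<std::string> members;  // active members only
    std::string primary;
};

GroupData makeGroup(const State& state) {
    GroupData groupData;
    groupData.primary = state.primary;
    // Skip passives entirely: they never enter members, so later code that
    // iterates members never creates or matches a pool for them.
    for (const auto& host : state.servers) {
        if (!state.passives.count(host)) {
            groupData.members.push_back(host);
        }
    }
    return groupData;
}

int main() {
    State state{"a:27017", {"a:27017", "b:27017", "p:27017"}, {"p:27017"}};
    for (const auto& host : makeGroup(state).members) {
        std::cout << host << '\n';  // prints a:27017 and b:27017 only
    }
    return 0;
}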
diff --git a/src/mongo/s/sharding_task_executor_pool_controller.cpp b/src/mongo/s/sharding_task_executor_pool_controller.cpp
index 7e78093c22c..e20a59e128b 100644
--- a/src/mongo/s/sharding_task_executor_pool_controller.cpp
+++ b/src/mongo/s/sharding_task_executor_pool_controller.cpp
@@ -99,10 +99,18 @@ Status ShardingTaskExecutorPoolController::onUpdateMatchingStrategy(const std::s
 
 void ShardingTaskExecutorPoolController::_addGroup(WithLock,
                                                    const ReplicaSetChangeNotifier::State& state) {
-    auto groupData = std::make_shared<GroupData>(state);
+    auto groupData = std::make_shared<GroupData>();
+    groupData->primary = state.primary;
 
-    // Mark each host with this groupData
+    // Find each active member
     for (auto& host : state.connStr.getServers()) {
+        if (!state.passives.count(host)) {
+            groupData->members.push_back(host);
+        }
+    }
+
+    // Mark each host with this groupData
+    for (auto& host : groupData->members) {
         auto& groupAndId = _groupAndIds[host];
 
         invariant(!groupAndId.groupData);
@@ -127,7 +135,7 @@ void ShardingTaskExecutorPoolController::_removeGroup(WithLock, const std::strin
     }
 
     auto& groupData = it->second;
-    for (auto& host : groupData->state.connStr.getServers()) {
+    for (auto& host : groupData->members) {
         auto& groupAndId = getOrInvariant(_groupAndIds, host);
         groupAndId.groupData.reset();
         if (groupAndId.maybeId) {
@@ -221,13 +229,13 @@ auto ShardingTaskExecutorPoolController::updateHost(PoolId id, const HostState&
 
     // If the pool isn't in a groupData, we can return now
     auto groupData = poolData.groupData.lock();
-    if (!groupData || groupData->state.passives.count(poolData.host)) {
+    if (!groupData) {
         return {{poolData.host}, poolData.isAbleToShutdown};
     }
 
     switch (gParameters.matchingStrategy.load()) {
         case MatchingStrategy::kMatchPrimaryNode: {
-            if (groupData->state.primary == poolData.host) {
+            if (groupData->primary == poolData.host) {
                 groupData->target = poolData.target;
             }
         } break;
@@ -254,7 +262,7 @@ auto ShardingTaskExecutorPoolController::updateHost(PoolId id, const HostState&
         std::all_of(groupData->poolIds.begin(), groupData->poolIds.end(), [&](auto otherId) {
             return getOrInvariant(_poolDatas, otherId).isAbleToShutdown;
         });
-    return {groupData->state.connStr.getServers(), shouldShutdown};
+    return {groupData->members, shouldShutdown};
 }
 
 void ShardingTaskExecutorPoolController::removeHost(PoolId id) {
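The updateHost hunks above remove the per-call passive check: since passives never join groupData->members, any pool that still holds a live groupData is active by construction, and the host list returned for group-wide shutdown is already the filtered member list. A condensed sketch of that control flow, again with placeholder types and hypothetical names rather than the real controller API:

#include <memory>
#include <string>
#include <vector>

struct GroupData {
    std::vector<std::string> members;
    std::string primary;
    size_t target = 0;
};

struct PoolData {
    std::string host;
    size_t target = 0;
    std::weak_ptr<GroupData> groupData;  // may expire during a reconfig
};

// Returns the hosts whose pools share this pool's fate.
std::vector<std::string> updateHostSketch(PoolData& poolData) {
    auto groupData = poolData.groupData.lock();
    if (!groupData) {
        // Orphaned pool: only this host is affected. No passive check is
        // needed here anymore; passives simply never get a groupData.
        return {poolData.host};
    }
    // kMatchPrimaryNode: only the primary's demand drives the group target.
    if (groupData->primary == poolData.host) {
        groupData->target = poolData.target;
    }
    return groupData->members;
}

int main() {
    auto group = std::make_shared<GroupData>();
    group->members = {"a:27017", "b:27017"};
    group->primary = "a:27017";
    PoolData pool{"a:27017", 5, group};
    updateHostSketch(pool);  // group->target becomes 5
    return 0;
}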
diff --git a/src/mongo/s/sharding_task_executor_pool_controller.h b/src/mongo/s/sharding_task_executor_pool_controller.h
index d9b82233974..187fbba55de 100644
--- a/src/mongo/s/sharding_task_executor_pool_controller.h
+++ b/src/mongo/s/sharding_task_executor_pool_controller.h
@@ -147,10 +147,11 @@ private:
      * Note that a PoolData can find itself orphaned from its GroupData during a reconfig.
      */
     struct GroupData {
-        explicit GroupData(const ReplicaSetChangeNotifier::State& state_) : state{state_} {}
+        // The members for this group
+        std::vector<HostAndPort> members;
 
-        // The ReplicaSet state for this set
-        ReplicaSetChangeNotifier::State state;
+        // The primary member for this group
+        HostAndPort primary;
 
         // Id for each pool in the set
         stdx::unordered_set<PoolId> poolIds;
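A side effect of this header change: GroupData loses its explicit constructor and no longer retains the full ReplicaSetChangeNotifier::State snapshot, keeping only the two fields the controller reads. It therefore becomes default-constructible and is populated field by field, as _addGroup does above. A toy illustration of that construction pattern, with placeholder type aliases:

#include <cstdint>
#include <string>
#include <unordered_set>
#include <vector>

using HostAndPort = std::string;  // placeholder for mongo::HostAndPort
using PoolId = uint64_t;          // placeholder for the controller's PoolId

struct GroupData {
    // The members for this group
    std::vector<HostAndPort> members;

    // The primary member for this group
    HostAndPort primary;

    // Id for each pool in the set
    std::unordered_set<PoolId> poolIds;
};

int main() {
    // Default-construct, then populate only what the controller consumes;
    // the old version copied an entire ReplicaSetChangeNotifier::State.
    GroupData groupData;
    groupData.primary = "a:27017";
    groupData.members = {"a:27017", "b:27017"};
    return 0;
}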