author     Allison Easton <allison.easton@mongodb.com>       2022-03-15 10:41:45 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-03-15 11:08:12 +0000
commit     39ecdd92807aafc5c75dc1ad2bffad88f293d294 (patch)
tree       8eac3fe4c474cd28ccc80fe09a3c921929746385
parent     80421c5b8e5ac71e16dc005fd961901884891c47 (diff)
SERVER-64149 Remove MigrationReason from MigrateInfo
-rw-r--r--  src/mongo/db/s/balancer/balancer.cpp                               19
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy.h           4
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp    9
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h      6
-rw-r--r--  src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp        3
-rw-r--r--  src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp    3
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy.cpp                        62
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy.h                          21
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy_test.cpp                  183
9 files changed, 157 insertions(+), 153 deletions(-)
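
The refactor in a nutshell: MigrationReason moves out of MigrateInfo and becomes a free enum (gaining a none value), returned alongside the migration vector as a std::pair. Below is a minimal compilable sketch of the new shape; the MongoDB-specific fields and the StatusWith plumbing are stubbed out for illustration and are not the real headers.

#include <cassert>
#include <utility>
#include <vector>

// After SERVER-64149: MigrateInfo no longer carries a reason member or a
// nested MigrationReason enum (real fields elided; stub type).
struct MigrateInfo { /* to/from shards, nss, chunk bounds, version, forceJumbo */ };

// The reason is now a free enum with a new none value for "nothing to move".
enum MigrationReason { none, drain, zoneViolation, chunksImbalance };

typedef std::vector<MigrateInfo> MigrateInfoVector;
typedef std::pair<MigrateInfoVector, MigrationReason> MigrateInfosWithReason;

// Stand-in producer: the real BalancerPolicy::balance returns both pieces
// together, the enum describing why the first migration was scheduled.
MigrateInfosWithReason noWorkToDo() {
    return std::make_pair(MigrateInfoVector{}, MigrationReason::none);
}

int main() {
    // Consumers unpack with structured bindings, as the balancer.cpp and
    // balancer_policy_test.cpp hunks below do.
    auto [migrations, reason] = noWorkToDo();
    assert(migrations.empty() && reason == MigrationReason::none);
    return 0;
}
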
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index 344e0442eac..17409de660e 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -369,8 +369,7 @@ Status Balancer::moveSingleChunk(OperationContext* opCtx,
nss,
chunk,
forceJumbo ? MoveChunkRequest::ForceJumbo::kForceManual
- : MoveChunkRequest::ForceJumbo::kDoNotForce,
- MigrateInfo::chunksImbalance);
+ : MoveChunkRequest::ForceJumbo::kDoNotForce);
auto response =
_commandScheduler
->requestMoveChunk(opCtx, migrateInfo, settings, true /* issuedByRemoteUser */)
@@ -1016,20 +1015,18 @@ BalancerCollectionStatusResponse Balancer::getBalancerStatusForNs(OperationConte
return response;
}
- auto chunksToMove = uassertStatusOK(_chunkSelectionPolicy->selectChunksToMove(opCtx, ns));
- if (chunksToMove.empty()) {
- return response;
- }
+ auto [_, reason] = uassertStatusOK(_chunkSelectionPolicy->selectChunksToMove(opCtx, ns));
- const auto& migrationInfo = chunksToMove.front();
- switch (migrationInfo.reason) {
- case MigrateInfo::drain:
+ switch (reason) {
+ case MigrationReason::none:
+ break;
+ case MigrationReason::drain:
setViolationOnResponse(kBalancerPolicyStatusDraining);
break;
- case MigrateInfo::zoneViolation:
+ case MigrationReason::zoneViolation:
setViolationOnResponse(kBalancerPolicyStatusZoneViolation);
break;
- case MigrateInfo::chunksImbalance:
+ case MigrationReason::chunksImbalance:
setViolationOnResponse(kBalancerPolicyStatusChunksImbalance);
break;
}
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h b/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h
index 3ddafd9f50c..5aed229c202 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h
@@ -80,8 +80,8 @@ public:
* Given a valid namespace returns all the Migrations the balancer would need to perform
* with the current state
*/
- virtual StatusWith<MigrateInfoVector> selectChunksToMove(OperationContext* opCtx,
- const NamespaceString& nss) = 0;
+ virtual StatusWith<MigrateInfosWithReason> selectChunksToMove(OperationContext* opCtx,
+ const NamespaceString& nss) = 0;
/**
* Requests a single chunk to be relocated to a different shard, if possible. If some error
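
The signature change above ripples into each caller: code that previously checked chunksToMove.empty() and then read chunksToMove.front().reason now unpacks the pair and branches on MigrationReason::none (see the getBalancerStatusForNs hunk earlier). A sketch of the new call-site pattern, assuming stub types; the real overload returns StatusWith<MigrateInfosWithReason> and takes an OperationContext and NamespaceString:

#include <iostream>
#include <utility>
#include <vector>

enum MigrationReason { none, drain, zoneViolation, chunksImbalance };
struct MigrateInfo {};
typedef std::pair<std::vector<MigrateInfo>, MigrationReason> MigrateInfosWithReason;

// Stub for the per-namespace overload whose signature changes above.
MigrateInfosWithReason selectChunksToMove() {
    return {{}, MigrationReason::none};
}

int main() {
    // Old style: return early on chunksToMove.empty(), then inspect
    // chunksToMove.front().reason. New style: the reason always arrives with
    // the vector, and MigrationReason::none covers the "balanced" case.
    auto [chunksToMove, reason] = selectChunksToMove();
    switch (reason) {
        case MigrationReason::none:
            std::cout << "collection is balanced\n";
            break;
        case MigrationReason::drain:
        case MigrationReason::zoneViolation:
        case MigrationReason::chunksImbalance:
            std::cout << chunksToMove.size() << " migration(s) pending\n";
            break;
    }
    return 0;
}
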
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index a82646d05fa..c9a41e33c09 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -374,14 +374,14 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToMo
}
candidateChunks.insert(candidateChunks.end(),
- std::make_move_iterator(candidatesStatus.getValue().begin()),
- std::make_move_iterator(candidatesStatus.getValue().end()));
+ std::make_move_iterator(candidatesStatus.getValue().first.begin()),
+ std::make_move_iterator(candidatesStatus.getValue().first.end()));
}
return candidateChunks;
}
-StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToMove(
+StatusWith<MigrateInfosWithReason> BalancerChunkSelectionPolicyImpl::selectChunksToMove(
OperationContext* opCtx, const NamespaceString& nss) {
auto shardStatsStatus = _clusterStats->getStats(opCtx);
if (!shardStatsStatus.isOK()) {
@@ -514,7 +514,8 @@ StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::_getSplitCandidate
return splitCandidates.done();
}
-StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandidatesForCollection(
+StatusWith<MigrateInfosWithReason>
+BalancerChunkSelectionPolicyImpl::_getMigrateCandidatesForCollection(
OperationContext* opCtx,
const NamespaceString& nss,
const ShardStatisticsVector& shardStats,
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h
index dc15cbfc465..76febe0557c 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h
@@ -49,8 +49,8 @@ public:
StatusWith<MigrateInfoVector> selectChunksToMove(
OperationContext* opCtx, stdx::unordered_set<ShardId>* usedShards) override;
- StatusWith<MigrateInfoVector> selectChunksToMove(OperationContext* opCtx,
- const NamespaceString& ns) override;
+ StatusWith<MigrateInfosWithReason> selectChunksToMove(OperationContext* opCtx,
+ const NamespaceString& ns) override;
StatusWith<boost::optional<MigrateInfo>> selectSpecificChunkToMove(
OperationContext* opCtx, const NamespaceString& nss, const ChunkType& chunk) override;
@@ -73,7 +73,7 @@ private:
* Synchronous method, which iterates the collection's chunks and uses the cluster statistics to
* figure out where to place them.
*/
- StatusWith<MigrateInfoVector> _getMigrateCandidatesForCollection(
+ StatusWith<MigrateInfosWithReason> _getMigrateCandidatesForCollection(
OperationContext* opCtx,
const NamespaceString& nss,
const ShardStatisticsVector& shardStats,
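
With _getMigrateCandidatesForCollection also returning the pair, the cluster-wide selectChunksToMove overload keeps its MigrateInfoVector return type and simply discards each per-collection reason by moving out of .first (the candidateChunks.insert hunk earlier). A simplified sketch of that adaptation, with hypothetical stand-in functions:

#include <iterator>
#include <utility>
#include <vector>

enum MigrationReason { none, drain, zoneViolation, chunksImbalance };
struct MigrateInfo {};
typedef std::vector<MigrateInfo> MigrateInfoVector;
typedef std::pair<MigrateInfoVector, MigrationReason> MigrateInfosWithReason;

// Stand-in for the per-collection candidate selection (always yields one
// migration here, purely for illustration).
MigrateInfosWithReason getMigrateCandidatesForCollection() {
    return {MigrateInfoVector(1), MigrationReason::chunksImbalance};
}

// The aggregating overload still returns a bare vector: it moves candidates
// out of .first and drops the per-collection reason.
MigrateInfoVector selectChunksToMove(int numCollections) {
    MigrateInfoVector candidateChunks;
    for (int i = 0; i < numCollections; ++i) {
        auto candidates = getMigrateCandidatesForCollection();
        candidateChunks.insert(candidateChunks.end(),
                               std::make_move_iterator(candidates.first.begin()),
                               std::make_move_iterator(candidates.first.end()));
    }
    return candidateChunks;
}

int main() {
    return selectChunksToMove(3).size() == 3 ? 0 : 1;
}
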
diff --git a/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp b/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
index 467be0cf563..dc7a51d287e 100644
--- a/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
@@ -77,8 +77,7 @@ public:
BSON("x" << min),
BSON("x" << min + 10),
ChunkVersion(1, 1, OID::gen(), Timestamp(10)),
- MoveChunkRequest::ForceJumbo::kDoNotForce,
- MigrateInfo::chunksImbalance);
+ MoveChunkRequest::ForceJumbo::kDoNotForce);
}
MoveChunkSettings getMoveChunkSettings(int64_t maxChunkSize = kDefaultMaxChunkSizeBytes) {
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
index bb891bcff60..41d7ea49d58 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
@@ -716,8 +716,7 @@ private:
chunkToMove->range.getMin(),
chunkToMove->range.getMax(),
version,
- MoveChunkRequest::ForceJumbo::kForceBalancer,
- MigrateInfo::chunksImbalance);
+ MoveChunkRequest::ForceJumbo::kForceBalancer);
}
ChunkRange asMergedRange() const {
diff --git a/src/mongo/db/s/balancer/balancer_policy.cpp b/src/mongo/db/s/balancer/balancer_policy.cpp
index e9d75310972..96619b52c80 100644
--- a/src/mongo/db/s/balancer/balancer_policy.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy.cpp
@@ -397,26 +397,27 @@ MigrateInfo chooseRandomMigration(const ShardStatisticsVector& shardStats,
return {destShardId,
distribution.nss(),
chunks[getRandomIndex(chunks.size())],
- MoveChunkRequest::ForceJumbo::kDoNotForce,
- MigrateInfo::chunksImbalance};
+ MoveChunkRequest::ForceJumbo::kDoNotForce};
}
-vector<MigrateInfo> BalancerPolicy::balance(const ShardStatisticsVector& shardStats,
- const DistributionStatus& distribution,
- stdx::unordered_set<ShardId>* usedShards,
- bool forceJumbo) {
+MigrateInfosWithReason BalancerPolicy::balance(const ShardStatisticsVector& shardStats,
+ const DistributionStatus& distribution,
+ stdx::unordered_set<ShardId>* usedShards,
+ bool forceJumbo) {
vector<MigrateInfo> migrations;
+ MigrationReason firstReason = MigrationReason::none;
if (MONGO_unlikely(balancerShouldReturnRandomMigrations.shouldFail()) &&
!distribution.nss().isConfigDB()) {
LOGV2_DEBUG(21881, 1, "balancerShouldReturnRandomMigrations failpoint is set");
if (shardStats.size() < 2)
- return migrations;
+ return std::make_pair(std::move(migrations), firstReason);
migrations.push_back(chooseRandomMigration(shardStats, distribution));
+ firstReason = MigrationReason::chunksImbalance;
- return migrations;
+ return std::make_pair(std::move(migrations), firstReason);
}
// 1) Check for shards, which are in draining mode
@@ -461,11 +462,11 @@ vector<MigrateInfo> BalancerPolicy::balance(const ShardStatisticsVector& shardSt
}
invariant(to != stat.shardId);
- migrations.emplace_back(to,
- distribution.nss(),
- chunk,
- MoveChunkRequest::ForceJumbo::kForceBalancer,
- MigrateInfo::drain);
+ migrations.emplace_back(
+ to, distribution.nss(), chunk, MoveChunkRequest::ForceJumbo::kForceBalancer);
+ if (firstReason == MigrationReason::none) {
+ firstReason = MigrationReason::drain;
+ }
invariant(usedShards->insert(stat.shardId).second);
invariant(usedShards->insert(to).second);
break;
@@ -528,8 +529,10 @@ vector<MigrateInfo> BalancerPolicy::balance(const ShardStatisticsVector& shardSt
distribution.nss(),
chunk,
forceJumbo ? MoveChunkRequest::ForceJumbo::kForceBalancer
- : MoveChunkRequest::ForceJumbo::kDoNotForce,
- MigrateInfo::zoneViolation);
+ : MoveChunkRequest::ForceJumbo::kDoNotForce);
+ if (firstReason == MigrationReason::none) {
+ firstReason = MigrationReason::zoneViolation;
+ }
invariant(usedShards->insert(stat.shardId).second);
invariant(usedShards->insert(to).second);
break;
@@ -583,11 +586,14 @@ vector<MigrateInfo> BalancerPolicy::balance(const ShardStatisticsVector& shardSt
&migrations,
usedShards,
forceJumbo ? MoveChunkRequest::ForceJumbo::kForceBalancer
- : MoveChunkRequest::ForceJumbo::kDoNotForce))
- ;
+ : MoveChunkRequest::ForceJumbo::kDoNotForce)) {
+ if (firstReason == MigrationReason::none) {
+ firstReason = MigrationReason::chunksImbalance;
+ }
+ }
}
- return migrations;
+ return std::make_pair(std::move(migrations), firstReason);
}
boost::optional<MigrateInfo> BalancerPolicy::balanceSingleChunk(
@@ -602,11 +608,8 @@ boost::optional<MigrateInfo> BalancerPolicy::balanceSingleChunk(
return boost::optional<MigrateInfo>();
}
- return MigrateInfo(newShardId,
- distribution.nss(),
- chunk,
- MoveChunkRequest::ForceJumbo::kDoNotForce,
- MigrateInfo::chunksImbalance);
+ return MigrateInfo(
+ newShardId, distribution.nss(), chunk, MoveChunkRequest::ForceJumbo::kDoNotForce);
}
bool BalancerPolicy::_singleZoneBalance(const ShardStatisticsVector& shardStats,
@@ -678,8 +681,7 @@ bool BalancerPolicy::_singleZoneBalance(const ShardStatisticsVector& shardStats,
continue;
}
- migrations->emplace_back(
- to, distribution.nss(), chunk, forceJumbo, MigrateInfo::chunksImbalance);
+ migrations->emplace_back(to, distribution.nss(), chunk, forceJumbo);
invariant(usedShards->insert(chunk.getShard()).second);
invariant(usedShards->insert(to).second);
return true;
@@ -710,8 +712,7 @@ string ZoneRange::toString() const {
MigrateInfo::MigrateInfo(const ShardId& a_to,
const NamespaceString& a_nss,
const ChunkType& a_chunk,
- const MoveChunkRequest::ForceJumbo a_forceJumbo,
- MigrationReason a_reason)
+ const MoveChunkRequest::ForceJumbo a_forceJumbo)
: nss(a_nss), uuid(a_chunk.getCollectionUUID()) {
invariant(a_to.isValid());
@@ -722,7 +723,6 @@ MigrateInfo::MigrateInfo(const ShardId& a_to,
maxKey = a_chunk.getMax();
version = a_chunk.getVersion();
forceJumbo = a_forceJumbo;
- reason = a_reason;
}
MigrateInfo::MigrateInfo(const ShardId& a_to,
@@ -732,15 +732,13 @@ MigrateInfo::MigrateInfo(const ShardId& a_to,
const BSONObj& a_min,
const BSONObj& a_max,
const ChunkVersion& a_version,
- const MoveChunkRequest::ForceJumbo a_forceJumbo,
- MigrationReason a_reason)
+ const MoveChunkRequest::ForceJumbo a_forceJumbo)
: nss(a_nss),
uuid(a_uuid),
minKey(a_min),
maxKey(a_max),
version(a_version),
- forceJumbo(a_forceJumbo),
- reason(a_reason) {
+ forceJumbo(a_forceJumbo) {
invariant(a_to.isValid());
invariant(a_from.isValid());
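
The balancer_policy.cpp hunks above thread a firstReason through the three balancing phases (draining shards, zone violations, chunk imbalance), recording only the reason of the first migration produced. A minimal sketch of that accumulation pattern, with each phase reduced to a boolean for illustration:

#include <cassert>
#include <utility>
#include <vector>

enum MigrationReason { none, drain, zoneViolation, chunksImbalance };
struct MigrateInfo {};
typedef std::pair<std::vector<MigrateInfo>, MigrationReason> MigrateInfosWithReason;

// Phases run in priority order; firstReason is set exactly once, by whichever
// phase emits the first migration, mirroring BalancerPolicy::balance.
MigrateInfosWithReason balance(bool hasDraining, bool hasZoneViolation, bool hasImbalance) {
    std::vector<MigrateInfo> migrations;
    MigrationReason firstReason = MigrationReason::none;

    if (hasDraining) {
        migrations.emplace_back();
        if (firstReason == MigrationReason::none)
            firstReason = MigrationReason::drain;
    }
    if (hasZoneViolation) {
        migrations.emplace_back();
        if (firstReason == MigrationReason::none)
            firstReason = MigrationReason::zoneViolation;
    }
    if (hasImbalance) {
        migrations.emplace_back();
        if (firstReason == MigrationReason::none)
            firstReason = MigrationReason::chunksImbalance;
    }
    return std::make_pair(std::move(migrations), firstReason);
}

int main() {
    // Even when a later phase also schedules moves, the reported reason stays
    // the one attached to the first migration (here: drain).
    auto [migrations, reason] = balance(true, false, true);
    assert(migrations.size() == 2 && reason == MigrationReason::drain);
    return 0;
}
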
diff --git a/src/mongo/db/s/balancer/balancer_policy.h b/src/mongo/db/s/balancer/balancer_policy.h
index 6b81068e43f..4dc7a575e74 100644
--- a/src/mongo/db/s/balancer/balancer_policy.h
+++ b/src/mongo/db/s/balancer/balancer_policy.h
@@ -56,13 +56,10 @@ struct ZoneRange {
};
struct MigrateInfo {
- enum MigrationReason { drain, zoneViolation, chunksImbalance };
-
MigrateInfo(const ShardId& a_to,
const NamespaceString& a_nss,
const ChunkType& a_chunk,
- MoveChunkRequest::ForceJumbo a_forceJumbo,
- MigrationReason a_reason);
+ MoveChunkRequest::ForceJumbo a_forceJumbo);
MigrateInfo(const ShardId& a_to,
const ShardId& a_from,
@@ -71,8 +68,7 @@ struct MigrateInfo {
const BSONObj& a_min,
const BSONObj& a_max,
const ChunkVersion& a_version,
- MoveChunkRequest::ForceJumbo a_forceJumbo,
- MigrationReason a_reason);
+ MoveChunkRequest::ForceJumbo a_forceJumbo);
std::string getName() const;
@@ -88,11 +84,14 @@ struct MigrateInfo {
BSONObj maxKey;
ChunkVersion version;
MoveChunkRequest::ForceJumbo forceJumbo;
- MigrationReason reason;
};
+enum MigrationReason { none, drain, zoneViolation, chunksImbalance };
+
typedef std::vector<MigrateInfo> MigrateInfoVector;
+typedef std::pair<MigrateInfoVector, MigrationReason> MigrateInfosWithReason;
+
typedef std::vector<BSONObj> SplitPoints;
/**
@@ -385,10 +384,10 @@ public:
* used for migrations. Used so we don't return multiple conflicting migrations for the same
* shard.
*/
- static std::vector<MigrateInfo> balance(const ShardStatisticsVector& shardStats,
- const DistributionStatus& distribution,
- stdx::unordered_set<ShardId>* usedShards,
- bool forceJumbo);
+ static MigrateInfosWithReason balance(const ShardStatisticsVector& shardStats,
+ const DistributionStatus& distribution,
+ stdx::unordered_set<ShardId>* usedShards,
+ bool forceJumbo);
/**
* Using the specified distribution information, returns a suggested better location for the
diff --git a/src/mongo/db/s/balancer/balancer_policy_test.cpp b/src/mongo/db/s/balancer/balancer_policy_test.cpp
index 2c2f0895684..0a53200149e 100644
--- a/src/mongo/db/s/balancer/balancer_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy_test.cpp
@@ -111,10 +111,10 @@ std::pair<ShardStatisticsVector, ShardToChunksMap> generateCluster(
return std::make_pair(std::move(shardStats), std::move(chunkMap));
}
-std::vector<MigrateInfo> balanceChunks(const ShardStatisticsVector& shardStats,
- const DistributionStatus& distribution,
- bool shouldAggressivelyBalance,
- bool forceJumbo) {
+MigrateInfosWithReason balanceChunks(const ShardStatisticsVector& shardStats,
+ const DistributionStatus& distribution,
+ bool shouldAggressivelyBalance,
+ bool forceJumbo) {
stdx::unordered_set<ShardId> usedShards;
return BalancerPolicy::balance(shardStats, distribution, &usedShards, forceJumbo);
}
@@ -125,14 +125,14 @@ TEST(BalancerPolicy, Basic) {
{ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0},
{ShardStatistics(kShardId2, kNoMaxSize, 3, false, emptyTagSet, emptyShardVersion), 3}});
- const auto migrations(
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false));
+ const auto [migrations, reason] =
+ balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::chunksImbalance, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::chunksImbalance, reason);
}
TEST(BalancerPolicy, SmallClusterShouldBePerfectlyBalanced) {
@@ -141,26 +141,32 @@ TEST(BalancerPolicy, SmallClusterShouldBePerfectlyBalanced) {
{ShardStatistics(kShardId1, kNoMaxSize, 2, false, emptyTagSet, emptyShardVersion), 2},
{ShardStatistics(kShardId2, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0}});
- const auto migrations(
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false));
+ const auto [migrations, reason] =
+ balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId1, migrations[0].from);
ASSERT_EQ(kShardId2, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::chunksImbalance, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::chunksImbalance, reason);
}
TEST(BalancerPolicy, SingleChunkShouldNotMove) {
auto cluster = generateCluster(
{{ShardStatistics(kShardId0, kNoMaxSize, 1, false, emptyTagSet, emptyShardVersion), 1},
{ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0}});
-
- ASSERT(balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), true, false)
- .empty());
- ASSERT(
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false)
- .empty());
+ {
+ auto [migrations, reason] = balanceChunks(
+ cluster.first, DistributionStatus(kNamespace, cluster.second), true, false);
+ ASSERT(migrations.empty());
+ ASSERT_EQ(MigrationReason::none, reason);
+ }
+ {
+ auto [migrations, reason] = balanceChunks(
+ cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
+ ASSERT(migrations.empty());
+ ASSERT_EQ(MigrationReason::none, reason);
+ }
}
TEST(BalancerPolicy, BalanceThresholdObeyed) {
@@ -170,11 +176,18 @@ TEST(BalancerPolicy, BalanceThresholdObeyed) {
{ShardStatistics(kShardId2, kNoMaxSize, 1, false, emptyTagSet, emptyShardVersion), 1},
{ShardStatistics(kShardId3, kNoMaxSize, 1, false, emptyTagSet, emptyShardVersion), 1}});
- ASSERT(balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), true, false)
- .empty());
- ASSERT(
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false)
- .empty());
+ {
+ auto [migrations, reason] = balanceChunks(
+ cluster.first, DistributionStatus(kNamespace, cluster.second), true, false);
+ ASSERT(migrations.empty());
+ ASSERT_EQ(MigrationReason::none, reason);
+ }
+ {
+ auto [migrations, reason] = balanceChunks(
+ cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
+ ASSERT(migrations.empty());
+ ASSERT_EQ(MigrationReason::none, reason);
+ }
}
TEST(BalancerPolicy, ParallelBalancing) {
@@ -184,21 +197,20 @@ TEST(BalancerPolicy, ParallelBalancing) {
{ShardStatistics(kShardId2, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0},
{ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0}});
- const auto migrations(
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false));
+ const auto [migrations, reason] =
+ balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT_EQ(2U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId2, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::chunksImbalance, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::chunksImbalance, reason);
ASSERT_EQ(kShardId1, migrations[1].from);
ASSERT_EQ(kShardId3, migrations[1].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[1].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), migrations[1].maxKey);
- ASSERT_EQ(MigrateInfo::chunksImbalance, migrations[1].reason);
}
TEST(BalancerPolicy, ParallelBalancingDoesNotPutChunksOnShardsAboveTheOptimal) {
@@ -210,21 +222,20 @@ TEST(BalancerPolicy, ParallelBalancingDoesNotPutChunksOnShardsAboveTheOptimal) {
{ShardStatistics(kShardId4, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0},
{ShardStatistics(kShardId5, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0}});
- const auto migrations(
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false));
+ const auto [migrations, reason] =
+ balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT_EQ(2U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId4, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::chunksImbalance, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::chunksImbalance, reason);
ASSERT_EQ(kShardId1, migrations[1].from);
ASSERT_EQ(kShardId5, migrations[1].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[1].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), migrations[1].maxKey);
- ASSERT_EQ(MigrateInfo::chunksImbalance, migrations[1].reason);
}
TEST(BalancerPolicy, ParallelBalancingDoesNotMoveChunksFromShardsBelowOptimal) {
@@ -234,15 +245,15 @@ TEST(BalancerPolicy, ParallelBalancingDoesNotMoveChunksFromShardsBelowOptimal) {
{ShardStatistics(kShardId2, kNoMaxSize, 5, false, emptyTagSet, emptyShardVersion), 5},
{ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0}});
- const auto migrations(
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false));
+ const auto [migrations, reason] =
+ balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId3, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::chunksImbalance, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::chunksImbalance, reason);
}
TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNecessary) {
@@ -254,15 +265,15 @@ TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNe
// Here kShardId0 would have been selected as a donor
stdx::unordered_set<ShardId> usedShards{kShardId0};
- const auto migrations(BalancerPolicy::balance(
- cluster.first, DistributionStatus(kNamespace, cluster.second), &usedShards, false));
+ const auto [migrations, reason] = BalancerPolicy::balance(
+ cluster.first, DistributionStatus(kNamespace, cluster.second), &usedShards, false);
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId1, migrations[0].from);
ASSERT_EQ(kShardId2, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::chunksImbalance, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::chunksImbalance, reason);
}
TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNotNecessary) {
@@ -274,8 +285,8 @@ TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNo
// Here kShardId0 would have been selected as a donor
stdx::unordered_set<ShardId> usedShards{kShardId0};
- const auto migrations(BalancerPolicy::balance(
- cluster.first, DistributionStatus(kNamespace, cluster.second), &usedShards, false));
+ const auto [migrations, reason] = BalancerPolicy::balance(
+ cluster.first, DistributionStatus(kNamespace, cluster.second), &usedShards, false);
ASSERT_EQ(0U, migrations.size());
}
@@ -288,15 +299,15 @@ TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseDestinationShards) {
// Here kShardId2 would have been selected as a recipient
stdx::unordered_set<ShardId> usedShards{kShardId2};
- const auto migrations(BalancerPolicy::balance(
- cluster.first, DistributionStatus(kNamespace, cluster.second), &usedShards, false));
+ const auto [migrations, reason] = BalancerPolicy::balance(
+ cluster.first, DistributionStatus(kNamespace, cluster.second), &usedShards, false);
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId3, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::chunksImbalance, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::chunksImbalance, reason);
}
TEST(BalancerPolicy, JumboChunksNotMoved) {
@@ -309,14 +320,14 @@ TEST(BalancerPolicy, JumboChunksNotMoved) {
cluster.second[kShardId0][2].setJumbo(true);
cluster.second[kShardId0][3].setJumbo(true);
- const auto migrations(
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false));
+ const auto [migrations, reason] =
+ balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::chunksImbalance, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::chunksImbalance, reason);
}
TEST(BalancerPolicy, JumboChunksNotMovedParallel) {
@@ -336,21 +347,20 @@ TEST(BalancerPolicy, JumboChunksNotMovedParallel) {
cluster.second[kShardId2][2].setJumbo(false); // Only chunk 1 is not jumbo
cluster.second[kShardId2][3].setJumbo(true);
- const auto migrations(
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false));
+ const auto [migrations, reason] =
+ balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT_EQ(2U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::chunksImbalance, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::chunksImbalance, reason);
ASSERT_EQ(kShardId2, migrations[1].from);
ASSERT_EQ(kShardId3, migrations[1].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][2].getMin(), migrations[1].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][2].getMax(), migrations[1].maxKey);
- ASSERT_EQ(MigrateInfo::chunksImbalance, migrations[1].reason);
}
TEST(BalancerPolicy, DrainingSingleChunk) {
@@ -359,14 +369,14 @@ TEST(BalancerPolicy, DrainingSingleChunk) {
{{ShardStatistics(kShardId0, kNoMaxSize, 2, true, emptyTagSet, emptyShardVersion), 1},
{ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 5}});
- const auto migrations(
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false));
+ const auto [migrations, reason] =
+ balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::drain, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::drain, reason);
}
TEST(BalancerPolicy, DrainingSingleChunkPerShard) {
@@ -377,21 +387,20 @@ TEST(BalancerPolicy, DrainingSingleChunkPerShard) {
{ShardStatistics(kShardId2, kNoMaxSize, 2, true, emptyTagSet, emptyShardVersion), 1},
{ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 5}});
- const auto migrations(
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false));
+ const auto [migrations, reason] =
+ balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT_EQ(2U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::drain, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::drain, reason);
ASSERT_EQ(kShardId2, migrations[1].from);
ASSERT_EQ(kShardId3, migrations[1].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[1].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), migrations[1].maxKey);
- ASSERT_EQ(MigrateInfo::drain, migrations[1].reason);
}
TEST(BalancerPolicy, DrainingWithTwoChunksFirstOneSelected) {
@@ -400,14 +409,14 @@ TEST(BalancerPolicy, DrainingWithTwoChunksFirstOneSelected) {
{{ShardStatistics(kShardId0, kNoMaxSize, 2, true, emptyTagSet, emptyShardVersion), 2},
{ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 5}});
- const auto migrations(
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false));
+ const auto [migrations, reason] =
+ balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::drain, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::drain, reason);
}
TEST(BalancerPolicy, DrainingMultipleShardsFirstOneSelected) {
@@ -418,14 +427,15 @@ TEST(BalancerPolicy, DrainingMultipleShardsFirstOneSelected) {
{ShardStatistics(kShardId1, kNoMaxSize, 5, true, emptyTagSet, emptyShardVersion), 2},
{ShardStatistics(kShardId2, kNoMaxSize, 5, false, emptyTagSet, emptyShardVersion), 16}});
- const auto migrations(
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false));
+ const auto [migrations, reason] =
+ balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
+
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId2, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::drain, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::drain, reason);
}
TEST(BalancerPolicy, DrainingMultipleShardsWontAcceptChunks) {
@@ -435,8 +445,8 @@ TEST(BalancerPolicy, DrainingMultipleShardsWontAcceptChunks) {
{ShardStatistics(kShardId1, kNoMaxSize, 0, true, emptyTagSet, emptyShardVersion), 0},
{ShardStatistics(kShardId2, kNoMaxSize, 0, true, emptyTagSet, emptyShardVersion), 0}});
- const auto migrations(
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false));
+ const auto [migrations, reason] =
+ balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT(migrations.empty());
}
@@ -450,13 +460,13 @@ TEST(BalancerPolicy, DrainingSingleAppropriateShardFoundDueToTag) {
ASSERT_OK(distribution.addRangeToZone(ZoneRange(
cluster.second[kShardId2][0].getMin(), cluster.second[kShardId2][0].getMax(), "LAX")));
- const auto migrations(balanceChunks(cluster.first, distribution, false, false));
+ const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId2, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::drain, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::drain, reason);
}
TEST(BalancerPolicy, DrainingNoAppropriateShardsFoundDueToTag) {
@@ -469,7 +479,7 @@ TEST(BalancerPolicy, DrainingNoAppropriateShardsFoundDueToTag) {
ASSERT_OK(distribution.addRangeToZone(ZoneRange(
cluster.second[kShardId2][0].getMin(), cluster.second[kShardId2][0].getMax(), "SEA")));
- const auto migrations(balanceChunks(cluster.first, distribution, false, false));
+ const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
ASSERT(migrations.empty());
}
@@ -480,8 +490,8 @@ TEST(BalancerPolicy, NoBalancingDueToAllNodesEitherDrainingOrMaxedOut) {
{ShardStatistics(kShardId1, 1, 1, false, emptyTagSet, emptyShardVersion), 6},
{ShardStatistics(kShardId2, kNoMaxSize, 1, true, emptyTagSet, emptyShardVersion), 1}});
- const auto migrations(
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false));
+ const auto [migrations, reason] =
+ balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT(migrations.empty());
}
@@ -494,8 +504,8 @@ TEST(BalancerPolicy, BalancerRespectsMaxShardSizeOnlyBalanceToNonMaxed) {
{ShardStatistics(kShardId1, kNoMaxSize, 5, false, emptyTagSet, emptyShardVersion), 5},
{ShardStatistics(kShardId2, kNoMaxSize, 10, false, emptyTagSet, emptyShardVersion), 10}});
- const auto migrations(
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false));
+ const auto [migrations, reason] =
+ balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId2, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
@@ -512,8 +522,8 @@ TEST(BalancerPolicy, BalancerRespectsMaxShardSizeWhenAllBalanced) {
{ShardStatistics(kShardId1, kNoMaxSize, 4, false, emptyTagSet, emptyShardVersion), 4},
{ShardStatistics(kShardId2, kNoMaxSize, 4, false, emptyTagSet, emptyShardVersion), 4}});
- const auto migrations(
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false));
+ const auto [migrations, reason] =
+ balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT(migrations.empty());
}
@@ -528,13 +538,13 @@ TEST(BalancerPolicy, BalancerRespectsTagsWhenDraining) {
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 7), "a")));
ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 8), kMaxBSONKey, "b")));
- const auto migrations(balanceChunks(cluster.first, distribution, false, false));
+ const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId1, migrations[0].from);
ASSERT_EQ(kShardId0, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::drain, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::drain, reason);
}
TEST(BalancerPolicy, BalancerRespectsTagPolicyBeforeImbalance) {
@@ -548,13 +558,13 @@ TEST(BalancerPolicy, BalancerRespectsTagPolicyBeforeImbalance) {
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 100), "a")));
- const auto migrations(balanceChunks(cluster.first, distribution, false, false));
+ const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId2, migrations[0].from);
ASSERT_EQ(kShardId0, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::zoneViolation, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::zoneViolation, reason);
}
TEST(BalancerPolicy, BalancerFixesIncorrectTagsWithCrossShardViolationOfTags) {
@@ -569,13 +579,13 @@ TEST(BalancerPolicy, BalancerFixesIncorrectTagsWithCrossShardViolationOfTags) {
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 1), "b")));
ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 8), kMaxBSONKey, "a")));
- const auto migrations(balanceChunks(cluster.first, distribution, false, false));
+ const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId2, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::zoneViolation, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::zoneViolation, reason);
}
TEST(BalancerPolicy, BalancerFixesIncorrectTagsInOtherwiseBalancedCluster) {
@@ -588,13 +598,13 @@ TEST(BalancerPolicy, BalancerFixesIncorrectTagsInOtherwiseBalancedCluster) {
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 10), "a")));
- const auto migrations(balanceChunks(cluster.first, distribution, false, false));
+ const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId2, migrations[0].from);
ASSERT_EQ(kShardId0, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::zoneViolation, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::zoneViolation, reason);
}
TEST(BalancerPolicy, BalancerTagAlreadyBalanced) {
@@ -605,7 +615,7 @@ TEST(BalancerPolicy, BalancerTagAlreadyBalanced) {
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, kMaxBSONKey, "a")));
- ASSERT(balanceChunks(cluster.first, distribution, false, false).empty());
+ ASSERT(balanceChunks(cluster.first, distribution, false, false).first.empty());
}
TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleTags) {
@@ -621,7 +631,7 @@ TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleTags) {
ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 1), BSON("x" << 3), "b")));
ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 3), BSON("x" << 5), "c")));
- const auto migrations(balanceChunks(cluster.first, distribution, false, false));
+ const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
@@ -645,7 +655,8 @@ TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleTagsSkipTagWithShardInU
ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 3), BSON("x" << 5), "c")));
stdx::unordered_set<ShardId> usedShards{kShardId1};
- const auto migrations(BalancerPolicy::balance(cluster.first, distribution, &usedShards, false));
+ const auto [migrations, reason] =
+ BalancerPolicy::balance(cluster.first, distribution, &usedShards, false);
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
@@ -665,20 +676,20 @@ TEST(BalancerPolicy, BalancerFixesIncorrectTagsInOtherwiseBalancedClusterParalle
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 20), "a")));
- const auto migrations(balanceChunks(cluster.first, distribution, false, false));
+ const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
ASSERT_EQ(2U, migrations.size());
ASSERT_EQ(kShardId2, migrations[0].from);
ASSERT_EQ(kShardId0, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), migrations[0].maxKey);
- ASSERT_EQ(MigrateInfo::zoneViolation, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::zoneViolation, reason);
ASSERT_EQ(kShardId3, migrations[1].from);
ASSERT_EQ(kShardId1, migrations[1].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId3][0].getMin(), migrations[1].minKey);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId3][0].getMax(), migrations[1].maxKey);
- ASSERT_EQ(MigrateInfo::zoneViolation, migrations[0].reason);
+ ASSERT_EQ(MigrationReason::zoneViolation, reason);
}
TEST(BalancerPolicy, BalancerHandlesNoShardsWithTag) {
@@ -690,7 +701,7 @@ TEST(BalancerPolicy, BalancerHandlesNoShardsWithTag) {
ASSERT_OK(
distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 7), "NonExistentZone")));
- ASSERT(balanceChunks(cluster.first, distribution, false, false).empty());
+ ASSERT(balanceChunks(cluster.first, distribution, false, false).first.empty());
}
TEST(DistributionStatus, AddTagRangeOverlap) {