Diffstat (limited to 'src/mongo/db/s/balancer/balancer_policy_test.cpp')
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy_test.cpp  719
1 file changed, 356 insertions, 363 deletions
diff --git a/src/mongo/db/s/balancer/balancer_policy_test.cpp b/src/mongo/db/s/balancer/balancer_policy_test.cpp
index 88c504b151d..2fff8e30803 100644
--- a/src/mongo/db/s/balancer/balancer_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy_test.cpp
@@ -30,6 +30,7 @@
#include "mongo/db/keypattern.h"
#include "mongo/db/s/balancer/balancer_policy.h"
#include "mongo/platform/random.h"
+#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/unittest/unittest.h"
@@ -64,11 +65,10 @@ const NamespaceString kNamespace("TestDB", "TestColl");
* [MinKey, 1), [1, 2), [2, 3) ... [N - 1, MaxKey)
*/
std::pair<ShardStatisticsVector, ShardToChunksMap> generateCluster(
- const vector<std::pair<ShardStatistics, size_t>>& shardsAndNumChunks) {
- int64_t totalNumChunks = 0;
- for (const auto& entry : shardsAndNumChunks) {
- totalNumChunks += std::get<1>(entry);
- }
+ const vector<ShardStatistics>& statsVector) {
+
+ // Distribute one chunk per shard, no matter the owned data size.
+ int64_t totalNumChunks = statsVector.size();
ShardToChunksMap chunkMap;
ShardStatisticsVector shardStats;
@@ -80,28 +80,21 @@ std::pair<ShardStatisticsVector, ShardToChunksMap> generateCluster(
const KeyPattern shardKeyPattern(BSON("x" << 1));
- for (auto it = shardsAndNumChunks.begin(); it != shardsAndNumChunks.end(); it++) {
- ShardStatistics shard = std::move(it->first);
- const size_t numChunks = it->second;
-
+ for (const auto& shard : statsVector) {
// Ensure that an entry is created
chunkMap[shard.shardId];
- for (size_t i = 0; i < numChunks; i++, currentChunk++) {
- ChunkType chunk;
-
- chunk.setCollectionUUID(uuid);
- chunk.setMin(currentChunk == 0 ? shardKeyPattern.globalMin()
- : BSON("x" << currentChunk));
- chunk.setMax(currentChunk == totalNumChunks - 1 ? shardKeyPattern.globalMax()
- : BSON("x" << currentChunk + 1));
- chunk.setShard(shard.shardId);
- chunk.setVersion(chunkVersion);
+ ChunkType chunk;
- chunkVersion.incMajor();
+ chunk.setCollectionUUID(uuid);
+ chunk.setMin(currentChunk == 0 ? shardKeyPattern.globalMin() : BSON("x" << currentChunk));
+ chunk.setMax(currentChunk == totalNumChunks - 1 ? shardKeyPattern.globalMax()
+ : BSON("x" << ++currentChunk));
+ chunk.setShard(shard.shardId);
+ chunk.setVersion(chunkVersion);
- chunkMap[shard.shardId].push_back(std::move(chunk));
- }
+ chunkVersion.incMajor();
+ chunkMap[shard.shardId].push_back(std::move(chunk));
shardStats.push_back(std::move(shard));
}
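
With this refactor, generateCluster() takes a plain vector of ShardStatistics and lays out exactly one chunk per shard, so a test's load profile is expressed purely in owned bytes rather than chunk counts. A minimal call-site sketch, assuming only identifiers already declared in this file (kShardId0/kShardId1, emptyZoneSet, emptyShardVersion, ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes):

    // Sketch: two shards, one holding 4x the default max chunk size, one empty.
    auto cluster = generateCluster(
        {ShardStatistics(kShardId0,
                         4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
                         false,
                         emptyZoneSet,
                         emptyShardVersion,
                         ShardStatistics::use_bytes_t()),
         ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion)});
    // cluster.first  : ShardStatisticsVector with the two entries above.
    // cluster.second : ShardToChunksMap holding one chunk per shard,
    //                  kShardId0 -> [MinKey, 1), kShardId1 -> [1, MaxKey).
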
@@ -118,20 +111,45 @@ stdx::unordered_set<ShardId> getAllShardIds(const ShardStatisticsVector& shardSt
return shards;
}
+CollectionDataSizeInfoForBalancing buildDataSizeInfoForBalancingFromShardStats(
+ const ShardStatisticsVector& shardStats) {
+ std::map<ShardId, int64_t> collSizePerShard;
+ for (const auto& shard : shardStats) {
+ collSizePerShard.try_emplace(shard.shardId, shard.currSizeBytes);
+ }
+
+ return CollectionDataSizeInfoForBalancing(std::move(collSizePerShard),
+ ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes);
+}
+
MigrateInfosWithReason balanceChunks(const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution,
bool shouldAggressivelyBalance,
bool forceJumbo) {
auto availableShards = getAllShardIds(shardStats);
- return BalancerPolicy::balance(
- shardStats, distribution, boost::none /* collDataSizeInfo */, &availableShards, forceJumbo);
+
+ return BalancerPolicy::balance(shardStats,
+ distribution,
+ buildDataSizeInfoForBalancingFromShardStats(shardStats),
+ &availableShards,
+ forceJumbo);
}
TEST(BalancerPolicy, Basic) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId2, 3, false, emptyZoneSet, emptyShardVersion), 3}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion),
+ ShardStatistics(kShardId2,
+ 3 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
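
The helper above replaces the old boost::none placeholder: every test now hands BalancerPolicy::balance() a CollectionDataSizeInfoForBalancing derived from the per-shard sizes. A hand-rolled equivalent, assuming only the constructor used in buildDataSizeInfoForBalancingFromShardStats() (a per-shard size map plus the max chunk size that acts as the balancing threshold):

    // Sketch: the same information the helper derives from ShardStatistics.
    std::map<ShardId, int64_t> collSizePerShard{
        {kShardId0, 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes},
        {kShardId1, 0}};
    const CollectionDataSizeInfoForBalancing collDataSizeInfo(
        std::move(collSizePerShard), ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes);
    // collDataSizeInfo is what balanceChunks() now forwards to
    // BalancerPolicy::balance() in place of the former boost::none.
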
@@ -139,30 +157,19 @@ TEST(BalancerPolicy, Basic) {
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
ASSERT_EQ(MigrationReason::chunksImbalance, reason);
}
-TEST(BalancerPolicy, SmallClusterShouldBePerfectlyBalanced) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 1, false, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, 2, false, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion), 0}});
-
- const auto [migrations, reason] =
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
- ASSERT_EQ(1U, migrations.size());
- ASSERT_EQ(kShardId1, migrations[0].from);
- ASSERT_EQ(kShardId2, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::chunksImbalance, reason);
-}
+TEST(BalancerPolicy, SmallSingleChunkShouldNotMove) {
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion)});
-TEST(BalancerPolicy, SingleChunkShouldNotMove) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 1, false, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 0}});
{
auto [migrations, reason] = balanceChunks(
cluster.first, DistributionStatus(kNamespace, cluster.second), true, false);
@@ -178,11 +185,33 @@ TEST(BalancerPolicy, SingleChunkShouldNotMove) {
}
TEST(BalancerPolicy, BalanceThresholdObeyed) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 2, false, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, 2, false, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId2, 1, false, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId3, 1, false, emptyZoneSet, emptyShardVersion), 1}});
+ auto cluster = generateCluster({
+ ShardStatistics(kShardId0,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+
+ ShardStatistics(kShardId2,
+ ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId3,
+ ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ });
{
auto [migrations, reason] = balanceChunks(
@@ -199,11 +228,21 @@ TEST(BalancerPolicy, BalanceThresholdObeyed) {
}
TEST(BalancerPolicy, ParallelBalancing) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion),
+ ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion)});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -212,23 +251,41 @@ TEST(BalancerPolicy, ParallelBalancing) {
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId2, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
ASSERT_EQ(MigrationReason::chunksImbalance, reason);
ASSERT_EQ(kShardId1, migrations[1].from);
ASSERT_EQ(kShardId3, migrations[1].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[1].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), *migrations[1].maxKey);
}
-TEST(BalancerPolicy, ParallelBalancingDoesNotPutChunksOnShardsAboveTheOptimal) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 100, false, emptyZoneSet, emptyShardVersion), 100},
- {ShardStatistics(kShardId1, 90, false, emptyZoneSet, emptyShardVersion), 90},
- {ShardStatistics(kShardId2, 90, false, emptyZoneSet, emptyShardVersion), 90},
- {ShardStatistics(kShardId3, 80, false, emptyZoneSet, emptyShardVersion), 80},
- {ShardStatistics(kShardId4, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId5, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+TEST(BalancerPolicy, ParallelBalancingDoesNotScheduleMigrationsOnShardsAboveTheThreshold) {
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 100 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 90 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2,
+ 90 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId3,
+ 89 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId4, 0, false, emptyZoneSet, emptyShardVersion),
+ ShardStatistics(kShardId5, 0, false, emptyZoneSet, emptyShardVersion)});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -237,47 +294,39 @@ TEST(BalancerPolicy, ParallelBalancingDoesNotPutChunksOnShardsAboveTheOptimal) {
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId4, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
ASSERT_EQ(MigrationReason::chunksImbalance, reason);
ASSERT_EQ(kShardId1, migrations[1].from);
ASSERT_EQ(kShardId5, migrations[1].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[1].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), *migrations[1].maxKey);
-}
-
-TEST(BalancerPolicy, ParallelBalancingDoesNotMoveChunksFromShardsBelowOptimal) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 100, false, emptyZoneSet, emptyShardVersion), 100},
- {ShardStatistics(kShardId1, 30, false, emptyZoneSet, emptyShardVersion), 30},
- {ShardStatistics(kShardId2, 5, false, emptyZoneSet, emptyShardVersion), 5},
- {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 0}});
-
- const auto [migrations, reason] =
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
- ASSERT_EQ(1U, migrations.size());
-
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId3, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::chunksImbalance, reason);
}
TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNecessary) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 8, false, emptyZoneSet, emptyShardVersion), 8},
- {ShardStatistics(kShardId1, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 8 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion),
+ ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion)});
+
+ const auto shardStats = cluster.first;
// Here kShardId0 would have been selected as a donor
- auto availableShards = getAllShardIds(cluster.first);
+ auto availableShards = getAllShardIds(shardStats);
availableShards.erase(kShardId0);
const auto [migrations, reason] =
- BalancerPolicy::balance(cluster.first,
+ BalancerPolicy::balance(shardStats,
DistributionStatus(kNamespace, cluster.second),
- boost::none /* collDataSizeInfo */,
+ buildDataSizeInfoForBalancingFromShardStats(shardStats),
&availableShards,
false);
ASSERT_EQ(1U, migrations.size());
@@ -285,43 +334,40 @@ TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNe
ASSERT_EQ(kShardId1, migrations[0].from);
ASSERT_EQ(kShardId2, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), *migrations[0].maxKey);
ASSERT_EQ(MigrationReason::chunksImbalance, reason);
}
-TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNotNecessary) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 12, false, emptyZoneSet, emptyShardVersion), 12},
- {ShardStatistics(kShardId1, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 0}});
-
- // Here kShardId0 would have been selected as a donor
- auto availableShards = getAllShardIds(cluster.first);
- availableShards.erase(kShardId0);
- const auto [migrations, reason] =
- BalancerPolicy::balance(cluster.first,
- DistributionStatus(kNamespace, cluster.second),
- boost::none /* collDataSizeInfo */,
- &availableShards,
- false);
- ASSERT_EQ(0U, migrations.size());
-}
-
TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseDestinationShards) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId3, 1, false, emptyZoneSet, emptyShardVersion), 1}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion),
+ ShardStatistics(kShardId3,
+ 1 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
+
+ const auto shardStats = cluster.first;
// Here kShardId2 would have been selected as a recipient
- auto availableShards = getAllShardIds(cluster.first);
+ auto availableShards = getAllShardIds(shardStats);
availableShards.erase(kShardId2);
const auto [migrations, reason] =
- BalancerPolicy::balance(cluster.first,
+ BalancerPolicy::balance(shardStats,
DistributionStatus(kNamespace, cluster.second),
- boost::none /* collDataSizeInfo */,
+ buildDataSizeInfoForBalancingFromShardStats(shardStats),
&availableShards,
false);
ASSERT_EQ(1U, migrations.size());
@@ -329,86 +375,94 @@ TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseDestinationShards) {
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId3, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
ASSERT_EQ(MigrationReason::chunksImbalance, reason);
}
TEST(BalancerPolicy, JumboChunksNotMoved) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 2, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion)});
cluster.second[kShardId0][0].setJumbo(true);
- cluster.second[kShardId0][1].setJumbo(false); // Only chunk 1 is not jumbo
- cluster.second[kShardId0][2].setJumbo(true);
- cluster.second[kShardId0][3].setJumbo(true);
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
- ASSERT_EQ(1U, migrations.size());
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::chunksImbalance, reason);
+ ASSERT(migrations.empty());
}
TEST(BalancerPolicy, JumboChunksNotMovedParallel) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 2, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId2, 2, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion),
+ ShardStatistics(kShardId2,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion)});
cluster.second[kShardId0][0].setJumbo(true);
- cluster.second[kShardId0][1].setJumbo(false); // Only chunk 1 is not jumbo
- cluster.second[kShardId0][2].setJumbo(true);
- cluster.second[kShardId0][3].setJumbo(true);
cluster.second[kShardId2][0].setJumbo(true);
- cluster.second[kShardId2][1].setJumbo(true);
- cluster.second[kShardId2][2].setJumbo(false); // Only chunk 1 is not jumbo
- cluster.second[kShardId2][3].setJumbo(true);
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
- ASSERT_EQ(2U, migrations.size());
-
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::chunksImbalance, reason);
-
- ASSERT_EQ(kShardId2, migrations[1].from);
- ASSERT_EQ(kShardId3, migrations[1].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][2].getMin(), migrations[1].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][2].getMax(), *migrations[1].maxKey);
+ ASSERT(migrations.empty());
}
-TEST(BalancerPolicy, DrainingSingleChunk) {
- // shard0 is draining and chunks will go to shard1, even though it has a lot more chunks
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 2, true, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 5}});
+TEST(BalancerPolicy, DrainingFromShardWithFewData) {
+ // shard1 is draining and chunks will go to shard0, even though it has a lot more data
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 20 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false /* draining */,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true /* draining */,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT_EQ(1U, migrations.size());
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
+ ASSERT_EQ(kShardId1, migrations[0].from);
+ ASSERT_EQ(kShardId0, migrations[0].to);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[0].minKey);
ASSERT_EQ(MigrationReason::drain, reason);
}
TEST(BalancerPolicy, DrainingSingleChunkPerShard) {
// shard0 and shard2 are draining and chunks will go to shard1 and shard3 in parallel
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 2, true, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 5},
- {ShardStatistics(kShardId2, 2, true, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 5}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion),
+ ShardStatistics(kShardId2,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion)});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -417,67 +471,82 @@ TEST(BalancerPolicy, DrainingSingleChunkPerShard) {
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
ASSERT_EQ(MigrationReason::drain, reason);
ASSERT_EQ(kShardId2, migrations[1].from);
ASSERT_EQ(kShardId3, migrations[1].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[1].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), *migrations[1].maxKey);
-}
-
-TEST(BalancerPolicy, DrainingWithTwoChunksFirstOneSelected) {
- // shard0 is draining and chunks will go to shard1, even though it has a lot more chunks
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 2, true, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 5}});
-
- const auto [migrations, reason] =
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
- ASSERT_EQ(1U, migrations.size());
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::drain, reason);
}
TEST(BalancerPolicy, DrainingMultipleShardsFirstOneSelected) {
- // shard0 and shard1 are both draining with very little chunks in them and chunks will go to
- // shard2, even though it has a lot more chunks that the other two
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 5, true, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, 5, true, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId2, 5, false, emptyZoneSet, emptyShardVersion), 16}});
+    // shard1 and shard2 are both draining with very little data in them and chunks will go to
+    // shard0, even though it has a lot more data than the other two
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 50 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false /* draining */,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 5 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true /* draining */,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2,
+ 5 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true /* draining */,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT_EQ(1U, migrations.size());
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId2, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
+ ASSERT_EQ(kShardId0, migrations[0].to);
ASSERT_EQ(MigrationReason::drain, reason);
}
-TEST(BalancerPolicy, DrainingMultipleShardsWontAcceptChunks) {
- // shard0 has many chunks, but can't move them to shard1 or shard2 because they are draining
+TEST(BalancerPolicy, DrainingMultipleShardsWontAcceptMigrations) {
+    // shard0 has a lot of data, but can't move it to shard1 or shard2 because they are draining
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 2, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, 0, true, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId2, 0, true, emptyZoneSet, emptyShardVersion), 0}});
+ {ShardStatistics(kShardId0,
+ 20 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false /* draining */,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1, 0, true /* draining */, emptyZoneSet, emptyShardVersion),
+ ShardStatistics(kShardId2, 0, true /* draining */, emptyZoneSet, emptyShardVersion)});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
- ASSERT(migrations.empty());
+ ASSERT_EQ(1U, migrations.size());
+ ASSERT_EQ(kShardId0, migrations[0].to);
}
TEST(BalancerPolicy, DrainingSingleAppropriateShardFoundDueToZone) {
auto cluster =
- generateCluster({{ShardStatistics(kShardId0, 2, false, {"NYC"}, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, 2, false, {"LAX"}, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, 1, true, {"LAX"}, emptyShardVersion), 1}});
+ generateCluster({ShardStatistics(kShardId0,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ {"NYC"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ {"LAX"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2,
+ ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true,
+ {"LAX"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(
@@ -488,15 +557,29 @@ TEST(BalancerPolicy, DrainingSingleAppropriateShardFoundDueToZone) {
ASSERT_EQ(kShardId2, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), *migrations[0].maxKey);
ASSERT_EQ(MigrationReason::drain, reason);
}
TEST(BalancerPolicy, DrainingNoAppropriateShardsFoundDueToZone) {
auto cluster =
- generateCluster({{ShardStatistics(kShardId0, 2, false, {"NYC"}, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, 2, false, {"LAX"}, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, 1, true, {"SEA"}, emptyShardVersion), 1}});
+ generateCluster({ShardStatistics(kShardId0,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ {"NYC"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ {"LAX"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2,
+ ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true,
+ {"SEA"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(
@@ -507,9 +590,19 @@ TEST(BalancerPolicy, DrainingNoAppropriateShardsFoundDueToZone) {
}
TEST(BalancerPolicy, NoBalancingDueToAllNodesDraining) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 2, true, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId2, 1, true, emptyZoneSet, emptyShardVersion), 1}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2,
+ ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -519,9 +612,24 @@ TEST(BalancerPolicy, NoBalancingDueToAllNodesDraining) {
TEST(BalancerPolicy, BalancerRespectsZonesWhenDraining) {
// shard1 drains the proper chunk to shard0, even though it is more loaded than shard2
auto cluster =
- generateCluster({{ShardStatistics(kShardId0, 5, false, {"a"}, emptyShardVersion), 6},
- {ShardStatistics(kShardId1, 5, true, {"a", "b"}, emptyShardVersion), 2},
- {ShardStatistics(kShardId2, 5, false, {"b"}, emptyShardVersion), 2}});
+ generateCluster({ShardStatistics(kShardId0,
+ 5 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ {"a"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 5 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true,
+ {"a", "b"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2,
+ 5 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ {"b"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 7), "a")));
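
The zone-aware tests all follow the same pattern: build a DistributionStatus over the generated chunk map, attach one or more ZoneRanges, and pass it to balanceChunks(). A compact sketch using the same zone range as the test above:

    // Sketch: zone "a" covers [MinKey, 7); the rest of the key space is unzoned.
    DistributionStatus distribution(kNamespace, cluster.second);
    ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 7), "a")));

    const auto [migrations, reason] =
        balanceChunks(cluster.first, distribution, false, false);
    // reason explains why each migration was scheduled, e.g.
    // MigrationReason::drain when the donor shard is draining, as the
    // surrounding draining tests assert.
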
@@ -532,160 +640,45 @@ TEST(BalancerPolicy, BalancerRespectsZonesWhenDraining) {
ASSERT_EQ(kShardId1, migrations[0].from);
ASSERT_EQ(kShardId0, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), *migrations[0].maxKey);
ASSERT_EQ(MigrationReason::drain, reason);
}
-TEST(BalancerPolicy, BalancerRespectsZonePolicyBeforeImbalance) {
- // There is a large imbalance between shard0 and shard1, but the balancer must first fix the
- // chunks, which are on a wrong shard due to zone policy
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 5, false, {"a"}, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, 5, false, {"a"}, emptyShardVersion), 6},
- {ShardStatistics(kShardId2, 5, false, emptyZoneSet, emptyShardVersion), 2}});
-
- DistributionStatus distribution(kNamespace, cluster.second);
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 100), "a")));
-
- const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
- ASSERT_EQ(1U, migrations.size());
- ASSERT_EQ(kShardId2, migrations[0].from);
- ASSERT_EQ(kShardId0, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::zoneViolation, reason);
-}
-
-TEST(BalancerPolicy, BalancerFixesIncorrectZonesWithCrossShardViolationOfZones) {
- // The zone policy dictates that the same shard must donate and also receive chunks. The test
- // validates that the same shard is not used as a donor and recipient as part of the same round.
- auto cluster =
- generateCluster({{ShardStatistics(kShardId0, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId1, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId2, 5, false, {"b"}, emptyShardVersion), 3}});
-
- DistributionStatus distribution(kNamespace, cluster.second);
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 1), "b")));
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 8), kMaxBSONKey, "a")));
-
- const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
- ASSERT_EQ(1U, migrations.size());
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId2, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::zoneViolation, reason);
-}
-
-TEST(BalancerPolicy, BalancerFixesIncorrectZonesInOtherwiseBalancedCluster) {
- // Chunks are balanced across shards, but there are wrong zones, which need to be fixed
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId1, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId2, 5, false, emptyZoneSet, emptyShardVersion), 3}});
-
- DistributionStatus distribution(kNamespace, cluster.second);
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 10), "a")));
-
- const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
- ASSERT_EQ(1U, migrations.size());
- ASSERT_EQ(kShardId2, migrations[0].from);
- ASSERT_EQ(kShardId0, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::zoneViolation, reason);
-}
-
TEST(BalancerPolicy, BalancerZoneAlreadyBalanced) {
// Chunks are balanced across shards for the zone.
- auto cluster =
- generateCluster({{ShardStatistics(kShardId0, 3, false, {"a"}, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, 2, false, {"a"}, emptyShardVersion), 2}});
+ auto cluster = generateCluster({
+ ShardStatistics(kShardId0,
+ 3 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ {"a"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ {"a"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ });
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, kMaxBSONKey, "a")));
ASSERT(balanceChunks(cluster.first, distribution, false, false).first.empty());
}
-TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleZones) {
- // shard0 has chunks [MinKey, 1), [1, 2), [2, 3), [3, 4), [4, 5), so two chunks each
- // for zones "b" and "c". So [1, 2) is expected to be moved to shard1 in round 1.
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 5, false, {"a", "b", "c"}, emptyShardVersion), 5},
- {ShardStatistics(kShardId1, 1, false, {"b"}, emptyShardVersion), 1},
- {ShardStatistics(kShardId2, 1, false, {"c"}, emptyShardVersion), 1}});
-
- DistributionStatus distribution(kNamespace, cluster.second);
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 1), "a")));
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 1), BSON("x" << 3), "b")));
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 3), BSON("x" << 5), "c")));
-
- const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
- ASSERT_EQ(1U, migrations.size());
-
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMax(), *migrations[0].maxKey);
-}
-
-TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleZonesSkipZoneWithShardInUse) {
- // shard0 has chunks [MinKey, 1), [1, 2), [2, 3), [3, 4), [4, 5), so two chunks each
- // for zones "b" and "c". So [3, 4) is expected to be moved to shard2 because shard1 is
- // in use.
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 5, false, {"a", "b", "c"}, emptyShardVersion), 5},
- {ShardStatistics(kShardId1, 1, false, {"b"}, emptyShardVersion), 1},
- {ShardStatistics(kShardId2, 1, false, {"c"}, emptyShardVersion), 1}});
-
- DistributionStatus distribution(kNamespace, cluster.second);
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 1), "a")));
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 1), BSON("x" << 3), "b")));
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 3), BSON("x" << 5), "c")));
-
- auto availableShards = getAllShardIds(cluster.first);
- availableShards.erase(kShardId1);
- const auto [migrations, reason] = BalancerPolicy::balance(
- cluster.first, distribution, boost::none /* collDataSizeInfo */, &availableShards, false);
- ASSERT_EQ(1U, migrations.size());
-
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId2, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][3].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][3].getMax(), *migrations[0].maxKey);
-}
-
-TEST(BalancerPolicy, BalancerFixesIncorrectZonesInOtherwiseBalancedClusterParallel) {
- // Chunks are balanced across shards, but there are wrong zones, which need to be fixed
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId1, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId2, 5, false, emptyZoneSet, emptyShardVersion), 3},
- {ShardStatistics(kShardId3, 5, false, emptyZoneSet, emptyShardVersion), 3}});
-
- DistributionStatus distribution(kNamespace, cluster.second);
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 20), "a")));
-
- const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
- ASSERT_EQ(2U, migrations.size());
-
- ASSERT_EQ(kShardId2, migrations[0].from);
- ASSERT_EQ(kShardId0, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::zoneViolation, reason);
-
- ASSERT_EQ(kShardId3, migrations[1].from);
- ASSERT_EQ(kShardId1, migrations[1].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId3][0].getMin(), migrations[1].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId3][0].getMax(), *migrations[1].maxKey);
- ASSERT_EQ(MigrationReason::zoneViolation, reason);
-}
-
TEST(BalancerPolicy, BalancerHandlesNoShardsWithZone) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 5, false, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, 5, false, emptyZoneSet, emptyShardVersion), 2}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 5 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 5 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(