author     Pierlauro Sciarelli <pierlauro.sciarelli@mongodb.com>  2023-02-09 11:42:24 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2023-02-09 17:31:20 +0000
commit     6dad29fa64f0bab3468b18ec784f53027306c8d7 (patch)
tree       e737c395ac9eda57ab27eb488857c00b84a5ebf3
parent     eac7d3261a9b681c1f566eb5f23e5311e81ffa89 (diff)
download   mongo-6dad29fa64f0bab3468b18ec784f53027306c8d7.tar.gz
SERVER-66782 Remove BalanceAccordingToDataSize feature flag
-rw-r--r--  jstests/concurrency/fsm_workloads/insert_with_data_size_aware_balancing.js  1
-rw-r--r--  jstests/sharding/auth.js  16
-rw-r--r--  jstests/sharding/balancing_based_on_size.js  5
-rw-r--r--  jstests/sharding/balancing_sessions_collection_legacy.js  167
-rw-r--r--  jstests/sharding/data_size_aware_balancing_sessions_collection.js  4
-rw-r--r--  jstests/sharding/zone_changes_hashed.js  5
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp  30
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h  6
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp  132
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy.cpp  265
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy.h  59
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy_test.cpp  719
-rw-r--r--  src/mongo/s/sharding_feature_flags.idl  5
13 files changed, 442 insertions, 972 deletions
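The change is largely mechanical: wherever the balancer previously consulted featureFlagBalanceAccordingToDataSize and carried an optional CollectionDataSizeInfoForBalancing, it now always collects per-shard data sizes and compares those sizes instead of chunk counts. A minimal, self-contained sketch of that size-based receiver selection (hypothetical standalone function and names; the real logic lives in BalancerPolicy::_getLeastLoadedReceiverShard further down):

#include <cstdint>
#include <limits>
#include <map>
#include <string>

// Illustration only: pick the shard that currently owns the least data for the
// collection, mirroring the size-based path this commit makes unconditional.
std::string pickLeastLoadedShard(const std::map<std::string, int64_t>& shardToDataSizeMap) {
    std::string best;
    int64_t currentMin = std::numeric_limits<int64_t>::max();
    for (const auto& [shardId, dataSizeBytes] : shardToDataSizeMap) {
        if (dataSizeBytes < currentMin) {  // the smallest data footprint wins
            best = shardId;
            currentMin = dataSizeBytes;
        }
    }
    return best;  // empty if the map had no entries
}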
diff --git a/jstests/concurrency/fsm_workloads/insert_with_data_size_aware_balancing.js b/jstests/concurrency/fsm_workloads/insert_with_data_size_aware_balancing.js
index c97a2f9530b..b7b4fb6806b 100644
--- a/jstests/concurrency/fsm_workloads/insert_with_data_size_aware_balancing.js
+++ b/jstests/concurrency/fsm_workloads/insert_with_data_size_aware_balancing.js
@@ -8,7 +8,6 @@
* @tags: [
* requires_sharding,
* assumes_balancer_on,
- * featureFlagBalanceAccordingToDataSize,
* antithesis_incompatible,
* ]
*/
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index a35f2edcd74..b64b36efb94 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -186,22 +186,6 @@ assert.commandWorked(bulk.execute());
s.startBalancer(60000);
-const balanceAccordingToDataSize = FeatureFlagUtil.isEnabled(
- s.configRS.getPrimary().getDB('admin'), "BalanceAccordingToDataSize", adminUser);
-if (!balanceAccordingToDataSize) {
- assert.soon(function() {
- var d1Chunks =
- findChunksUtil.countChunksForNs(s.getDB("config"), 'test.foo', {shard: "d1"});
- var d2Chunks =
- findChunksUtil.countChunksForNs(s.getDB("config"), 'test.foo', {shard: "d2"});
- var totalChunks = findChunksUtil.countChunksForNs(s.getDB("config"), 'test.foo');
-
- print("chunks: " + d1Chunks + " " + d2Chunks + " " + totalChunks);
-
- return d1Chunks > 0 && d2Chunks > 0 && (d1Chunks + d2Chunks == totalChunks);
- }, "Chunks failed to balance", 60000, 5000);
-}
-
// SERVER-33753: count() without predicate can be wrong on sharded collections.
// assert.eq(s.getDB("test").foo.count(), num+1);
var numDocs = s.getDB("test").foo.find().itcount();
diff --git a/jstests/sharding/balancing_based_on_size.js b/jstests/sharding/balancing_based_on_size.js
index f3074c7ae99..e47b53d8645 100644
--- a/jstests/sharding/balancing_based_on_size.js
+++ b/jstests/sharding/balancing_based_on_size.js
@@ -1,11 +1,6 @@
/*
* Test that the balancer is redistributing data based on the actual amount of data
* for a collection on each node, converging when the size difference becomes small.
- *
- * @tags: [
- * featureFlagBalanceAccordingToDataSize,
- * requires_fcv_61,
- * ]
*/
(function() {
diff --git a/jstests/sharding/balancing_sessions_collection_legacy.js b/jstests/sharding/balancing_sessions_collection_legacy.js
deleted file mode 100644
index 9c9e9a8a7ed..00000000000
--- a/jstests/sharding/balancing_sessions_collection_legacy.js
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Tests that the balancer splits the sessions collection and uniformly distributes the chunks
- * across shards in the cluster.
- * @tags: [resource_intensive]
- */
-(function() {
-"use strict";
-
-load("jstests/libs/feature_flag_util.js");
-load("jstests/sharding/libs/find_chunks_util.js");
-load('jstests/sharding/libs/remove_shard_util.js');
-
-// TODO SERVER-50144 Remove this and allow orphan checking.
-// This test calls removeShard which can leave docs in config.rangeDeletions in state "pending",
-// therefore preventing orphans from being cleaned up.
-TestData.skipCheckOrphans = true;
-
-/*
- * Returns the number of chunks for the sessions collection.
- */
-function getNumTotalChunks() {
- return findChunksUtil.countChunksForNs(configDB, kSessionsNs);
-}
-
-/*
- * Returns the number of chunks for the sessions collection that are on the given shard.
- */
-function getNumChunksOnShard(shardName) {
- return findChunksUtil.countChunksForNs(configDB, kSessionsNs, {shard: shardName});
-}
-
-/*
- * Returns the number of docs in the sessions collection on the given host.
- */
-function getNumDocs(conn) {
- return conn.getCollection(kSessionsNs).count();
-}
-
-/*
- * Starts a replica-set shard, adds the shard to the cluster, and increments numShards.
- * Returns the ReplSetTest object for the shard.
- */
-function addShardToCluster() {
- const shardName = clusterName + "-rs" + numShards;
-
- const replTest = new ReplSetTest({name: shardName, nodes: 1});
- replTest.startSet({shardsvr: ""});
- replTest.initiate();
-
- assert.commandWorked(st.s.adminCommand({addShard: replTest.getURL(), name: shardName}));
- numShards++;
- return replTest;
-}
-
-/*
- * Removes the given shard from the cluster, waits until the removal is completed, and
- * decrements numShards.
- */
-function removeShardFromCluster(shardName) {
- removeShard(st, shardName);
- numShards--;
-}
-
-/*
- * Returns true if the chunks for the sessions collection are evenly distributed across the
- * given shards. That is, the number of chunks on the most loaded shard and on the least
- * loaded shard differs by no more than 1.
- */
-function isBalanced(shardNames) {
- const expectedMinNumChunksPerShard = Math.floor(kExpectedNumChunks / shardNames.length);
-
- let minNumChunks = Number.MAX_VALUE;
- let maxNumChunks = 0;
- for (const shardName of shardNames) {
- const numChunks = getNumChunksOnShard(shardName);
- minNumChunks = Math.min(numChunks, minNumChunks);
- maxNumChunks = Math.max(numChunks, maxNumChunks);
- }
-
- return (maxNumChunks - minNumChunks <= 1) && (minNumChunks == expectedMinNumChunksPerShard);
-}
-
-/*
- * Returns the standard deviation for given numbers.
- */
-function computeStdDev(nums) {
- const mean = nums.reduce((a, b) => a + b) / nums.length;
- return Math.sqrt(nums.map(x => Math.pow(x - mean, 2)).reduce((a, b) => a + b) / nums.length);
-}
-
-const kMinNumChunks = 100;
-const kExpectedNumChunks = 128; // the balancer rounds kMinNumChunks to the next power of 2.
-const kNumSessions = 10000;
-const kBalancerTimeoutMS = 5 * 60 * 1000;
-
-let numShards = 2;
-const clusterName = jsTest.name();
-const st = new ShardingTest({
- name: clusterName,
- shards: numShards,
- other: {configOptions: {setParameter: {minNumChunksForSessionsCollection: kMinNumChunks}}}
-});
-
-// TODO SERVER-66782 delete this file
-if (FeatureFlagUtil.isEnabled(st.configRS.getPrimary().getDB('admin'),
- "BalanceAccordingToDataSize")) {
- jsTestLog("Skipping as featureFlagBalanceAccordingToDataSize is enabled");
- st.stop();
- return;
-}
-const kSessionsNs = "config.system.sessions";
-const configDB = st.s.getDB("config");
-
-// There is only one chunk initially.
-assert.eq(1, getNumTotalChunks());
-
-st.startBalancer();
-
-jsTest.log(
- "Verify that the balancer splits the initial chunks and distributes chunks evenly across existing shards");
-
-assert.soon(() => getNumTotalChunks() == kExpectedNumChunks,
- "balancer did not split the initial chunk for the sessions collection");
-assert.soon(() => isBalanced([st.shard0.shardName, st.shard1.shardName]),
- "balancer did not distribute chunks evenly across existing shards",
- kBalancerTimeoutMS);
-
-jsTest.log(
- "Verify that the balancer redistributes chunks when more shards are added to the cluster");
-const shard2 = addShardToCluster();
-const shard3 = addShardToCluster();
-const shard4 = addShardToCluster();
-
-assert.soon(() => isBalanced(
- [st.shard0.shardName, st.shard1.shardName, shard2.name, shard3.name, shard4.name]),
- "balancer did not redistribute chunks evenly after more shards were added",
- kBalancerTimeoutMS);
-
-jsTest.log("Verify that the session docs are distributed almost evenly across shards");
-// Start sessions and trigger a refresh to flush the sessions to the sessions collection.
-for (let i = 0; i < kNumSessions; i++) {
- assert.commandWorked(st.s.adminCommand({startSession: 1}));
-}
-assert.commandWorked(st.s.adminCommand({refreshLogicalSessionCacheNow: 1}));
-assert.lte(kNumSessions, getNumDocs(st.s));
-
-const shards =
- [st.shard0, st.shard1, shard2.getPrimary(), shard3.getPrimary(), shard4.getPrimary()];
-const numDocsOnShards = shards.map(shard => getNumDocs(shard));
-assert.lt(computeStdDev(numDocsOnShards), 0.1 * kNumSessions / shards.length);
-
-jsTest.log(
- "Verify that the balancer redistributes chunks when shards are removed from the cluster");
-removeShardFromCluster(shard2.name);
-
-assert.soon(() => isBalanced([st.shard0.shardName, st.shard1.shardName, shard3.name, shard4.name]),
- "balancer did not redistribute chunks evenly after one of the shards was removed",
- kBalancerTimeoutMS);
-assert.eq(0, getNumChunksOnShard(shard2.name));
-
-st.stopBalancer();
-
-st.stop();
-shard2.stopSet();
-shard3.stopSet();
-shard4.stopSet();
-}());
diff --git a/jstests/sharding/data_size_aware_balancing_sessions_collection.js b/jstests/sharding/data_size_aware_balancing_sessions_collection.js
index 00357d4403a..8f10308779f 100644
--- a/jstests/sharding/data_size_aware_balancing_sessions_collection.js
+++ b/jstests/sharding/data_size_aware_balancing_sessions_collection.js
@@ -2,9 +2,7 @@
* Tests that the balancer splits the sessions collection and uniformly distributes the chunks
* across shards in the cluster.
* @tags: [
- * featureFlagBalanceAccordingToDataSize,
- * requires_fcv_61,
- * resource_intensive,
+ * resource_intensive,
* ]
*/
(function() {
diff --git a/jstests/sharding/zone_changes_hashed.js b/jstests/sharding/zone_changes_hashed.js
index f93288ec4e5..b61d7933a06 100644
--- a/jstests/sharding/zone_changes_hashed.js
+++ b/jstests/sharding/zone_changes_hashed.js
@@ -135,10 +135,7 @@ shardTags = {
};
assertShardTags(configDB, shardTags);
-const balanceAccordingToDataSize = FeatureFlagUtil.isEnabled(
- st.configRS.getPrimary().getDB('admin'), "BalanceAccordingToDataSize");
-let numChunksToMove = balanceAccordingToDataSize ? zoneChunkBounds["zoneB"].length - 1
- : zoneChunkBounds["zoneB"].length / 2;
+const numChunksToMove = zoneChunkBounds["zoneB"].length - 1;
runBalancer(st, numChunksToMove);
shardChunkBounds = {
[st.shard0.shardName]: zoneChunkBounds["zoneB"].slice(0, numChunksToMove),
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index 0cdcb5727fb..a38fdaf4ae3 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -454,12 +454,7 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToMo
continue;
}
- boost::optional<stdx::unordered_map<NamespaceString, CollectionDataSizeInfoForBalancing>>
- collsDataSizeInfo;
- if (feature_flags::gBalanceAccordingToDataSize.isEnabled(
- serverGlobalParams.featureCompatibility)) {
- collsDataSizeInfo.emplace(getDataSizeInfoForCollections(opCtx, collBatch));
- }
+ const auto collsDataSizeInfo = getDataSizeInfoForCollections(opCtx, collBatch);
for (const auto& collFromBatch : collBatch) {
@@ -469,13 +464,8 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToMo
const auto& nss = collFromBatch.getNss();
- boost::optional<CollectionDataSizeInfoForBalancing> optDataSizeInfo;
- if (collsDataSizeInfo.has_value()) {
- optDataSizeInfo.emplace(std::move(collsDataSizeInfo->at(nss)));
- }
-
auto candidatesStatus = _getMigrateCandidatesForCollection(
- opCtx, nss, shardStats, optDataSizeInfo, availableShards);
+ opCtx, nss, shardStats, collsDataSizeInfo.at(nss), availableShards);
if (candidatesStatus == ErrorCodes::NamespaceNotFound) {
// Namespace got dropped before we managed to get to it, so just skip it
continue;
@@ -521,14 +511,10 @@ StatusWith<MigrateInfosWithReason> BalancerChunkSelectionPolicyImpl::selectChunk
});
- boost::optional<CollectionDataSizeInfoForBalancing> optCollDataSizeInfo;
- if (feature_flags::gBalanceAccordingToDataSize.isEnabled(
- serverGlobalParams.featureCompatibility)) {
- optCollDataSizeInfo.emplace(getDataSizeInfoForCollection(opCtx, nss));
- }
+ const auto dataSizeInfo = getDataSizeInfoForCollection(opCtx, nss);
- auto candidatesStatus = _getMigrateCandidatesForCollection(
- opCtx, nss, shardStats, optCollDataSizeInfo, &availableShards);
+ auto candidatesStatus =
+ _getMigrateCandidatesForCollection(opCtx, nss, shardStats, dataSizeInfo, &availableShards);
if (!candidatesStatus.isOK()) {
return candidatesStatus.getStatus();
}
@@ -562,7 +548,9 @@ BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* op
const DistributionStatus& distribution = collInfoStatus.getValue();
- return BalancerPolicy::balanceSingleChunk(chunk, shardStats, distribution);
+ const auto dataSizeInfo = getDataSizeInfoForCollection(opCtx, nss);
+
+ return BalancerPolicy::balanceSingleChunk(chunk, shardStats, distribution, dataSizeInfo);
}
Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* opCtx,
@@ -652,7 +640,7 @@ BalancerChunkSelectionPolicyImpl::_getMigrateCandidatesForCollection(
OperationContext* opCtx,
const NamespaceString& nss,
const ShardStatisticsVector& shardStats,
- const boost::optional<CollectionDataSizeInfoForBalancing>& collDataSizeInfo,
+ const CollectionDataSizeInfoForBalancing& collDataSizeInfo,
stdx::unordered_set<ShardId>* availableShards) {
auto routingInfoStatus =
Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(opCtx, nss);
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h
index c2baec3ab93..bae65625fdf 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h
@@ -72,14 +72,14 @@ private:
const ShardStatisticsVector& shardStats);
/**
- * Synchronous method, which iterates the collection's chunks and uses the cluster statistics to
- * figure out where to place them.
+ * Synchronous method, which uses the collection's data size on each shard to figure out
+ * where to place its chunks.
*/
StatusWith<MigrateInfosWithReason> _getMigrateCandidatesForCollection(
OperationContext* opCtx,
const NamespaceString& nss,
const ShardStatisticsVector& shardStats,
- const boost::optional<CollectionDataSizeInfoForBalancing>& collDataSizeInfo,
+ const CollectionDataSizeInfoForBalancing& collDataSizeInfo,
stdx::unordered_set<ShardId>* availableShards);
// Source for obtaining cluster statistics. Not owned and must not be destroyed before the
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
index 40f02409106..91f766c3ab6 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
@@ -33,6 +33,7 @@
#include "mongo/db/s/balancer/migration_test_fixture.h"
#include "mongo/idl/server_parameter_test_util.h"
#include "mongo/platform/random.h"
+#include "mongo/s/request_types/get_stats_for_balancing_gen.h"
#include "mongo/s/type_collection_common_types_gen.h"
namespace mongo {
@@ -89,6 +90,23 @@ protected:
}
/**
+ * Sets up the mock network to expect a _shardsvrGetStatsForBalancing command and to answer it
+ * with a reply containing dummy collection stats.
+ */
+ void expectGetStatsForBalancingCommand() {
+ BSONObjBuilder resultBuilder;
+ CommandHelpers::appendCommandStatusNoThrow(resultBuilder, Status::OK());
+
+ onCommand([&resultBuilder](const RemoteCommandRequest& request) {
+ ASSERT(request.cmdObj[ShardsvrGetStatsForBalancing::kCommandName]);
+
+ ShardsvrGetStatsForBalancingReply reply({CollStatsForBalancing(kNamespace, 12345)});
+ reply.serialize(&resultBuilder);
+ return resultBuilder.obj();
+ });
+ }
+
+ /**
* Sets up mock network for all the shards to expect the commands executed for computing cluster
* stats, which include listDatabase and serverStatus.
*/
@@ -189,8 +207,6 @@ TEST_F(BalancerChunkSelectionTest, ZoneRangesOverlap) {
}
TEST_F(BalancerChunkSelectionTest, ZoneRangeMaxNotAlignedWithChunkMax) {
- RAIIServerParameterControllerForTest featureFlagBalanceAccordingToDataSize{
- "featureFlagBalanceAccordingToDataSize", false};
// Set up two shards in the metadata.
ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
NamespaceString::kConfigsvrShardsNamespace,
@@ -240,7 +256,11 @@ TEST_F(BalancerChunkSelectionTest, ZoneRangeMaxNotAlignedWithChunkMax) {
ASSERT_EQUALS(0U, candidateChunksStatus.getValue().size());
});
- expectGetStatsCommands(2);
+ const int numShards = 2;
+ expectGetStatsCommands(numShards);
+ for (int i = 0; i < numShards; i++) {
+ expectGetStatsForBalancingCommand();
+ }
future.default_timed_get();
removeAllChunks(kNamespace, collUUID);
};
@@ -251,111 +271,5 @@ TEST_F(BalancerChunkSelectionTest, ZoneRangeMaxNotAlignedWithChunkMax) {
{BSON(kPattern << -15), kKeyPattern.globalMax()}});
}
-TEST_F(BalancerChunkSelectionTest, ShardedTimeseriesCollectionsCanBeAutoSplitted) {
- // Set up two shards in the metadata, each one with its own zone
- ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
- NamespaceString::kConfigsvrShardsNamespace,
- appendZones(kShard0, {"A"}),
- kMajorityWriteConcern));
- ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
- NamespaceString::kConfigsvrShardsNamespace,
- appendZones(kShard1, {"B"}),
- kMajorityWriteConcern));
-
- // Set up a database and a sharded collection in the metadata.
- const auto collUUID = UUID::gen();
- ChunkVersion version({OID::gen(), Timestamp(42)}, {2, 0});
- setUpDatabase(kDbName, kShardId0);
-
- TypeCollectionTimeseriesFields tsFields;
- tsFields.setTimeseriesOptions(TimeseriesOptions("fieldName"));
- setUpCollection(kNamespace, collUUID, version, std::move(tsFields));
-
- // Set up two zones
- setUpZones(kNamespace,
- {
- {"A", {kKeyPattern.globalMin(), BSON(kPattern << 0)}},
- {"B", {BSON(kPattern << 0), kKeyPattern.globalMax()}},
- });
-
- // Create just one chunk covering the whole space
- setUpChunk(collUUID, kKeyPattern.globalMin(), kKeyPattern.globalMax(), kShardId0, version);
-
- auto future = launchAsync([this] {
- ThreadClient tc(getServiceContext());
- auto opCtx = Client::getCurrent()->makeOperationContext();
-
- // Requests chunks to be relocated requires running commands on each shard to
- // get shard statistics. Set up dummy hosts for the source shards.
- shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
- shardTargeterMock(opCtx.get(), kShardId1)->setFindHostReturnValue(kShardHost1);
-
- auto candidateChunksStatus = _chunkSelectionPolicy.get()->selectChunksToSplit(opCtx.get());
- ASSERT_OK(candidateChunksStatus.getStatus());
-
- ASSERT_EQUALS(1U, candidateChunksStatus.getValue().size());
- });
-
- expectGetStatsCommands(2);
- future.default_timed_get();
-}
-
-TEST_F(BalancerChunkSelectionTest, ShardedTimeseriesCollectionsCanBeBalanced) {
- RAIIServerParameterControllerForTest featureFlagBalanceAccordingToDataSize{
- "featureFlagBalanceAccordingToDataSize", false};
- // Set up two shards in the metadata.
- ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
- NamespaceString::kConfigsvrShardsNamespace,
- kShard0,
- kMajorityWriteConcern));
- ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
- NamespaceString::kConfigsvrShardsNamespace,
- kShard1,
- kMajorityWriteConcern));
-
- // Set up a database and a sharded collection in the metadata.
- const auto collUUID = UUID::gen();
- ChunkVersion version({OID::gen(), Timestamp(42)}, {2, 0});
- setUpDatabase(kDbName, kShardId0);
-
- TypeCollectionTimeseriesFields tsFields;
- tsFields.setTimeseriesOptions(TimeseriesOptions("fieldName"));
- setUpCollection(kNamespace, collUUID, version, std::move(tsFields));
-
- auto addChunk = [&](const BSONObj& min, const BSONObj& max) {
- setUpChunk(collUUID, min, max, kShardId0, version);
- version.incMinor();
- };
-
- addChunk(kKeyPattern.globalMin(), BSON(kPattern << 0));
- for (int i = 1; i <= 100; ++i) {
- addChunk(BSON(kPattern << (i - 1)), BSON(kPattern << i));
- }
- addChunk(BSON(kPattern << 100), kKeyPattern.globalMax());
-
- auto future = launchAsync([this] {
- ThreadClient tc(getServiceContext());
- auto opCtx = Client::getCurrent()->makeOperationContext();
-
- // Requests chunks to be relocated requires running commands on each shard to
- // get shard statistics. Set up dummy hosts for the source shards.
- shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
- shardTargeterMock(opCtx.get(), kShardId1)->setFindHostReturnValue(kShardHost1);
-
- std::vector<ClusterStatistics::ShardStatistics> shardStats =
- uassertStatusOK(_clusterStats.get()->getStats(opCtx.get()));
- auto availableShards = getAllShardIds(shardStats);
-
- auto candidateChunksStatus = _chunkSelectionPolicy.get()->selectChunksToMove(
- opCtx.get(), shardStats, &availableShards);
- ASSERT_OK(candidateChunksStatus.getStatus());
-
- ASSERT_EQUALS(1, candidateChunksStatus.getValue().size());
- });
-
- expectGetStatsCommands(2);
- future.default_timed_get();
-}
-
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/s/balancer/balancer_policy.cpp b/src/mongo/db/s/balancer/balancer_policy.cpp
index 7677a0e0d23..3587c61bacf 100644
--- a/src/mongo/db/s/balancer/balancer_policy.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy.cpp
@@ -58,14 +58,6 @@ using std::string;
using std::vector;
using namespace fmt::literals;
-namespace {
-
-// This value indicates the minimum deviation shard's number of chunks need to have from the
-// optimal average across all shards for a zone for a rebalancing migration to be initiated.
-const size_t kDefaultImbalanceThreshold = 1;
-
-} // namespace
-
DistributionStatus::DistributionStatus(NamespaceString nss,
ShardToChunksMap shardToChunksMap,
ZoneInfo zoneInfo)
@@ -73,46 +65,11 @@ DistributionStatus::DistributionStatus(NamespaceString nss,
_shardChunks(std::move(shardToChunksMap)),
_zoneInfo(std::move(zoneInfo)) {}
-size_t DistributionStatus::totalChunks() const {
- size_t total = 0;
-
- for (const auto& shardChunk : _shardChunks) {
- total += shardChunk.second.size();
- }
-
- return total;
-}
-
-size_t DistributionStatus::totalChunksInZone(const std::string& zone) const {
- size_t total = 0;
-
- for (const auto& shardChunk : _shardChunks) {
- total += numberOfChunksInShardWithZone(shardChunk.first, zone);
- }
-
- return total;
-}
-
size_t DistributionStatus::numberOfChunksInShard(const ShardId& shardId) const {
const auto& shardChunks = getChunks(shardId);
return shardChunks.size();
}
-size_t DistributionStatus::numberOfChunksInShardWithZone(const ShardId& shardId,
- const string& zone) const {
- const auto& shardChunks = getChunks(shardId);
-
- size_t total = 0;
-
- for (const auto& chunk : shardChunks) {
- if (zone == getZoneForChunk(chunk)) {
- total++;
- }
- }
-
- return total;
-}
-
const vector<ChunkType>& DistributionStatus::getChunks(const ShardId& shardId) const {
ShardToChunksMap::const_iterator i = _shardChunks.find(shardId);
invariant(i != _shardChunks.end());
@@ -242,14 +199,12 @@ Status BalancerPolicy::isShardSuitableReceiver(const ClusterStatistics::ShardSta
std::tuple<ShardId, int64_t> BalancerPolicy::_getLeastLoadedReceiverShard(
const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution,
- const boost::optional<CollectionDataSizeInfoForBalancing>& collDataSizeInfo,
+ const CollectionDataSizeInfoForBalancing& collDataSizeInfo,
const string& zone,
const stdx::unordered_set<ShardId>& availableShards) {
ShardId best;
int64_t currentMin = numeric_limits<int64_t>::max();
- const auto shouldBalanceAccordingToDataSize = collDataSizeInfo.has_value();
-
for (const auto& stat : shardStats) {
if (!availableShards.count(stat.shardId))
continue;
@@ -259,24 +214,16 @@ std::tuple<ShardId, int64_t> BalancerPolicy::_getLeastLoadedReceiverShard(
continue;
}
- if (shouldBalanceAccordingToDataSize) {
- const auto& shardSizeIt = collDataSizeInfo->shardToDataSizeMap.find(stat.shardId);
- if (shardSizeIt == collDataSizeInfo->shardToDataSizeMap.end()) {
- // Skip if stats not available (may happen if add|remove shard during a round)
- continue;
- }
+ const auto& shardSizeIt = collDataSizeInfo.shardToDataSizeMap.find(stat.shardId);
+ if (shardSizeIt == collDataSizeInfo.shardToDataSizeMap.end()) {
+ // Skip if stats not available (may happen if add|remove shard during a round)
+ continue;
+ }
- int64_t shardSize = shardSizeIt->second;
- if (shardSize < currentMin) {
- best = stat.shardId;
- currentMin = shardSize;
- }
- } else {
- int64_t myChunks = distribution.numberOfChunksInShard(stat.shardId);
- if (myChunks < currentMin) {
- best = stat.shardId;
- currentMin = myChunks;
- }
+ const auto shardSize = shardSizeIt->second;
+ if (shardSize < currentMin) {
+ best = stat.shardId;
+ currentMin = shardSize;
}
}
@@ -286,37 +233,26 @@ std::tuple<ShardId, int64_t> BalancerPolicy::_getLeastLoadedReceiverShard(
std::tuple<ShardId, int64_t> BalancerPolicy::_getMostOverloadedShard(
const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution,
- const boost::optional<CollectionDataSizeInfoForBalancing>& collDataSizeInfo,
+ const CollectionDataSizeInfoForBalancing& collDataSizeInfo,
const string& chunkZone,
const stdx::unordered_set<ShardId>& availableShards) {
ShardId worst;
long long currentMax = numeric_limits<long long>::min();
- const auto shouldBalanceAccordingToDataSize = collDataSizeInfo.has_value();
-
for (const auto& stat : shardStats) {
if (!availableShards.count(stat.shardId))
continue;
- if (shouldBalanceAccordingToDataSize) {
- const auto& shardSizeIt = collDataSizeInfo->shardToDataSizeMap.find(stat.shardId);
- if (shardSizeIt == collDataSizeInfo->shardToDataSizeMap.end()) {
- // Skip if stats not available (may happen if add|remove shard during a round)
- continue;
- }
+ const auto& shardSizeIt = collDataSizeInfo.shardToDataSizeMap.find(stat.shardId);
+ if (shardSizeIt == collDataSizeInfo.shardToDataSizeMap.end()) {
+ // Skip if stats not available (may happen if add|remove shard during a round)
+ continue;
+ }
- const auto shardSize = shardSizeIt->second;
- if (shardSize > currentMax) {
- worst = stat.shardId;
- currentMax = shardSize;
- }
- } else {
- const unsigned shardChunkCount =
- distribution.numberOfChunksInShardWithZone(stat.shardId, chunkZone);
- if (shardChunkCount > currentMax) {
- worst = stat.shardId;
- currentMax = shardChunkCount;
- }
+ const auto shardSize = shardSizeIt->second;
+ if (shardSize > currentMax) {
+ worst = stat.shardId;
+ currentMax = shardSize;
}
}
@@ -391,7 +327,7 @@ MigrateInfo chooseRandomMigration(const ShardStatisticsVector& shardStats,
MigrateInfosWithReason BalancerPolicy::balance(
const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution,
- const boost::optional<CollectionDataSizeInfoForBalancing>& collDataSizeInfo,
+ const CollectionDataSizeInfoForBalancing& collDataSizeInfo,
stdx::unordered_set<ShardId>* availableShards,
bool forceJumbo) {
vector<MigrateInfo> migrations;
@@ -453,15 +389,11 @@ MigrateInfosWithReason BalancerPolicy::balance(
invariant(to != stat.shardId);
- auto maxChunkSizeBytes = [&]() -> boost::optional<int64_t> {
- if (collDataSizeInfo.has_value()) {
- return collDataSizeInfo->maxChunkSizeBytes;
- }
- return boost::none;
- }();
-
- migrations.emplace_back(
- to, distribution.nss(), chunk, ForceJumbo::kForceBalancer, maxChunkSizeBytes);
+ migrations.emplace_back(to,
+ distribution.nss(),
+ chunk,
+ ForceJumbo::kForceBalancer,
+ collDataSizeInfo.maxChunkSizeBytes);
if (firstReason == MigrationReason::none) {
firstReason = MigrationReason::drain;
}
@@ -530,19 +462,12 @@ MigrateInfosWithReason BalancerPolicy::balance(
invariant(to != stat.shardId);
- auto maxChunkSizeBytes = [&]() -> boost::optional<int64_t> {
- if (collDataSizeInfo.has_value()) {
- return collDataSizeInfo->maxChunkSizeBytes;
- }
- return boost::none;
- }();
-
migrations.emplace_back(to,
distribution.nss(),
chunk,
forceJumbo ? ForceJumbo::kForceBalancer
: ForceJumbo::kDoNotForce,
- maxChunkSizeBytes);
+ collDataSizeInfo.maxChunkSizeBytes);
if (firstReason == MigrationReason::none) {
firstReason = MigrationReason::zoneViolation;
}
@@ -589,29 +514,14 @@ MigrateInfosWithReason BalancerPolicy::balance(
continue;
}
- auto singleZoneBalance = [&]() {
- if (collDataSizeInfo.has_value()) {
- return _singleZoneBalanceBasedOnDataSize(shardStats,
- distribution,
- *collDataSizeInfo,
- zone,
- &migrations,
- availableShards,
- forceJumbo ? ForceJumbo::kForceBalancer
- : ForceJumbo::kDoNotForce);
- }
-
- return _singleZoneBalanceBasedOnChunks(shardStats,
- distribution,
- zone,
- totalNumberOfShardsWithZone,
- &migrations,
- availableShards,
- forceJumbo ? ForceJumbo::kForceBalancer
- : ForceJumbo::kDoNotForce);
- };
-
- while (singleZoneBalance()) {
+ while (_singleZoneBalanceBasedOnDataSize(shardStats,
+ distribution,
+ collDataSizeInfo,
+ zone,
+ &migrations,
+ availableShards,
+ forceJumbo ? ForceJumbo::kForceBalancer
+ : ForceJumbo::kDoNotForce)) {
if (firstReason == MigrationReason::none) {
firstReason = MigrationReason::chunksImbalance;
}
@@ -624,14 +534,12 @@ MigrateInfosWithReason BalancerPolicy::balance(
boost::optional<MigrateInfo> BalancerPolicy::balanceSingleChunk(
const ChunkType& chunk,
const ShardStatisticsVector& shardStats,
- const DistributionStatus& distribution) {
+ const DistributionStatus& distribution,
+ const CollectionDataSizeInfoForBalancing& collDataSizeInfo) {
const string zone = distribution.getZoneForChunk(chunk);
- const auto [newShardId, _] = _getLeastLoadedReceiverShard(shardStats,
- distribution,
- boost::none /* collDataSizeInfo */,
- zone,
- stdx::unordered_set<ShardId>());
+ const auto [newShardId, _] = _getLeastLoadedReceiverShard(
+ shardStats, distribution, collDataSizeInfo, zone, stdx::unordered_set<ShardId>());
if (!newShardId.isValid() || newShardId == chunk.getShard()) {
return boost::optional<MigrateInfo>();
}
@@ -639,101 +547,6 @@ boost::optional<MigrateInfo> BalancerPolicy::balanceSingleChunk(
return MigrateInfo(newShardId, distribution.nss(), chunk, ForceJumbo::kDoNotForce);
}
-bool BalancerPolicy::_singleZoneBalanceBasedOnChunks(const ShardStatisticsVector& shardStats,
- const DistributionStatus& distribution,
- const string& zone,
- size_t totalNumberOfShardsWithZone,
- vector<MigrateInfo>* migrations,
- stdx::unordered_set<ShardId>* availableShards,
- ForceJumbo forceJumbo) {
- // Calculate the rounded optimal number of chunks per shard
- const size_t totalNumberOfChunksInZone =
- (zone.empty() ? distribution.totalChunks() : distribution.totalChunksInZone(zone));
- const size_t idealNumberOfChunksPerShardForZone =
- (size_t)std::roundf(totalNumberOfChunksInZone / (float)totalNumberOfShardsWithZone);
-
- const auto [from, fromSize] =
- _getMostOverloadedShard(shardStats, distribution, boost::none, zone, *availableShards);
- if (!from.isValid())
- return false;
-
- const size_t max = distribution.numberOfChunksInShardWithZone(from, zone);
-
- // Do not use a shard if it already has less entries than the optimal per-shard chunk count
- if (max <= idealNumberOfChunksPerShardForZone)
- return false;
-
- const auto [to, toSize] =
- _getLeastLoadedReceiverShard(shardStats, distribution, boost::none, zone, *availableShards);
- if (!to.isValid()) {
- if (migrations->empty()) {
- LOGV2(21882, "No available shards to take chunks for zone", "zone"_attr = zone);
- }
- return false;
- }
-
- const size_t min = distribution.numberOfChunksInShardWithZone(to, zone);
-
- // Do not use a shard if it already has more entries than the optimal per-shard chunk count
- if (min >= idealNumberOfChunksPerShardForZone)
- return false;
-
- const size_t imbalance = max - idealNumberOfChunksPerShardForZone;
-
- LOGV2_DEBUG(
- 21883,
- 1,
- "collection: {namespace}, zone: {zone}, donor: {fromShardId} chunks on "
- " {fromShardChunkCount}, receiver: {toShardId} chunks on {toShardChunkCount}, "
- "ideal: {idealNumberOfChunksPerShardForZone}, threshold: {chunkCountImbalanceThreshold}",
- "Balancing single zone",
- "namespace"_attr = distribution.nss().ns(),
- "zone"_attr = zone,
- "fromShardId"_attr = from,
- "fromShardChunkCount"_attr = max,
- "toShardId"_attr = to,
- "toShardChunkCount"_attr = min,
- "idealNumberOfChunksPerShardForZone"_attr = idealNumberOfChunksPerShardForZone,
- "chunkCountImbalanceThreshold"_attr = kDefaultImbalanceThreshold);
-
- // Check whether it is necessary to balance within this zone
- if (imbalance < kDefaultImbalanceThreshold)
- return false;
-
- const vector<ChunkType>& chunks = distribution.getChunks(from);
-
- unsigned numJumboChunks = 0;
-
- for (const auto& chunk : chunks) {
- if (distribution.getZoneForChunk(chunk) != zone)
- continue;
-
- if (chunk.getJumbo()) {
- numJumboChunks++;
- continue;
- }
-
- migrations->emplace_back(to, distribution.nss(), chunk, forceJumbo);
- invariant(availableShards->erase(chunk.getShard()));
- invariant(availableShards->erase(to));
- return true;
- }
-
- if (numJumboChunks) {
- LOGV2_WARNING(
- 21894,
- "Shard: {shardId}, collection: {namespace} has only jumbo chunks for "
- "zone \'{zone}\' and cannot be balanced. Jumbo chunks count: {numJumboChunks}",
- "Shard has only jumbo chunks for and cannot be balanced",
- "shardId"_attr = from,
- "namespace"_attr = distribution.nss().ns(),
- "zone"_attr = zone,
- "numJumboChunks"_attr = numJumboChunks);
- }
-
- return false;
-}
-
bool BalancerPolicy::_singleZoneBalanceBasedOnDataSize(
const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution,
diff --git a/src/mongo/db/s/balancer/balancer_policy.h b/src/mongo/db/s/balancer/balancer_policy.h
index 706d2cdb351..ba34bdbd4cc 100644
--- a/src/mongo/db/s/balancer/balancer_policy.h
+++ b/src/mongo/db/s/balancer/balancer_policy.h
@@ -313,27 +313,11 @@ public:
Status addRangeToZone(const ZoneRange& range);
/**
- * Returns total number of chunks across all shards.
- */
- size_t totalChunks() const;
-
- /**
- * Returns the total number of chunks across all shards, which fall into the specified zone's
- * range.
- */
- size_t totalChunksInZone(const std::string& zone) const;
-
- /**
* Returns number of chunks in the specified shard.
*/
size_t numberOfChunksInShard(const ShardId& shardId) const;
/**
- * Returns number of chunks in the specified shard, which also belong to the give zone.
- */
- size_t numberOfChunksInShardWithZone(const ShardId& shardId, const std::string& zone) const;
-
- /**
* Returns all chunks for the specified shard.
*/
const std::vector<ChunkType>& getChunks(const ShardId& shardId) const;
@@ -404,7 +388,7 @@ public:
static MigrateInfosWithReason balance(
const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution,
- const boost::optional<CollectionDataSizeInfoForBalancing>& collDataSizeInfo,
+ const CollectionDataSizeInfoForBalancing& collDataSizeInfo,
stdx::unordered_set<ShardId>* availableShards,
bool forceJumbo);
@@ -412,59 +396,36 @@ public:
* Using the specified distribution information, returns a suggested better location for the
* specified chunk if one is available.
*/
- static boost::optional<MigrateInfo> balanceSingleChunk(const ChunkType& chunk,
- const ShardStatisticsVector& shardStats,
- const DistributionStatus& distribution);
+ static boost::optional<MigrateInfo> balanceSingleChunk(
+ const ChunkType& chunk,
+ const ShardStatisticsVector& shardStats,
+ const DistributionStatus& distribution,
+ const CollectionDataSizeInfoForBalancing& collDataSizeInfo);
private:
/*
* Only considers shards with the specified zone, all shards in case the zone is empty.
- *
- * Returns a tuple <ShardID, number of chunks> referring the shard with less chunks.
- *
- * If balancing based on collection size on shards:
- * - Returns a tuple <ShardID, amount of data in bytes> referring the shard with less data.
+ * Returns a tuple <ShardID, amount of data in bytes> referring to the shard with the least data.
*/
static std::tuple<ShardId, int64_t> _getLeastLoadedReceiverShard(
const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution,
- const boost::optional<CollectionDataSizeInfoForBalancing>& collDataSizeInfo,
+ const CollectionDataSizeInfoForBalancing& collDataSizeInfo,
const std::string& zone,
const stdx::unordered_set<ShardId>& availableShards);
/**
* Only considers shards with the specified zone, all shards in case the zone is empty.
- *
- * If balancing based on number of chunks:
- * - Returns a tuple <ShardID, number of chunks> referring the shard with more chunks.
- *
- * If balancing based on collection size on shards:
- * - Returns a tuple <ShardID, amount of data in bytes> referring the shard with more data.
+ * Returns a tuple <ShardID, amount of data in bytes> referring to the shard with the most data.
*/
static std::tuple<ShardId, int64_t> _getMostOverloadedShard(
const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution,
- const boost::optional<CollectionDataSizeInfoForBalancing>& collDataSizeInfo,
+ const CollectionDataSizeInfoForBalancing& collDataSizeInfo,
const std::string& zone,
const stdx::unordered_set<ShardId>& availableShards);
/**
- * Selects one chunk for the specified zone (if appropriate) to be moved in order to bring the
- * deviation of the shards chunk contents closer to even across all shards in the specified
- * zone. Takes into account and updates the shards, which haven't been used for migrations yet.
- *
- * Returns true if a migration was suggested, false otherwise. This method is intended to be
- * called multiple times until all possible migrations for a zone have been selected.
- */
- static bool _singleZoneBalanceBasedOnChunks(const ShardStatisticsVector& shardStats,
- const DistributionStatus& distribution,
- const std::string& zone,
- size_t totalNumberOfShardsWithZone,
- std::vector<MigrateInfo>* migrations,
- stdx::unordered_set<ShardId>* availableShards,
- ForceJumbo forceJumbo);
-
- /**
* Selects one range for the specified zone (if appropriate) to be moved in order to bring the
* deviation of the collection data size closer to even across all shards in the specified
* zone. Takes into account and updates the shards, which haven't been used for migrations yet.
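With the optional parameters gone, every caller of BalancerPolicy::balance now has to supply a CollectionDataSizeInfoForBalancing. A sketch of such a call, modeled on the test helper added further down in this commit (shardStats and distribution are assumed to be already populated, as in balancer_policy_test.cpp; the values are illustrative):

// Sketch only: build the per-shard size map from existing statistics and hand it to the policy.
std::map<ShardId, int64_t> collSizePerShard;
for (const auto& shard : shardStats) {
    collSizePerShard.try_emplace(shard.shardId, shard.currSizeBytes);
}
CollectionDataSizeInfoForBalancing dataSizeInfo(std::move(collSizePerShard),
                                                ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes);

auto availableShards = getAllShardIds(shardStats);
const auto [migrations, reason] = BalancerPolicy::balance(
    shardStats, distribution, dataSizeInfo, &availableShards, false /* forceJumbo */);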
diff --git a/src/mongo/db/s/balancer/balancer_policy_test.cpp b/src/mongo/db/s/balancer/balancer_policy_test.cpp
index 88c504b151d..2fff8e30803 100644
--- a/src/mongo/db/s/balancer/balancer_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy_test.cpp
@@ -30,6 +30,7 @@
#include "mongo/db/keypattern.h"
#include "mongo/db/s/balancer/balancer_policy.h"
#include "mongo/platform/random.h"
+#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/unittest/unittest.h"
@@ -64,11 +65,10 @@ const NamespaceString kNamespace("TestDB", "TestColl");
* [MinKey, 1), [1, 2), [2, 3) ... [N - 1, MaxKey)
*/
std::pair<ShardStatisticsVector, ShardToChunksMap> generateCluster(
- const vector<std::pair<ShardStatistics, size_t>>& shardsAndNumChunks) {
- int64_t totalNumChunks = 0;
- for (const auto& entry : shardsAndNumChunks) {
- totalNumChunks += std::get<1>(entry);
- }
+ const vector<ShardStatistics>& statsVector) {
+
+ // Distribute one chunk per shard, no matter the owned data size.
+ int64_t totalNumChunks = statsVector.size();
ShardToChunksMap chunkMap;
ShardStatisticsVector shardStats;
@@ -80,28 +80,21 @@ std::pair<ShardStatisticsVector, ShardToChunksMap> generateCluster(
const KeyPattern shardKeyPattern(BSON("x" << 1));
- for (auto it = shardsAndNumChunks.begin(); it != shardsAndNumChunks.end(); it++) {
- ShardStatistics shard = std::move(it->first);
- const size_t numChunks = it->second;
-
+ for (const auto& shard : statsVector) {
// Ensure that an entry is created
chunkMap[shard.shardId];
- for (size_t i = 0; i < numChunks; i++, currentChunk++) {
- ChunkType chunk;
-
- chunk.setCollectionUUID(uuid);
- chunk.setMin(currentChunk == 0 ? shardKeyPattern.globalMin()
- : BSON("x" << currentChunk));
- chunk.setMax(currentChunk == totalNumChunks - 1 ? shardKeyPattern.globalMax()
- : BSON("x" << currentChunk + 1));
- chunk.setShard(shard.shardId);
- chunk.setVersion(chunkVersion);
+ ChunkType chunk;
- chunkVersion.incMajor();
+ chunk.setCollectionUUID(uuid);
+ chunk.setMin(currentChunk == 0 ? shardKeyPattern.globalMin() : BSON("x" << currentChunk));
+ chunk.setMax(currentChunk == totalNumChunks - 1 ? shardKeyPattern.globalMax()
+ : BSON("x" << ++currentChunk));
+ chunk.setShard(shard.shardId);
+ chunk.setVersion(chunkVersion);
- chunkMap[shard.shardId].push_back(std::move(chunk));
- }
+ chunkVersion.incMajor();
+ chunkMap[shard.shardId].push_back(std::move(chunk));
shardStats.push_back(std::move(shard));
}
@@ -118,20 +111,45 @@ stdx::unordered_set<ShardId> getAllShardIds(const ShardStatisticsVector& shardSt
return shards;
}
+CollectionDataSizeInfoForBalancing buildDataSizeInfoForBalancingFromShardStats(
+ const ShardStatisticsVector& shardStats) {
+ std::map<ShardId, int64_t> collSizePerShard;
+ for (const auto& shard : shardStats) {
+ collSizePerShard.try_emplace(shard.shardId, shard.currSizeBytes);
+ }
+
+ return CollectionDataSizeInfoForBalancing(std::move(collSizePerShard),
+ ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes);
+}
+
MigrateInfosWithReason balanceChunks(const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution,
bool shouldAggressivelyBalance,
bool forceJumbo) {
auto availableShards = getAllShardIds(shardStats);
- return BalancerPolicy::balance(
- shardStats, distribution, boost::none /* collDataSizeInfo */, &availableShards, forceJumbo);
+
+ return BalancerPolicy::balance(shardStats,
+ distribution,
+ buildDataSizeInfoForBalancingFromShardStats(shardStats),
+ &availableShards,
+ forceJumbo);
}
TEST(BalancerPolicy, Basic) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId2, 3, false, emptyZoneSet, emptyShardVersion), 3}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion),
+ ShardStatistics(kShardId2,
+ 3 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -139,30 +157,19 @@ TEST(BalancerPolicy, Basic) {
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
ASSERT_EQ(MigrationReason::chunksImbalance, reason);
}
-TEST(BalancerPolicy, SmallClusterShouldBePerfectlyBalanced) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 1, false, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, 2, false, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion), 0}});
-
- const auto [migrations, reason] =
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
- ASSERT_EQ(1U, migrations.size());
- ASSERT_EQ(kShardId1, migrations[0].from);
- ASSERT_EQ(kShardId2, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::chunksImbalance, reason);
-}
+TEST(BalancerPolicy, SmallSingleChunkShouldNotMove) {
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion)});
-TEST(BalancerPolicy, SingleChunkShouldNotMove) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 1, false, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 0}});
{
auto [migrations, reason] = balanceChunks(
cluster.first, DistributionStatus(kNamespace, cluster.second), true, false);
@@ -178,11 +185,33 @@ TEST(BalancerPolicy, SingleChunkShouldNotMove) {
}
TEST(BalancerPolicy, BalanceThresholdObeyed) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 2, false, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, 2, false, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId2, 1, false, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId3, 1, false, emptyZoneSet, emptyShardVersion), 1}});
+ auto cluster = generateCluster({
+ ShardStatistics(kShardId0,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+
+ ShardStatistics(kShardId2,
+ ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId3,
+ ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ });
{
auto [migrations, reason] = balanceChunks(
@@ -199,11 +228,21 @@ TEST(BalancerPolicy, BalanceThresholdObeyed) {
}
TEST(BalancerPolicy, ParallelBalancing) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion),
+ ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion)});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -212,23 +251,41 @@ TEST(BalancerPolicy, ParallelBalancing) {
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId2, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
ASSERT_EQ(MigrationReason::chunksImbalance, reason);
ASSERT_EQ(kShardId1, migrations[1].from);
ASSERT_EQ(kShardId3, migrations[1].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[1].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), *migrations[1].maxKey);
}
-TEST(BalancerPolicy, ParallelBalancingDoesNotPutChunksOnShardsAboveTheOptimal) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 100, false, emptyZoneSet, emptyShardVersion), 100},
- {ShardStatistics(kShardId1, 90, false, emptyZoneSet, emptyShardVersion), 90},
- {ShardStatistics(kShardId2, 90, false, emptyZoneSet, emptyShardVersion), 90},
- {ShardStatistics(kShardId3, 80, false, emptyZoneSet, emptyShardVersion), 80},
- {ShardStatistics(kShardId4, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId5, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+TEST(BalancerPolicy, ParallelBalancingDoesNotScheduleMigrationsOnShardsAboveTheThreshold) {
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 100 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 90 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2,
+ 90 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId3,
+ 89 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId4, 0, false, emptyZoneSet, emptyShardVersion),
+ ShardStatistics(kShardId5, 0, false, emptyZoneSet, emptyShardVersion)});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -237,47 +294,39 @@ TEST(BalancerPolicy, ParallelBalancingDoesNotPutChunksOnShardsAboveTheOptimal) {
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId4, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
ASSERT_EQ(MigrationReason::chunksImbalance, reason);
ASSERT_EQ(kShardId1, migrations[1].from);
ASSERT_EQ(kShardId5, migrations[1].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[1].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), *migrations[1].maxKey);
-}
-
-TEST(BalancerPolicy, ParallelBalancingDoesNotMoveChunksFromShardsBelowOptimal) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 100, false, emptyZoneSet, emptyShardVersion), 100},
- {ShardStatistics(kShardId1, 30, false, emptyZoneSet, emptyShardVersion), 30},
- {ShardStatistics(kShardId2, 5, false, emptyZoneSet, emptyShardVersion), 5},
- {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 0}});
-
- const auto [migrations, reason] =
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
- ASSERT_EQ(1U, migrations.size());
-
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId3, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::chunksImbalance, reason);
}
TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNecessary) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 8, false, emptyZoneSet, emptyShardVersion), 8},
- {ShardStatistics(kShardId1, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 8 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion),
+ ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion)});
+
+ const auto shardStats = cluster.first;
// Here kShardId0 would have been selected as a donor
- auto availableShards = getAllShardIds(cluster.first);
+ auto availableShards = getAllShardIds(shardStats);
availableShards.erase(kShardId0);
const auto [migrations, reason] =
- BalancerPolicy::balance(cluster.first,
+ BalancerPolicy::balance(shardStats,
DistributionStatus(kNamespace, cluster.second),
- boost::none /* collDataSizeInfo */,
+ buildDataSizeInfoForBalancingFromShardStats(shardStats),
&availableShards,
false);
ASSERT_EQ(1U, migrations.size());
@@ -285,43 +334,40 @@ TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNe
ASSERT_EQ(kShardId1, migrations[0].from);
ASSERT_EQ(kShardId2, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), *migrations[0].maxKey);
ASSERT_EQ(MigrationReason::chunksImbalance, reason);
}
-TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNotNecessary) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 12, false, emptyZoneSet, emptyShardVersion), 12},
- {ShardStatistics(kShardId1, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 0}});
-
- // Here kShardId0 would have been selected as a donor
- auto availableShards = getAllShardIds(cluster.first);
- availableShards.erase(kShardId0);
- const auto [migrations, reason] =
- BalancerPolicy::balance(cluster.first,
- DistributionStatus(kNamespace, cluster.second),
- boost::none /* collDataSizeInfo */,
- &availableShards,
- false);
- ASSERT_EQ(0U, migrations.size());
-}
-
TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseDestinationShards) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId3, 1, false, emptyZoneSet, emptyShardVersion), 1}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion),
+ ShardStatistics(kShardId3,
+ 1 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
+
+ const auto shardStats = cluster.first;
// Here kShardId2 would have been selected as a recipient
- auto availableShards = getAllShardIds(cluster.first);
+ auto availableShards = getAllShardIds(shardStats);
availableShards.erase(kShardId2);
const auto [migrations, reason] =
- BalancerPolicy::balance(cluster.first,
+ BalancerPolicy::balance(shardStats,
DistributionStatus(kNamespace, cluster.second),
- boost::none /* collDataSizeInfo */,
+ buildDataSizeInfoForBalancingFromShardStats(shardStats),
&availableShards,
false);
ASSERT_EQ(1U, migrations.size());
@@ -329,86 +375,94 @@ TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseDestinationShards) {
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId3, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
ASSERT_EQ(MigrationReason::chunksImbalance, reason);
}
TEST(BalancerPolicy, JumboChunksNotMoved) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 2, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion)});
cluster.second[kShardId0][0].setJumbo(true);
- cluster.second[kShardId0][1].setJumbo(false); // Only chunk 1 is not jumbo
- cluster.second[kShardId0][2].setJumbo(true);
- cluster.second[kShardId0][3].setJumbo(true);
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
- ASSERT_EQ(1U, migrations.size());
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::chunksImbalance, reason);
+ ASSERT(migrations.empty());
}
TEST(BalancerPolicy, JumboChunksNotMovedParallel) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 2, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId2, 2, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion),
+ ShardStatistics(kShardId2,
+ 4 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion)});
cluster.second[kShardId0][0].setJumbo(true);
- cluster.second[kShardId0][1].setJumbo(false); // Only chunk 1 is not jumbo
- cluster.second[kShardId0][2].setJumbo(true);
- cluster.second[kShardId0][3].setJumbo(true);
cluster.second[kShardId2][0].setJumbo(true);
- cluster.second[kShardId2][1].setJumbo(true);
- cluster.second[kShardId2][2].setJumbo(false); // Only chunk 1 is not jumbo
- cluster.second[kShardId2][3].setJumbo(true);
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
- ASSERT_EQ(2U, migrations.size());
-
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::chunksImbalance, reason);
-
- ASSERT_EQ(kShardId2, migrations[1].from);
- ASSERT_EQ(kShardId3, migrations[1].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][2].getMin(), migrations[1].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][2].getMax(), *migrations[1].maxKey);
+ ASSERT(migrations.empty());
}
-TEST(BalancerPolicy, DrainingSingleChunk) {
- // shard0 is draining and chunks will go to shard1, even though it has a lot more chunks
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 2, true, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 5}});
+TEST(BalancerPolicy, DrainingFromShardWithFewData) {
+ // shard1 is draining and chunks will go to shard0, even though shard0 already has a lot more data
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 20 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false /* draining */,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true /* draining */,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT_EQ(1U, migrations.size());
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
+ ASSERT_EQ(kShardId1, migrations[0].from);
+ ASSERT_EQ(kShardId0, migrations[0].to);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[0].minKey);
ASSERT_EQ(MigrationReason::drain, reason);
}
TEST(BalancerPolicy, DrainingSingleChunkPerShard) {
// shard0 and shard2 are draining and chunks will go to shard1 and shard3 in parallel
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 2, true, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 5},
- {ShardStatistics(kShardId2, 2, true, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 5}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion),
+ ShardStatistics(kShardId2,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion)});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -417,67 +471,82 @@ TEST(BalancerPolicy, DrainingSingleChunkPerShard) {
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
ASSERT_EQ(MigrationReason::drain, reason);
ASSERT_EQ(kShardId2, migrations[1].from);
ASSERT_EQ(kShardId3, migrations[1].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[1].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), *migrations[1].maxKey);
-}
-
-TEST(BalancerPolicy, DrainingWithTwoChunksFirstOneSelected) {
- // shard0 is draining and chunks will go to shard1, even though it has a lot more chunks
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 2, true, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 5}});
-
- const auto [migrations, reason] =
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
- ASSERT_EQ(1U, migrations.size());
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::drain, reason);
}
TEST(BalancerPolicy, DrainingMultipleShardsFirstOneSelected) {
- // shard0 and shard1 are both draining with very little chunks in them and chunks will go to
- // shard2, even though it has a lot more chunks that the other two
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 5, true, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, 5, true, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId2, 5, false, emptyZoneSet, emptyShardVersion), 16}});
+ // shard1 and shard2 are both draining with very little data in them and chunks will go to
+ // shard0, even though it has a lot more data than the other two
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 50 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false /* draining */,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 5 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true /* draining */,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2,
+ 5 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true /* draining */,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT_EQ(1U, migrations.size());
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId2, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
+ ASSERT_EQ(kShardId0, migrations[0].to);
ASSERT_EQ(MigrationReason::drain, reason);
}
-TEST(BalancerPolicy, DrainingMultipleShardsWontAcceptChunks) {
- // shard0 has many chunks, but can't move them to shard1 or shard2 because they are draining
+TEST(BalancerPolicy, DrainingMultipleShardsWontAcceptMigrations) {
+ // shard0 has a lot of data, but can't move it to shard1 or shard2 because they are draining
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 2, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, 0, true, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId2, 0, true, emptyZoneSet, emptyShardVersion), 0}});
+ {ShardStatistics(kShardId0,
+ 20 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false /* draining */,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1, 0, true /* draining */, emptyZoneSet, emptyShardVersion),
+ ShardStatistics(kShardId2, 0, true /* draining */, emptyZoneSet, emptyShardVersion)});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
- ASSERT(migrations.empty());
+ ASSERT_EQ(1U, migrations.size());
+ ASSERT_EQ(kShardId0, migrations[0].to);
}
TEST(BalancerPolicy, DrainingSingleAppropriateShardFoundDueToZone) {
auto cluster =
- generateCluster({{ShardStatistics(kShardId0, 2, false, {"NYC"}, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, 2, false, {"LAX"}, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, 1, true, {"LAX"}, emptyShardVersion), 1}});
+ generateCluster({ShardStatistics(kShardId0,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ {"NYC"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ {"LAX"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2,
+ ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true,
+ {"LAX"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(
@@ -488,15 +557,29 @@ TEST(BalancerPolicy, DrainingSingleAppropriateShardFoundDueToZone) {
ASSERT_EQ(kShardId2, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), *migrations[0].maxKey);
ASSERT_EQ(MigrationReason::drain, reason);
}
TEST(BalancerPolicy, DrainingNoAppropriateShardsFoundDueToZone) {
auto cluster =
- generateCluster({{ShardStatistics(kShardId0, 2, false, {"NYC"}, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, 2, false, {"LAX"}, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, 1, true, {"SEA"}, emptyShardVersion), 1}});
+ generateCluster({ShardStatistics(kShardId0,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ {"NYC"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ {"LAX"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2,
+ ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true,
+ {"SEA"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(
@@ -507,9 +590,19 @@ TEST(BalancerPolicy, DrainingNoAppropriateShardsFoundDueToZone) {
}
TEST(BalancerPolicy, NoBalancingDueToAllNodesDraining) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 2, true, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId2, 1, true, emptyZoneSet, emptyShardVersion), 1}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2,
+ ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -519,9 +612,24 @@ TEST(BalancerPolicy, NoBalancingDueToAllNodesDraining) {
TEST(BalancerPolicy, BalancerRespectsZonesWhenDraining) {
// shard1 drains the proper chunk to shard0, even though it is more loaded than shard2
auto cluster =
- generateCluster({{ShardStatistics(kShardId0, 5, false, {"a"}, emptyShardVersion), 6},
- {ShardStatistics(kShardId1, 5, true, {"a", "b"}, emptyShardVersion), 2},
- {ShardStatistics(kShardId2, 5, false, {"b"}, emptyShardVersion), 2}});
+ generateCluster({ShardStatistics(kShardId0,
+ 5 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ {"a"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 5 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ true,
+ {"a", "b"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId2,
+ 5 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ {"b"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 7), "a")));
@@ -532,160 +640,45 @@ TEST(BalancerPolicy, BalancerRespectsZonesWhenDraining) {
ASSERT_EQ(kShardId1, migrations[0].from);
ASSERT_EQ(kShardId0, migrations[0].to);
ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), *migrations[0].maxKey);
ASSERT_EQ(MigrationReason::drain, reason);
}
-TEST(BalancerPolicy, BalancerRespectsZonePolicyBeforeImbalance) {
- // There is a large imbalance between shard0 and shard1, but the balancer must first fix the
- // chunks, which are on a wrong shard due to zone policy
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 5, false, {"a"}, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, 5, false, {"a"}, emptyShardVersion), 6},
- {ShardStatistics(kShardId2, 5, false, emptyZoneSet, emptyShardVersion), 2}});
-
- DistributionStatus distribution(kNamespace, cluster.second);
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 100), "a")));
-
- const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
- ASSERT_EQ(1U, migrations.size());
- ASSERT_EQ(kShardId2, migrations[0].from);
- ASSERT_EQ(kShardId0, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::zoneViolation, reason);
-}
-
-TEST(BalancerPolicy, BalancerFixesIncorrectZonesWithCrossShardViolationOfZones) {
- // The zone policy dictates that the same shard must donate and also receive chunks. The test
- // validates that the same shard is not used as a donor and recipient as part of the same round.
- auto cluster =
- generateCluster({{ShardStatistics(kShardId0, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId1, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId2, 5, false, {"b"}, emptyShardVersion), 3}});
-
- DistributionStatus distribution(kNamespace, cluster.second);
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 1), "b")));
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 8), kMaxBSONKey, "a")));
-
- const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
- ASSERT_EQ(1U, migrations.size());
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId2, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::zoneViolation, reason);
-}
-
-TEST(BalancerPolicy, BalancerFixesIncorrectZonesInOtherwiseBalancedCluster) {
- // Chunks are balanced across shards, but there are wrong zones, which need to be fixed
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId1, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId2, 5, false, emptyZoneSet, emptyShardVersion), 3}});
-
- DistributionStatus distribution(kNamespace, cluster.second);
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 10), "a")));
-
- const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
- ASSERT_EQ(1U, migrations.size());
- ASSERT_EQ(kShardId2, migrations[0].from);
- ASSERT_EQ(kShardId0, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::zoneViolation, reason);
-}
-
TEST(BalancerPolicy, BalancerZoneAlreadyBalanced) {
// Chunks are balanced across shards for the zone.
- auto cluster =
- generateCluster({{ShardStatistics(kShardId0, 3, false, {"a"}, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, 2, false, {"a"}, emptyShardVersion), 2}});
+ auto cluster = generateCluster({
+ ShardStatistics(kShardId0,
+ 3 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ {"a"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ {"a"},
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ });
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, kMaxBSONKey, "a")));
ASSERT(balanceChunks(cluster.first, distribution, false, false).first.empty());
}
-TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleZones) {
- // shard0 has chunks [MinKey, 1), [1, 2), [2, 3), [3, 4), [4, 5), so two chunks each
- // for zones "b" and "c". So [1, 2) is expected to be moved to shard1 in round 1.
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 5, false, {"a", "b", "c"}, emptyShardVersion), 5},
- {ShardStatistics(kShardId1, 1, false, {"b"}, emptyShardVersion), 1},
- {ShardStatistics(kShardId2, 1, false, {"c"}, emptyShardVersion), 1}});
-
- DistributionStatus distribution(kNamespace, cluster.second);
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 1), "a")));
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 1), BSON("x" << 3), "b")));
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 3), BSON("x" << 5), "c")));
-
- const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
- ASSERT_EQ(1U, migrations.size());
-
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMax(), *migrations[0].maxKey);
-}
-
-TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleZonesSkipZoneWithShardInUse) {
- // shard0 has chunks [MinKey, 1), [1, 2), [2, 3), [3, 4), [4, 5), so two chunks each
- // for zones "b" and "c". So [3, 4) is expected to be moved to shard2 because shard1 is
- // in use.
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 5, false, {"a", "b", "c"}, emptyShardVersion), 5},
- {ShardStatistics(kShardId1, 1, false, {"b"}, emptyShardVersion), 1},
- {ShardStatistics(kShardId2, 1, false, {"c"}, emptyShardVersion), 1}});
-
- DistributionStatus distribution(kNamespace, cluster.second);
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 1), "a")));
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 1), BSON("x" << 3), "b")));
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 3), BSON("x" << 5), "c")));
-
- auto availableShards = getAllShardIds(cluster.first);
- availableShards.erase(kShardId1);
- const auto [migrations, reason] = BalancerPolicy::balance(
- cluster.first, distribution, boost::none /* collDataSizeInfo */, &availableShards, false);
- ASSERT_EQ(1U, migrations.size());
-
- ASSERT_EQ(kShardId0, migrations[0].from);
- ASSERT_EQ(kShardId2, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][3].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][3].getMax(), *migrations[0].maxKey);
-}
-
-TEST(BalancerPolicy, BalancerFixesIncorrectZonesInOtherwiseBalancedClusterParallel) {
- // Chunks are balanced across shards, but there are wrong zones, which need to be fixed
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId1, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId2, 5, false, emptyZoneSet, emptyShardVersion), 3},
- {ShardStatistics(kShardId3, 5, false, emptyZoneSet, emptyShardVersion), 3}});
-
- DistributionStatus distribution(kNamespace, cluster.second);
- ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 20), "a")));
-
- const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false);
- ASSERT_EQ(2U, migrations.size());
-
- ASSERT_EQ(kShardId2, migrations[0].from);
- ASSERT_EQ(kShardId0, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), *migrations[0].maxKey);
- ASSERT_EQ(MigrationReason::zoneViolation, reason);
-
- ASSERT_EQ(kShardId3, migrations[1].from);
- ASSERT_EQ(kShardId1, migrations[1].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId3][0].getMin(), migrations[1].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId3][0].getMax(), *migrations[1].maxKey);
- ASSERT_EQ(MigrationReason::zoneViolation, reason);
-}
-
TEST(BalancerPolicy, BalancerHandlesNoShardsWithZone) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 5, false, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, 5, false, emptyZoneSet, emptyShardVersion), 2}});
+ auto cluster =
+ generateCluster({ShardStatistics(kShardId0,
+ 5 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t()),
+ ShardStatistics(kShardId1,
+ 5 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
+ false,
+ emptyZoneSet,
+ emptyShardVersion,
+ ShardStatistics::use_bytes_t())});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(
diff --git a/src/mongo/s/sharding_feature_flags.idl b/src/mongo/s/sharding_feature_flags.idl
index b2fe760efa9..920742a33c6 100644
--- a/src/mongo/s/sharding_feature_flags.idl
+++ b/src/mongo/s/sharding_feature_flags.idl
@@ -37,11 +37,6 @@ feature_flags:
cpp_varname: feature_flags::gNoMoreAutoSplitter
default: true
version: 6.0 # Starting from v6.0.3
- featureFlagBalanceAccordingToDataSize:
- description: Balancer taking decisions based on the data size if enabled, based on number of chunks if disabled
- cpp_varname: feature_flags::gBalanceAccordingToDataSize
- default: true
- version: 6.0 # Starting from v6.0.3
featureFlagGlobalIndexesShardingCatalog:
description: "Feature flag for enabling sharding catalog features for global indexes"
cpp_varname: feature_flags::gGlobalIndexesShardingCatalog