author     Paolo Polato <paolo.polato@mongodb.com>           2022-07-07 07:22:30 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-07-07 08:26:01 +0000
commit     95c2f2329de7f9632bc9bc13e05aaeb53cc8e021 (patch)
tree       836b09a035f0d7aea4008386997f912691ffa4c9
parent     3ac99737a079195d9f2079bc09ab065500647393 (diff)
download   mongo-95c2f2329de7f9632bc9bc13e05aaeb53cc8e021.tar.gz
SERVER-66239 Replace mentions to shard tags with shard zones in Balancer source code
-rw-r--r--  src/mongo/db/s/balancer/balance_stats.cpp                           4
-rw-r--r--  src/mongo/db/s/balancer/balance_stats_test.cpp                      4
-rw-r--r--  src/mongo/db/s/balancer/balancer.cpp                                2
-rw-r--r--  src/mongo/db/s/balancer/balancer.h                                  2
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy.h           2
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp   77
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h      4
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp   47
-rw-r--r--  src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp    5
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy.cpp                       214
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy.h                          59
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy_test.cpp                  222
-rw-r--r--  src/mongo/db/s/balancer/cluster_statistics.cpp                     23
-rw-r--r--  src/mongo/db/s/balancer/cluster_statistics.h                       13
-rw-r--r--  src/mongo/db/s/balancer/cluster_statistics_impl.cpp                 8
-rw-r--r--  src/mongo/db/s/balancer/cluster_statistics_test.cpp                 8
-rw-r--r--  src/mongo/db/s/balancer/migration_test_fixture.cpp                 24
-rw-r--r--  src/mongo/db/s/balancer/migration_test_fixture.h                    6
18 files changed, 328 insertions, 396 deletions
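
Beyond the terminology rename, the patch reshapes how zone metadata reaches the balancer: ZoneInfo::addTagsFromCatalog, which populated a caller-owned ZoneInfo through an out-parameter, becomes ZoneInfo::getZonesForCollection, which returns a StatusWith<ZoneInfo>, and DistributionStatus now takes the ZoneInfo at construction. A condensed sketch of the caller pattern, excerpted from the createCollectionDistributionStatus hunks below (not a complete compilable unit):

// Before: default-construct the distribution, then fill its ZoneInfo in place.
DistributionStatus distribution(nss, std::move(shardToChunksMap));
auto status = ZoneInfo::addTagsFromCatalog(opCtx, nss, keyPattern, distribution.zoneInfo());
if (!status.isOK()) {
    return status;
}

// After: fetch the zones first, then hand them to DistributionStatus on construction.
auto swZoneInfo = ZoneInfo::getZonesForCollection(opCtx, nss, keyPattern);
if (!swZoneInfo.isOK()) {
    return swZoneInfo.getStatus();
}
DistributionStatus distribution(nss, std::move(shardToChunksMap), swZoneInfo.getValue());
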
diff --git a/src/mongo/db/s/balancer/balance_stats.cpp b/src/mongo/db/s/balancer/balance_stats.cpp
index 7c44fc66037..aa644b3f929 100644
--- a/src/mongo/db/s/balancer/balance_stats.cpp
+++ b/src/mongo/db/s/balancer/balance_stats.cpp
@@ -52,8 +52,8 @@ int64_t getMaxChunkImbalanceCount(const ChunkManager& routingInfo,
for (const auto& shard : allShards) {
chunkDistributionPerZone[""][shard.getName()] = 0;
- for (const auto& tag : shard.getTags()) {
- chunkDistributionPerZone[tag][shard.getName()] = 0;
+ for (const auto& zone : shard.getTags()) {
+ chunkDistributionPerZone[zone][shard.getName()] = 0;
}
}
diff --git a/src/mongo/db/s/balancer/balance_stats_test.cpp b/src/mongo/db/s/balancer/balance_stats_test.cpp
index aa7b056ae34..1b1222e9b92 100644
--- a/src/mongo/db/s/balancer/balance_stats_test.cpp
+++ b/src/mongo/db/s/balancer/balance_stats_test.cpp
@@ -44,8 +44,8 @@ public:
return ChunkType(_uuid, ChunkRange(minKey, maxKey), _nextVersion, shard);
}
- ShardType makeShard(const std::string& name, std::vector<std::string> tags = {}) {
- return ShardType(name, name, tags);
+ ShardType makeShard(const std::string& name, std::vector<std::string> zones = {}) {
+ return ShardType(name, name, zones);
}
ChunkManager makeRoutingInfo(const KeyPattern& shardKeyPattern,
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index b6081dd3982..611de7755b9 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -780,7 +780,7 @@ void Balancer::_mainThread() {
"Failed to split chunks",
"error"_attr = status);
} else {
- LOGV2_DEBUG(21861, 1, "Done enforcing tag range boundaries.");
+ LOGV2_DEBUG(21861, 1, "Done enforcing zone range boundaries.");
}
stdx::unordered_set<ShardId> usedShards;
diff --git a/src/mongo/db/s/balancer/balancer.h b/src/mongo/db/s/balancer/balancer.h
index be31a053d0a..4e38e299c23 100644
--- a/src/mongo/db/s/balancer/balancer.h
+++ b/src/mongo/db/s/balancer/balancer.h
@@ -245,7 +245,7 @@ private:
* calculates split points that evenly partition the key space into N ranges (where N is
minNumChunksForSessionsCollection rounded up to the next power of 2), and splits any chunks that
* straddle those split points. If the collection is any other collection, splits any chunks
- * that straddle tag boundaries.
+ * that straddle zone boundaries.
*/
Status _splitChunksIfNeeded(OperationContext* opCtx);
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h b/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h
index 5aed229c202..3eb70bcb00b 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h
@@ -59,7 +59,7 @@ public:
/**
* Potentially blocking method, which gives out a set of chunks, which need to be split because
* they violate the policy for some reason. The reason is decided by the policy and may include
- * chunk is too big or chunk straddles a tag range.
+ * chunk is too big or chunk straddles a zone range.
*/
virtual StatusWith<SplitInfoVector> selectChunksToSplit(OperationContext* opCtx) = 0;
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index b92f93cfc04..208f96a13ef 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -45,7 +45,6 @@
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
-#include "mongo/s/catalog/type_tags.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/get_stats_for_balancing_gen.h"
@@ -90,16 +89,16 @@ StatusWith<DistributionStatus> createCollectionDistributionStatus(
return true;
});
- DistributionStatus distribution(nss, std::move(shardToChunksMap));
-
const auto& keyPattern = chunkMgr.getShardKeyPattern().getKeyPattern();
- // Cache the collection tags
- auto status = ZoneInfo::addTagsFromCatalog(opCtx, nss, keyPattern, distribution.zoneInfo());
- if (!status.isOK()) {
- return status;
+ // Cache the collection zones
+ auto swZoneInfo = ZoneInfo::getZonesForCollection(opCtx, nss, keyPattern);
+ if (!swZoneInfo.isOK()) {
+ return swZoneInfo.getStatus();
}
+ DistributionStatus distribution(nss, std::move(shardToChunksMap), swZoneInfo.getValue());
+
return {std::move(distribution)};
}
@@ -244,36 +243,36 @@ private:
};
/**
- * Populates splitCandidates with chunk and splitPoint pairs for chunks that violate tag
+ * Populates splitCandidates with chunk and splitPoint pairs for chunks that violate zone
* range boundaries.
*/
-void getSplitCandidatesToEnforceTagRanges(const ChunkManager& cm,
- const DistributionStatus& distribution,
- SplitCandidatesBuffer* splitCandidates) {
+void getSplitCandidatesToEnforceZoneRanges(const ChunkManager& cm,
+ const DistributionStatus& distribution,
+ SplitCandidatesBuffer* splitCandidates) {
const auto& globalMax = cm.getShardKeyPattern().getKeyPattern().globalMax();
- // For each tag range, find chunks that need to be split.
- for (const auto& tagRangeEntry : distribution.tagRanges()) {
- const auto& tagRange = tagRangeEntry.second;
+ // For each zone range, find chunks that need to be split.
+ for (const auto& zoneRangeEntry : distribution.zoneRanges()) {
+ const auto& zoneRange = zoneRangeEntry.second;
- const auto chunkAtZoneMin = cm.findIntersectingChunkWithSimpleCollation(tagRange.min);
- invariant(chunkAtZoneMin.getMax().woCompare(tagRange.min) > 0);
+ const auto chunkAtZoneMin = cm.findIntersectingChunkWithSimpleCollation(zoneRange.min);
+ invariant(chunkAtZoneMin.getMax().woCompare(zoneRange.min) > 0);
- if (chunkAtZoneMin.getMin().woCompare(tagRange.min)) {
- splitCandidates->addSplitPoint(chunkAtZoneMin, tagRange.min);
+ if (chunkAtZoneMin.getMin().woCompare(zoneRange.min)) {
+ splitCandidates->addSplitPoint(chunkAtZoneMin, zoneRange.min);
}
// The global max key can never fall in the middle of a chunk.
- if (!tagRange.max.woCompare(globalMax))
+ if (!zoneRange.max.woCompare(globalMax))
continue;
- const auto chunkAtZoneMax = cm.findIntersectingChunkWithSimpleCollation(tagRange.max);
+ const auto chunkAtZoneMax = cm.findIntersectingChunkWithSimpleCollation(zoneRange.max);
// We need to check that both the chunk's minKey does not match the zone's max and also that
// the max is not equal, which would only happen in the case of the zone ending in MaxKey.
- if (chunkAtZoneMax.getMin().woCompare(tagRange.max) &&
- chunkAtZoneMax.getMax().woCompare(tagRange.max)) {
- splitCandidates->addSplitPoint(chunkAtZoneMax, tagRange.max);
+ if (chunkAtZoneMax.getMin().woCompare(zoneRange.max) &&
+ chunkAtZoneMax.getMax().woCompare(zoneRange.max)) {
+ splitCandidates->addSplitPoint(chunkAtZoneMax, zoneRange.max);
}
}
}
@@ -374,8 +373,8 @@ StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToSpli
} else {
LOGV2_WARNING(
21852,
- "Unable to enforce tag range policy for collection {namespace}: {error}",
- "Unable to enforce tag range policy for collection",
+ "Unable to enforce zone range policy for collection {namespace}: {error}",
+ "Unable to enforce zone range policy for collection",
"namespace"_attr = nss.ns(),
"error"_attr = candidatesStatus.getStatus());
}
@@ -592,7 +591,7 @@ Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* opCt
}
return BalancerPolicy::isShardSuitableReceiver(*newShardIterator,
- distribution.getTagForChunk(chunk));
+ distribution.getZoneForChunk(chunk));
}
StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::_getSplitCandidatesForCollection(
@@ -616,15 +615,15 @@ StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::_getSplitCandidate
SplitCandidatesBuffer splitCandidates(nss, cm.getVersion());
if (nss == NamespaceString::kLogicalSessionsNamespace) {
- if (!distribution.tags().empty()) {
+ if (!distribution.zones().empty()) {
LOGV2_WARNING(4562401,
"Ignoring zones for the sessions collection",
- "tags"_attr = distribution.tags());
+ "zones"_attr = distribution.zones());
}
getSplitCandidatesForSessionsCollection(opCtx, cm, &splitCandidates);
} else {
- getSplitCandidatesToEnforceTagRanges(cm, distribution, &splitCandidates);
+ getSplitCandidatesToEnforceZoneRanges(cm, distribution, &splitCandidates);
}
return splitCandidates.done();
@@ -654,15 +653,15 @@ BalancerChunkSelectionPolicyImpl::_getMigrateCandidatesForCollection(
const DistributionStatus& distribution = collInfoStatus.getValue();
- for (const auto& tagRangeEntry : distribution.tagRanges()) {
- const auto& tagRange = tagRangeEntry.second;
+ for (const auto& zoneRangeEntry : distribution.zoneRanges()) {
+ const auto& zoneRange = zoneRangeEntry.second;
- const auto chunkAtZoneMin = cm.findIntersectingChunkWithSimpleCollation(tagRange.min);
+ const auto chunkAtZoneMin = cm.findIntersectingChunkWithSimpleCollation(zoneRange.min);
- if (chunkAtZoneMin.getMin().woCompare(tagRange.min)) {
+ if (chunkAtZoneMin.getMin().woCompare(zoneRange.min)) {
return {ErrorCodes::IllegalOperation,
str::stream()
- << "Tag boundaries " << tagRange.toString()
+ << "Zone boundaries " << zoneRange.toString()
<< " fall in the middle of an existing chunk "
<< ChunkRange(chunkAtZoneMin.getMin(), chunkAtZoneMin.getMax()).toString()
<< ". Balancing for collection " << nss.ns()
@@ -670,18 +669,18 @@ BalancerChunkSelectionPolicyImpl::_getMigrateCandidatesForCollection(
}
// The global max key can never fall in the middle of a chunk
- if (!tagRange.max.woCompare(shardKeyPattern.globalMax()))
+ if (!zoneRange.max.woCompare(shardKeyPattern.globalMax()))
continue;
- const auto chunkAtZoneMax = cm.findIntersectingChunkWithSimpleCollation(tagRange.max);
+ const auto chunkAtZoneMax = cm.findIntersectingChunkWithSimpleCollation(zoneRange.max);
// We need to check that both the chunk's minKey does not match the zone's max and also that
// the max is not equal, which would only happen in the case of the zone ending in MaxKey.
- if (chunkAtZoneMax.getMin().woCompare(tagRange.max) &&
- chunkAtZoneMax.getMax().woCompare(tagRange.max)) {
+ if (chunkAtZoneMax.getMin().woCompare(zoneRange.max) &&
+ chunkAtZoneMax.getMax().woCompare(zoneRange.max)) {
return {ErrorCodes::IllegalOperation,
str::stream()
- << "Tag boundaries " << tagRange.toString()
+ << "Zone boundaries " << zoneRange.toString()
<< " fall in the middle of an existing chunk "
<< ChunkRange(chunkAtZoneMax.getMin(), chunkAtZoneMax.getMax()).toString()
<< ". Balancing for collection " << nss.ns()
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h
index adb3314aa12..1113e3d2fd0 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h
@@ -61,8 +61,8 @@ public:
private:
/**
- * Synchronous method, which iterates the collection's chunks and uses the tags information to
- * figure out whether some of them validate the tag range boundaries and need to be split.
+ * Synchronous method, which iterates the collection's chunks and uses the zone information to
+ * figure out whether some of them violate the zone range boundaries and need to be split.
*/
StatusWith<SplitInfoVector> _getSplitCandidatesForCollection(
OperationContext* opCtx,
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
index 8b50d3d002f..c04d24e5bc2 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
@@ -100,16 +100,17 @@ protected:
}
/**
- * Returns a new BSON object with the tags appended.
+ * Returns a new BSON object with the zones encoded using the legacy field "tags"
+ * (to mimic the expected schema of config.shards).
*/
- BSONObj appendTags(const BSONObj shardBSON, std::vector<std::string> tags) {
+ BSONObj appendZones(const BSONObj shardBSON, std::vector<std::string> zones) {
BSONObjBuilder appendedShardBSON(shardBSON);
- BSONArrayBuilder tagsBuilder;
- for (auto& tag : tags) {
- tagsBuilder.append(tag);
+ BSONArrayBuilder zonesBuilder;
+ for (auto& zone : zones) {
+ zonesBuilder.append(zone);
}
- tagsBuilder.done();
- appendedShardBSON.append("tags", tagsBuilder.arr());
+ zonesBuilder.done();
+ appendedShardBSON.append("tags", zonesBuilder.arr());
return appendedShardBSON.obj();
}
@@ -118,7 +119,7 @@ protected:
std::unique_ptr<BalancerChunkSelectionPolicy> _chunkSelectionPolicy;
};
-TEST_F(BalancerChunkSelectionTest, TagRangesOverlap) {
+TEST_F(BalancerChunkSelectionTest, ZoneRangesOverlap) {
// Set up two shards in the metadata.
ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
NamespaceString::kConfigsvrShardsNamespace,
@@ -140,9 +141,9 @@ TEST_F(BalancerChunkSelectionTest, TagRangesOverlap) {
setUpChunk(collUUID, kKeyPattern.globalMin(), kKeyPattern.globalMax(), kShardId0, version);
auto assertRangeOverlapConflictWhenMoveChunk =
- [this, &chunk](const StringMap<ChunkRange>& tagChunkRanges) {
+ [this, &chunk](const StringMap<ChunkRange>& zoneChunkRanges) {
// Set up two zones whose ranges overlap.
- setUpTags(kNamespace, tagChunkRanges);
+ setUpZones(kNamespace, zoneChunkRanges);
auto future = launchAsync([this, &chunk] {
ThreadClient tc(getServiceContext());
@@ -161,7 +162,7 @@ TEST_F(BalancerChunkSelectionTest, TagRangesOverlap) {
expectGetStatsCommands(2);
future.default_timed_get();
- removeAllTags(kNamespace);
+ removeAllZones(kNamespace);
};
assertRangeOverlapConflictWhenMoveChunk(
@@ -175,17 +176,17 @@ TEST_F(BalancerChunkSelectionTest, TagRangesOverlap) {
{"B", {BSON(kPattern << -15), kKeyPattern.globalMax()}}});
}
-TEST_F(BalancerChunkSelectionTest, TagRangeMaxNotAlignedWithChunkMax) {
+TEST_F(BalancerChunkSelectionTest, ZoneRangeMaxNotAlignedWithChunkMax) {
RAIIServerParameterControllerForTest featureFlagBalanceAccordingToDataSize{
"featureFlagBalanceAccordingToDataSize", false};
// Set up two shards in the metadata.
ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
NamespaceString::kConfigsvrShardsNamespace,
- appendTags(kShard0, {"A"}),
+ appendZones(kShard0, {"A"}),
kMajorityWriteConcern));
ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
NamespaceString::kConfigsvrShardsNamespace,
- appendTags(kShard1, {"A"}),
+ appendZones(kShard1, {"A"}),
kMajorityWriteConcern));
// Set up a database and a sharded collection in the metadata.
@@ -195,7 +196,7 @@ TEST_F(BalancerChunkSelectionTest, TagRangeMaxNotAlignedWithChunkMax) {
setUpCollection(kNamespace, collUUID, version);
// Set up the zone.
- setUpTags(kNamespace, {{"A", {kKeyPattern.globalMin(), BSON(kPattern << -10)}}});
+ setUpZones(kNamespace, {{"A", {kKeyPattern.globalMin(), BSON(kPattern << -10)}}});
auto assertErrorWhenMoveChunk =
[this, &version, &collUUID](const std::vector<ChunkRange>& chunkRanges) {
@@ -237,14 +238,14 @@ TEST_F(BalancerChunkSelectionTest, TagRangeMaxNotAlignedWithChunkMax) {
}
TEST_F(BalancerChunkSelectionTest, ShardedTimeseriesCollectionsCanBeAutoSplitted) {
- // Set up two shards in the metadata, each one with its own tag
+ // Set up two shards in the metadata, each one with its own zone
ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
NamespaceString::kConfigsvrShardsNamespace,
- appendTags(kShard0, {"A"}),
+ appendZones(kShard0, {"A"}),
kMajorityWriteConcern));
ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
NamespaceString::kConfigsvrShardsNamespace,
- appendTags(kShard1, {"B"}),
+ appendZones(kShard1, {"B"}),
kMajorityWriteConcern));
// Set up a database and a sharded collection in the metadata.
@@ -257,11 +258,11 @@ TEST_F(BalancerChunkSelectionTest, ShardedTimeseriesCollectionsCanBeAutoSplitted
setUpCollection(kNamespace, collUUID, version, std::move(tsFields));
// Set up two zones
- setUpTags(kNamespace,
- {
- {"A", {kKeyPattern.globalMin(), BSON(kPattern << 0)}},
- {"B", {BSON(kPattern << 0), kKeyPattern.globalMax()}},
- });
+ setUpZones(kNamespace,
+ {
+ {"A", {kKeyPattern.globalMin(), BSON(kPattern << 0)}},
+ {"B", {BSON(kPattern << 0), kKeyPattern.globalMax()}},
+ });
// Create just one chunk covering the whole space
setUpChunk(collUUID, kKeyPattern.globalMin(), kKeyPattern.globalMax(), kShardId0, version);
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
index 89f45fd7644..1503fa49b9e 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
@@ -88,9 +88,8 @@ uint64_t getCollectionMaxChunkSizeBytes(OperationContext* opCtx, const Collectio
}
ZoneInfo getCollectionZones(OperationContext* opCtx, const CollectionType& coll) {
- ZoneInfo zones;
- uassertStatusOK(
- ZoneInfo::addTagsFromCatalog(opCtx, coll.getNss(), coll.getKeyPattern(), zones));
+ auto zones = uassertStatusOK(
+ ZoneInfo::getZonesForCollection(opCtx, coll.getNss(), coll.getKeyPattern()));
return zones;
}
diff --git a/src/mongo/db/s/balancer/balancer_policy.cpp b/src/mongo/db/s/balancer/balancer_policy.cpp
index 4d5cbf19177..617cbf7a27a 100644
--- a/src/mongo/db/s/balancer/balancer_policy.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy.cpp
@@ -64,8 +64,12 @@ const size_t kDefaultImbalanceThreshold = 1;
} // namespace
-DistributionStatus::DistributionStatus(NamespaceString nss, ShardToChunksMap shardToChunksMap)
- : _nss(std::move(nss)), _shardChunks(std::move(shardToChunksMap)) {}
+DistributionStatus::DistributionStatus(NamespaceString nss,
+ ShardToChunksMap shardToChunksMap,
+ ZoneInfo zoneInfo)
+ : _nss(std::move(nss)),
+ _shardChunks(std::move(shardToChunksMap)),
+ _zoneInfo(std::move(zoneInfo)) {}
size_t DistributionStatus::totalChunks() const {
size_t total = 0;
@@ -77,11 +81,11 @@ size_t DistributionStatus::totalChunks() const {
return total;
}
-size_t DistributionStatus::totalChunksWithTag(const std::string& tag) const {
+size_t DistributionStatus::totalChunksInZone(const std::string& zone) const {
size_t total = 0;
for (const auto& shardChunk : _shardChunks) {
- total += numberOfChunksInShardWithTag(shardChunk.first, tag);
+ total += numberOfChunksInShardWithZone(shardChunk.first, zone);
}
return total;
@@ -92,14 +96,14 @@ size_t DistributionStatus::numberOfChunksInShard(const ShardId& shardId) const {
return shardChunks.size();
}
-size_t DistributionStatus::numberOfChunksInShardWithTag(const ShardId& shardId,
- const string& tag) const {
+size_t DistributionStatus::numberOfChunksInShardWithZone(const ShardId& shardId,
+ const string& zone) const {
const auto& shardChunks = getChunks(shardId);
size_t total = 0;
for (const auto& chunk : shardChunks) {
- if (tag == getTagForChunk(chunk)) {
+ if (zone == getZoneForChunk(chunk)) {
total++;
}
}
@@ -118,7 +122,7 @@ Status DistributionStatus::addRangeToZone(const ZoneRange& range) {
return _zoneInfo.addRangeToZone(range);
}
-string DistributionStatus::getTagForChunk(const ChunkType& chunk) const {
+string DistributionStatus::getZoneForChunk(const ChunkType& chunk) const {
return _zoneInfo.getZoneForChunk(chunk.getRange());
}
@@ -170,7 +174,7 @@ string ZoneInfo::getZoneForChunk(const ChunkRange& chunk) const {
const auto maxIntersect = _zoneRanges.lower_bound(chunk.getMax());
// We should never have a partial overlap with a chunk range. If it happens, treat it as if this
- // chunk doesn't belong to a tag
+ // chunk doesn't belong to a zone
if (minIntersect != maxIntersect) {
return "";
}
@@ -191,81 +195,35 @@ string ZoneInfo::getZoneForChunk(const ChunkRange& chunk) const {
}
-/**
- * read all tags for collection via the catalog client and add to the zoneInfo
- */
-Status ZoneInfo::addTagsFromCatalog(OperationContext* opCtx,
- const NamespaceString& nss,
- const KeyPattern& keyPattern,
- ZoneInfo& chunkMgr) {
- const auto swCollectionTags =
+StatusWith<ZoneInfo> ZoneInfo::getZonesForCollection(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const KeyPattern& keyPattern) {
+ const auto swCollectionZones =
Grid::get(opCtx)->catalogClient()->getTagsForCollection(opCtx, nss);
- if (!swCollectionTags.isOK()) {
- return swCollectionTags.getStatus().withContext(
- str::stream() << "Unable to load tags for collection " << nss);
+ if (!swCollectionZones.isOK()) {
+ return swCollectionZones.getStatus().withContext(
+ str::stream() << "Unable to load zones for collection " << nss);
}
- const auto& collectionTags = swCollectionTags.getValue();
+ const auto& collectionZones = swCollectionZones.getValue();
- for (const auto& tag : collectionTags) {
+ ZoneInfo zoneInfo;
+
+ for (const auto& zone : collectionZones) {
auto status =
- chunkMgr.addRangeToZone(ZoneRange(keyPattern.extendRangeBound(tag.getMinKey(), false),
- keyPattern.extendRangeBound(tag.getMaxKey(), false),
- tag.getTag()));
+ zoneInfo.addRangeToZone(ZoneRange(keyPattern.extendRangeBound(zone.getMinKey(), false),
+ keyPattern.extendRangeBound(zone.getMaxKey(), false),
+ zone.getTag()));
if (!status.isOK()) {
return status;
}
}
- return Status::OK();
-}
-
-void DistributionStatus::report(BSONObjBuilder* builder) const {
- builder->append("ns", _nss.ns());
-
- // Report all shards
- BSONArrayBuilder shardArr(builder->subarrayStart("shards"));
- for (const auto& shardChunk : _shardChunks) {
- BSONObjBuilder shardEntry(shardArr.subobjStart());
- shardEntry.append("name", shardChunk.first.toString());
-
- BSONArrayBuilder chunkArr(shardEntry.subarrayStart("chunks"));
- for (const auto& chunk : shardChunk.second) {
- chunkArr.append(chunk.toConfigBSON());
- }
- chunkArr.doneFast();
-
- shardEntry.doneFast();
- }
- shardArr.doneFast();
-
- // Report all tags
- BSONArrayBuilder tagsArr(builder->subarrayStart("tags"));
- tagsArr.append(_zoneInfo.allZones());
- tagsArr.doneFast();
-
- // Report all tag ranges
- BSONArrayBuilder tagRangesArr(builder->subarrayStart("tagRanges"));
- for (const auto& tagRange : _zoneInfo.zoneRanges()) {
- BSONObjBuilder tagRangeEntry(tagRangesArr.subobjStart());
- tagRangeEntry.append("tag", tagRange.second.zone);
- tagRangeEntry.append("mapKey", tagRange.first);
- tagRangeEntry.append("min", tagRange.second.min);
- tagRangeEntry.append("max", tagRange.second.max);
- tagRangeEntry.doneFast();
- }
- tagRangesArr.doneFast();
-}
-
-string DistributionStatus::toString() const {
- BSONObjBuilder builder;
- report(&builder);
-
- return builder.obj().toString();
+ return zoneInfo;
}
Status BalancerPolicy::isShardSuitableReceiver(const ClusterStatistics::ShardStatistics& stat,
- const string& chunkTag) {
+ const string& chunkZone) {
if (stat.isSizeMaxed()) {
return {ErrorCodes::IllegalOperation,
str::stream() << stat.shardId << " has reached its maximum storage size."};
@@ -276,9 +234,9 @@ Status BalancerPolicy::isShardSuitableReceiver(const ClusterStatistics::ShardSta
str::stream() << stat.shardId << " is currently draining."};
}
- if (!chunkTag.empty() && !stat.shardTags.count(chunkTag)) {
+ if (!chunkZone.empty() && !stat.shardZones.count(chunkZone)) {
return {ErrorCodes::IllegalOperation,
- str::stream() << stat.shardId << " is not in the correct zone " << chunkTag};
+ str::stream() << stat.shardId << " is not in the correct zone " << chunkZone};
}
return Status::OK();
@@ -288,7 +246,7 @@ std::tuple<ShardId, int64_t> BalancerPolicy::_getLeastLoadedReceiverShard(
const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution,
const boost::optional<CollectionDataSizeInfoForBalancing>& collDataSizeInfo,
- const string& tag,
+ const string& zone,
const stdx::unordered_set<ShardId>& excludedShards) {
ShardId best;
int64_t currentMin = numeric_limits<int64_t>::max();
@@ -299,7 +257,7 @@ std::tuple<ShardId, int64_t> BalancerPolicy::_getLeastLoadedReceiverShard(
if (excludedShards.count(stat.shardId))
continue;
- auto status = isShardSuitableReceiver(stat, tag);
+ auto status = isShardSuitableReceiver(stat, zone);
if (!status.isOK()) {
continue;
}
@@ -332,7 +290,7 @@ std::tuple<ShardId, int64_t> BalancerPolicy::_getMostOverloadedShard(
const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution,
const boost::optional<CollectionDataSizeInfoForBalancing>& collDataSizeInfo,
- const string& chunkTag,
+ const string& chunkZone,
const stdx::unordered_set<ShardId>& excludedShards) {
ShardId worst;
long long currentMax = numeric_limits<long long>::min();
@@ -357,7 +315,7 @@ std::tuple<ShardId, int64_t> BalancerPolicy::_getMostOverloadedShard(
}
} else {
const unsigned shardChunkCount =
- distribution.numberOfChunksInShardWithTag(stat.shardId, chunkTag);
+ distribution.numberOfChunksInShardWithZone(stat.shardId, chunkZone);
if (shardChunkCount > currentMax) {
worst = stat.shardId;
currentMax = shardChunkCount;
@@ -470,7 +428,7 @@ MigrateInfosWithReason BalancerPolicy::balance(
continue;
// Now we know we need to move chunks off this shard, but only if permitted by the
- // tags policy
+ // zones policy
unsigned numJumboChunks = 0;
// Since we have to move all chunks, lets just do in order
@@ -480,10 +438,10 @@ MigrateInfosWithReason BalancerPolicy::balance(
continue;
}
- const string tag = distribution.getTagForChunk(chunk);
+ const auto zone = distribution.getZoneForChunk(chunk);
const auto [to, _] = _getLeastLoadedReceiverShard(
- shardStats, distribution, collDataSizeInfo, tag, *usedShards);
+ shardStats, distribution, collDataSizeInfo, zone, *usedShards);
if (!to.isValid()) {
if (migrations.empty()) {
LOGV2_WARNING(21889,
@@ -518,7 +476,7 @@ MigrateInfosWithReason BalancerPolicy::balance(
}
// 2) Check for chunks, which are on the wrong shard and must be moved off of it
- if (!distribution.tags().empty()) {
+ if (!distribution.zones().empty()) {
for (const auto& stat : shardStats) {
if (usedShards->count(stat.shardId))
continue;
@@ -526,12 +484,12 @@ MigrateInfosWithReason BalancerPolicy::balance(
const vector<ChunkType>& chunks = distribution.getChunks(stat.shardId);
for (const auto& chunk : chunks) {
- const string tag = distribution.getTagForChunk(chunk);
+ const string zone = distribution.getZoneForChunk(chunk);
- if (tag.empty())
+ if (zone.empty())
continue;
- if (stat.shardTags.count(tag))
+ if (stat.shardZones.count(zone))
continue;
if (chunk.getJumbo()) {
@@ -540,13 +498,13 @@ MigrateInfosWithReason BalancerPolicy::balance(
"Chunk {chunk} violates zone {zone}, but it is jumbo and cannot be moved",
"Chunk violates zone, but it is jumbo and cannot be moved",
"chunk"_attr = redact(chunk.toString()),
- "zone"_attr = redact(tag));
+ "zone"_attr = redact(zone));
continue;
}
const auto [to, _] = _getLeastLoadedReceiverShard(
- shardStats, distribution, collDataSizeInfo, tag, *usedShards);
+ shardStats, distribution, collDataSizeInfo, zone, *usedShards);
if (!to.isValid()) {
if (migrations.empty()) {
LOGV2_WARNING(21892,
@@ -554,7 +512,7 @@ MigrateInfosWithReason BalancerPolicy::balance(
"recipient found",
"Chunk violates zone, but no appropriate recipient found",
"chunk"_attr = redact(chunk.toString()),
- "zone"_attr = redact(tag));
+ "zone"_attr = redact(zone));
}
continue;
}
@@ -575,24 +533,24 @@ MigrateInfosWithReason BalancerPolicy::balance(
}
}
- // 3) for each tag balance
+ // 3) for each zone balance
- vector<string> tagsPlusEmpty(distribution.tags().begin(), distribution.tags().end());
- tagsPlusEmpty.push_back("");
+ vector<string> zonesPlusEmpty(distribution.zones().begin(), distribution.zones().end());
+ zonesPlusEmpty.push_back("");
- for (const auto& tag : tagsPlusEmpty) {
- size_t totalNumberOfShardsWithTag = 0;
+ for (const auto& zone : zonesPlusEmpty) {
+ size_t totalNumberOfShardsWithZone = 0;
for (const auto& stat : shardStats) {
- if (tag.empty() || stat.shardTags.count(tag)) {
- totalNumberOfShardsWithTag++;
+ if (zone.empty() || stat.shardZones.count(zone)) {
+ totalNumberOfShardsWithZone++;
}
}
// Skip zones which have no shards assigned to them. This situation is not harmful, but
// should not be possible so warn the operator to correct it.
- if (totalNumberOfShardsWithTag == 0) {
- if (!tag.empty()) {
+ if (totalNumberOfShardsWithZone == 0) {
+ if (!zone.empty()) {
LOGV2_WARNING(
21893,
"Zone {zone} in collection {namespace} has no assigned shards and chunks "
@@ -601,7 +559,7 @@ MigrateInfosWithReason BalancerPolicy::balance(
"Zone in collection has no assigned shards and chunks which fall into it "
"cannot be balanced. This should be corrected by either assigning shards "
"to the zone or by deleting it.",
- "zone"_attr = redact(tag),
+ "zone"_attr = redact(zone),
"namespace"_attr = distribution.nss());
}
continue;
@@ -612,7 +570,7 @@ MigrateInfosWithReason BalancerPolicy::balance(
return _singleZoneBalanceBasedOnDataSize(shardStats,
distribution,
*collDataSizeInfo,
- tag,
+ zone,
&migrations,
usedShards,
forceJumbo ? ForceJumbo::kForceBalancer
@@ -621,8 +579,8 @@ MigrateInfosWithReason BalancerPolicy::balance(
return _singleZoneBalanceBasedOnChunks(shardStats,
distribution,
- tag,
- totalNumberOfShardsWithTag,
+ zone,
+ totalNumberOfShardsWithZone,
&migrations,
usedShards,
forceJumbo ? ForceJumbo::kForceBalancer
@@ -643,12 +601,12 @@ boost::optional<MigrateInfo> BalancerPolicy::balanceSingleChunk(
const ChunkType& chunk,
const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution) {
- const string tag = distribution.getTagForChunk(chunk);
+ const string zone = distribution.getZoneForChunk(chunk);
const auto [newShardId, _] = _getLeastLoadedReceiverShard(shardStats,
distribution,
boost::none /* collDataSizeInfo */,
- tag,
+ zone,
stdx::unordered_set<ShardId>());
if (!newShardId.isValid() || newShardId == chunk.getShard()) {
return boost::optional<MigrateInfo>();
@@ -659,59 +617,59 @@ boost::optional<MigrateInfo> BalancerPolicy::balanceSingleChunk(
bool BalancerPolicy::_singleZoneBalanceBasedOnChunks(const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution,
- const string& tag,
- size_t totalNumberOfShardsWithTag,
+ const string& zone,
+ size_t totalNumberOfShardsWithZone,
vector<MigrateInfo>* migrations,
stdx::unordered_set<ShardId>* usedShards,
ForceJumbo forceJumbo) {
// Calculate the rounded optimal number of chunks per shard
- const size_t totalNumberOfChunksWithTag =
- (tag.empty() ? distribution.totalChunks() : distribution.totalChunksWithTag(tag));
- const size_t idealNumberOfChunksPerShardForTag =
- (size_t)std::roundf(totalNumberOfChunksWithTag / (float)totalNumberOfShardsWithTag);
+ const size_t totalNumberOfChunksInZone =
+ (zone.empty() ? distribution.totalChunks() : distribution.totalChunksInZone(zone));
+ const size_t idealNumberOfChunksPerShardForZone =
+ (size_t)std::roundf(totalNumberOfChunksInZone / (float)totalNumberOfShardsWithZone);
const auto [from, fromSize] =
- _getMostOverloadedShard(shardStats, distribution, boost::none, tag, *usedShards);
+ _getMostOverloadedShard(shardStats, distribution, boost::none, zone, *usedShards);
if (!from.isValid())
return false;
- const size_t max = distribution.numberOfChunksInShardWithTag(from, tag);
+ const size_t max = distribution.numberOfChunksInShardWithZone(from, zone);
// Do not use a shard if it already has less entries than the optimal per-shard chunk count
- if (max <= idealNumberOfChunksPerShardForTag)
+ if (max <= idealNumberOfChunksPerShardForZone)
return false;
const auto [to, toSize] =
- _getLeastLoadedReceiverShard(shardStats, distribution, boost::none, tag, *usedShards);
+ _getLeastLoadedReceiverShard(shardStats, distribution, boost::none, zone, *usedShards);
if (!to.isValid()) {
if (migrations->empty()) {
- LOGV2(21882, "No available shards to take chunks for zone", "zone"_attr = tag);
+ LOGV2(21882, "No available shards to take chunks for zone", "zone"_attr = zone);
}
return false;
}
- const size_t min = distribution.numberOfChunksInShardWithTag(to, tag);
+ const size_t min = distribution.numberOfChunksInShardWithZone(to, zone);
// Do not use a shard if it already has more entries than the optimal per-shard chunk count
- if (min >= idealNumberOfChunksPerShardForTag)
+ if (min >= idealNumberOfChunksPerShardForZone)
return false;
- const size_t imbalance = max - idealNumberOfChunksPerShardForTag;
+ const size_t imbalance = max - idealNumberOfChunksPerShardForZone;
LOGV2_DEBUG(
21883,
1,
"collection: {namespace}, zone: {zone}, donor: {fromShardId} chunks on "
" {fromShardChunkCount}, receiver: {toShardId} chunks on {toShardChunkCount}, "
- "ideal: {idealNumberOfChunksPerShardForTag}, threshold: {chunkCountImbalanceThreshold}",
+ "ideal: {idealNumberOfChunksPerShardForZone}, threshold: {chunkCountImbalanceThreshold}",
"Balancing single zone",
"namespace"_attr = distribution.nss().ns(),
- "zone"_attr = tag,
+ "zone"_attr = zone,
"fromShardId"_attr = from,
"fromShardChunkCount"_attr = max,
"toShardId"_attr = to,
"toShardChunkCount"_attr = min,
- "idealNumberOfChunksPerShardForTag"_attr = idealNumberOfChunksPerShardForTag,
+ "idealNumberOfChunksPerShardForZone"_attr = idealNumberOfChunksPerShardForZone,
"chunkCountImbalanceThreshold"_attr = kDefaultImbalanceThreshold);
// Check whether it is necessary to balance within this zone
@@ -723,7 +681,7 @@ bool BalancerPolicy::_singleZoneBalanceBasedOnChunks(const ShardStatisticsVector
unsigned numJumboChunks = 0;
for (const auto& chunk : chunks) {
- if (distribution.getTagForChunk(chunk) != tag)
+ if (distribution.getZoneForChunk(chunk) != zone)
continue;
if (chunk.getJumbo()) {
@@ -745,7 +703,7 @@ bool BalancerPolicy::_singleZoneBalanceBasedOnChunks(const ShardStatisticsVector
"Shard has only jumbo chunks for and cannot be balanced",
"shardId"_attr = from,
"namespace"_attr = distribution.nss().ns(),
- "zone"_attr = tag,
+ "zone"_attr = zone,
"numJumboChunks"_attr = numJumboChunks);
}
@@ -756,20 +714,20 @@ bool BalancerPolicy::_singleZoneBalanceBasedOnDataSize(
const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution,
const CollectionDataSizeInfoForBalancing& collDataSizeInfo,
- const string& tag,
+ const string& zone,
vector<MigrateInfo>* migrations,
stdx::unordered_set<ShardId>* usedShards,
ForceJumbo forceJumbo) {
const auto [from, fromSize] =
- _getMostOverloadedShard(shardStats, distribution, collDataSizeInfo, tag, *usedShards);
+ _getMostOverloadedShard(shardStats, distribution, collDataSizeInfo, zone, *usedShards);
if (!from.isValid())
return false;
const auto [to, toSize] =
- _getLeastLoadedReceiverShard(shardStats, distribution, collDataSizeInfo, tag, *usedShards);
+ _getLeastLoadedReceiverShard(shardStats, distribution, collDataSizeInfo, zone, *usedShards);
if (!to.isValid()) {
if (migrations->empty()) {
- LOGV2(6581600, "No available shards to take chunks for zone", "zone"_attr = tag);
+ LOGV2(6581600, "No available shards to take chunks for zone", "zone"_attr = zone);
}
return false;
}
@@ -782,7 +740,7 @@ bool BalancerPolicy::_singleZoneBalanceBasedOnDataSize(
1,
"Balancing single zone",
"namespace"_attr = distribution.nss().ns(),
- "zone"_attr = tag,
+ "zone"_attr = zone,
"fromShardId"_attr = from,
"fromShardDataSize"_attr = fromSize,
"toShardId"_attr = to,
@@ -799,7 +757,7 @@ bool BalancerPolicy::_singleZoneBalanceBasedOnDataSize(
unsigned numJumboChunks = 0;
for (const auto& chunk : chunks) {
- if (distribution.getTagForChunk(chunk) != tag)
+ if (distribution.getZoneForChunk(chunk) != zone)
continue;
if (chunk.getJumbo()) {
@@ -826,7 +784,7 @@ bool BalancerPolicy::_singleZoneBalanceBasedOnDataSize(
"Shard has only jumbo chunks for this collection and cannot be balanced",
"namespace"_attr = distribution.nss().ns(),
"shardId"_attr = from,
- "zone"_attr = tag,
+ "zone"_attr = zone,
"numJumboChunks"_attr = numJumboChunks);
}
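
A small worked example of the chunk-count heuristic in _singleZoneBalanceBasedOnChunks above; the counts are made up, and the final comparison against kDefaultImbalanceThreshold (1 in this file) is elided from the hunk, so only the steps visible in this diff are reproduced:

#include <cmath>
#include <cstddef>
#include <cstdio>

int main() {
    // 7 chunks fall in the zone, spread over 3 shards assigned to it.
    const std::size_t totalChunksInZone = 7;
    const std::size_t shardsWithZone = 3;
    const std::size_t ideal =
        (std::size_t)std::roundf(totalChunksInZone / (float)shardsWithZone);  // 2

    const std::size_t donorChunks = 4;     // most overloaded shard in the zone
    const std::size_t receiverChunks = 1;  // least loaded suitable receiver

    // A donor at or below the ideal, or a receiver at or above it, disqualifies the
    // pair; otherwise the imbalance (donor minus ideal) is weighed against the
    // threshold before a migration is proposed.
    const bool candidatePair = donorChunks > ideal && receiverChunks < ideal;
    std::printf("ideal=%zu candidatePair=%d imbalance=%zu\n",
                ideal, (int)candidatePair, donorChunks - ideal);
    return 0;
}
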
diff --git a/src/mongo/db/s/balancer/balancer_policy.h b/src/mongo/db/s/balancer/balancer_policy.h
index fe7d893949e..5759a319ac9 100644
--- a/src/mongo/db/s/balancer/balancer_policy.h
+++ b/src/mongo/db/s/balancer/balancer_policy.h
@@ -258,12 +258,11 @@ public:
}
/**
- * read all tags for collection via the catalog client and add to the zoneInfo
+ * Retrieves the collection zones from the catalog client
*/
- static Status addTagsFromCatalog(OperationContext* opCtx,
- const NamespaceString& nss,
- const KeyPattern& keyPattern,
- ZoneInfo& zoneInfo);
+ static StatusWith<ZoneInfo> getZonesForCollection(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const KeyPattern& keyPattern);
private:
// Map of zone max key to the zone description
@@ -285,7 +284,9 @@ class DistributionStatus final {
DistributionStatus& operator=(const DistributionStatus&) = delete;
public:
- DistributionStatus(NamespaceString nss, ShardToChunksMap shardToChunksMap);
+ DistributionStatus(NamespaceString nss,
+ ShardToChunksMap shardToChunksMap,
+ ZoneInfo zoneInfo = {});
DistributionStatus(DistributionStatus&&) = default;
~DistributionStatus() {}
@@ -311,7 +312,7 @@ public:
* Returns the total number of chunks across all shards, which fall into the specified zone's
* range.
*/
- size_t totalChunksWithTag(const std::string& tag) const;
+ size_t totalChunksInZone(const std::string& zone) const;
/**
* Returns number of chunks in the specified shard.
@@ -319,9 +320,9 @@ public:
size_t numberOfChunksInShard(const ShardId& shardId) const;
/**
- * Returns number of chunks in the specified shard, which have the given tag.
+ * Returns number of chunks in the specified shard, which also belong to the given zone.
*/
- size_t numberOfChunksInShardWithTag(const ShardId& shardId, const std::string& tag) const;
+ size_t numberOfChunksInShardWithZone(const ShardId& shardId, const std::string& zone) const;
/**
* Returns all chunks for the specified shard.
@@ -329,16 +330,16 @@ public:
const std::vector<ChunkType>& getChunks(const ShardId& shardId) const;
/**
- * Returns all tag ranges defined for the collection.
+ * Returns all zone ranges defined for the collection.
*/
- const BSONObjIndexedMap<ZoneRange>& tagRanges() const {
+ const BSONObjIndexedMap<ZoneRange>& zoneRanges() const {
return _zoneInfo.zoneRanges();
}
/**
- * Returns all tags defined for the collection.
+ * Returns all zones defined for the collection.
*/
- const std::set<std::string>& tags() const {
+ const std::set<std::string>& zones() const {
return _zoneInfo.allZones();
}
@@ -350,16 +351,10 @@ public:
}
/**
- * Using the set of tags defined for the collection, returns what tag corresponds to the
- * specified chunk. If the chunk doesn't fall into any tag returns the empty string.
+ * Using the set of zones defined for the collection, returns what zone corresponds to the
+ * specified chunk. If the chunk doesn't fall into any zone, returns the empty string.
*/
- std::string getTagForChunk(const ChunkType& chunk) const;
-
- /**
- * Returns a BSON/string representation of this distribution status.
- */
- void report(BSONObjBuilder* builder) const;
- std::string toString() const;
+ std::string getZoneForChunk(const ChunkType& chunk) const;
private:
// Namespace for which this distribution applies
@@ -376,11 +371,11 @@ class BalancerPolicy {
public:
/**
* Determines whether a shard with the specified utilization statistics would be able to accept
- * a chunk with the specified tag. According to the policy a shard cannot accept chunks if its
- * size is maxed out and if the chunk's tag conflicts with the tag of the shard.
+ * a chunk with the specified zone. According to the policy, a shard cannot accept chunks if its
+ * size is maxed out or if the chunk's zone conflicts with the zones of the shard.
*/
static Status isShardSuitableReceiver(const ClusterStatistics::ShardStatistics& stat,
- const std::string& chunkTag);
+ const std::string& chunkZone);
/**
* Returns a suggested set of chunks or ranges to move within a collection's shards, given the
@@ -414,7 +409,7 @@ public:
private:
/*
- * Only considers shards with the specified tag, all shards in case the tag is empty.
+ * Only considers shards with the specified zone, or all shards if the zone is empty.
*
* Returns a tuple <ShardID, number of chunks> referring the shard with less chunks.
*
@@ -425,11 +420,11 @@ private:
const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution,
const boost::optional<CollectionDataSizeInfoForBalancing>& collDataSizeInfo,
- const std::string& tag,
+ const std::string& zone,
const stdx::unordered_set<ShardId>& excludedShards);
/**
- * Only considers shards with the specified tag, all shards in case the tag is empty.
+ * Only considers shards with the specified zone, or all shards if the zone is empty.
*
* If balancing based on number of chunks:
* - Returns a tuple <ShardID, number of chunks> referring the shard with more chunks.
@@ -441,7 +436,7 @@ private:
const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution,
const boost::optional<CollectionDataSizeInfoForBalancing>& collDataSizeInfo,
- const std::string& chunkTag,
+ const std::string& zone,
const stdx::unordered_set<ShardId>& excludedShards);
/**
@@ -454,8 +449,8 @@ private:
*/
static bool _singleZoneBalanceBasedOnChunks(const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution,
- const std::string& tag,
- size_t totalNumberOfShardsWithTag,
+ const std::string& zone,
+ size_t totalNumberOfShardsWithZone,
std::vector<MigrateInfo>* migrations,
stdx::unordered_set<ShardId>* usedShards,
ForceJumbo forceJumbo);
@@ -472,7 +467,7 @@ private:
const ShardStatisticsVector& shardStats,
const DistributionStatus& distribution,
const CollectionDataSizeInfoForBalancing& collDataSizeInfo,
- const std::string& tag,
+ const std::string& zone,
std::vector<MigrateInfo>* migrations,
stdx::unordered_set<ShardId>* usedShards,
ForceJumbo forceJumbo);
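
The renamed isShardSuitableReceiver above takes a chunk's zone instead of its tag but keeps the same three checks, visible in the balancer_policy.cpp hunks earlier in this patch. A standalone sketch with illustrative types (not MongoDB's real ShardStatistics):

#include <iostream>
#include <set>
#include <string>

struct ShardStats {
    std::string shardId;
    bool sizeMaxed;
    bool isDraining;
    std::set<std::string> shardZones;
};

// Mirrors the order of checks in BalancerPolicy::isShardSuitableReceiver: maxed-out
// storage, draining state, then zone membership for zoned chunks.
std::string checkReceiver(const ShardStats& stat, const std::string& chunkZone) {
    if (stat.sizeMaxed)
        return stat.shardId + " has reached its maximum storage size.";
    if (stat.isDraining)
        return stat.shardId + " is currently draining.";
    if (!chunkZone.empty() && !stat.shardZones.count(chunkZone))
        return stat.shardId + " is not in the correct zone " + chunkZone;
    return "OK";
}

int main() {
    ShardStats shard0{"shard0000", false, false, {"NYC"}};
    std::cout << checkReceiver(shard0, "NYC") << "\n";  // OK
    std::cout << checkReceiver(shard0, "LAX") << "\n";  // not in the correct zone LAX
    std::cout << checkReceiver(shard0, "") << "\n";     // OK: zone-less chunks may go anywhere
    return 0;
}
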
diff --git a/src/mongo/db/s/balancer/balancer_policy_test.cpp b/src/mongo/db/s/balancer/balancer_policy_test.cpp
index be3532fee56..06ba27f029a 100644
--- a/src/mongo/db/s/balancer/balancer_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy_test.cpp
@@ -46,7 +46,7 @@ using std::vector;
using ShardStatistics = ClusterStatistics::ShardStatistics;
-const auto emptyTagSet = std::set<std::string>();
+const auto emptyZoneSet = std::set<std::string>();
const std::string emptyShardVersion = "";
const auto kShardId0 = ShardId("shard0");
const auto kShardId1 = ShardId("shard1");
@@ -121,9 +121,9 @@ MigrateInfosWithReason balanceChunks(const ShardStatisticsVector& shardStats,
TEST(BalancerPolicy, Basic) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 4, false, emptyTagSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId2, kNoMaxSize, 3, false, emptyTagSet, emptyShardVersion), 3}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0},
+ {ShardStatistics(kShardId2, kNoMaxSize, 3, false, emptyZoneSet, emptyShardVersion), 3}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -137,9 +137,9 @@ TEST(BalancerPolicy, Basic) {
TEST(BalancerPolicy, SmallClusterShouldBePerfectlyBalanced) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 1, false, emptyTagSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, kNoMaxSize, 2, false, emptyTagSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId2, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 1, false, emptyZoneSet, emptyShardVersion), 1},
+ {ShardStatistics(kShardId1, kNoMaxSize, 2, false, emptyZoneSet, emptyShardVersion), 2},
+ {ShardStatistics(kShardId2, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -153,8 +153,8 @@ TEST(BalancerPolicy, SmallClusterShouldBePerfectlyBalanced) {
TEST(BalancerPolicy, SingleChunkShouldNotMove) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 1, false, emptyTagSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 1, false, emptyZoneSet, emptyShardVersion), 1},
+ {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
{
auto [migrations, reason] = balanceChunks(
cluster.first, DistributionStatus(kNamespace, cluster.second), true, false);
@@ -171,10 +171,10 @@ TEST(BalancerPolicy, SingleChunkShouldNotMove) {
TEST(BalancerPolicy, BalanceThresholdObeyed) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, false, emptyTagSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, kNoMaxSize, 2, false, emptyTagSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId2, kNoMaxSize, 1, false, emptyTagSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId3, kNoMaxSize, 1, false, emptyTagSet, emptyShardVersion), 1}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 2, false, emptyZoneSet, emptyShardVersion), 2},
+ {ShardStatistics(kShardId1, kNoMaxSize, 2, false, emptyZoneSet, emptyShardVersion), 2},
+ {ShardStatistics(kShardId2, kNoMaxSize, 1, false, emptyZoneSet, emptyShardVersion), 1},
+ {ShardStatistics(kShardId3, kNoMaxSize, 1, false, emptyZoneSet, emptyShardVersion), 1}});
{
auto [migrations, reason] = balanceChunks(
@@ -192,10 +192,10 @@ TEST(BalancerPolicy, BalanceThresholdObeyed) {
TEST(BalancerPolicy, ParallelBalancing) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 4, false, emptyTagSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, kNoMaxSize, 4, false, emptyTagSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId1, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId2, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0},
+ {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -215,12 +215,12 @@ TEST(BalancerPolicy, ParallelBalancing) {
TEST(BalancerPolicy, ParallelBalancingDoesNotPutChunksOnShardsAboveTheOptimal) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 100, false, emptyTagSet, emptyShardVersion), 100},
- {ShardStatistics(kShardId1, kNoMaxSize, 90, false, emptyTagSet, emptyShardVersion), 90},
- {ShardStatistics(kShardId2, kNoMaxSize, 90, false, emptyTagSet, emptyShardVersion), 90},
- {ShardStatistics(kShardId3, kNoMaxSize, 80, false, emptyTagSet, emptyShardVersion), 80},
- {ShardStatistics(kShardId4, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId5, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 100, false, emptyZoneSet, emptyShardVersion), 100},
+ {ShardStatistics(kShardId1, kNoMaxSize, 90, false, emptyZoneSet, emptyShardVersion), 90},
+ {ShardStatistics(kShardId2, kNoMaxSize, 90, false, emptyZoneSet, emptyShardVersion), 90},
+ {ShardStatistics(kShardId3, kNoMaxSize, 80, false, emptyZoneSet, emptyShardVersion), 80},
+ {ShardStatistics(kShardId4, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0},
+ {ShardStatistics(kShardId5, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -240,10 +240,10 @@ TEST(BalancerPolicy, ParallelBalancingDoesNotPutChunksOnShardsAboveTheOptimal) {
TEST(BalancerPolicy, ParallelBalancingDoesNotMoveChunksFromShardsBelowOptimal) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 100, false, emptyTagSet, emptyShardVersion), 100},
- {ShardStatistics(kShardId1, kNoMaxSize, 30, false, emptyTagSet, emptyShardVersion), 30},
- {ShardStatistics(kShardId2, kNoMaxSize, 5, false, emptyTagSet, emptyShardVersion), 5},
- {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 100, false, emptyZoneSet, emptyShardVersion), 100},
+ {ShardStatistics(kShardId1, kNoMaxSize, 30, false, emptyZoneSet, emptyShardVersion), 30},
+ {ShardStatistics(kShardId2, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 5},
+ {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -258,10 +258,10 @@ TEST(BalancerPolicy, ParallelBalancingDoesNotMoveChunksFromShardsBelowOptimal) {
TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNecessary) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 8, false, emptyTagSet, emptyShardVersion), 8},
- {ShardStatistics(kShardId1, kNoMaxSize, 4, false, emptyTagSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 8, false, emptyZoneSet, emptyShardVersion), 8},
+ {ShardStatistics(kShardId1, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId2, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0},
+ {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
// Here kShardId0 would have been selected as a donor
stdx::unordered_set<ShardId> usedShards{kShardId0};
@@ -282,10 +282,10 @@ TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNe
TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNotNecessary) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 12, false, emptyTagSet, emptyShardVersion), 12},
- {ShardStatistics(kShardId1, kNoMaxSize, 4, false, emptyTagSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 12, false, emptyZoneSet, emptyShardVersion), 12},
+ {ShardStatistics(kShardId1, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId2, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0},
+ {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
// Here kShardId0 would have been selected as a donor
stdx::unordered_set<ShardId> usedShards{kShardId0};
@@ -300,10 +300,10 @@ TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNo
TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseDestinationShards) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 4, false, emptyTagSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, kNoMaxSize, 4, false, emptyTagSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId3, kNoMaxSize, 1, false, emptyTagSet, emptyShardVersion), 1}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId1, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId2, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0},
+ {ShardStatistics(kShardId3, kNoMaxSize, 1, false, emptyZoneSet, emptyShardVersion), 1}});
// Here kShardId2 would have been selected as a recipient
stdx::unordered_set<ShardId> usedShards{kShardId2};
@@ -324,8 +324,8 @@ TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseDestinationShards) {
TEST(BalancerPolicy, JumboChunksNotMoved) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, false, emptyTagSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 2, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
cluster.second[kShardId0][0].setJumbo(true);
cluster.second[kShardId0][1].setJumbo(false); // Only chunk 1 is not jumbo
@@ -344,10 +344,10 @@ TEST(BalancerPolicy, JumboChunksNotMoved) {
TEST(BalancerPolicy, JumboChunksNotMovedParallel) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, false, emptyTagSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId2, kNoMaxSize, 2, false, emptyTagSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 2, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0},
+ {ShardStatistics(kShardId2, kNoMaxSize, 2, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
cluster.second[kShardId0][0].setJumbo(true);
cluster.second[kShardId0][1].setJumbo(false); // Only chunk 1 is not jumbo
@@ -378,8 +378,8 @@ TEST(BalancerPolicy, JumboChunksNotMovedParallel) {
TEST(BalancerPolicy, DrainingSingleChunk) {
// shard0 is draining and chunks will go to shard1, even though it has a lot more chunks
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, true, emptyTagSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 5}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 2, true, emptyZoneSet, emptyShardVersion), 1},
+ {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 5}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -394,10 +394,10 @@ TEST(BalancerPolicy, DrainingSingleChunk) {
TEST(BalancerPolicy, DrainingSingleChunkPerShard) {
// shard0 and shard2 are draining and chunks will go to shard1 and shard3 in parallel
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, true, emptyTagSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 5},
- {ShardStatistics(kShardId2, kNoMaxSize, 2, true, emptyTagSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 5}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 2, true, emptyZoneSet, emptyShardVersion), 1},
+ {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 5},
+ {ShardStatistics(kShardId2, kNoMaxSize, 2, true, emptyZoneSet, emptyShardVersion), 1},
+ {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 5}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -418,8 +418,8 @@ TEST(BalancerPolicy, DrainingSingleChunkPerShard) {
TEST(BalancerPolicy, DrainingWithTwoChunksFirstOneSelected) {
// shard0 is draining and its chunks will go to shard1, even though shard1 has a lot more chunks
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, true, emptyTagSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyTagSet, emptyShardVersion), 5}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 2, true, emptyZoneSet, emptyShardVersion), 2},
+ {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 5}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -435,9 +435,9 @@ TEST(BalancerPolicy, DrainingMultipleShardsFirstOneSelected) {
// shard0 and shard1 are both draining with very few chunks in them, and chunks will go to
// shard2, even though it has a lot more chunks than the other two
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 5, true, emptyTagSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, kNoMaxSize, 5, true, emptyTagSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId2, kNoMaxSize, 5, false, emptyTagSet, emptyShardVersion), 16}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 5, true, emptyZoneSet, emptyShardVersion), 1},
+ {ShardStatistics(kShardId1, kNoMaxSize, 5, true, emptyZoneSet, emptyShardVersion), 2},
+ {ShardStatistics(kShardId2, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 16}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -453,16 +453,16 @@ TEST(BalancerPolicy, DrainingMultipleShardsFirstOneSelected) {
TEST(BalancerPolicy, DrainingMultipleShardsWontAcceptChunks) {
// shard0 has many chunks, but can't move them to shard1 or shard2 because they are draining
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, false, emptyTagSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, kNoMaxSize, 0, true, emptyTagSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId2, kNoMaxSize, 0, true, emptyTagSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 2, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId1, kNoMaxSize, 0, true, emptyZoneSet, emptyShardVersion), 0},
+ {ShardStatistics(kShardId2, kNoMaxSize, 0, true, emptyZoneSet, emptyShardVersion), 0}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT(migrations.empty());
}
-TEST(BalancerPolicy, DrainingSingleAppropriateShardFoundDueToTag) {
+TEST(BalancerPolicy, DrainingSingleAppropriateShardFoundDueToZone) {
auto cluster = generateCluster(
{{ShardStatistics(kShardId0, kNoMaxSize, 2, false, {"NYC"}, emptyShardVersion), 4},
{ShardStatistics(kShardId1, kNoMaxSize, 2, false, {"LAX"}, emptyShardVersion), 4},
@@ -481,7 +481,7 @@ TEST(BalancerPolicy, DrainingSingleAppropriateShardFoundDueToTag) {
ASSERT_EQ(MigrationReason::drain, reason);
}
-TEST(BalancerPolicy, DrainingNoAppropriateShardsFoundDueToTag) {
+TEST(BalancerPolicy, DrainingNoAppropriateShardsFoundDueToZone) {
auto cluster = generateCluster(
{{ShardStatistics(kShardId0, kNoMaxSize, 2, false, {"NYC"}, emptyShardVersion), 4},
{ShardStatistics(kShardId1, kNoMaxSize, 2, false, {"LAX"}, emptyShardVersion), 4},
@@ -498,9 +498,9 @@ TEST(BalancerPolicy, DrainingNoAppropriateShardsFoundDueToTag) {
TEST(BalancerPolicy, NoBalancingDueToAllNodesEitherDrainingOrMaxedOut) {
// shard0 and shard2 are draining, shard1 is maxed out
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, true, emptyTagSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, 1, 1, false, emptyTagSet, emptyShardVersion), 6},
- {ShardStatistics(kShardId2, kNoMaxSize, 1, true, emptyTagSet, emptyShardVersion), 1}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 2, true, emptyZoneSet, emptyShardVersion), 1},
+ {ShardStatistics(kShardId1, 1, 1, false, emptyZoneSet, emptyShardVersion), 6},
+ {ShardStatistics(kShardId2, kNoMaxSize, 1, true, emptyZoneSet, emptyShardVersion), 1}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -512,9 +512,9 @@ TEST(BalancerPolicy, BalancerRespectsMaxShardSizeOnlyBalanceToNonMaxed) {
// shards have maxSize = 0 = unset. Even though the overloaded shard has the fewest
// chunks, we shouldn't move chunks to that shard.
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 1, 3, false, emptyTagSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, kNoMaxSize, 5, false, emptyTagSet, emptyShardVersion), 5},
- {ShardStatistics(kShardId2, kNoMaxSize, 10, false, emptyTagSet, emptyShardVersion), 10}});
+ {{ShardStatistics(kShardId0, 1, 3, false, emptyZoneSet, emptyShardVersion), 2},
+ {ShardStatistics(kShardId1, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 5},
+ {ShardStatistics(kShardId2, kNoMaxSize, 10, false, emptyZoneSet, emptyShardVersion), 10}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -530,16 +530,16 @@ TEST(BalancerPolicy, BalancerRespectsMaxShardSizeWhenAllBalanced) {
// shards have maxSize = 0 = unset. We check that being over the maxSize is NOT equivalent to
// draining; we don't want to empty shards for no other reason than that they are over this limit.
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 1, 4, false, emptyTagSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, kNoMaxSize, 4, false, emptyTagSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, kNoMaxSize, 4, false, emptyTagSet, emptyShardVersion), 4}});
+ {{ShardStatistics(kShardId0, 1, 4, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId1, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId2, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
ASSERT(migrations.empty());
}
-TEST(BalancerPolicy, BalancerRespectsTagsWhenDraining) {
+TEST(BalancerPolicy, BalancerRespectsZonesWhenDraining) {
// shard1 drains the proper chunk to shard0, even though shard0 is more loaded than shard2
auto cluster = generateCluster(
{{ShardStatistics(kShardId0, kNoMaxSize, 5, false, {"a"}, emptyShardVersion), 6},
@@ -559,13 +559,13 @@ TEST(BalancerPolicy, BalancerRespectsTagsWhenDraining) {
ASSERT_EQ(MigrationReason::drain, reason);
}
-TEST(BalancerPolicy, BalancerRespectsTagPolicyBeforeImbalance) {
+TEST(BalancerPolicy, BalancerRespectsZonePolicyBeforeImbalance) {
// There is a large imbalance between shard0 and shard1, but the balancer must first fix the
- // chunks, which are on a wrong shard due to tag policy
+ // chunks, which are on the wrong shard due to the zone policy
auto cluster = generateCluster(
{{ShardStatistics(kShardId0, kNoMaxSize, 5, false, {"a"}, emptyShardVersion), 2},
{ShardStatistics(kShardId1, kNoMaxSize, 5, false, {"a"}, emptyShardVersion), 6},
- {ShardStatistics(kShardId2, kNoMaxSize, 5, false, emptyTagSet, emptyShardVersion), 2}});
+ {ShardStatistics(kShardId2, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 2}});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 100), "a")));
@@ -579,7 +579,7 @@ TEST(BalancerPolicy, BalancerRespectsTagPolicyBeforeImbalance) {
ASSERT_EQ(MigrationReason::zoneViolation, reason);
}
-TEST(BalancerPolicy, BalancerFixesIncorrectTagsWithCrossShardViolationOfTags) {
+TEST(BalancerPolicy, BalancerFixesIncorrectZonesWithCrossShardViolationOfZones) {
// The zone policy dictates that the same shard must donate and also receive chunks. The test
// validates that the same shard is not used as a donor and recipient as part of the same round.
auto cluster = generateCluster(
@@ -600,12 +600,12 @@ TEST(BalancerPolicy, BalancerFixesIncorrectTagsWithCrossShardViolationOfTags) {
ASSERT_EQ(MigrationReason::zoneViolation, reason);
}
-TEST(BalancerPolicy, BalancerFixesIncorrectTagsInOtherwiseBalancedCluster) {
- // Chunks are balanced across shards, but there are wrong tags, which need to be fixed
+TEST(BalancerPolicy, BalancerFixesIncorrectZonesInOtherwiseBalancedCluster) {
+ // Chunks are balanced across shards, but some chunks are in the wrong zones and need to be fixed
auto cluster = generateCluster(
{{ShardStatistics(kShardId0, kNoMaxSize, 5, false, {"a"}, emptyShardVersion), 3},
{ShardStatistics(kShardId1, kNoMaxSize, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId2, kNoMaxSize, 5, false, emptyTagSet, emptyShardVersion), 3}});
+ {ShardStatistics(kShardId2, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 3}});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 10), "a")));
@@ -619,8 +619,8 @@ TEST(BalancerPolicy, BalancerFixesIncorrectTagsInOtherwiseBalancedCluster) {
ASSERT_EQ(MigrationReason::zoneViolation, reason);
}
-TEST(BalancerPolicy, BalancerTagAlreadyBalanced) {
- // Chunks are balanced across shards for the tag.
+TEST(BalancerPolicy, BalancerZoneAlreadyBalanced) {
+ // Chunks are balanced across shards for the zone.
auto cluster = generateCluster(
{{ShardStatistics(kShardId0, kNoMaxSize, 3, false, {"a"}, emptyShardVersion), 2},
{ShardStatistics(kShardId1, kNoMaxSize, 2, false, {"a"}, emptyShardVersion), 2}});
@@ -630,9 +630,9 @@ TEST(BalancerPolicy, BalancerTagAlreadyBalanced) {
ASSERT(balanceChunks(cluster.first, distribution, false, false).first.empty());
}
-TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleTags) {
+TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleZones) {
// shard0 has chunks [MinKey, 1), [1, 2), [2, 3), [3, 4), [4, 5), so two chunks each
- // for tag "b" and "c". So [1, 2) is expected to be moved to shard1 in round 1.
+ // for zones "b" and "c". So [1, 2) is expected to be moved to shard1 in round 1.
auto cluster = generateCluster(
{{ShardStatistics(kShardId0, kNoMaxSize, 5, false, {"a", "b", "c"}, emptyShardVersion), 5},
{ShardStatistics(kShardId1, kNoMaxSize, 1, false, {"b"}, emptyShardVersion), 1},
@@ -652,9 +652,9 @@ TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleTags) {
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMax(), *migrations[0].maxKey);
}
-TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleTagsSkipTagWithShardInUse) {
+TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleZonesSkipZoneWithShardInUse) {
// shard0 has chunks [MinKey, 1), [1, 2), [2, 3), [3, 4), [4, 5), so two chunks each
- // for tag "b" and "c". So [3, 4) is expected to be moved to shard2 because shard1 is
+ // for zones "b" and "c". So [3, 4) is expected to be moved to shard2 because shard1 is
// in use.
auto cluster = generateCluster(
{{ShardStatistics(kShardId0, kNoMaxSize, 5, false, {"a", "b", "c"}, emptyShardVersion), 5},
@@ -677,13 +677,13 @@ TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleTagsSkipTagWithShardInU
ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][3].getMax(), *migrations[0].maxKey);
}
-TEST(BalancerPolicy, BalancerFixesIncorrectTagsInOtherwiseBalancedClusterParallel) {
- // Chunks are balanced across shards, but there are wrong tags, which need to be fixed
+TEST(BalancerPolicy, BalancerFixesIncorrectZonesInOtherwiseBalancedClusterParallel) {
+ // Chunks are balanced across shards, but some chunks are in the wrong zones and need to be fixed
auto cluster = generateCluster(
{{ShardStatistics(kShardId0, kNoMaxSize, 5, false, {"a"}, emptyShardVersion), 3},
{ShardStatistics(kShardId1, kNoMaxSize, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId2, kNoMaxSize, 5, false, emptyTagSet, emptyShardVersion), 3},
- {ShardStatistics(kShardId3, kNoMaxSize, 5, false, emptyTagSet, emptyShardVersion), 3}});
+ {ShardStatistics(kShardId2, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 3},
+ {ShardStatistics(kShardId3, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 3}});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 20), "a")));
@@ -704,10 +704,10 @@ TEST(BalancerPolicy, BalancerFixesIncorrectTagsInOtherwiseBalancedClusterParalle
ASSERT_EQ(MigrationReason::zoneViolation, reason);
}
-TEST(BalancerPolicy, BalancerHandlesNoShardsWithTag) {
+TEST(BalancerPolicy, BalancerHandlesNoShardsWithZone) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 5, false, emptyTagSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, kNoMaxSize, 5, false, emptyTagSet, emptyShardVersion), 2}});
+ {{ShardStatistics(kShardId0, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 2},
+ {ShardStatistics(kShardId1, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 2}});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(
@@ -716,10 +716,10 @@ TEST(BalancerPolicy, BalancerHandlesNoShardsWithTag) {
ASSERT(balanceChunks(cluster.first, distribution, false, false).first.empty());
}
-TEST(DistributionStatus, AddTagRangeOverlap) {
+TEST(DistributionStatus, AddZoneRangeOverlap) {
DistributionStatus d(kNamespace, ShardToChunksMap{});
- // Note that there is gap between 10 and 20 for which there is no tag
+ // Note that there is a gap between 10 and 20 for which there is no zone
ASSERT_OK(d.addRangeToZone(ZoneRange(BSON("x" << 1), BSON("x" << 10), "a")));
ASSERT_OK(d.addRangeToZone(ZoneRange(BSON("x" << 20), BSON("x" << 30), "b")));
@@ -739,7 +739,7 @@ TEST(DistributionStatus, AddTagRangeOverlap) {
d.addRangeToZone(ZoneRange(BSON("x" << 25), kMaxBSONKey, "d")));
}
-TEST(DistributionStatus, ChunkTagsSelectorWithRegularKeys) {
+TEST(DistributionStatus, ChunkZonesSelectorWithRegularKeys) {
DistributionStatus d(kNamespace, ShardToChunksMap{});
ASSERT_OK(d.addRangeToZone(ZoneRange(BSON("x" << 1), BSON("x" << 10), "a")));
@@ -750,67 +750,67 @@ TEST(DistributionStatus, ChunkTagsSelectorWithRegularKeys) {
ChunkType chunk;
chunk.setMin(kMinBSONKey);
chunk.setMax(BSON("x" << 1));
- ASSERT_EQUALS("", d.getTagForChunk(chunk));
+ ASSERT_EQUALS("", d.getZoneForChunk(chunk));
}
{
ChunkType chunk;
chunk.setMin(BSON("x" << 0));
chunk.setMax(BSON("x" << 1));
- ASSERT_EQUALS("", d.getTagForChunk(chunk));
+ ASSERT_EQUALS("", d.getZoneForChunk(chunk));
}
{
ChunkType chunk;
chunk.setMin(BSON("x" << 1));
chunk.setMax(BSON("x" << 5));
- ASSERT_EQUALS("a", d.getTagForChunk(chunk));
+ ASSERT_EQUALS("a", d.getZoneForChunk(chunk));
}
{
ChunkType chunk;
chunk.setMin(BSON("x" << 10));
chunk.setMax(BSON("x" << 20));
- ASSERT_EQUALS("b", d.getTagForChunk(chunk));
+ ASSERT_EQUALS("b", d.getZoneForChunk(chunk));
}
{
ChunkType chunk;
chunk.setMin(BSON("x" << 15));
chunk.setMax(BSON("x" << 20));
- ASSERT_EQUALS("b", d.getTagForChunk(chunk));
+ ASSERT_EQUALS("b", d.getZoneForChunk(chunk));
}
{
ChunkType chunk;
chunk.setMin(BSON("x" << 25));
chunk.setMax(BSON("x" << 30));
- ASSERT_EQUALS("c", d.getTagForChunk(chunk));
+ ASSERT_EQUALS("c", d.getZoneForChunk(chunk));
}
{
ChunkType chunk;
chunk.setMin(BSON("x" << 35));
chunk.setMax(BSON("x" << 40));
- ASSERT_EQUALS("", d.getTagForChunk(chunk));
+ ASSERT_EQUALS("", d.getZoneForChunk(chunk));
}
{
ChunkType chunk;
chunk.setMin(BSON("x" << 30));
chunk.setMax(kMaxBSONKey);
- ASSERT_EQUALS("", d.getTagForChunk(chunk));
+ ASSERT_EQUALS("", d.getZoneForChunk(chunk));
}
{
ChunkType chunk;
chunk.setMin(BSON("x" << 40));
chunk.setMax(kMaxBSONKey);
- ASSERT_EQUALS("", d.getTagForChunk(chunk));
+ ASSERT_EQUALS("", d.getZoneForChunk(chunk));
}
}
-TEST(DistributionStatus, ChunkTagsSelectorWithMinMaxKeys) {
+TEST(DistributionStatus, ChunkZonesSelectorWithMinMaxKeys) {
DistributionStatus d(kNamespace, ShardToChunksMap{});
ASSERT_OK(d.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << -100), "a")));
@@ -821,49 +821,49 @@ TEST(DistributionStatus, ChunkTagsSelectorWithMinMaxKeys) {
ChunkType chunk;
chunk.setMin(kMinBSONKey);
chunk.setMax(BSON("x" << -100));
- ASSERT_EQUALS("a", d.getTagForChunk(chunk));
+ ASSERT_EQUALS("a", d.getZoneForChunk(chunk));
}
{
ChunkType chunk;
chunk.setMin(BSON("x" << -100));
chunk.setMax(BSON("x" << -11));
- ASSERT_EQUALS("", d.getTagForChunk(chunk));
+ ASSERT_EQUALS("", d.getZoneForChunk(chunk));
}
{
ChunkType chunk;
chunk.setMin(BSON("x" << -10));
chunk.setMax(BSON("x" << 0));
- ASSERT_EQUALS("b", d.getTagForChunk(chunk));
+ ASSERT_EQUALS("b", d.getZoneForChunk(chunk));
}
{
ChunkType chunk;
chunk.setMin(BSON("x" << 0));
chunk.setMax(BSON("x" << 10));
- ASSERT_EQUALS("b", d.getTagForChunk(chunk));
+ ASSERT_EQUALS("b", d.getZoneForChunk(chunk));
}
{
ChunkType chunk;
chunk.setMin(BSON("x" << 10));
chunk.setMax(BSON("x" << 20));
- ASSERT_EQUALS("", d.getTagForChunk(chunk));
+ ASSERT_EQUALS("", d.getZoneForChunk(chunk));
}
{
ChunkType chunk;
chunk.setMin(BSON("x" << 10));
chunk.setMax(BSON("x" << 100));
- ASSERT_EQUALS("", d.getTagForChunk(chunk));
+ ASSERT_EQUALS("", d.getZoneForChunk(chunk));
}
{
ChunkType chunk;
chunk.setMin(BSON("x" << 200));
chunk.setMax(kMaxBSONKey);
- ASSERT_EQUALS("c", d.getTagForChunk(chunk));
+ ASSERT_EQUALS("c", d.getZoneForChunk(chunk));
}
}
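The two selector tests above pin down the lookup rule behind the renamed getZoneForChunk: a chunk is attributed to a zone only when its entire [min, max) range lies inside that zone's range; a chunk that straddles a zone boundary, or that falls in a gap, maps to the empty string (no zone). Below is a minimal standalone sketch of that containment rule, with plain integer bounds standing in for BSON shard keys; the helper zoneForRange, the Range struct, and the map layout are illustrative assumptions, not the DistributionStatus implementation.

    #include <map>
    #include <string>
    #include <utility>

    // Illustrative sketch of the containment rule the selector tests assert; not the
    // DistributionStatus implementation. Integer bounds stand in for BSON shard keys.
    struct Range {
        int min;
        int max;  // exclusive upper bound, as for chunk ranges
    };

    // zonesByMin maps each zone's min bound to {max bound, zone name}; zone ranges do not overlap.
    std::string zoneForRange(const std::map<int, std::pair<int, std::string>>& zonesByMin,
                             const Range& chunk) {
        auto it = zonesByMin.upper_bound(chunk.min);
        if (it == zonesByMin.begin())
            return "";  // no zone starts at or before the chunk's min bound
        --it;           // the zone with the greatest min <= chunk.min
        // The chunk is attributed to the zone only if it is fully contained in it.
        return chunk.max <= it->second.first ? it->second.second : "";
    }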
diff --git a/src/mongo/db/s/balancer/cluster_statistics.cpp b/src/mongo/db/s/balancer/cluster_statistics.cpp
index b9fffb0af51..505a0beaaa8 100644
--- a/src/mongo/db/s/balancer/cluster_statistics.cpp
+++ b/src/mongo/db/s/balancer/cluster_statistics.cpp
@@ -53,27 +53,27 @@ ClusterStatistics::ShardStatistics::ShardStatistics(ShardId inShardId,
uint64_t inMaxSizeBytes,
uint64_t inCurrSizeBytes,
bool inIsDraining,
- std::set<std::string> inShardTags,
+ std::set<std::string> inShardZones,
std::string inMongoVersion,
use_bytes_t t)
: shardId(std::move(inShardId)),
maxSizeBytes(inMaxSizeBytes),
currSizeBytes(inCurrSizeBytes),
isDraining(inIsDraining),
- shardTags(std::move(inShardTags)),
+ shardZones(std::move(inShardZones)),
mongoVersion(std::move(inMongoVersion)) {}
ClusterStatistics::ShardStatistics::ShardStatistics(ShardId inShardId,
uint64_t inMaxSizeMB,
uint64_t inCurrSizeMB,
bool inIsDraining,
- std::set<std::string> inShardTags,
+ std::set<std::string> inShardZones,
std::string inMongoVersion)
: ShardStatistics(inShardId,
convertMBToBytes(inMaxSizeMB),
convertMBToBytes(inCurrSizeMB),
inIsDraining,
- std::move(inShardTags),
+ std::move(inShardZones),
std::move(inMongoVersion),
use_bytes_t{}) {}
@@ -85,19 +85,4 @@ bool ClusterStatistics::ShardStatistics::isSizeMaxed() const {
return currSizeBytes >= maxSizeBytes;
}
-BSONObj ClusterStatistics::ShardStatistics::toBSON() const {
- BSONObjBuilder builder;
- builder.append("id", shardId.toString());
- builder.append("maxSizeMB", static_cast<long long>(maxSizeBytes / 1024 / 1024));
- builder.append("currSizeMB", static_cast<long long>(currSizeBytes / 1024 / 1024));
- builder.append("draining", isDraining);
- if (!shardTags.empty()) {
- BSONArrayBuilder arrayBuilder(builder.subarrayStart("tags"));
- arrayBuilder.append(shardTags);
- }
-
- builder.append("version", mongoVersion);
- return builder.obj();
-}
-
} // namespace mongo
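Both ShardStatistics constructors are visible in the hunk above: the MB-based overload converts its size arguments and delegates to the byte-based one, which is selected by the trailing use_bytes_t tag. The following usage sketch shows the two equivalent spellings; it assumes namespace mongo, an included cluster_statistics.h, and that use_bytes_t is nested in ShardStatistics as the parameter list suggests, and the shard id, sizes, and version string are placeholders.

    #include <set>
    #include <string>

    // Usage sketch based on the two constructors above (namespace mongo assumed).
    const std::set<std::string> emptyZoneSet;

    // MB-based overload: 100 MB limit, 80 MB currently used, not draining, no zones.
    ClusterStatistics::ShardStatistics fromMB(
        ShardId("shard0"), 100, 80, false, emptyZoneSet, "6.0.0");

    // Equivalent byte-based overload, selected by the trailing use_bytes_t tag.
    ClusterStatistics::ShardStatistics fromBytes(
        ShardId("shard0"),
        100 * 1024 * 1024,
        80 * 1024 * 1024,
        false,
        emptyZoneSet,
        "6.0.0",
        ClusterStatistics::ShardStatistics::use_bytes_t{});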
diff --git a/src/mongo/db/s/balancer/cluster_statistics.h b/src/mongo/db/s/balancer/cluster_statistics.h
index 868c59d774a..19bb44cc2b0 100644
--- a/src/mongo/db/s/balancer/cluster_statistics.h
+++ b/src/mongo/db/s/balancer/cluster_statistics.h
@@ -65,7 +65,7 @@ public:
uint64_t maxSizeBytes,
uint64_t currSizeBytes,
bool isDraining,
- std::set<std::string> shardTags,
+ std::set<std::string> shardZones,
std::string mongoVersion,
use_bytes_t t);
@@ -73,7 +73,7 @@ public:
uint64_t maxSizeMB,
uint64_t currSizeMB,
bool isDraining,
- std::set<std::string> shardTags,
+ std::set<std::string> shardZones,
std::string mongoVersion);
/**
@@ -82,11 +82,6 @@ public:
*/
bool isSizeMaxed() const;
- /**
- * Returns BSON representation of this shard's statistics, for reporting purposes.
- */
- BSONObj toBSON() const;
-
// The id of the shard for which this statistic applies
ShardId shardId;
@@ -99,8 +94,8 @@ public:
// Whether the shard is in draining mode
bool isDraining{false};
- // Set of tags for the shard
- std::set<std::string> shardTags;
+ // Set of zones for the shard
+ std::set<std::string> shardZones;
// Version of mongod, which runs on this shard's primary
std::string mongoVersion;
diff --git a/src/mongo/db/s/balancer/cluster_statistics_impl.cpp b/src/mongo/db/s/balancer/cluster_statistics_impl.cpp
index 79554bb36dd..83b022294b4 100644
--- a/src/mongo/db/s/balancer/cluster_statistics_impl.cpp
+++ b/src/mongo/db/s/balancer/cluster_statistics_impl.cpp
@@ -162,17 +162,17 @@ StatusWith<std::vector<ShardStatistics>> ClusterStatisticsImpl::_getStats(
"error"_attr = mongoDVersionStatus.getStatus());
}
- std::set<std::string> shardTags;
+ std::set<std::string> shardZones;
- for (const auto& shardTag : shard.getTags()) {
- shardTags.insert(shardTag);
+ for (const auto& shardZone : shard.getTags()) {
+ shardZones.insert(shardZone);
}
stats.emplace_back(shard.getName(),
shard.getMaxSizeMB() * 1024 * 1024,
shardSizeStatus.getValue(),
shard.getDraining(),
- std::move(shardTags),
+ std::move(shardZones),
std::move(mongoDVersion),
ShardStatistics::use_bytes_t{});
}
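Only the in-memory names change in this hunk: the zone set is still populated from the catalog accessor shard.getTags(), as shown above. For reference, the copy loop could also be written as a direct range construction; this is a sketch that assumes getTags() returns an iterable container of std::string (e.g. std::vector<std::string>) and reuses the shard variable from the surrounding loop.

    // Equivalent to the insert loop above (sketch; assumes shard.getTags() returns an
    // iterable container of std::string, e.g. std::vector<std::string>).
    const auto& shardTagNames = shard.getTags();
    std::set<std::string> shardZones(shardTagNames.begin(), shardTagNames.end());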
diff --git a/src/mongo/db/s/balancer/cluster_statistics_test.cpp b/src/mongo/db/s/balancer/cluster_statistics_test.cpp
index 25bd0bb804d..3dd5357581b 100644
--- a/src/mongo/db/s/balancer/cluster_statistics_test.cpp
+++ b/src/mongo/db/s/balancer/cluster_statistics_test.cpp
@@ -37,14 +37,14 @@ namespace {
using ShardStatistics = ClusterStatistics::ShardStatistics;
-const auto emptyTagSet = std::set<std::string>();
+const auto emptyZoneSet = std::set<std::string>();
TEST(ShardStatistics, SizeMaxedTest) {
ASSERT(
- !ShardStatistics(ShardId("TestShardId"), 0, 0, false, emptyTagSet, "3.2.0").isSizeMaxed());
- ASSERT(!ShardStatistics(ShardId("TestShardId"), 100LL, 80LL, false, emptyTagSet, "3.2.0")
+ !ShardStatistics(ShardId("TestShardId"), 0, 0, false, emptyZoneSet, "3.2.0").isSizeMaxed());
+ ASSERT(!ShardStatistics(ShardId("TestShardId"), 100LL, 80LL, false, emptyZoneSet, "3.2.0")
.isSizeMaxed());
- ASSERT(ShardStatistics(ShardId("TestShardId"), 100LL, 110LL, false, emptyTagSet, "3.2.0")
+ ASSERT(ShardStatistics(ShardId("TestShardId"), 100LL, 110LL, false, emptyZoneSet, "3.2.0")
.isSizeMaxed());
}
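The assertions above encode the size-maxed rule: a maxSize of 0 means the limit is unset and the shard is never considered maxed; otherwise the shard is maxed once its current size reaches the limit. Here is a standalone sketch of that rule; isSizeMaxedRule is a hypothetical free function, not the production isSizeMaxed(), whose body is only partially visible in the hunk further up.

    #include <cstdint>

    // Standalone sketch of the rule the assertions above encode; not the production
    // isSizeMaxed(), whose body is only partially visible in this diff.
    bool isSizeMaxedRule(std::uint64_t currSizeBytes, std::uint64_t maxSizeBytes) {
        if (maxSizeBytes == 0)
            return false;  // a zero limit means "no maximum size configured"
        return currSizeBytes >= maxSizeBytes;
    }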
diff --git a/src/mongo/db/s/balancer/migration_test_fixture.cpp b/src/mongo/db/s/balancer/migration_test_fixture.cpp
index fdc8b740979..d3bac026844 100644
--- a/src/mongo/db/s/balancer/migration_test_fixture.cpp
+++ b/src/mongo/db/s/balancer/migration_test_fixture.cpp
@@ -84,24 +84,24 @@ ChunkType MigrationTestFixture::setUpChunk(const UUID& collUUID,
return chunk;
}
-void MigrationTestFixture::setUpTags(const NamespaceString& collName,
- const StringMap<ChunkRange>& tagChunkRanges) {
- for (auto const& tagChunkRange : tagChunkRanges) {
- BSONObjBuilder tagDocBuilder;
- tagDocBuilder.append(
+void MigrationTestFixture::setUpZones(const NamespaceString& collName,
+ const StringMap<ChunkRange>& zoneChunkRanges) {
+ for (auto const& zoneChunkRange : zoneChunkRanges) {
+ BSONObjBuilder zoneDocBuilder;
+ zoneDocBuilder.append(
"_id",
- BSON(TagsType::ns(collName.ns()) << TagsType::min(tagChunkRange.second.getMin())));
- tagDocBuilder.append(TagsType::ns(), collName.ns());
- tagDocBuilder.append(TagsType::min(), tagChunkRange.second.getMin());
- tagDocBuilder.append(TagsType::max(), tagChunkRange.second.getMax());
- tagDocBuilder.append(TagsType::tag(), tagChunkRange.first);
+ BSON(TagsType::ns(collName.ns()) << TagsType::min(zoneChunkRange.second.getMin())));
+ zoneDocBuilder.append(TagsType::ns(), collName.ns());
+ zoneDocBuilder.append(TagsType::min(), zoneChunkRange.second.getMin());
+ zoneDocBuilder.append(TagsType::max(), zoneChunkRange.second.getMax());
+ zoneDocBuilder.append(TagsType::tag(), zoneChunkRange.first);
ASSERT_OK(catalogClient()->insertConfigDocument(
- operationContext(), TagsType::ConfigNS, tagDocBuilder.obj(), kMajorityWriteConcern));
+ operationContext(), TagsType::ConfigNS, zoneDocBuilder.obj(), kMajorityWriteConcern));
}
}
-void MigrationTestFixture::removeAllTags(const NamespaceString& collName) {
+void MigrationTestFixture::removeAllZones(const NamespaceString& collName) {
const auto query = BSON("ns" << collName.ns());
ASSERT_OK(catalogClient()->removeConfigDocuments(
operationContext(), TagsType::ConfigNS, query, kMajorityWriteConcern));
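Based on the builder calls in setUpZones above, each zone ends up as one document in config.tags with roughly the shape sketched below; the namespace, bounds, and zone name ("test.coll", the x-bounds, "NYC") are placeholders, and the zone name is still persisted under the legacy "tag" key.

    // Illustrative shape of one document setUpZones inserts into config.tags
    // (placeholder namespace, bounds, and zone name).
    const BSONObj zoneDoc = BSON("_id" << BSON("ns" << "test.coll" << "min" << BSON("x" << 0))
                                       << "ns" << "test.coll"
                                       << "min" << BSON("x" << 0)
                                       << "max" << BSON("x" << 10)
                                       << "tag" << "NYC");  // zone name kept under the legacy "tag" key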
diff --git a/src/mongo/db/s/balancer/migration_test_fixture.h b/src/mongo/db/s/balancer/migration_test_fixture.h
index 6ea36503330..dddf24cf2d5 100644
--- a/src/mongo/db/s/balancer/migration_test_fixture.h
+++ b/src/mongo/db/s/balancer/migration_test_fixture.h
@@ -88,15 +88,15 @@ protected:
const ChunkVersion& version);
/**
- * Inserts a document into the config.tags collection so that the tag defined by the
+ * Inserts a document into the config.tags collection so that the zone defined by the
* parameters exists.
*/
- void setUpTags(const NamespaceString& collName, const StringMap<ChunkRange>& tagChunkRanges);
+ void setUpZones(const NamespaceString& collName, const StringMap<ChunkRange>& zoneChunkRanges);
/**
* Removes all documents in config.tags for the collection.
*/
- void removeAllTags(const NamespaceString& collName);
+ void removeAllZones(const NamespaceString& collName);
/**
* Removes all documents in config.chunks for the collection.