author     Paolo Polato <paolo.polato@mongodb.com>             2022-05-04 10:21:44 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2022-05-16 16:58:35 +0000
commit     4ad560df7858bd7a9ed8a6ee3852f4c113204c8a (patch)
tree       477f0300d943d52db8dd5321e0c38815840d3372
parent     fa5d029fd8a6f06e42be69a1ba7842ca35e39333 (diff)
SERVER-65956 Skip split phase of collection defragmentation based on feature flag
(cherry picked from commit 7e7e34fb672286c403ccfc999ac1a66625a31acb)
-rw-r--r--  jstests/sharding/libs/defragmentation_util.js                      103
-rw-r--r--  src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp     9
2 files changed, 70 insertions(+), 42 deletions(-)
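
The patch gates two things on the NoMoreAutoSplitter feature flag: the balancer's phase transition, which now skips the split phase entirely when the flag is enabled, and the test utility's post-defragmentation checks, since the oversized-chunk and mergeable-sibling tolerances only make sense while the autosplitter still runs. A minimal sketch of the flag probe used on the test side, assuming an open mongos connection named mongos:

load("jstests/libs/feature_flag_util.js");

// With NoMoreAutoSplitter enabled there is no autosplitter: defragmentation
// ends after the merge phase, so any still-mergeable sibling chunks are a bug.
// With it disabled, the split phase runs and size-based tolerances apply.
const withAutoSplitActive =
    !FeatureFlagUtil.isEnabled(mongos.getDB("admin"), "NoMoreAutoSplitter");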
diff --git a/jstests/sharding/libs/defragmentation_util.js b/jstests/sharding/libs/defragmentation_util.js
index 44f8edafe3c..8fcc8a35d99 100644
--- a/jstests/sharding/libs/defragmentation_util.js
+++ b/jstests/sharding/libs/defragmentation_util.js
@@ -1,4 +1,5 @@
var defragmentationUtil = (function() {
+ load("jstests/libs/feature_flag_util.js");
load("jstests/sharding/libs/find_chunks_util.js");
let createFragmentedCollection = function(mongos,
@@ -93,7 +94,37 @@ var defragmentationUtil = (function() {
assert.commandWorked(bulk.execute());
};
+ let checkForOversizedChunk = function(
+ coll, chunk, shardKey, avgObjSize, oversizedChunkThreshold) {
+ let chunkSize = coll.countDocuments(
+ {[shardKey]: {$gte: chunk.min[shardKey], $lt: chunk.max[shardKey]}}) *
+ avgObjSize;
+ assert.lte(
+ chunkSize,
+ oversizedChunkThreshold,
+ `Chunk ${tojson(chunk)} has size ${chunkSize} which is greater than max chunk size of ${
+ oversizedChunkThreshold}`);
+ };
+
+ let checkForMergeableChunkSiblings = function(
+ coll, leftChunk, rightChunk, shardKey, avgObjSize, oversizedChunkThreshold) {
+ let combinedDataSize =
+ coll.countDocuments(
+ {[shardKey]: {$gte: leftChunk.min[shardKey], $lt: rightChunk.max[shardKey]}}) *
+ avgObjSize;
+ // The autosplitter should not split chunks whose combined size is < 133% of
+ // maxChunkSize but this threshold may be off by a few documents depending on
+ // rounding of avgObjSize.
+ const autosplitRoundingTolerance = 3 * avgObjSize;
+ assert.gte(combinedDataSize,
+ oversizedChunkThreshold - autosplitRoundingTolerance,
+ `Chunks ${tojson(leftChunk)} and ${
+ tojson(rightChunk)} are mergeable with combined size ${combinedDataSize}`);
+ };
+
let checkPostDefragmentationState = function(mongos, ns, maxChunkSizeMB, shardKey) {
+ const withAutoSplitActive =
+ !FeatureFlagUtil.isEnabled(mongos.getDB('admin'), 'NoMoreAutoSplitter');
const oversizedChunkThreshold = maxChunkSizeMB * 1024 * 1024 * 4 / 3;
const chunks = findChunksUtil.findChunksByNs(mongos.getDB('config'), ns)
.sort({[shardKey]: 1})
@@ -111,55 +142,45 @@ var defragmentationUtil = (function() {
? 0
: storageStat['storageStats']['avgObjSize'];
});
- let checkForOversizedChunk = function(
- coll, chunk, shardKey, avgObjSize, oversizedChunkThreshold) {
- let chunkSize =
- coll.countDocuments(
- {[shardKey]: {$gte: chunk.min[shardKey], $lt: chunk.max[shardKey]}}) *
- avgObjSize;
- assert.lte(
- chunkSize,
- oversizedChunkThreshold,
- `Chunk ${tojson(chunk)} has size ${
- chunkSize} which is greater than max chunk size of ${oversizedChunkThreshold}`);
- };
for (let i = 1; i < chunks.length; i++) {
- let chunk1 = chunks[i - 1];
- let chunk2 = chunks[i];
+ let leftChunk = chunks[i - 1];
+ let rightChunk = chunks[i];
// Check for mergeable chunks with combined size less than maxChunkSize
- if (chunk1["shard"] === chunk2["shard"] &&
- bsonWoCompare(chunk1["max"], chunk2["min"]) === 0) {
- let chunk1Zone = getZoneForRange(mongos, ns, chunk1.min, chunk1.max);
- let chunk2Zone = getZoneForRange(mongos, ns, chunk2.min, chunk2.max);
- if (bsonWoCompare(chunk1Zone, chunk2Zone) === 0) {
- let combinedDataSize =
- coll.countDocuments(
- {[shardKey]: {$gte: chunk1.min[shardKey], $lt: chunk2.max[shardKey]}}) *
- avgObjSizeByShard[chunk1['shard']];
- // The autosplitter should not split chunks whose combined size is < 133% of
- // maxChunkSize but this threshold may be off by a few documents depending on
- // rounding of avgObjSize.
- const autosplitRoundingTolerance = 3 * avgObjSizeByShard[chunk1['shard']];
- assert.gte(
- combinedDataSize,
- oversizedChunkThreshold - autosplitRoundingTolerance,
- `Chunks ${tojson(chunk1)} and ${
- tojson(chunk2)} are mergeable with combined size ${combinedDataSize}`);
+ if (leftChunk["shard"] === rightChunk["shard"] &&
+ bsonWoCompare(leftChunk["max"], rightChunk["min"]) === 0) {
+ let leftChunkZone = getZoneForRange(mongos, ns, leftChunk.min, leftChunk.max);
+ let rightChunkZone = getZoneForRange(mongos, ns, rightChunk.min, rightChunk.max);
+ if (bsonWoCompare(leftChunkZone, rightChunkZone) === 0) {
+ if (withAutoSplitActive) {
+ checkForMergeableChunkSiblings(coll,
+ leftChunk,
+ rightChunk,
+ shardKey,
+ avgObjSizeByShard[leftChunk['shard']],
+ oversizedChunkThreshold);
+ } else {
+ assert(false,
+ `Chunks ${tojson(leftChunk)} and ${tojson(rightChunk)} should have been merged`);
+ }
}
}
- // Check for oversized chunks
+ if (withAutoSplitActive) {
+ checkForOversizedChunk(coll,
+ leftChunk,
+ shardKey,
+ avgObjSizeByShard[leftChunk['shard']],
+ oversizedChunkThreshold);
+ }
+ }
+
+ if (withAutoSplitActive) {
+ const lastChunk = chunks[chunks.length - 1];
checkForOversizedChunk(coll,
- chunk1,
+ lastChunk,
shardKey,
- avgObjSizeByShard[chunk1['shard']],
+ avgObjSizeByShard[lastChunk['shard']],
oversizedChunkThreshold);
}
- const lastChunk = chunks[chunks.length - 1];
- checkForOversizedChunk(coll,
- lastChunk,
- shardKey,
- avgObjSizeByShard[lastChunk['shard']],
- oversizedChunkThreshold);
};
let getZoneForRange = function(mongos, ns, minKey, maxKey) {
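
The size assertions above derive from two quantities: an oversized-chunk threshold of 4/3 of the configured maximum (the autosplitter does not split a chunk until it exceeds roughly 133% of maxChunkSize) and a tolerance of three documents, since chunk sizes are estimated as countDocuments * avgObjSize and that estimate can be off by a few documents' worth of rounding. A worked example with illustrative numbers only (1 MB max chunk size, 1 KiB average documents; the test derives the real values from collStats):

const maxChunkSizeMB = 1;
const avgObjSize = 1024;

// A chunk counts as oversized above 4/3 of the max: 1398101.33 bytes here.
const oversizedChunkThreshold = maxChunkSizeMB * 1024 * 1024 * 4 / 3;

// Allow the combined size of two siblings to undershoot the threshold by up
// to three documents (3072 bytes here) to absorb avgObjSize rounding.
const autosplitRoundingTolerance = 3 * avgObjSize;

// Two adjacent same-shard, same-zone chunks pass the check when:
//   combinedDataSize >= oversizedChunkThreshold - autosplitRoundingTolerance
print(oversizedChunkThreshold - autosplitRoundingTolerance);  // 1395029.33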
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
index 867ab0c2634..5563223fb0a 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
@@ -37,6 +37,7 @@
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/grid.h"
+#include "mongo/s/sharding_feature_flags_gen.h"
#include <fmt/format.h>
#include <tuple>
@@ -1628,7 +1629,13 @@ std::unique_ptr<DefragmentationPhase> BalancerDefragmentationPolicyImpl::_transi
nextPhaseObject = MergeChunksPhase::build(opCtx, coll);
break;
case DefragmentationPhaseEnum::kSplitChunks:
- nextPhaseObject = SplitChunksPhase::build(opCtx, coll);
+ if (feature_flags::gNoMoreAutoSplitter.isEnabled(
+ serverGlobalParams.featureCompatibility)) {
+ _clearDefragmentationState(opCtx, coll.getUuid());
+ } else {
+ nextPhaseObject = SplitChunksPhase::build(opCtx, coll);
+ }
+
break;
case DefragmentationPhaseEnum::kFinished:
_clearDefragmentationState(opCtx, coll.getUuid());
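
For context, a hypothetical end-to-end use of the updated helper; the namespace, shard key name, and data-population step are placeholders, and the chunkSize passed to configureCollectionBalancing matches the maxChunkSizeMB handed to the checker:

load("jstests/sharding/libs/defragmentation_util.js");

const st = new ShardingTest({shards: 2});
const ns = "test.defrag";  // hypothetical namespace
assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {key: 1}}));

// ... insert data, then trigger defragmentation with a 1 MB target ...
assert.commandWorked(st.s.adminCommand(
    {configureCollectionBalancing: ns, defragmentCollection: true, chunkSize: 1}));

// ... wait for the balancer to report defragmentation complete ...

// The checker branches on NoMoreAutoSplitter: with the flag on, any mergeable
// sibling pair fails the test; with it off, the split-phase size tolerances
// shown in the diff above apply.
defragmentationUtil.checkPostDefragmentationState(st.s, ns, 1 /* maxChunkSizeMB */, "key");

st.stop();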