author     Allison Easton <allison.easton@mongodb.com>    2021-12-07 15:16:20 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2021-12-07 15:40:12 +0000
commit     f2b86c04cffb7124bafa50f363ccb260ebd0b854 (patch)
tree       4f85292b10ef3b6ab291ef07919c06d3414c689b /jstests/sharding
parent     5117fe4c17287501311df53d66335026e04a032e (diff)
download   mongo-f2b86c04cffb7124bafa50f363ccb260ebd0b854.tar.gz
SERVER-59664 Implement merge chunks routine Phase I
Diffstat (limited to 'jstests/sharding')
-rw-r--r--  jstests/sharding/balancer_defragmentation_merge_chunks.js  |  41
1 file changed, 34 insertions(+), 7 deletions(-)
diff --git a/jstests/sharding/balancer_defragmentation_merge_chunks.js b/jstests/sharding/balancer_defragmentation_merge_chunks.js
index 23a7c49273f..2aeeb64e7fb 100644
--- a/jstests/sharding/balancer_defragmentation_merge_chunks.js
+++ b/jstests/sharding/balancer_defragmentation_merge_chunks.js
@@ -12,9 +12,18 @@
load("jstests/libs/fail_point_util.js");
load('jstests/sharding/autosplit_include.js');
+load("jstests/sharding/libs/find_chunks_util.js");
-var st = new ShardingTest(
-    {mongos: 1, shards: 3, config: 1, other: {enableBalancer: true, enableAutoSplit: true}});
+var st = new ShardingTest({
+    mongos: 1,
+    shards: 3,
+    config: 1,
+    other: {
+        enableBalancer: true,
+        enableAutoSplit: true,
+        configOptions: {setParameter: {logComponentVerbosity: tojson({sharding: {verbosity: 2}})}},
+    }
+});
// setup the database for the test
assert.commandWorked(st.s.adminCommand({enableSharding: 'db'}));
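The configOptions block added above raises sharding log verbosity to 2 on the config server at startup; the value is wrapped in tojson() because startup setParameter options are passed as strings. For reference, a minimal sketch of the equivalent runtime change, assuming a live shell connection `conn` to the config server primary (not part of this patch):

// At runtime, setParameter accepts the verbosity document directly,
// with no tojson() wrapping needed.
assert.commandWorked(conn.adminCommand(
    {setParameter: 1, logComponentVerbosity: {sharding: {verbosity: 2}}}));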
@@ -23,27 +32,32 @@ var coll = db['test'];
var fullNs = coll.getFullName();
var configPrimary = st.configRS.getPrimary();
-const defaultChunkSize = 2 * 1024 * 1024;
+const defaultChunkSize = 2;  // MB (previously specified in bytes)
const bigString = "X".repeat(32 * 1024); // 32 KB
assert.commandWorked(st.s.adminCommand({shardCollection: fullNs, key: {key: 1}}));
+// TODO (SERVER-61848) remove this once the chunk size setting works
+let configDB = st.s.getDB('config');
+assert.commandWorked(configDB["settings"].insertOne({_id: "chunksize", value: 1}));  // 1 MB
+
var bulk = coll.initializeUnorderedBulkOp();
-for (let i = 0; i < 32 * 128; i++) {
+for (let i = 0; i < 12 * 128; i++) {
    bulk.insert({key: i, str: bigString});
}
assert.commandWorked(bulk.execute());
waitForOngoingChunkSplits(st);
+const numChunksPrev = findChunksUtil.countChunksForNs(st.config, fullNs);
+jsTest.log("Number of chunks before merging " + numChunksPrev);
jsTest.log("Balance cluster before beginning defragmentation");
function waitForBalanced() {
    assert.soon(function() {
        st.awaitBalancerRound();
-       balancerStatus =
+       var balancerStatus =
            assert.commandWorked(st.s.adminCommand({balancerCollectionStatus: fullNs}));
        return balancerStatus.balancerCompliant;
    });
-   jsTest.log("Balancer status of " + fullNs + " : \n" + tojson(balancerStatus));
}
waitForBalanced();
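Two notes on the hunk above. The insert loop now loads 12 * 128 = 1536 documents of about 32 KB each, roughly 48 MB in total, so the 1 MB chunk size written to config.settings still leaves the collection split into many chunks. The new waitForBalanced() helper polls balancerCollectionStatus once per balancer round; a sketch of how its response is consumed in this test (field names as used here; the exact response shape may vary by version):

var status = assert.commandWorked(st.s.adminCommand({balancerCollectionStatus: fullNs}));
// status.balancerCompliant is a boolean; when it is false,
// status.firstComplianceViolation names the first unmet criterion,
// e.g. 'chunksMerging' while the balancer is still merging chunks.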
@@ -127,14 +141,27 @@ jsTest.log("Begin defragmentation with balancer off, end with it on");
jsTest.log("Balancer on, begin defragmentation and let it complete");
{
+   // Reset collection before starting
    st.startBalancer();
+   waitForBalanced();
+   const numChunksPrev = findChunksUtil.countChunksForNs(st.config, fullNs);
+   jsTest.log("Number of chunks before merging " + numChunksPrev);
    assert.commandWorked(st.s.adminCommand({
        configureCollectionAutoSplitter: fullNs,
        enableAutoSplitter: false,
        balancerShouldMergeChunks: true,
        defaultChunkSize: defaultChunkSize,
    }));
-   waitForBalanced();
+   assert.soon(function() {
+       st.awaitBalancerRound();
+       var balancerStatus =
+           assert.commandWorked(st.s.adminCommand({balancerCollectionStatus: fullNs}));
+       return balancerStatus.firstComplianceViolation != 'chunksMerging';
+   });
+   st.stopBalancer();
+   const numChunksPost = findChunksUtil.countChunksForNs(st.config, fullNs);
+   jsTest.log("Number of chunks after merging " + numChunksPost);
+   assert.lt(numChunksPost, numChunksPrev);
}
st.stop();
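The completion check added in the final hunk is deliberately weaker than waitForBalanced(): it waits only until 'chunksMerging' is no longer the first reported compliance violation, then stops the balancer and asserts the chunk count dropped. A hypothetical standalone helper distilling that pattern, assuming the same `st` fixture and a namespace string `ns` (not part of this patch):

// Poll once per balancer round until the balancer stops reporting
// 'chunksMerging' as the first compliance violation for the namespace.
function waitForMergeCompletion(st, ns) {
    assert.soon(function() {
        st.awaitBalancerRound();
        var status = assert.commandWorked(
            st.s.adminCommand({balancerCollectionStatus: ns}));
        return status.firstComplianceViolation != 'chunksMerging';
    });
}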