summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPierlauro Sciarelli <pierlauro.sciarelli@mongodb.com>2022-05-24 15:12:45 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-10-14 13:34:32 +0000
commit81d40559953a2e91796605b926468bd0a0a53291 (patch)
tree6c4d4cd3e416a8b17d560221ea3ab9ee146fe383
parent7b450643b3bdfb402de5fceb26b076e8a72dc17f (diff)
downloadmongo-81d40559953a2e91796605b926468bd0a0a53291.tar.gz
SERVER-66378 Adapt tests skipped due to featureFlagBalanceAccordingToDataSize
-rw-r--r--jstests/noPassthroughWithMongod/no_balance_collection.js76
-rw-r--r--jstests/sharding/auth.js13
-rw-r--r--jstests/sharding/authCommands.js18
-rw-r--r--jstests/sharding/auto_rebalance_parallel.js20
-rw-r--r--jstests/sharding/balancer_collection_status.js14
-rw-r--r--jstests/sharding/balancer_window.js30
-rw-r--r--jstests/sharding/balancing_sessions_collection.js2
-rw-r--r--jstests/sharding/enforce_zone_policy.js57
-rw-r--r--jstests/sharding/migrateBig.js16
-rw-r--r--jstests/sharding/move_chunk_allowMigrations.js19
-rw-r--r--jstests/sharding/move_chunk_permitMigrations.js20
-rw-r--r--jstests/sharding/zone_changes_hashed.js28
-rw-r--r--jstests/sharding/zone_changes_range.js23
13 files changed, 130 insertions, 206 deletions
diff --git a/jstests/noPassthroughWithMongod/no_balance_collection.js b/jstests/noPassthroughWithMongod/no_balance_collection.js
index e4f2aaf5ce6..8b9d16d942b 100644
--- a/jstests/noPassthroughWithMongod/no_balance_collection.js
+++ b/jstests/noPassthroughWithMongod/no_balance_collection.js
@@ -7,15 +7,7 @@
load("jstests/sharding/libs/find_chunks_util.js");
load("jstests/libs/feature_flag_util.js");
-var st = new ShardingTest({shards: 2, mongos: 1});
-
-// TODO SERVER-66378 adapt this test for data size aware balancing
-if (FeatureFlagUtil.isEnabled(st.configRS.getPrimary().getDB('admin'),
- "BalanceAccordingToDataSize")) {
- jsTestLog("Skipping as featureFlagBalanceAccordingToDataSize is enabled");
- st.stop();
- return;
-}
+const st = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableAutoSplit: false}});
// First, test that shell helpers require an argument
assert.throws(sh.disableBalancing, [], "sh.disableBalancing requires a collection");
@@ -24,42 +16,39 @@ assert.throws(sh.enableBalancing, [], "sh.enableBalancing requires a collection"
var shardAName = st.shard0.shardName;
var shardBName = st.shard1.shardName;
-var collA = st.s.getCollection(jsTest.name() + ".collA");
-var collB = st.s.getCollection(jsTest.name() + ".collB");
+const dbName = jsTest.name();
+const collAName = 'collA';
+const collBName = 'collB';
+const collA = st.s.getCollection(dbName + '.' + collAName);
+const collB = st.s.getCollection(dbName + '.' + collBName);
// Shard two collections
st.shardColl(collA, {_id: 1}, false);
st.shardColl(collB, {_id: 1}, false);
-// Split into a lot of chunks so balancing can occur
-var totalNumChunks = 10;
-var numChunksPerShard = totalNumChunks / 2;
-for (var i = 0; i < totalNumChunks - 1; i++) { // 10 chunks total
- collA.getMongo().getDB("admin").runCommand({split: collA + "", middle: {_id: i}});
- collA.getMongo().getDB("admin").runCommand({split: collB + "", middle: {_id: i}});
-}
-
// Disable balancing on one collection
sh.disableBalancing(collB);
+// Insert 10MB data so balancing can occur
+const bigString = 'X'.repeat(1024 * 1024); // 1MB
+const bulkA = collA.initializeUnorderedBulkOp();
+var bulkB = collB.initializeUnorderedBulkOp();
+for (var i = 0; i < 10; i++) {
+ bulkA.insert({_id: i, s: bigString});
+ assert.commandWorked(st.s.adminCommand({split: collA.getFullName(), middle: {_id: i}}));
+ bulkB.insert({_id: i, s: bigString});
+ assert.commandWorked(st.s.adminCommand({split: collB.getFullName(), middle: {_id: i}}));
+}
+assert.commandWorked(bulkA.execute());
+assert.commandWorked(bulkB.execute());
+
jsTest.log("Balancing disabled on " + collB);
printjson(collA.getDB().getSiblingDB("config").collections.find().toArray());
st.startBalancer();
// Make sure collA gets balanced
-assert.soon(function() {
- var shardAChunks =
- findChunksUtil
- .findChunksByNs(st.s.getDB("config"), collA.getFullName(), {shard: shardAName})
- .itcount();
- var shardBChunks =
- findChunksUtil
- .findChunksByNs(st.s.getDB("config"), collA.getFullName(), {shard: shardBName})
- .itcount();
- printjson({shardA: shardAChunks, shardB: shardBChunks});
- return (shardAChunks == numChunksPerShard) && (shardAChunks == shardBChunks);
-}, "" + collA + " chunks not balanced!", 5 * 60 * 1000);
+st.awaitBalance(collAName, dbName, 60 * 1000);
jsTest.log("Chunks for " + collA + " are balanced.");
@@ -77,18 +66,7 @@ assert(shardAChunks == 0 || shardBChunks == 0);
sh.enableBalancing(collB);
// Make sure that collB is now balanced
-assert.soon(function() {
- var shardAChunks =
- findChunksUtil
- .findChunksByNs(st.s.getDB("config"), collB.getFullName(), {shard: shardAName})
- .itcount();
- var shardBChunks =
- findChunksUtil
- .findChunksByNs(st.s.getDB("config"), collB.getFullName(), {shard: shardBName})
- .itcount();
- printjson({shardA: shardAChunks, shardB: shardBChunks});
- return (shardAChunks == numChunksPerShard) && (shardAChunks == shardBChunks);
-}, "" + collB + " chunks not balanced!", 5 * 60 * 1000);
+st.awaitBalance(collBName, dbName, 60 * 1000);
jsTest.log("Chunks for " + collB + " are balanced.");
@@ -100,16 +78,14 @@ sh.disableBalancing(collB);
db = st.s0.getDB("config");
st.waitForBalancer(true, 60000);
-// Make sure auto-migrates on insert don't move chunks
+// Make sure auto-migrates on insert don't move data
var lastMigration = sh._lastMigration(collB);
-var bulk = collB.initializeUnorderedBulkOp();
-// Reduce the amount of data on live-record buildvariant
-var n = (TestData.undoRecorderPath ? 100000 : 1000000);
-for (var i = 0; i < n; i++) {
- bulk.insert({_id: i, hello: "world"});
+bulkB = collB.initializeUnorderedBulkOp();
+for (var i = 10; i < 20; i++) {
+ bulkB.insert({_id: i, s: bigString});
}
-assert.commandWorked(bulk.execute());
+assert.commandWorked(bulkB.execute());
printjson(lastMigration);
printjson(sh._lastMigration(collB));
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index dcb65c2f2a7..3bf15b56716 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -175,18 +175,19 @@ awaitRSClientHosts(s.s, d2.nodes, {ok: true});
s.getDB("test").foo.remove({});
-var num = 10000;
+var num = 10;
assert.commandWorked(s.s.adminCommand({split: "test.foo", middle: {x: num / 2}}));
+const bigString = 'X'.repeat(1024 * 1024); // 1MB
var bulk = s.getDB("test").foo.initializeUnorderedBulkOp();
for (i = 0; i < num; i++) {
- bulk.insert({_id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market"});
+ bulk.insert({_id: i, x: i, abc: "defg", date: new Date(), str: bigString});
}
assert.commandWorked(bulk.execute());
s.startBalancer(60000);
-// TODO SERVER-66378 adapt this test for data size aware balancing
-const balanceAccordingToDataSize = TestData.setParameters.featureFlagBalanceAccordingToDataSize;
+const balanceAccordingToDataSize =
+ FeatureFlagUtil.isEnabled(s.getDB('admin'), "BalanceAccordingToDataSize");
if (!balanceAccordingToDataSize) {
assert.soon(function() {
var d1Chunks =
@@ -241,7 +242,7 @@ if (numDocs != num) {
// This call also waits for any ongoing balancing to stop
s.stopBalancer(60000);
-var cursor = s.getDB("test").foo.find({x: {$lt: 500}});
+var cursor = s.getDB("test").foo.find({x: {$lt: 5}});
var count = 0;
while (cursor.hasNext()) {
@@ -249,7 +250,7 @@ while (cursor.hasNext()) {
count++;
}
-assert.eq(count, 500);
+assert.eq(count, 5);
logout(adminUser);
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index d3cfd168276..3373cf4b446 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -4,8 +4,6 @@
(function() {
'use strict';
-load("jstests/libs/feature_flag_util.js");
-
// Multiple users cannot be authenticated on one connection within a session.
TestData.disableImplicitSessions = true;
@@ -19,14 +17,6 @@ load("jstests/sharding/libs/find_chunks_util.js");
// gossip that time later in setup.
//
-// TODO SERVER-66378 adapt this test for data size aware balancing
-const dataSizeAwareBalancingFeatureFlag =
- TestData.setParameters.featureFlagBalanceAccordingToDataSize;
-if (dataSizeAwareBalancingFeatureFlag) {
- jsTestLog("Skipping as featureFlagBalanceAccordingToDataSize is enabled");
- return;
-}
-
var st = new ShardingTest({
shards: 2,
rs: {oplogSize: 10, useHostname: false},
@@ -98,12 +88,8 @@ st.startBalancer();
// Make sure we've done at least some splitting, so the balancer will work
assert.gt(findChunksUtil.findChunksByNs(configDB, 'test.foo').count(), 2);
-// Make sure we eventually balance all the chunks we've created
-assert.soon(function() {
- var x = st.chunkDiff("foo", "test");
- print("chunk diff: " + x);
- return x < 2 && configDB.locks.findOne({_id: 'test.foo'}).state == 0;
-}, "no balance happened", 15 * 60 * 1000);
+// Make sure we eventually balance the 'test.foo' collection
+st.awaitBalance('foo', 'test', 60 * 1000);
var map = function() {
emit(this.i, this.j);
diff --git a/jstests/sharding/auto_rebalance_parallel.js b/jstests/sharding/auto_rebalance_parallel.js
index d36d8adac57..cb424f0f920 100644
--- a/jstests/sharding/auto_rebalance_parallel.js
+++ b/jstests/sharding/auto_rebalance_parallel.js
@@ -5,18 +5,9 @@
(function() {
'use strict';
-load("jstests/libs/feature_flag_util.js");
load("jstests/sharding/libs/find_chunks_util.js");
-var st = new ShardingTest({shards: 4});
-// TODO SERVER-66378 adapt this test for data size aware balancing
-if (FeatureFlagUtil.isEnabled(st.configRS.getPrimary().getDB('admin'),
- "BalanceAccordingToDataSize")) {
- jsTestLog("Skipping as featureFlagBalanceAccordingToDataSize is enabled");
- st.stop();
- return;
-}
-
+const st = new ShardingTest({shards: 4, other: {chunkSize: 1, enableAutoSplitter: false}});
var config = st.s0.getDB('config');
assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
@@ -27,11 +18,12 @@ function prepareCollectionForBalance(collName) {
var coll = st.s0.getCollection(collName);
+ const bigString = 'X'.repeat(1024 * 1024); // 1MB
// Create 4 chunks initially and ensure they get balanced within 1 balancer round
- assert.commandWorked(coll.insert({Key: 1, Value: 'Test value 1'}));
- assert.commandWorked(coll.insert({Key: 10, Value: 'Test value 10'}));
- assert.commandWorked(coll.insert({Key: 20, Value: 'Test value 20'}));
- assert.commandWorked(coll.insert({Key: 30, Value: 'Test value 30'}));
+ assert.commandWorked(coll.insert({Key: 1, Value: 'Test value 1', s: bigString}));
+ assert.commandWorked(coll.insert({Key: 10, Value: 'Test value 10', s: bigString}));
+ assert.commandWorked(coll.insert({Key: 20, Value: 'Test value 20', s: bigString}));
+ assert.commandWorked(coll.insert({Key: 30, Value: 'Test value 30', s: bigString}));
assert.commandWorked(st.splitAt(collName, {Key: 10}));
assert.commandWorked(st.splitAt(collName, {Key: 20}));
diff --git a/jstests/sharding/balancer_collection_status.js b/jstests/sharding/balancer_collection_status.js
index 4d47f1a2866..e2bb502dc04 100644
--- a/jstests/sharding/balancer_collection_status.js
+++ b/jstests/sharding/balancer_collection_status.js
@@ -5,8 +5,6 @@
(function() {
'use strict';
-load("jstests/libs/feature_flag_util.js");
-
const chunkSizeMB = 1;
let st = new ShardingTest({
shards: 3,
@@ -16,14 +14,6 @@ let st = new ShardingTest({
}
});
-// TODO SERVER-66378 adapt this test for data size aware balancing
-if (FeatureFlagUtil.isEnabled(st.configRS.getPrimary().getDB('admin'),
- "BalanceAccordingToDataSize")) {
- jsTestLog("Skipping as featureFlagBalanceAccordingToDataSize is enabled");
- st.stop();
- return;
-}
-
function runBalancer(rounds) {
st.startBalancer();
let numRounds = 0;
@@ -64,6 +54,10 @@ assert.eq(result.balancerCompliant, true);
// get shardIds
const shards = st.s0.getDB('config').shards.find().toArray();
+const bigString = 'X'.repeat(1024 * 1024); // 1MB
+for (var i = 0; i < 30; i += 10) {
+ assert.commandWorked(st.s0.getDB('db').getCollection('col').insert({key: i, s: bigString}));
+}
// manually split and place the 3 chunks on the same shard
assert.commandWorked(st.s0.adminCommand({split: 'db.col', middle: {key: 10}}));
assert.commandWorked(st.s0.adminCommand({split: 'db.col', middle: {key: 20}}));
diff --git a/jstests/sharding/balancer_window.js b/jstests/sharding/balancer_window.js
index a52e9eeee70..333cc30101e 100644
--- a/jstests/sharding/balancer_window.js
+++ b/jstests/sharding/balancer_window.js
@@ -13,7 +13,6 @@
(function() {
'use strict';
-load("jstests/libs/feature_flag_util.js");
load("jstests/sharding/libs/find_chunks_util.js");
/**
@@ -46,24 +45,23 @@ var HourAndMinute = function(hour, minutes) {
};
};
-var st = new ShardingTest({shards: 2});
-// TODO SERVER-66378 adapt this test for data size aware balancing
-if (FeatureFlagUtil.isEnabled(st.configRS.getPrimary().getDB('admin'),
- "BalanceAccordingToDataSize")) {
- jsTestLog("Skipping as featureFlagBalanceAccordingToDataSize is enabled");
- st.stop();
- return;
-}
-var configDB = st.s.getDB('config');
-assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
-assert.commandWorked(configDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
+const st = new ShardingTest({shards: 2, other: {chunkSize: 1, enableAutoSplit: false}});
+const dbName = 'test';
+const collName = 'user';
+const ns = dbName + '.' + collName;
+const configDB = st.s.getDB('config');
+assert.commandWorked(configDB.adminCommand({enableSharding: dbName}));
+assert.commandWorked(configDB.adminCommand({shardCollection: ns, key: {_id: 1}}));
+const bigString = 'X'.repeat(1024 * 1024); // 1MB
+const coll = st.s.getDB(dbName).getCollection(collName);
for (var x = 0; x < 150; x += 10) {
- configDB.adminCommand({split: 'test.user', middle: {_id: x}});
+ coll.insert({_id: x, s: bigString});
+ configDB.adminCommand({split: ns, middle: {_id: x}});
}
var shard0Chunks =
- findChunksUtil.findChunksByNs(configDB, 'test.user', {shard: st.shard0.shardName}).count();
+ findChunksUtil.findChunksByNs(configDB, ns, {shard: st.shard0.shardName}).count();
var startDate = new Date();
var hourMinStart = new HourAndMinute(startDate.getHours(), startDate.getMinutes());
@@ -83,7 +81,7 @@ st.startBalancer();
st.waitForBalancer(true, 60000);
var shard0ChunksAfter =
- findChunksUtil.findChunksByNs(configDB, 'test.user', {shard: st.shard0.shardName}).count();
+ findChunksUtil.findChunksByNs(configDB, ns, {shard: st.shard0.shardName}).count();
assert.eq(shard0Chunks, shard0ChunksAfter);
assert.commandWorked(configDB.settings.update(
@@ -98,7 +96,7 @@ assert.commandWorked(configDB.settings.update(
st.waitForBalancer(true, 60000);
shard0ChunksAfter =
- findChunksUtil.findChunksByNs(configDB, 'test.user', {shard: st.shard0.shardName}).count();
+ findChunksUtil.findChunksByNs(configDB, ns, {shard: st.shard0.shardName}).count();
assert.neq(shard0Chunks, shard0ChunksAfter);
st.stop();
diff --git a/jstests/sharding/balancing_sessions_collection.js b/jstests/sharding/balancing_sessions_collection.js
index 960b19693ee..a896714fb2d 100644
--- a/jstests/sharding/balancing_sessions_collection.js
+++ b/jstests/sharding/balancing_sessions_collection.js
@@ -113,7 +113,7 @@ const st = new ShardingTest({
shards: numShards,
other: {configOptions: {setParameter: {minNumChunksForSessionsCollection: kMinNumChunks}}}
});
-// TODO SERVER-66378 adapt this test for data size aware balancing
+// TODO SERVER-66378 adapt this test for data size aware balancing
if (FeatureFlagUtil.isEnabled(st.configRS.getPrimary().getDB('admin'),
"BalanceAccordingToDataSize")) {
jsTestLog("Skipping as featureFlagBalanceAccordingToDataSize is enabled");
diff --git a/jstests/sharding/enforce_zone_policy.js b/jstests/sharding/enforce_zone_policy.js
index 30413599a07..1a35653fc00 100644
--- a/jstests/sharding/enforce_zone_policy.js
+++ b/jstests/sharding/enforce_zone_policy.js
@@ -3,35 +3,31 @@
(function() {
'use strict';
-load("jstests/libs/feature_flag_util.js");
load("jstests/sharding/libs/find_chunks_util.js");
-var st = new ShardingTest({shards: 3, mongos: 1});
-// TODO SERVER-66378 adapt this test for data size aware balancing
-if (FeatureFlagUtil.isEnabled(st.configRS.getPrimary().getDB('admin'),
- "BalanceAccordingToDataSize")) {
- jsTestLog("Skipping as featureFlagBalanceAccordingToDataSize is enabled");
- st.stop();
- return;
-}
+const st = new ShardingTest({shards: 3, mongos: 1, other: {chunkSize: 1, enableAutoSplit: false}});
+const dbName = 'test';
+const collName = 'foo';
+const ns = dbName + '.' + collName;
-assert.commandWorked(st.s0.adminCommand({enablesharding: 'test'}));
-st.ensurePrimaryShard('test', st.shard1.shardName);
+assert.commandWorked(
+ st.s0.adminCommand({enablesharding: dbName, primaryShard: st.shard1.shardName}));
-var testDB = st.s0.getDB('test');
+var testDB = st.s0.getDB(dbName);
var configDB = st.s0.getDB('config');
+assert.commandWorked(st.s0.adminCommand({shardCollection: ns, key: {_id: 1}}));
+
+const bigString = 'X'.repeat(1024 * 1024); // 1MB
var bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = 0; i < 9; i++) {
- bulk.insert({_id: i, x: i});
+ bulk.insert({_id: i, x: bigString});
}
assert.commandWorked(bulk.execute());
-assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {_id: 1}}));
-
// Produce 9 chunks with min value at the documents just inserted
for (var i = 0; i < 8; i++) {
- assert.commandWorked(st.s0.adminCommand({split: 'test.foo', middle: {_id: i}}));
+ assert.commandWorked(st.s0.adminCommand({split: ns, middle: {_id: i}}));
}
/**
@@ -55,8 +51,8 @@ function assertBalanceCompleteAndStable(checkFunc, stepName) {
* cluster is evenly balanced.
*/
function checkClusterEvenlyBalanced() {
- var maxChunkDiff = st.chunkDiff('foo', 'test');
- return maxChunkDiff <= 1;
+ assert.commandWorked(st.s.getDB('admin').runCommand({balancerStatus: 1}));
+ return true;
}
st.startBalancer();
@@ -67,17 +63,16 @@ assertBalanceCompleteAndStable(checkClusterEvenlyBalanced, 'initial');
// Spread chunks correctly across zones
st.addShardTag(st.shard0.shardName, 'a');
st.addShardTag(st.shard1.shardName, 'a');
-st.addTagRange('test.foo', {_id: -100}, {_id: 100}, 'a');
+st.addTagRange(ns, {_id: -100}, {_id: 100}, 'a');
st.addShardTag(st.shard2.shardName, 'b');
-st.addTagRange('test.foo', {_id: MinKey}, {_id: -100}, 'b');
-st.addTagRange('test.foo', {_id: 100}, {_id: MaxKey}, 'b');
+st.addTagRange(ns, {_id: MinKey}, {_id: -100}, 'b');
+st.addTagRange(ns, {_id: 100}, {_id: MaxKey}, 'b');
assertBalanceCompleteAndStable(function() {
- var chunksOnShard2 =
- findChunksUtil.findChunksByNs(configDB, 'test.foo', {shard: st.shard2.shardName})
- .sort({min: 1})
- .toArray();
+ var chunksOnShard2 = findChunksUtil.findChunksByNs(configDB, ns, {shard: st.shard2.shardName})
+ .sort({min: 1})
+ .toArray();
jsTestLog('Chunks on shard2: ' + tojson(chunksOnShard2));
@@ -90,16 +85,16 @@ assertBalanceCompleteAndStable(function() {
}, 'chunks to zones a and b');
// Tag the entire collection to shard0 and wait for everything to move to that shard
-st.removeTagRange('test.foo', {_id: -100}, {_id: 100}, 'a');
-st.removeTagRange('test.foo', {_id: MinKey}, {_id: -100}, 'b');
-st.removeTagRange('test.foo', {_id: 100}, {_id: MaxKey}, 'b');
+st.removeTagRange(ns, {_id: -100}, {_id: 100}, 'a');
+st.removeTagRange(ns, {_id: MinKey}, {_id: -100}, 'b');
+st.removeTagRange(ns, {_id: 100}, {_id: MaxKey}, 'b');
st.removeShardTag(st.shard1.shardName, 'a');
st.removeShardTag(st.shard2.shardName, 'b');
-st.addTagRange('test.foo', {_id: MinKey}, {_id: MaxKey}, 'a');
+st.addTagRange(ns, {_id: MinKey}, {_id: MaxKey}, 'a');
assertBalanceCompleteAndStable(function() {
- var counts = st.chunkCounts('foo');
+ var counts = st.chunkCounts(collName);
printjson(counts);
return counts[st.shard0.shardName] == 11 && counts[st.shard1.shardName] == 0 &&
counts[st.shard2.shardName] == 0;
@@ -107,7 +102,7 @@ assertBalanceCompleteAndStable(function() {
// Remove all zones and ensure collection is correctly redistributed
st.removeShardTag(st.shard0.shardName, 'a');
-st.removeTagRange('test.foo', {_id: MinKey}, {_id: MaxKey}, 'a');
+st.removeTagRange(ns, {_id: MinKey}, {_id: MaxKey}, 'a');
assertBalanceCompleteAndStable(checkClusterEvenlyBalanced, 'final');
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index 0282e5afe63..aa237832dd3 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -3,14 +3,8 @@
load("jstests/libs/feature_flag_util.js");
-var s = new ShardingTest({name: "migrateBig", shards: 2, other: {chunkSize: 1}});
-// TODO SERVER-66378 adapt this test for data size aware balancing
-if (FeatureFlagUtil.isEnabled(s.configRS.getPrimary().getDB('admin'),
- "BalanceAccordingToDataSize")) {
- jsTestLog("Skipping as featureFlagBalanceAccordingToDataSize is enabled");
- s.stop();
- return;
-}
+var s = new ShardingTest(
+ {name: "migrateBig", shards: 2, other: {chunkSize: 1, enableAutoSplit: false}});
assert.commandWorked(
s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true));
@@ -67,11 +61,7 @@ s.printShardingStatus();
s.startBalancer();
-assert.soon(function() {
- var x = s.chunkDiff("foo", "test");
- print("chunk diff: " + x);
- return x < 2;
-}, "no balance happened", 8 * 60 * 1000, 2000);
+s.awaitBalance('foo', 'test', 60 * 1000);
s.stop();
})();
diff --git a/jstests/sharding/move_chunk_allowMigrations.js b/jstests/sharding/move_chunk_allowMigrations.js
index 93eb2b937e2..f6f89263954 100644
--- a/jstests/sharding/move_chunk_allowMigrations.js
+++ b/jstests/sharding/move_chunk_allowMigrations.js
@@ -17,15 +17,8 @@ load('jstests/libs/parallel_shell_helpers.js');
load("jstests/sharding/libs/find_chunks_util.js");
load("jstests/sharding/libs/shard_versioning_util.js");
-const st = new ShardingTest({shards: 2});
+const st = new ShardingTest({shards: 2, other: {chunkSize: 1, enableAutoSplit: false}});
const configDB = st.s.getDB("config");
-// TODO SERVER-66378 adapt this test for data size aware balancing
-if (FeatureFlagUtil.isEnabled(st.configRS.getPrimary().getDB('admin'),
- "BalanceAccordingToDataSize")) {
- jsTestLog("Skipping as featureFlagBalanceAccordingToDataSize is enabled");
- st.stop();
- return;
-}
// Resets database dbName and enables sharding and establishes shard0 as primary, test case agnostic
function setUpDatabaseAndEnableSharding(dbName) {
@@ -153,12 +146,14 @@ function testAllowMigrationsFalseDisablesBalancer(allowMigrations, collBSetNoBal
assert.commandWorked(st.s.adminCommand({shardCollection: collA.getFullName(), key: {_id: 1}}));
assert.commandWorked(st.s.adminCommand({shardCollection: collB.getFullName(), key: {_id: 1}}));
+ const bigString = 'X'.repeat(1024 * 1024); // 1MB
+
// Split both collections into 4 chunks so balancing can occur.
for (let coll of [collA, collB]) {
- coll.insert({_id: 1});
- coll.insert({_id: 10});
- coll.insert({_id: 20});
- coll.insert({_id: 30});
+ coll.insert({_id: 1, s: bigString});
+ coll.insert({_id: 10, s: bigString});
+ coll.insert({_id: 20, s: bigString});
+ coll.insert({_id: 30, s: bigString});
assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 10}));
assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 20}));
diff --git a/jstests/sharding/move_chunk_permitMigrations.js b/jstests/sharding/move_chunk_permitMigrations.js
index f269eb7757c..7f61d87a8c8 100644
--- a/jstests/sharding/move_chunk_permitMigrations.js
+++ b/jstests/sharding/move_chunk_permitMigrations.js
@@ -10,20 +10,12 @@
(function() {
'use strict';
-load("jstests/libs/feature_flag_util.js");
load('jstests/libs/fail_point_util.js');
load('jstests/libs/parallel_shell_helpers.js');
load("jstests/sharding/libs/find_chunks_util.js");
load("jstests/sharding/libs/shard_versioning_util.js");
-const st = new ShardingTest({shards: 2});
-// TODO SERVER-66378 adapt this test for data size aware balancing
-if (FeatureFlagUtil.isEnabled(st.configRS.getPrimary().getDB('admin'),
- "BalanceAccordingToDataSize")) {
- jsTestLog("Skipping as featureFlagBalanceAccordingToDataSize is enabled");
- st.stop();
- return;
-}
+const st = new ShardingTest({shards: 2, other: {chunkSize: 1, enableAutoSplit: false}});
const configDB = st.s.getDB("config");
const dbName = 'AllowMigrations';
@@ -73,12 +65,14 @@ const testBalancer = function(setAllowMigrations, collBSetNoBalanceParam) {
assert.commandWorked(st.s.adminCommand({shardCollection: collA.getFullName(), key: {_id: 1}}));
assert.commandWorked(st.s.adminCommand({shardCollection: collB.getFullName(), key: {_id: 1}}));
+ const bigString = 'X'.repeat(1024 * 1024); // 1MB
+
// Split both collections into 4 chunks so balancing can occur.
for (let coll of [collA, collB]) {
- coll.insert({_id: 1});
- coll.insert({_id: 10});
- coll.insert({_id: 20});
- coll.insert({_id: 30});
+ coll.insert({_id: 1, s: bigString});
+ coll.insert({_id: 10, s: bigString});
+ coll.insert({_id: 20, s: bigString});
+ coll.insert({_id: 30, s: bigString});
assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 10}));
assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 20}));
diff --git a/jstests/sharding/zone_changes_hashed.js b/jstests/sharding/zone_changes_hashed.js
index 6bc975ea783..b39a5840431 100644
--- a/jstests/sharding/zone_changes_hashed.js
+++ b/jstests/sharding/zone_changes_hashed.js
@@ -48,14 +48,7 @@ function findHighestChunkBounds(chunkBounds) {
return highestBounds;
}
-let st = new ShardingTest({shards: 3});
-// TODO SERVER-66378 adapt this test for data size aware balancing
-if (FeatureFlagUtil.isEnabled(st.configRS.getPrimary().getDB('admin'),
- "BalanceAccordingToDataSize")) {
- jsTestLog("Skipping as featureFlagBalanceAccordingToDataSize is enabled");
- st.stop();
- return;
-}
+const st = new ShardingTest({shards: 3, other: {chunkSize: 1, enableAutoSplitter: false}});
let primaryShard = st.shard0;
let dbName = "test";
let testDB = st.s.getDB(dbName);
@@ -64,8 +57,8 @@ let coll = testDB.hashed;
let ns = coll.getFullName();
let shardKey = {x: "hashed"};
-assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
-st.ensurePrimaryShard(dbName, primaryShard.shardName);
+assert.commandWorked(
+ st.s.adminCommand({enableSharding: dbName, primaryShard: primaryShard.shardName}));
jsTest.log(
"Shard the collection. The command creates two chunks on each of the shards by default.");
@@ -74,7 +67,15 @@ let chunkDocs = findChunksUtil.findChunksByNs(configDB, ns).sort({min: 1}).toArr
let shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);
jsTest.log("Insert docs (one for each chunk) and check that they end up on the right shards.");
-let docs = [{x: -25}, {x: -18}, {x: -5}, {x: -1}, {x: 5}, {x: 10}];
+const bigString = 'X'.repeat(1024 * 1024); // 1MB
+let docs = [
+ {x: -25, s: bigString},
+ {x: -18, s: bigString},
+ {x: -5, s: bigString},
+ {x: -1, s: bigString},
+ {x: 5, s: bigString},
+ {x: 10, s: bigString}
+];
assert.commandWorked(coll.insert(docs));
let docChunkBounds = [];
@@ -134,7 +135,10 @@ shardTags = {
};
assertShardTags(configDB, shardTags);
-let numChunksToMove = zoneChunkBounds["zoneB"].length / 2;
+const balanceAccordingToDataSize = FeatureFlagUtil.isEnabled(
+ st.configRS.getPrimary().getDB('admin'), "BalanceAccordingToDataSize");
+let numChunksToMove = balanceAccordingToDataSize ? zoneChunkBounds["zoneB"].length
+ : zoneChunkBounds["zoneB"].length / 2;
runBalancer(st, numChunksToMove);
shardChunkBounds = {
[st.shard0.shardName]: zoneChunkBounds["zoneB"].slice(0, numChunksToMove),
diff --git a/jstests/sharding/zone_changes_range.js b/jstests/sharding/zone_changes_range.js
index 751c5984568..78ec7c40dad 100644
--- a/jstests/sharding/zone_changes_range.js
+++ b/jstests/sharding/zone_changes_range.js
@@ -4,18 +4,10 @@
(function() {
'use strict';
-load("jstests/libs/feature_flag_util.js");
load("jstests/sharding/libs/zone_changes_util.js");
load("jstests/sharding/libs/find_chunks_util.js");
-let st = new ShardingTest({shards: 3});
-// TODO SERVER-66378 adapt this test for data size aware balancing
-if (FeatureFlagUtil.isEnabled(st.configRS.getPrimary().getDB('admin'),
- "BalanceAccordingToDataSize")) {
- jsTestLog("Skipping as featureFlagBalanceAccordingToDataSize is enabled");
- st.stop();
- return;
-}
+const st = new ShardingTest({shards: 3, other: {chunkSize: 1, enableAutoSplitter: false}});
let primaryShard = st.shard0;
let dbName = "test";
let testDB = st.s.getDB(dbName);
@@ -24,8 +16,8 @@ let coll = testDB.range;
let ns = coll.getFullName();
let shardKey = {x: 1};
-assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
-st.ensurePrimaryShard(dbName, primaryShard.shardName);
+assert.commandWorked(
+ st.s.adminCommand({enableSharding: dbName, primaryShard: primaryShard.shardName}));
jsTest.log("Shard the collection and create chunks.");
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: shardKey}));
@@ -34,8 +26,15 @@ assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 0}}));
assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 10}}));
assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 20}}));
+const bigString = 'X'.repeat(1024 * 1024); // 1MB
jsTest.log("Insert docs (one for each chunk) and check that they end up on the primary shard.");
-let docs = [{x: -15}, {x: -5}, {x: 5}, {x: 15}, {x: 25}];
+let docs = [
+ {x: -15, s: bigString},
+ {x: -5, s: bigString},
+ {x: 5, s: bigString},
+ {x: 15, s: bigString},
+ {x: 25, s: bigString}
+];
assert.eq(docs.length, findChunksUtil.countChunksForNs(configDB, ns));
assert.commandWorked(coll.insert(docs));
assert.eq(docs.length, primaryShard.getCollection(ns).count());