summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCheahuychou Mao <cheahuychou.mao@mongodb.com>2019-11-20 22:31:42 +0000
committerevergreen <evergreen@mongodb.com>2019-11-20 22:31:42 +0000
commit89860ea231fbb8a7e516da588b74a837345435bf (patch)
treed59dc086000dd74e76e63c0ae8bb78ba1953689a
parentd410d3733c768e4bbcc0e009cb4d48a7a653c7cf (diff)
downloadmongo-89860ea231fbb8a7e516da588b74a837345435bf.tar.gz
SERVER-44602 Test that chunks and documents are moved after zone changes
-rw-r--r--buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml2
-rw-r--r--jstests/sharding/libs/chunk_bounds_util.js61
-rw-r--r--jstests/sharding/libs/zone_changes_util.js88
-rw-r--r--jstests/sharding/zone_changes_hashed.js207
-rw-r--r--jstests/sharding/zone_changes_range.js192
5 files changed, 547 insertions, 3 deletions
diff --git a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
index 13576f1380e..c4bd89cb062 100644
--- a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
@@ -68,6 +68,8 @@ selector:
- jstests/sharding/count_config_servers.js
- jstests/sharding/split_large_key.js
- jstests/sharding/balancer_window.js
+ - jstests/sharding/zone_changes_hashed.js
+ - jstests/sharding/zone_changes_range.js
# No retries on direct writes to the config/admin databases on the config servers
- jstests/sharding/database_versioning_safe_secondary_reads.js
- jstests/sharding/listDatabases.js
diff --git a/jstests/sharding/libs/chunk_bounds_util.js b/jstests/sharding/libs/chunk_bounds_util.js
index 968213530d8..40d11d00d0a 100644
--- a/jstests/sharding/libs/chunk_bounds_util.js
+++ b/jstests/sharding/libs/chunk_bounds_util.js
@@ -2,21 +2,64 @@
* Utilities for dealing with chunk bounds.
*/
var chunkBoundsUtil = (function() {
- let _gte = function(shardKeyA, shardKeyB) {
+ let eq = function(shardKeyA, shardKeyB) {
+ return bsonWoCompare(shardKeyA, shardKeyB) == 0;
+ };
+
+ let gte = function(shardKeyA, shardKeyB) {
return bsonWoCompare(shardKeyA, shardKeyB) >= 0;
};
- let _lt = function(shardKeyA, shardKeyB) {
+ let lt = function(shardKeyA, shardKeyB) {
return bsonWoCompare(shardKeyA, shardKeyB) < 0;
};
+ let max = function(shardKeyA, shardKeyB) {
+ return gte(shardKeyA, shardKeyB) ? shardKeyA : shardKeyB;
+ };
+
+ let min = function(shardKeyA, shardKeyB) {
+ return lt(shardKeyA, shardKeyB) ? shardKeyA : shardKeyB;
+ };
+
let containsKey = function(shardKey, minKey, maxKey) {
- return _gte(shardKey, minKey) && _lt(shardKey, maxKey);
+ return gte(shardKey, minKey) && lt(shardKey, maxKey);
+ };
+
+ let overlapsWith = function(chunkBoundsA, chunkBoundsB) {
+ return containsKey(chunkBoundsA[0], chunkBoundsB[0], chunkBoundsB[1]) ||
+ containsKey(chunkBoundsA[1], chunkBoundsB[0], chunkBoundsB[1]);
+ };
+
+ /*
+ * Combines chunk bounds chunkBoundsA and chunkBoundsB. Assumes that the bounds
+ * overlap.
+ */
+ let combine = function(chunkBoundsA, chunkBoundsB) {
+ let rangeMin = min(chunkBoundsA[0], chunkBoundsB[0]);
+ let rangeMax = max(chunkBoundsA[1], chunkBoundsB[1]);
+ return [rangeMin, rangeMax];
+ };
+
+ /*
+ * Computes the range that the given chunk bounds are in by combining the given chunk
+ * bounds into bounds for one chunk. Assumes the chunk bounds are contiguous and in
+ * nondescending order.
+ */
+ let computeRange = function(allChunkBounds) {
+ let combinedBounds = allChunkBounds[0];
+ for (let i = 1; i < allChunkBounds.length; i++) {
+ assert(overlapsWith(combinedBounds, allChunkBounds[i]));
+ combinedBounds = combine(combinedBounds, allChunkBounds[i]);
+ }
+ return combinedBounds;
};
/*
* Returns a object mapping each shard name to an array of chunk bounds
* that it owns.
+ *
+ * @param chunkDocs {Array} an array of chunk documents in the config database.
*/
let findShardChunkBounds = function(chunkDocs) {
let allBounds = {};
@@ -46,6 +89,10 @@ var chunkBoundsUtil = (function() {
/*
* Returns the shard object for the shard that owns the chunk that contains
* the given shard key value and the bounds of the chunk.
+ *
+ * @param shardChunkBounds {Object} a map from each shard name to an array of the bounds
+ * for all the chunks on the shard. Each pair of chunk
+ * bounds is an array of the form [minKey, maxKey].
*/
let findShardAndChunkBoundsForShardKey = function(st, shardChunkBounds, shardKey) {
for (const [shardName, chunkBounds] of Object.entries(shardChunkBounds)) {
@@ -60,12 +107,20 @@ var chunkBoundsUtil = (function() {
/*
* Returns the shard object for the shard that owns the chunk that contains
* the given shard key value.
+ *
+ * @param shardChunkBounds {Object} a map from each shard name to an array of the bounds
+ * for all the chunks on the shard. Each pair of chunk
+ * bounds is an array of the form [minKey, maxKey].
*/
let findShardForShardKey = function(st, shardChunkBounds, shardKey) {
return findShardAndChunkBoundsForShardKey(st, shardChunkBounds, shardKey).shard;
};
return {
+ eq,
+ gte,
+ lt,
+ computeRange,
containsKey,
findShardChunkBounds,
findShardAndChunkBoundsForShardKey,
diff --git a/jstests/sharding/libs/zone_changes_util.js b/jstests/sharding/libs/zone_changes_util.js
new file mode 100644
index 00000000000..f45a5e28ef7
--- /dev/null
+++ b/jstests/sharding/libs/zone_changes_util.js
@@ -0,0 +1,88 @@
+load("jstests/sharding/libs/chunk_bounds_util.js");
+
+/**
+ * Asserts that the given shards have the given chunks.
+ *
+ * @param shardChunkBounds {Object} a map from each shard name to an array of the bounds for all
+ * the chunks on the shard. Each pair of chunk bounds is an array
+ * of the form [minKey, maxKey].
+ */
+function assertChunksOnShards(configDB, ns, shardChunkBounds) {
+ for (let [shardName, chunkBounds] of Object.entries(shardChunkBounds)) {
+ for (let bounds of chunkBounds) {
+ assert.eq(
+ shardName,
+ configDB.chunks.findOne({ns: ns, min: bounds[0], max: bounds[1]}).shard,
+ "expected to find chunk " + tojson(bounds) + " on shard \"" + shardName + "\"");
+ }
+ }
+}
+
+/**
+ * Asserts that the docs are on the shards that own their corresponding chunks.
+ *
+ * @param shardChunkBounds {Object} a map from each shard name to an array of the bounds for all
+ * the chunks on the shard. Each pair of chunk bounds is an array
+ * of the form [minKey, maxKey].
+ * @param shardKey {Object} a map from each shard key field to 1 if the collection uses
+ * range based sharding and "hashed" if the collection uses
+ * hashed sharding. (i.e. equivalent to the value passed for the
+ * "key" field for the shardCollection command).
+ */
+function assertDocsOnShards(st, ns, shardChunkBounds, docs, shardKey) {
+ for (let doc of docs) {
+ let docShardKey = {};
+ for (const [k, v] of Object.entries(shardKey)) {
+ docShardKey[k] = (v == "hashed") ? convertShardKeyToHashed(doc[k]) : doc[k];
+ }
+ let shard = chunkBoundsUtil.findShardForShardKey(st, shardChunkBounds, docShardKey);
+ assert.eq(1,
+ shard.getCollection(ns).count(doc),
+ "expected to find doc " + tojson(doc) + " on shard \"" + shard.shardName + "\"");
+ }
+}
+
+/**
+ * Asserts that the given shards have the given tags.
+ *
+ * @param shardTags {Object} a map from each shard name to an array of strings representing the zone
+ * names that the shard owns.
+ */
+function assertShardTags(configDB, shardTags) {
+ for (let [shardName, tags] of Object.entries(shardTags)) {
+ assert.eq(tags.sort(),
+ configDB.shards.findOne({_id: shardName}).tags.sort(),
+ "expected shard \"" + shardName + "\" to have tags " + tojson(tags.sort()));
+ }
+}
+
+/**
+ * Adds toShard to zone and removes fromShard from zone.
+ */
+function moveZoneToShard(st, zoneName, fromShard, toShard) {
+ assert.commandWorked(st.s.adminCommand({addShardToZone: toShard.shardName, zone: zoneName}));
+ assert.commandWorked(
+ st.s.adminCommand({removeShardFromZone: fromShard.shardName, zone: zoneName}));
+}
+
+/**
+ * Starts the balancer, lets it run for the given number of rounds, then stops the
+ * balancer.
+ */
+function runBalancer(st, numRounds) {
+ st.startBalancer();
+ for (let i = 0; i < numRounds; i++) {
+ st.awaitBalancerRound();
+ }
+ st.stopBalancer();
+}
+
+/**
+ * Updates the zone key range for the given namespace.
+ */
+function updateZoneKeyRange(st, ns, zoneName, fromRange, toRange) {
+ assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: fromRange[0], max: fromRange[1], zone: null}));
+ assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: toRange[0], max: toRange[1], zone: zoneName}));
+}
diff --git a/jstests/sharding/zone_changes_hashed.js b/jstests/sharding/zone_changes_hashed.js
new file mode 100644
index 00000000000..c2605690b5a
--- /dev/null
+++ b/jstests/sharding/zone_changes_hashed.js
@@ -0,0 +1,207 @@
+/**
+ * Test that chunks and documents are moved correctly after zone changes.
+ */
+(function() {
+'use strict';
+
+load("jstests/sharding/libs/zone_changes_util.js");
+
+/**
+ * Adds each shard to the corresponding zone in shardTags, and makes the zone range equal
+ * to the chunk range of the shard. Assumes that there are no chunk holes on each shard.
+ */
+function addShardsToZonesAndAssignZoneRanges(st, ns, shardChunkBounds, shardTags) {
+ let zoneChunks = {};
+ for (let [shardName, chunkBounds] of Object.entries(shardChunkBounds)) {
+ let zoneName = shardTags[shardName][0];
+ let rangeMin = {x: MaxKey};
+ let rangeMax = {x: MinKey};
+ for (let bounds of chunkBounds) {
+ if (chunkBoundsUtil.lt(bounds[0], rangeMin)) {
+ rangeMin = bounds[0];
+ }
+ if (chunkBoundsUtil.gte(bounds[1], rangeMax)) {
+ rangeMax = bounds[1];
+ }
+ }
+ zoneChunks[zoneName] = chunkBounds;
+ assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: zoneName}));
+ assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: rangeMin, max: rangeMax, zone: zoneName}));
+ }
+ return zoneChunks;
+}
+
+/**
+ * Returns the highest chunk bounds out of the given chunk bounds. Assumes that the
+ * chunks do not overlap.
+ */
+function findHighestChunkBounds(chunkBounds) {
+ let highestBounds = chunkBounds[0];
+ for (let i = 1; i < chunkBounds.length; i++) {
+ if (chunkBoundsUtil.lt(highestBounds, chunkBounds[i])) {
+ highestBounds = chunkBounds[i];
+ }
+ }
+ return highestBounds;
+}
+
+let st = new ShardingTest({shards: 3});
+let primaryShard = st.shard0;
+let dbName = "test";
+let testDB = st.s.getDB(dbName);
+let configDB = st.s.getDB("config");
+let coll = testDB.hashed;
+let ns = coll.getFullName();
+let shardKey = {x: "hashed"};
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, primaryShard.shardName);
+
+jsTest.log(
+ "Shard the collection. The command creates two chunks on each of the shards by default.");
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: shardKey}));
+let chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+let shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);
+
+jsTest.log("Insert docs (one for each chunk) and check that they end up on the right shards.");
+let docs = [{x: -25}, {x: -18}, {x: -5}, {x: -1}, {x: 5}, {x: 10}];
+assert.commandWorked(coll.insert(docs));
+
+let docChunkBounds = [];
+let minHash = MaxKey;
+docs.forEach(function(doc) {
+ let hash = convertShardKeyToHashed(doc.x);
+ let {shard, bounds} =
+ chunkBoundsUtil.findShardAndChunkBoundsForShardKey(st, shardChunkBounds, {x: hash});
+ assert.eq(1, shard.getCollection(ns).count(doc));
+ docChunkBounds.push(bounds);
+ if (bsonWoCompare(hash, minHash) < 0) {
+ minHash = hash;
+ }
+});
+assert.eq(docs.length, (new Set(docChunkBounds)).size);
+assert.eq(docs.length, configDB.chunks.count({ns: ns}));
+
+jsTest.log(
+ "Assign each shard a zone, make each zone range equal to the chunk range for the shard, " +
+ "and store the chunks for each zone.");
+let shardTags = {
+ [st.shard0.shardName]: ["zoneA"],
+ [st.shard1.shardName]: ["zoneB"],
+ [st.shard2.shardName]: ["zoneC"]
+};
+let zoneChunkBounds = addShardsToZonesAndAssignZoneRanges(st, ns, shardChunkBounds, shardTags);
+assertShardTags(configDB, shardTags);
+
+jsTest.log("Test shard's zone changes...");
+
+jsTest.log(
+ "Check that removing a zone from a shard causes its chunks and documents to move to other" +
+ " shards that the zone belongs to.");
+moveZoneToShard(st, "zoneA", st.shard0, st.shard1);
+shardTags = {
+ [st.shard0.shardName]: [],
+ [st.shard1.shardName]: ["zoneB", "zoneA"],
+ [st.shard2.shardName]: ["zoneC"]
+};
+assertShardTags(configDB, shardTags);
+
+runBalancer(st, zoneChunkBounds["zoneA"].length);
+shardChunkBounds = {
+ [st.shard0.shardName]: [],
+ [st.shard1.shardName]: [...zoneChunkBounds["zoneB"], ...zoneChunkBounds["zoneA"]],
+ [st.shard2.shardName]: zoneChunkBounds["zoneC"]
+};
+assertChunksOnShards(configDB, ns, shardChunkBounds);
+assertDocsOnShards(st, ns, shardChunkBounds, docs, shardKey);
+
+jsTest.log("Check that the balancer balances chunks within zones.");
+assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: "zoneB"}));
+shardTags = {
+ [st.shard0.shardName]: ["zoneB"],
+ [st.shard1.shardName]: ["zoneB", "zoneA"],
+ [st.shard2.shardName]: ["zoneC"]
+};
+assertShardTags(configDB, shardTags);
+
+let numChunksToMove = zoneChunkBounds["zoneB"].length / 2;
+runBalancer(st, numChunksToMove);
+shardChunkBounds = {
+ [st.shard0.shardName]: zoneChunkBounds["zoneB"].slice(0, numChunksToMove),
+ [st.shard1.shardName]: [
+ ...zoneChunkBounds["zoneA"],
+ ...zoneChunkBounds["zoneB"].slice(numChunksToMove, zoneChunkBounds["zoneB"].length)
+ ],
+ [st.shard2.shardName]: zoneChunkBounds["zoneC"]
+};
+assertChunksOnShards(configDB, ns, shardChunkBounds);
+assertDocsOnShards(st, ns, shardChunkBounds, docs, shardKey);
+
+jsTest.log("Make another zone change, and check that the chunks and docs are on the right shards.");
+assert.commandWorked(st.s.adminCommand({removeShardFromZone: st.shard0.shardName, zone: "zoneB"}));
+moveZoneToShard(st, "zoneC", st.shard2, st.shard0);
+moveZoneToShard(st, "zoneA", st.shard1, st.shard2);
+shardTags = {
+ [st.shard0.shardName]: ["zoneC"],
+ [st.shard1.shardName]: ["zoneB"],
+ [st.shard2.shardName]: ["zoneA"]
+};
+assertShardTags(configDB, shardTags);
+
+runBalancer(st,
+ numChunksToMove + zoneChunkBounds["zoneA"].length + zoneChunkBounds["zoneC"].length);
+shardChunkBounds = {
+ [st.shard0.shardName]: zoneChunkBounds["zoneC"],
+ [st.shard1.shardName]: zoneChunkBounds["zoneB"],
+ [st.shard2.shardName]: zoneChunkBounds["zoneA"]
+};
+assertChunksOnShards(configDB, ns, shardChunkBounds);
+assertDocsOnShards(st, ns, shardChunkBounds, docs, shardKey);
+
+jsTest.log("Test chunk's zone changes...");
+
+// Find the chunk with the highest bounds in zoneA.
+let originalZoneARange = chunkBoundsUtil.computeRange(zoneChunkBounds["zoneA"]);
+let chunkToMove = findHighestChunkBounds(zoneChunkBounds["zoneA"]);
+assert(chunkBoundsUtil.containsKey(chunkToMove[0], ...originalZoneARange));
+assert(chunkBoundsUtil.eq(chunkToMove[1], originalZoneARange[1]));
+
+jsTest.log("Make the chunk originally in zoneA belong to zoneB.");
+assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: originalZoneARange[0], max: originalZoneARange[1], zone: null}));
+assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: originalZoneARange[0], max: chunkToMove[0], zone: "zoneA"}));
+assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: chunkToMove[0], max: originalZoneARange[1], zone: "zoneB"}));
+
+jsTest.log("Check that the chunk moves from zoneA to zoneB after the zone range change");
+runBalancer(st, 1);
+shardChunkBounds = {
+ [st.shard0.shardName]: zoneChunkBounds["zoneC"],
+ [st.shard1.shardName]: [chunkToMove, ...zoneChunkBounds["zoneB"]],
+ [st.shard2.shardName]: zoneChunkBounds["zoneA"].filter(
+ (chunkBounds) => !chunkBoundsUtil.eq(chunkToMove, chunkBounds))
+};
+assertChunksOnShards(configDB, ns, shardChunkBounds);
+assertDocsOnShards(st, ns, shardChunkBounds, docs, shardKey);
+
+jsTest.log("Make the chunk originally in zoneB belong to zoneC.");
+assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: chunkToMove[0], max: chunkToMove[1], zone: null}));
+assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: chunkToMove[0], max: chunkToMove[1], zone: "zoneC"}));
+
+jsTest.log("Check that the chunk moves from zoneB to zoneC after the zone range change");
+runBalancer(st, 1);
+shardChunkBounds = {
+ [st.shard0.shardName]: [chunkToMove, ...zoneChunkBounds["zoneC"]],
+ [st.shard1.shardName]: zoneChunkBounds["zoneB"],
+ [st.shard2.shardName]: zoneChunkBounds["zoneA"].filter(
+ (chunkBounds) => !chunkBoundsUtil.eq(chunkToMove, chunkBounds))
+};
+assertChunksOnShards(configDB, ns, shardChunkBounds);
+assertDocsOnShards(st, ns, shardChunkBounds, docs, shardKey);
+
+st.stop();
+})();
diff --git a/jstests/sharding/zone_changes_range.js b/jstests/sharding/zone_changes_range.js
new file mode 100644
index 00000000000..49710624f47
--- /dev/null
+++ b/jstests/sharding/zone_changes_range.js
@@ -0,0 +1,192 @@
+/**
+ * Test that chunks and documents are moved correctly after zone changes.
+ */
+(function() {
+'use strict';
+
+load("jstests/sharding/libs/zone_changes_util.js");
+
+let st = new ShardingTest({shards: 3});
+let primaryShard = st.shard0;
+let dbName = "test";
+let testDB = st.s.getDB(dbName);
+let configDB = st.s.getDB("config");
+let coll = testDB.range;
+let ns = coll.getFullName();
+let shardKey = {x: 1};
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, primaryShard.shardName);
+
+jsTest.log("Shard the collection and create chunks.");
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: shardKey}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: -10}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 0}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 10}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 20}}));
+
+jsTest.log("Insert docs (one for each chunk) and check that they end up on the primary shard.");
+let docs = [{x: -15}, {x: -5}, {x: 5}, {x: 15}, {x: 25}];
+assert.eq(docs.length, configDB.chunks.count({ns: ns}));
+assert.commandWorked(coll.insert(docs));
+assert.eq(docs.length, primaryShard.getCollection(ns).count());
+
+jsTest.log("Add shards to zones and assign zone key ranges.");
+// The chunks on each zone are:
+// zoneA: [MinKey, -10)
+// zoneB: [-10, 0), [0, 10)
+// zoneC: [10, 20), [20, MaxKey)
+assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: "zoneA"}));
+assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: "zoneB"}));
+assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard2.shardName, zone: "zoneC"}));
+assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: ns, min: {x: MinKey}, max: {x: -10}, zone: "zoneA"}));
+assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: ns, min: {x: -10}, max: {x: 10}, zone: "zoneB"}));
+assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: ns, min: {x: 10}, max: {x: MaxKey}, zone: "zoneC"}));
+
+jsTest.log("Check that the shards have the assigned zones.");
+let shardTags = {
+ [st.shard0.shardName]: ["zoneA"],
+ [st.shard1.shardName]: ["zoneB"],
+ [st.shard2.shardName]: ["zoneC"]
+};
+assertShardTags(configDB, shardTags);
+
+jsTest.log("Check that the balancer does not balance if \"noBalance\" is true.");
+assert.commandWorked(
+ configDB.collections.update({_id: ns}, {$set: {"noBalance": true}}, {upsert: true}));
+runBalancer(st, 4);
+let shardChunkBounds = {
+ [primaryShard.shardName]: [
+ [{x: MinKey}, {x: -10}],
+ [{x: -10}, {x: 0}],
+ [{x: 0}, {x: 10}],
+ [{x: 10}, {x: 20}],
+ [{x: 20}, {x: MaxKey}]
+ ]
+};
+assertChunksOnShards(configDB, ns, shardChunkBounds);
+assert.eq(docs.length, primaryShard.getCollection(ns).count());
+
+jsTest.log(
+ "Let the balancer do the balancing, and check that the chunks and the docs are on the right shards.");
+assert.commandWorked(
+ configDB.collections.update({_id: ns}, {$set: {"noBalance": false}}, {upsert: true}));
+runBalancer(st, 4);
+shardChunkBounds = {
+ [st.shard0.shardName]: [[{x: MinKey}, {x: -10}]],
+ [st.shard1.shardName]: [[{x: -10}, {x: 0}], [{x: 0}, {x: 10}]],
+ [st.shard2.shardName]: [[{x: 10}, {x: 20}], [{x: 20}, {x: MaxKey}]]
+};
+assertChunksOnShards(configDB, ns, shardChunkBounds);
+assertDocsOnShards(st, ns, shardChunkBounds, docs, shardKey);
+
+jsTest.log("Test shard's zone changes...");
+
+jsTest.log("Check that removing the only shard that a zone belongs to is not allowed.");
+assert.commandFailedWithCode(
+ st.s.adminCommand({removeShardFromZone: st.shard0.shardName, zone: "zoneA"}),
+ ErrorCodes.ZoneStillInUse);
+
+jsTest.log(
+ "Check that removing a zone from a shard causes its chunks and documents to move to other" +
+ " shards that the zone belongs to.");
+moveZoneToShard(st, "zoneA", st.shard0, st.shard1);
+shardTags = {
+ [st.shard0.shardName]: [],
+ [st.shard1.shardName]: ["zoneB", "zoneA"],
+ [st.shard2.shardName]: ["zoneC"]
+};
+assertShardTags(configDB, shardTags);
+
+runBalancer(st, 1);
+shardChunkBounds = {
+ [st.shard0.shardName]: [],
+ [st.shard1.shardName]: [[{x: MinKey}, {x: -10}], [{x: -10}, {x: 0}], [{x: 0}, {x: 10}]],
+ [st.shard2.shardName]: [[{x: 10}, {x: 20}], [{x: 20}, {x: MaxKey}]]
+};
+assertChunksOnShards(configDB, ns, shardChunkBounds);
+assertDocsOnShards(st, ns, shardChunkBounds, docs, shardKey);
+
+jsTest.log("Check that the balancer balances chunks within zones.");
+assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: "zoneB"}));
+shardTags = {
+ [st.shard0.shardName]: ["zoneB"],
+ [st.shard1.shardName]: ["zoneB", "zoneA"],
+ [st.shard2.shardName]: ["zoneC"]
+};
+assertShardTags(configDB, shardTags);
+
+runBalancer(st, 1);
+shardChunkBounds = {
+ [st.shard0.shardName]: [[{x: -10}, {x: 0}]],
+ [st.shard1.shardName]: [[{x: MinKey}, {x: -10}], [{x: 0}, {x: 10}]],
+ [st.shard2.shardName]: [[{x: 10}, {x: 20}], [{x: 20}, {x: MaxKey}]]
+};
+assertChunksOnShards(configDB, ns, shardChunkBounds);
+assertDocsOnShards(st, ns, shardChunkBounds, docs, shardKey);
+
+jsTest.log("Make another zone change, and check that the chunks and docs are on the right shards.");
+assert.commandWorked(st.s.adminCommand({removeShardFromZone: st.shard0.shardName, zone: "zoneB"}));
+moveZoneToShard(st, "zoneC", st.shard2, st.shard0);
+moveZoneToShard(st, "zoneA", st.shard1, st.shard2);
+shardTags = {
+ [st.shard0.shardName]: ["zoneC"],
+ [st.shard1.shardName]: ["zoneB"],
+ [st.shard2.shardName]: ["zoneA"]
+};
+assertShardTags(configDB, shardTags);
+
+runBalancer(st, 4);
+shardChunkBounds = {
+ [st.shard0.shardName]: [[{x: 10}, {x: 20}], [{x: 20}, {x: MaxKey}]],
+ [st.shard1.shardName]: [[{x: -10}, {x: 0}], [{x: 0}, {x: 10}]],
+ [st.shard2.shardName]: [[{x: MinKey}, {x: -10}]]
+};
+assertChunksOnShards(configDB, ns, shardChunkBounds);
+assertDocsOnShards(st, ns, shardChunkBounds, docs, shardKey);
+
+jsTest.log("Test chunk's zone changes...");
+
+// Make a series of zone range changes to make zoneA (and later also zoneB) own only
+// chunks that contains no docs. Each time the balancer is expected to split the
+// affected chunks and move the chunks and docs that no longer belong to the updated
+// zone to the shards with zone that the chunks belong to.
+
+jsTest.log("Assign the key range in zoneA that contains chunks to zoneB, and check that the " +
+ "chunks and docs are on the right shards.");
+updateZoneKeyRange(st, ns, "zoneA", [{x: MinKey}, {x: -10}], [{x: MinKey}, {x: -20}]);
+updateZoneKeyRange(st, ns, "zoneB", [{x: -10}, {x: 10}], [{x: -20}, {x: 10}]);
+runBalancer(st, 1);
+shardChunkBounds = {
+ [st.shard0.shardName]: [[{x: 10}, {x: 20}], [{x: 20}, {x: MaxKey}]],
+ [st.shard1.shardName]: [[{x: -20}, {x: -10}], [{x: -10}, {x: 0}], [{x: 0}, {x: 10}]],
+ [st.shard2.shardName]: [[{x: MinKey}, {x: -20}]] // no docs
+};
+assertChunksOnShards(configDB, ns, shardChunkBounds);
+assertDocsOnShards(st, ns, shardChunkBounds, docs, shardKey);
+
+jsTest.log("Assign the key range in zoneB that contains chunks to zoneC, and check that the " +
+ "chunks and docs are on the right shards.");
+updateZoneKeyRange(st, ns, "zoneB", [{x: -20}, {x: 10}], [{x: -20}, {x: -15}]);
+updateZoneKeyRange(st, ns, "zoneC", [{x: 10}, {x: MaxKey}], [{x: -15}, {x: MaxKey}]);
+runBalancer(st, 3);
+shardChunkBounds = {
+ [st.shard0.shardName]: [
+ [{x: -15}, {x: -10}],
+ [{x: -10}, {x: 0}],
+ [{x: 0}, {x: 10}],
+ [{x: 10}, {x: 20}],
+ [{x: 20}, {x: MaxKey}]
+ ],
+ [st.shard1.shardName]: [[{x: -20}, {x: -15}]], // no docs
+ [st.shard2.shardName]: [[{x: MinKey}, {x: -20}]] // no docs
+};
+assertChunksOnShards(configDB, ns, shardChunkBounds);
+assertDocsOnShards(st, ns, shardChunkBounds, docs, shardKey);
+assert.eq(docs.length, st.shard0.getCollection(ns).count());
+
+st.stop();
+})();