author    Jordi Serra Torrens <jordi.serra-torrens@mongodb.com>  2023-03-02 09:38:13 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>       2023-03-02 10:57:39 +0000
commit    37b6c9f3fbeef686273563a30f6a049c253358ea (patch)
tree      62c0f39e134e885bd7e7de6dfad064c08be36913
parent    20e995074cf7420ced7e46636ec0a78e2fd0c19f (diff)
SERVER-72650 Make shardCollection with hashed key randomly distribute chunks across the cluster
-rw-r--r--  jstests/sharding/compound_hashed_shard_key_presplitting.js                 82
-rw-r--r--  jstests/sharding/merge_chunk_hashed.js                                      25
-rw-r--r--  jstests/sharding/migration_fails_if_exists_in_rangedeletions.js             12
-rw-r--r--  jstests/sharding/shard_collection_existing_zones.js                         24
-rw-r--r--  jstests/sharding/transactions_reject_writes_for_moved_chunks.js             38
-rw-r--r--  jstests/sharding/update_replace_id.js                                       18
-rw-r--r--  jstests/sharding/zone_changes_hashed.js                                      4
-rw-r--r--  src/mongo/db/s/config/initial_split_policy.cpp                              20
-rw-r--r--  src/mongo/db/s/config/initial_split_policy_test.cpp                        575
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp    7
10 files changed, 545 insertions(+), 260 deletions(-)
diff --git a/jstests/sharding/compound_hashed_shard_key_presplitting.js b/jstests/sharding/compound_hashed_shard_key_presplitting.js
index d288eba14b0..45530ce3e32 100644
--- a/jstests/sharding/compound_hashed_shard_key_presplitting.js
+++ b/jstests/sharding/compound_hashed_shard_key_presplitting.js
@@ -48,7 +48,7 @@ st.ensurePrimaryShard('test', st.shard1.shardName);
 * Helper function to validate that the chunk ranges have all the shard key fields and that each
 * shard has the expected number of chunks.
*/
-function checkValidChunks(coll, shardKey, expectedChunks) {
+function checkValidChunks(coll, shardKey, checkChunksPerShardFn) {
const chunks = findChunksUtil.findChunksByNs(st.config, coll.getFullName()).toArray();
let shardCountsMap =
{[st.shard0.shardName]: 0, [st.shard1.shardName]: 0, [st.shard2.shardName]: 0};
@@ -64,12 +64,8 @@ function checkValidChunks(coll, shardKey, expectedChunks) {
assertHasAllShardKeyFields(chunk.min);
assertHasAllShardKeyFields(chunk.max);
}
- let index = 0;
- for (let shardName in shardCountsMap) {
- assert.eq(expectedChunks[index++],
- shardCountsMap[shardName],
- "Expected chunks did not match for " + shardName + ". " + tojson(chunks));
- }
+
+ checkChunksPerShardFn(shardCountsMap);
}
//
@@ -83,25 +79,47 @@ assert.commandWorked(db.hashedCollEmpty.createIndex(shardKey));
let coll = db.hashedCollEmpty;
assert.commandWorked(
mongos.adminCommand({shardCollection: coll.getFullName(), key: shardKey, numInitialChunks: 6}));
-checkValidChunks(coll, shardKey, [2, 2, 2]);
+checkValidChunks(coll, shardKey, (shardCountsMap) => {
+ // Each shard has 2 chunks.
+    assert(Object.values(shardCountsMap).every((count) => count === 2), tojson(shardCountsMap));
+});
// Supported: Hashed sharding + numInitialChunks + non-existent collection.
-// Expected: Even chunk distribution and the remainder chunks on the first shard.
+// Expected: Even chunk distribution and the remainder chunks on any shard.
coll = db.hashedCollNonExistent;
assert.commandWorked(
mongos.adminCommand({shardCollection: coll.getFullName(), key: shardKey, numInitialChunks: 8}));
-checkValidChunks(coll, shardKey, [4, 2, 2]);
+checkValidChunks(coll, shardKey, (shardCountsMap) => {
+ const totalChunks = Object.values(shardCountsMap).reduce((accumulator, v) => accumulator + v);
+ assert.eq(8, totalChunks, "Unexpected total amount of chunks");
+
+    assert(Object.values(shardCountsMap).every((count) => count >= 2), tojson(shardCountsMap));
+});
// When 'numInitialChunks' is one, primary shard should have the chunk.
coll = db.hashedNumInitialChunksOne;
assert.commandWorked(
mongos.adminCommand({shardCollection: coll.getFullName(), key: shardKey, numInitialChunks: 1}));
-checkValidChunks(coll, shardKey, [0, 1, 0]);
+checkValidChunks(coll, shardKey, (shardCountsMap) => {
+ // Just one chunk, on any shard.
+ const totalChunks = Object.values(shardCountsMap).reduce((accumulator, v) => accumulator + v);
+ assert.eq(1, totalChunks, "Unexpected total amount of chunks");
+});
// Default pre-splitting assigns two chunks per shard.
coll = db.hashedDefaultPreSplit;
assert.commandWorked(mongos.adminCommand({shardCollection: coll.getFullName(), key: shardKey}));
-checkValidChunks(coll, shardKey, [2, 2, 2]);
+checkValidChunks(coll, shardKey, (shardCountsMap) => {
+ assert.gte(shardCountsMap[st.shard0.shardName],
+ 2,
+ "Unexpected amount of chunks on " + st.shard0.shardName);
+ assert.gte(shardCountsMap[st.shard1.shardName],
+ 2,
+ "Unexpected amount of chunks on " + st.shard1.shardName);
+ assert.gte(shardCountsMap[st.shard2.shardName],
+ 2,
+ "Unexpected amount of chunks on " + st.shard2.shardName);
+});
db.hashedPrefixColl.drop();
@@ -138,8 +156,12 @@ assert.commandFailedWithCode(db.adminCommand({
// Creates chunks based on the zones.
assert.commandWorked(db.adminCommand(
{shardcollection: db.hashedPrefixColl.getFullName(), key: shardKey, numInitialChunks: 2}));
-checkValidChunks(
- db.hashedPrefixColl, shardKey, [1 /* Boundary chunk */, 0, 1 /* zone present in shard2 */]);
+checkValidChunks(db.hashedPrefixColl, shardKey, (shardCountsMap) => {
+ // Two chunks in total. One of them on shard2 (zoned) and the other one on any shard.
+ const totalChunks = Object.values(shardCountsMap).reduce((accumulator, v) => accumulator + v);
+ assert.eq(2, totalChunks, "Unexpected total amount of chunks");
+ assert.gte(shardCountsMap[st.shard2.shardName], 1, "Unexpected amount of chunks on shard2");
+});
// Verify that 'shardCollection' command will pre-split chunks if a single zone is set up ranging
// from MinKey to MaxKey and 'presplitHashedZones' flag is set.
@@ -161,7 +183,11 @@ assert.commandWorked(db.adminCommand({
}));
// By default, we create two chunks per shard for each shard that contains at least one zone.
-checkValidChunks(db.hashedPrefixColl, shardKey, [0, 2, 2]);
+checkValidChunks(db.hashedPrefixColl, shardKey, (shardCountsMap) => {
+ assert.eq(0, shardCountsMap[st.shard0.shardName], "Unexpected amount of chunks on shard0");
+ assert.eq(2, shardCountsMap[st.shard1.shardName], "Unexpected amount of chunks on shard1");
+ assert.eq(2, shardCountsMap[st.shard2.shardName], "Unexpected amount of chunks on shard2");
+});
// Verify that 'shardCollection' command will pre-split chunks equally among all the eligible
// shards.
@@ -185,7 +211,9 @@ assert.commandWorked(db.adminCommand({
presplitHashedZones: true,
numInitialChunks: 100
}));
-checkValidChunks(db.hashedPrefixColl, shardKey, [34, 34, 34]);
+checkValidChunks(db.hashedPrefixColl, shardKey, (shardCountsMap) => {
+    assert(Object.values(shardCountsMap).every((count) => count === 34), tojson(shardCountsMap));
+});
//
// Test cases for compound hashed shard keys with non-hashed prefix.
@@ -323,7 +351,9 @@ assert.commandWorked(db.adminCommand({
// shards have 2 tags. So we create ceil(167/4) = 42 per tag on shard1 = 168, while we create
// ceil(167/2) = 84 per tag on others. In addition, we create 5 chunks for boundaries which will be
// distributed among the three shards using round robin.
-checkValidChunks(db.coll, shardKey, [170, 170, 169]);
+checkValidChunks(db.coll, shardKey, (shardCountsMap) => {
+    assert(Object.values(shardCountsMap).every((count) => count >= 169), tojson(shardCountsMap));
+});
// When 'numInitialChunks = 1'.
db.coll.drop();
@@ -337,7 +367,14 @@ assert.commandWorked(db.adminCommand({
// The chunk distribution from zones should be [2, 2+2 (two zones), 2]. The 5 gap chunks should be
// distributed among three shards.
-checkValidChunks(db.coll, shardKey, [4, 6, 3]);
+checkValidChunks(db.coll, shardKey, (shardCountsMap) => {
+ const totalChunks = Object.values(shardCountsMap).reduce((accumulator, v) => accumulator + v);
+ assert.eq(13, totalChunks, "Unexpected total amount of chunks");
+
+ assert.gte(shardCountsMap[st.shard0.shardName], 3, "Unexpected amount of chunks on shard0");
+ assert.gte(shardCountsMap[st.shard1.shardName], 5, "Unexpected amount of chunks on shard1");
+ assert.gte(shardCountsMap[st.shard2.shardName], 3, "Unexpected amount of chunks on shard2");
+});
// Verify that 'presplitHashedZones' uses default value of two per shard when 'numInitialChunks' is
// not passed.
@@ -348,7 +385,14 @@ assert.commandWorked(db.adminCommand(
// Since only Shard0 has chunks, we create one chunk per tag on shard0. The 5 gap chunks should be
// distributed among three shards.
-checkValidChunks(db.coll, shardKey, [6, 2, 1]);
+checkValidChunks(db.coll, shardKey, (shardCountsMap) => {
+ const totalChunks = Object.values(shardCountsMap).reduce((accumulator, v) => accumulator + v);
+ assert.eq(9, totalChunks, "Unexpected total amount of chunks");
+
+ assert.gte(shardCountsMap[st.shard0.shardName], 5, "Unexpected amount of chunks on shard0");
+ assert.gte(shardCountsMap[st.shard1.shardName], 1, "Unexpected amount of chunks on shard1");
+ assert.gte(shardCountsMap[st.shard2.shardName], 1, "Unexpected amount of chunks on shard2");
+});
st.stop();
})();
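With the shard order now shuffled, the rewritten assertions above can no longer pin down an exact per-shard chunk vector (the removed [4, 2, 2]-style expectations); they check a total plus per-shard bounds instead. As a small standalone sketch of the floor/ceil bounds those checks rely on when N chunks are dealt round-robin across S shards (a hypothetical helper, not part of this patch):

#include <cstddef>
#include <iostream>
#include <utility>

// With N chunks dealt round-robin across S shards (starting from a random shard), every shard
// ends up with either floor(N/S) or ceil(N/S) chunks.
std::pair<std::size_t, std::size_t> perShardChunkBounds(std::size_t numChunks, std::size_t numShards) {
    const std::size_t floorCount = numChunks / numShards;
    const std::size_t ceilCount = floorCount + (numChunks % numShards == 0 ? 0 : 1);
    return {floorCount, ceilCount};
}

int main() {
    // Mirrors the expectations above: 6 chunks over 3 shards -> exactly 2 each;
    // 8 chunks over 3 shards -> every shard owns at least 2 (and at most 3).
    const auto bounds6 = perShardChunkBounds(6, 3);
    const auto bounds8 = perShardChunkBounds(8, 3);
    std::cout << bounds6.first << "-" << bounds6.second << "\n";  // prints "2-2"
    std::cout << bounds8.first << "-" << bounds8.second << "\n";  // prints "2-3"
    return 0;
}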
diff --git a/jstests/sharding/merge_chunk_hashed.js b/jstests/sharding/merge_chunk_hashed.js
index 52dace078fe..ca0dcbd2bc0 100644
--- a/jstests/sharding/merge_chunk_hashed.js
+++ b/jstests/sharding/merge_chunk_hashed.js
@@ -24,11 +24,36 @@ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
assert.commandWorked(admin.runCommand({shardCollection: ns, key: {x: 'hashed'}}));
+// Setup predictable chunk distribution:
// Default chunks:
// shard0: MIN -> -4611686018427387902,
// -4611686018427387902 -> 0
// shard1: 0 -> 4611686018427387902
// 4611686018427387902 -> MAX
+assert.commandWorked(admin.runCommand({
+ moveChunk: ns,
+ bounds: [{x: MinKey}, {x: NumberLong("-4611686018427387902")}],
+ to: st.shard0.shardName,
+ _waitForDelete: true
+}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: ns,
+ bounds: [{x: NumberLong("-4611686018427387902")}, {x: 0}],
+ to: st.shard0.shardName,
+ _waitForDelete: true
+}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: ns,
+ bounds: [{x: 0}, {x: NumberLong("4611686018427387902")}],
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: ns,
+ bounds: [{x: NumberLong("4611686018427387902")}, {x: MaxKey}],
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
// Get the chunk -4611686018427387902 -> 0 on shard0.
let chunkToSplit = findChunksUtil.findOneChunkByNs(
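The moveChunk bounds above are the initial chunk boundaries for a hashed shard key split into four chunks (two per shard). As a rough standalone illustration of where those values come from (an assumed convention that reproduces the test's numbers, not the server's calculateHashedSplitPoints source): for an even chunk count, the split points are 0 plus symmetric multiples of (LLONG_MAX / numChunks) * 2, which yields the +/-4611686018427387902 boundaries here and the +/-3074457345618258602 and +/-6148914691236517204 boundaries used in transactions_reject_writes_for_moved_chunks.js further down.

#include <iostream>
#include <limits>
#include <vector>

// Split points for an even number of initial hashed chunks: 0 plus symmetric multiples of
// (LLONG_MAX / numChunks) * 2. Illustrative only; consistent with the boundaries in these tests.
std::vector<long long> hashedSplitPointsForEvenChunkCount(int numChunks) {
    const long long intervalSize = (std::numeric_limits<long long>::max() / numChunks) * 2;
    std::vector<long long> splitPoints;
    for (int i = -(numChunks / 2 - 1); i <= numChunks / 2 - 1; ++i) {
        splitPoints.push_back(i * intervalSize);
    }
    return splitPoints;
}

int main() {
    for (long long p : hashedSplitPointsForEvenChunkCount(4)) {
        std::cout << p << "\n";  // -4611686018427387902, 0, 4611686018427387902
    }
    return 0;
}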
diff --git a/jstests/sharding/migration_fails_if_exists_in_rangedeletions.js b/jstests/sharding/migration_fails_if_exists_in_rangedeletions.js
index bd1a68450b3..f2af78f5d8f 100644
--- a/jstests/sharding/migration_fails_if_exists_in_rangedeletions.js
+++ b/jstests/sharding/migration_fails_if_exists_in_rangedeletions.js
@@ -62,16 +62,20 @@ st.shard1.rs.getPrimary().adminCommand(
assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 'hashed'}}));
+ // Make sure the chunk is on shard0.
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: ns, find: {x: 50}, to: st.shard0.shardName, _waitForDelete: true}));
+
// Pause range deletion on shard0.
let suspendRangeDeletionFailpoint = configureFailPoint(st.shard0, "suspendRangeDeletion");
- // Move the only chunk from shard0 to shard1. This will leave orphans on shard0 since we paused
- // range deletion.
+ // Move the chunk from shard0 to shard1. This will leave orphans on shard0 since we paused range
+ // deletion.
assert.commandWorked(
st.s.adminCommand({moveChunk: ns, find: {x: 50}, to: st.shard1.shardName}));
- // Move the only chunk back to shard0 and expect timeout failure, since range deletion was
- // paused and there are orphans on shard0.
+ // Move chunk back to shard0 and expect timeout failure, since range deletion was paused and
+ // there are orphans on shard0.
assert.commandFailedWithCode(
st.s.adminCommand({moveChunk: ns, find: {x: 50}, to: st.shard0.shardName, maxTimeMS: 5000}),
ErrorCodes.MaxTimeMSExpired);
diff --git a/jstests/sharding/shard_collection_existing_zones.js b/jstests/sharding/shard_collection_existing_zones.js
index b0c5b2a31a1..666df4f5a50 100644
--- a/jstests/sharding/shard_collection_existing_zones.js
+++ b/jstests/sharding/shard_collection_existing_zones.js
@@ -82,8 +82,8 @@ function testChunkSplits(collectionExists) {
// create zones:
// shard0 - zonename0 - [0, 10)
- // shard1 - zonename0 - [10, 20)
- // shard2 - zonename0 - [30, 40)
+ // shard1 - zonename1 - [10, 20)
+ // shard2 - zonename2 - [30, 40)
for (var i = 0; i < shards.length; i++) {
assert.commandWorked(
st.s.adminCommand({addShardToZone: shards[i]._id, zone: zoneName + i}));
@@ -98,13 +98,16 @@ function testChunkSplits(collectionExists) {
// shard the collection and validate the resulting chunks
assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: shardKey}));
+ // Expected:
+ // - zoned chunks on the corresponding shard.
+ // - 2 chunks on each shard
var expectedChunks = [
- {range: [{x: {"$minKey": 1}}, {x: 0}], shardId: st.shard0.shardName},
- {range: [{x: 0}, {x: 10}], shardId: st.shard0.shardName}, // pre-defined
- {range: [{x: 10}, {x: 20}], shardId: st.shard1.shardName},
- {range: [{x: 20}, {x: 30}], shardId: st.shard1.shardName}, // pre-defined
+ {range: [{x: {"$minKey": 1}}, {x: 0}], shardId: null}, // any shard
+ {range: [{x: 0}, {x: 10}], shardId: st.shard0.shardName}, // pre-defined
+ {range: [{x: 10}, {x: 20}], shardId: st.shard1.shardName}, // pre-defined
+ {range: [{x: 20}, {x: 30}], shardId: null}, // any shard
{range: [{x: 30}, {x: 40}], shardId: st.shard2.shardName}, // pre-defined
- {range: [{x: 40}, {x: {"$maxKey": 1}}], shardId: st.shard2.shardName}
+ {range: [{x: 40}, {x: {"$maxKey": 1}}], shardId: null} // any shard
];
var chunkDocs = findChunksUtil.findChunksByNs(configDB, ns).sort({min: 1}).toArray();
assert.eq(chunkDocs.length,
@@ -115,8 +118,13 @@ function testChunkSplits(collectionExists) {
tojson(chunkDocs[i]);
assert.eq(expectedChunks[i].range[0], chunkDocs[i].min, errMsg);
assert.eq(expectedChunks[i].range[1], chunkDocs[i].max, errMsg);
- assert.eq(expectedChunks[i].shardId, chunkDocs[i].shard, errMsg);
+ if (expectedChunks[i].shardId !== null) {
+ assert.eq(expectedChunks[i].shardId, chunkDocs[i].shard, errMsg);
+ }
}
+ assert.eq(2, findChunksUtil.countChunksForNs(configDB, ns, {shard: st.shard0.shardName}));
+ assert.eq(2, findChunksUtil.countChunksForNs(configDB, ns, {shard: st.shard1.shardName}));
+ assert.eq(2, findChunksUtil.countChunksForNs(configDB, ns, {shard: st.shard2.shardName}));
assert.commandWorked(testDB.runCommand({drop: kCollName}));
}
diff --git a/jstests/sharding/transactions_reject_writes_for_moved_chunks.js b/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
index 51971b55cef..846c91c3be9 100644
--- a/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
+++ b/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
@@ -235,6 +235,44 @@ const hashedNs = dbName + '.' + hashedCollName;
assert.commandWorked(st.s.adminCommand({shardCollection: hashedNs, key: {_id: 'hashed'}}));
+// Setup a predictable chunk distribution:
+assert.commandWorked(st.s.adminCommand({
+ moveChunk: hashedNs,
+ bounds: [{_id: MinKey}, {_id: NumberLong("-6148914691236517204")}],
+ to: st.shard0.shardName,
+ _waitForDelete: true
+}));
+assert.commandWorked(st.s.adminCommand({
+ moveChunk: hashedNs,
+ bounds: [{_id: NumberLong("-6148914691236517204")}, {_id: NumberLong("-3074457345618258602")}],
+ to: st.shard0.shardName,
+ _waitForDelete: true
+}));
+assert.commandWorked(st.s.adminCommand({
+ moveChunk: hashedNs,
+ bounds: [{_id: NumberLong("-3074457345618258602")}, {_id: NumberLong("0")}],
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+assert.commandWorked(st.s.adminCommand({
+ moveChunk: hashedNs,
+ bounds: [{_id: NumberLong("0")}, {_id: NumberLong("3074457345618258602")}],
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+assert.commandWorked(st.s.adminCommand({
+ moveChunk: hashedNs,
+ bounds: [{_id: NumberLong("3074457345618258602")}, {_id: NumberLong("6148914691236517204")}],
+ to: st.shard2.shardName,
+ _waitForDelete: true
+}));
+assert.commandWorked(st.s.adminCommand({
+ moveChunk: hashedNs,
+ bounds: [{_id: NumberLong("6148914691236517204")}, {_id: MaxKey}],
+ to: st.shard2.shardName,
+ _waitForDelete: true
+}));
+
assert.commandWorked(
st.s.getDB(dbName)[hashedCollName].insert({_id: -3}, {writeConcern: {w: "majority"}}));
assert.commandWorked(
diff --git a/jstests/sharding/update_replace_id.js b/jstests/sharding/update_replace_id.js
index 0320cadc40e..d38bb73474d 100644
--- a/jstests/sharding/update_replace_id.js
+++ b/jstests/sharding/update_replace_id.js
@@ -60,6 +60,22 @@ function setUpData() {
}
function runReplacementUpdateTestsForHashedShardKey() {
+ // Make sure the chunk containing key {_id: -100} is on shard0.
+ assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {_id: -100},
+ to: st.shard0.shardName,
+ _waitForDelete: true
+ }));
+
+    // Make sure the chunk containing key {_id: 101} is on shard1.
+ assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {_id: 101},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+ }));
+
setUpData();
// Perform a replacement update whose query is an exact match on _id and whose replacement
@@ -195,4 +211,4 @@ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: "ha
runReplacementUpdateTestsForHashedShardKey();
st.stop();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/sharding/zone_changes_hashed.js b/jstests/sharding/zone_changes_hashed.js
index b61d7933a06..6a5a6ca6914 100644
--- a/jstests/sharding/zone_changes_hashed.js
+++ b/jstests/sharding/zone_changes_hashed.js
@@ -224,7 +224,9 @@ assertChunksOnShards(configDB, ns, shardChunkBounds);
assertDocsOnShards(st, ns, shardChunkBounds, docs, shardKey);
jsTest.log("Make the chunk not aligned with zone ranges.");
-let splitPoint = {x: NumberLong(targetChunkBounds[1].x - 5000)};
+let splitPoint = (targetChunkBounds[1].x === MaxKey)
+ ? {x: NumberLong(targetChunkBounds[0].x + 5000)}
+ : {x: NumberLong(targetChunkBounds[1].x - 5000)};
assert(chunkBoundsUtil.containsKey(splitPoint, ...targetChunkBounds));
assert.commandWorked(st.s.adminCommand(
{updateZoneKeyRange: ns, min: targetChunkBounds[0], max: targetChunkBounds[1], zone: null}));
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 19fd5da8567..08db69f4781 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -56,11 +56,10 @@ using ChunkDistributionMap = stdx::unordered_map<ShardId, size_t>;
using ZoneShardMap = StringMap<std::vector<ShardId>>;
using boost::intrusive_ptr;
-std::vector<ShardId> getAllShardIdsSorted(OperationContext* opCtx) {
- // Many tests assume that chunks will be placed on shards
- // according to their IDs in ascending lexical order.
+std::vector<ShardId> getAllShardIdsShuffled(OperationContext* opCtx) {
auto shardIds = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx);
- std::sort(shardIds.begin(), shardIds.end());
+ std::default_random_engine rng{};
+ std::shuffle(shardIds.begin(), shardIds.end(), rng);
return shardIds;
}
@@ -241,12 +240,7 @@ InitialSplitPolicy::ShardCollectionConfig InitialSplitPolicy::generateShardColle
const BSONObj min = (i == 0) ? keyPattern.globalMin() : finalSplitPoints[i - 1];
const BSONObj max =
(i < finalSplitPoints.size()) ? finalSplitPoints[i] : keyPattern.globalMax();
-
- // It's possible there are no split points or fewer split points than total number of
- // shards, and we need to be sure that at least one chunk is placed on the primary shard
- const ShardId shardId = (i == 0 && finalSplitPoints.size() + 1 < allShardIds.size())
- ? params.primaryShardId
- : allShardIds[(i / numContiguousChunksPerShard) % allShardIds.size()];
+ const ShardId shardId = allShardIds[(i / numContiguousChunksPerShard) % allShardIds.size()];
appendChunk(params, min, max, &version, shardId, &chunks);
}
@@ -363,7 +357,7 @@ InitialSplitPolicy::ShardCollectionConfig SplitPointsBasedSplitPolicy::createFir
const SplitPolicyParams& params) {
// On which shards are the generated chunks allowed to be placed.
- const auto shardIds = getAllShardIdsSorted(opCtx);
+ const auto shardIds = getAllShardIdsShuffled(opCtx);
const auto currentTime = VectorClock::get(opCtx)->getTime();
const auto validAfter = currentTime.clusterTime().asTimestamp();
@@ -394,7 +388,7 @@ InitialSplitPolicy::ShardCollectionConfig AbstractTagsBasedSplitPolicy::createFi
const SplitPolicyParams& params) {
invariant(!_tags.empty());
- const auto shardIds = getAllShardIdsSorted(opCtx);
+ const auto shardIds = getAllShardIdsShuffled(opCtx);
const auto currentTime = VectorClock::get(opCtx)->getTime();
const auto validAfter = currentTime.clusterTime().asTimestamp();
const auto& keyPattern = shardKeyPattern.getKeyPattern();
@@ -730,7 +724,7 @@ InitialSplitPolicy::ShardCollectionConfig SamplingBasedSplitPolicy::createFirstC
}
{
- auto allShardIds = getAllShardIdsSorted(opCtx);
+ auto allShardIds = getAllShardIdsShuffled(opCtx);
for (const auto& shard : allShardIds) {
chunkDistribution.emplace(shard, 0);
}
diff --git a/src/mongo/db/s/config/initial_split_policy_test.cpp b/src/mongo/db/s/config/initial_split_policy_test.cpp
index 4db00e91978..923a59e01ea 100644
--- a/src/mongo/db/s/config/initial_split_policy_test.cpp
+++ b/src/mongo/db/s/config/initial_split_policy_test.cpp
@@ -70,6 +70,26 @@ void assertChunkVectorsAreEqual(const std::vector<ChunkType>& expected,
}
}
+void assertChunkRangesMatch(const std::vector<ChunkRange> expectedRanges,
+ const std::vector<ChunkType>& actualChunks) {
+ const auto actualRanges = [&]() {
+ std::vector<ChunkRange> actualRanges;
+ std::transform(actualChunks.begin(),
+ actualChunks.end(),
+ std::back_inserter(actualRanges),
+ [](auto& chunk) { return chunk.getRange(); });
+ return actualRanges;
+ }();
+
+ ASSERT_EQ(actualRanges, expectedRanges);
+}
+
+int getNumberOfChunksOnShard(const std::vector<ChunkType>& chunks, const ShardId& shardId) {
+ return std::count_if(chunks.begin(), chunks.end(), [&shardId](const ChunkType& chunk) {
+ return chunk.getShard() == shardId;
+ });
+}
+
/**
* Calls calculateHashedSplitPoints according to the given arguments
* and asserts that calculated split points match with the expected split points.
@@ -352,6 +372,18 @@ public:
assertChunkVectorsAreEqual(expectedChunks, shardCollectionConfig.chunks);
}
+ std::vector<ChunkType> generateInitialZoneChunks(const std::vector<ShardType> shards,
+ const std::vector<TagsType>& tags,
+ const ShardKeyPattern& shardKeyPattern,
+ const ShardId& primaryShard) {
+ auto opCtx = operationContext();
+ setupShards(shards);
+ shardRegistry()->reload(opCtx);
+ SingleChunkPerTagSplitPolicy splitPolicy(opCtx, tags);
+ return splitPolicy.createFirstChunks(opCtx, shardKeyPattern, {UUID::gen(), primaryShard})
+ .chunks;
+ }
+
std::string shardKey() {
return _shardKey;
}
@@ -401,13 +433,19 @@ TEST_F(SingleChunkPerTagSplitPolicyTest, PredefinedZonesSpanDoNotSpanFromMinToMa
ChunkRange(BSON(shardKey() << 10), keyPattern().globalMax()),
};
const std::vector<TagsType> tags = {makeTag(expectedChunkRanges[1], zoneName("0"))};
- const std::vector<ShardId> expectedShardIds = {shardId("0"), shardId("0"), shardId("1")};
- checkGeneratedInitialZoneChunks(kShards,
- tags,
- expectedChunkRanges,
- expectedShardIds,
- ShardKeyPattern(BSON("x"
- << "hashed")));
+
+ const auto generatedChunks = generateInitialZoneChunks(kShards,
+ tags,
+ ShardKeyPattern(BSON("x"
+ << "hashed")),
+ shardId("0"));
+
+ assertChunkRangesMatch(expectedChunkRanges, generatedChunks);
+ ASSERT_EQ(shardId("0"), generatedChunks[1].getShard()); // corresponds to a zone
+
+ // Shard0 owns the chunk corresponding to the zone plus another one. Shard1 owns just one.
+ ASSERT_EQ(2, getNumberOfChunksOnShard(generatedChunks, shardId("0")));
+ ASSERT_EQ(1, getNumberOfChunksOnShard(generatedChunks, shardId("1")));
}
TEST_F(SingleChunkPerTagSplitPolicyTest, PredefinedZonesContainGlobalMin) {
@@ -419,9 +457,17 @@ TEST_F(SingleChunkPerTagSplitPolicyTest, PredefinedZonesContainGlobalMin) {
ChunkRange(BSON(shardKey() << 0), keyPattern().globalMax()),
};
const std::vector<TagsType> tags = {makeTag(expectedChunkRanges[0], zoneName("0"))};
- const std::vector<ShardId> expectedShardIds = {shardId("0"), shardId("0")};
- checkGeneratedInitialZoneChunks(
- kShards, tags, expectedChunkRanges, expectedShardIds, ShardKeyPattern(BSON("x" << 1)));
+ const auto generatedChunks =
+ generateInitialZoneChunks(kShards, tags, ShardKeyPattern(BSON("x" << 1)), shardId("0"));
+
+ assertChunkRangesMatch(expectedChunkRanges, generatedChunks);
+ ASSERT_EQ(shardId("0"), generatedChunks[0].getShard()); // corresponds to a zone
+
+ // Shard0 owns the chunk corresponding to the zone. The other chunk (gap chunk) is randomly
+ // assigned to either shard.
+ const auto numChunkOnShard0 = getNumberOfChunksOnShard(generatedChunks, shardId("0"));
+ const auto numChunkOnShard1 = getNumberOfChunksOnShard(generatedChunks, shardId("1"));
+ ASSERT_TRUE((numChunkOnShard0 == 2 || numChunkOnShard1 == 1));
}
TEST_F(SingleChunkPerTagSplitPolicyTest, PredefinedZonesContainGlobalMax) {
@@ -433,9 +479,17 @@ TEST_F(SingleChunkPerTagSplitPolicyTest, PredefinedZonesContainGlobalMax) {
ChunkRange(BSON(shardKey() << 0), keyPattern().globalMax()), // corresponds to a zone
};
const std::vector<TagsType> tags = {makeTag(expectedChunkRanges[1], zoneName("0"))};
- const std::vector<ShardId> expectedShardIds = {shardId("0"), shardId("0")};
- checkGeneratedInitialZoneChunks(
- kShards, tags, expectedChunkRanges, expectedShardIds, ShardKeyPattern(BSON("x" << 1)));
+ const auto generatedChunks =
+ generateInitialZoneChunks(kShards, tags, ShardKeyPattern(BSON("x" << 1)), shardId("0"));
+
+ assertChunkRangesMatch(expectedChunkRanges, generatedChunks);
+ ASSERT_EQ(shardId("0"), generatedChunks[1].getShard()); // corresponds to a zone
+
+ // Shard0 owns the chunk corresponding to the zone. The other chunk (gap chunk) is randomly
+ // assigned to either shard.
+ const auto numChunkOnShard0 = getNumberOfChunksOnShard(generatedChunks, shardId("0"));
+ const auto numChunkOnShard1 = getNumberOfChunksOnShard(generatedChunks, shardId("1"));
+ ASSERT_TRUE((numChunkOnShard0 == 2 || numChunkOnShard1 == 1));
}
TEST_F(SingleChunkPerTagSplitPolicyTest, PredefinedZonesContainGlobalMinAndMax) {
@@ -449,9 +503,12 @@ TEST_F(SingleChunkPerTagSplitPolicyTest, PredefinedZonesContainGlobalMinAndMax)
};
const std::vector<TagsType> tags = {makeTag(expectedChunkRanges[0], zoneName("0")),
makeTag(expectedChunkRanges[2], zoneName("1"))};
- const std::vector<ShardId> expectedShardIds = {shardId("0"), shardId("0"), shardId("1")};
- checkGeneratedInitialZoneChunks(
- kShards, tags, expectedChunkRanges, expectedShardIds, ShardKeyPattern(BSON("x" << 1)));
+ const auto generatedChunks =
+ generateInitialZoneChunks(kShards, tags, ShardKeyPattern(BSON("x" << 1)), shardId("0"));
+
+ assertChunkRangesMatch(expectedChunkRanges, generatedChunks);
+ ASSERT_EQ(shardId("0"), generatedChunks[0].getShard()); // corresponds to a zone
+ ASSERT_EQ(shardId("1"), generatedChunks[2].getShard()); // corresponds to a zone
}
TEST_F(SingleChunkPerTagSplitPolicyTest, PredefinedZonesContiguous) {
@@ -466,10 +523,16 @@ TEST_F(SingleChunkPerTagSplitPolicyTest, PredefinedZonesContiguous) {
};
const std::vector<TagsType> tags = {makeTag(expectedChunkRanges[1], zoneName("1")),
makeTag(expectedChunkRanges[2], zoneName("0"))};
- const std::vector<ShardId> expectedShardIds = {
- shardId("0"), shardId("1"), shardId("0"), shardId("1")};
- checkGeneratedInitialZoneChunks(
- kShards, tags, expectedChunkRanges, expectedShardIds, ShardKeyPattern(BSON("x" << 1)));
+ const auto generatedChunks =
+ generateInitialZoneChunks(kShards, tags, ShardKeyPattern(BSON("x" << 1)), shardId("0"));
+
+ assertChunkRangesMatch(expectedChunkRanges, generatedChunks);
+ ASSERT_EQ(shardId("1"), generatedChunks[1].getShard()); // corresponds to a zone
+ ASSERT_EQ(shardId("0"), generatedChunks[2].getShard()); // corresponds to a zone
+
+ // The other two chunks are evenly spread over the two shards.
+ ASSERT_EQ(2, getNumberOfChunksOnShard(generatedChunks, shardId("0")));
+ ASSERT_EQ(2, getNumberOfChunksOnShard(generatedChunks, shardId("1")));
}
TEST_F(SingleChunkPerTagSplitPolicyTest, PredefinedZonesNotContiguous) {
@@ -486,10 +549,17 @@ TEST_F(SingleChunkPerTagSplitPolicyTest, PredefinedZonesNotContiguous) {
};
const std::vector<TagsType> tags = {makeTag(expectedChunkRanges[1], zoneName("0")),
makeTag(expectedChunkRanges[3], zoneName("1"))};
- const std::vector<ShardId> expectedShardIds = {
- shardId("0"), shardId("0"), shardId("1"), shardId("1"), shardId("2")};
- checkGeneratedInitialZoneChunks(
- kShards, tags, expectedChunkRanges, expectedShardIds, ShardKeyPattern(BSON("x" << 1)));
+ const auto generatedChunks =
+ generateInitialZoneChunks(kShards, tags, ShardKeyPattern(BSON("x" << 1)), shardId("0"));
+
+ assertChunkRangesMatch(expectedChunkRanges, generatedChunks);
+ ASSERT_EQ(shardId("0"), generatedChunks[1].getShard()); // corresponds to a zone
+ ASSERT_EQ(shardId("1"), generatedChunks[3].getShard()); // corresponds to a zone
+
+ // The three gap chunks get spread evenly over the three shards
+ ASSERT_EQ(2, getNumberOfChunksOnShard(generatedChunks, shardId("0")));
+ ASSERT_EQ(2, getNumberOfChunksOnShard(generatedChunks, shardId("1")));
+ ASSERT_EQ(1, getNumberOfChunksOnShard(generatedChunks, shardId("2")));
}
TEST_F(SingleChunkPerTagSplitPolicyTest, NumRemainingChunksGreaterThanNumShards) {
@@ -500,16 +570,22 @@ TEST_F(SingleChunkPerTagSplitPolicyTest, NumRemainingChunksGreaterThanNumShards)
ChunkRange(keyPattern().globalMin(), BSON(shardKey() << 0)),
ChunkRange(BSON(shardKey() << 0), BSON(shardKey() << 10)), // corresponds to a zone
ChunkRange(BSON(shardKey() << 10), BSON(shardKey() << 20)),
- ChunkRange(BSON(shardKey() << 20), BSON(shardKey() << 30)),
+ ChunkRange(BSON(shardKey() << 20), BSON(shardKey() << 30)), // corresponds to a zone
ChunkRange(BSON(shardKey() << 30), keyPattern().globalMax()),
};
const std::vector<TagsType> tags = {makeTag(expectedChunkRanges[1], zoneName("0")),
makeTag(expectedChunkRanges[3], zoneName("1"))};
- // shard assignment should wrap around to the first shard
- const std::vector<ShardId> expectedShardIds = {
- shardId("0"), shardId("0"), shardId("1"), shardId("1"), shardId("0")};
- checkGeneratedInitialZoneChunks(
- kShards, tags, expectedChunkRanges, expectedShardIds, ShardKeyPattern(BSON("x" << 1)));
+ const auto generatedChunks =
+ generateInitialZoneChunks(kShards, tags, ShardKeyPattern(BSON("x" << 1)), shardId("0"));
+
+ assertChunkRangesMatch(expectedChunkRanges, generatedChunks);
+ ASSERT_EQ(shardId("0"), generatedChunks[1].getShard()); // corresponds to a zone
+ ASSERT_EQ(shardId("1"), generatedChunks[3].getShard()); // corresponds to a zone
+
+ // The three gap chunks get spread over the two shards. One shard will get two of them, and the
+ // other just one.
+ ASSERT_GTE(getNumberOfChunksOnShard(generatedChunks, shardId("0")), 2);
+ ASSERT_GTE(getNumberOfChunksOnShard(generatedChunks, shardId("1")), 2);
}
TEST_F(SingleChunkPerTagSplitPolicyTest, MultipleChunksToOneZoneWithMultipleShards) {
@@ -528,10 +604,13 @@ TEST_F(SingleChunkPerTagSplitPolicyTest, MultipleChunksToOneZoneWithMultipleShar
makeTag(expectedChunkRanges.at(2), zone0),
makeTag(expectedChunkRanges.at(3), zone0),
};
- const std::vector<ShardId> expectedShardIds = {
- shardId("0"), shardId("0"), shardId("1"), shardId("0")};
- checkGeneratedInitialZoneChunks(
- kShards, tags, expectedChunkRanges, expectedShardIds, ShardKeyPattern(BSON("x" << 1)));
+ const auto generatedChunks =
+ generateInitialZoneChunks(kShards, tags, ShardKeyPattern(BSON("x" << 1)), shardId("0"));
+
+ assertChunkRangesMatch(expectedChunkRanges, generatedChunks);
+ // Zoned chunks are assigned round-robin.
+ ASSERT_EQ(generatedChunks[0].getShard(), generatedChunks[3].getShard());
+ ASSERT_NE(generatedChunks[0].getShard(), generatedChunks[2].getShard());
}
TEST_F(SingleChunkPerTagSplitPolicyTest, MultipleChunksToInterleavedZonesWithMultipleShards) {
@@ -552,10 +631,11 @@ TEST_F(SingleChunkPerTagSplitPolicyTest, MultipleChunksToInterleavedZonesWithMul
makeTag(expectedChunkRanges.at(3), zone0),
};
- const std::vector<ShardId> expectedShardIds = {
- shardId("0"), shardId("0"), shardId("0"), shardId("1")};
- checkGeneratedInitialZoneChunks(
- kShards, tags, expectedChunkRanges, expectedShardIds, ShardKeyPattern(BSON("x" << 1)));
+ const auto generatedChunks =
+ generateInitialZoneChunks(kShards, tags, ShardKeyPattern(BSON("x" << 1)), shardId("0"));
+
+ // For each tag, chunks are assigned round-robin.
+ ASSERT_NE(generatedChunks[0].getShard(), generatedChunks[3].getShard());
}
TEST_F(SingleChunkPerTagSplitPolicyTest, ZoneNotAssociatedWithAnyShardShouldFail) {
@@ -586,22 +666,41 @@ public:
* Calls PresplitHashedZonesSplitPolicy::createFirstChunks() according to the given arguments
* and asserts that returned chunks match with the chunks created using expectedChunkRanges and
* expectedShardIds.
+ * A 'boost::none' value on expectedShardIds means that the corresponding chunk can be on any
+ * shard (because it is a gap/boundary chunk).
*/
- void checkGeneratedInitialZoneChunks(const std::vector<TagsType>& tags,
- const std::vector<ChunkRange>& expectedChunkRanges,
- const std::vector<ShardId>& expectedShardIds,
- const ShardKeyPattern& shardKeyPattern,
- int numInitialChunk,
- bool isCollEmpty = true) {
+ void checkGeneratedInitialZoneChunks(
+ const std::vector<TagsType>& tags,
+ const std::vector<ChunkRange>& expectedChunkRanges,
+ const std::vector<boost::optional<ShardId>>& expectedShardIds,
+ const ShardKeyPattern& shardKeyPattern,
+ int numInitialChunk,
+ bool isCollEmpty = true) {
+ ShardId primaryShard("doesntMatter");
+
PresplitHashedZonesSplitPolicy splitPolicy(
operationContext(), shardKeyPattern, tags, numInitialChunk, isCollEmpty);
const auto shardCollectionConfig = splitPolicy.createFirstChunks(
- operationContext(), shardKeyPattern, {UUID::gen(), expectedShardIds.front()});
-
- const auto currentTime = VectorClock::get(operationContext())->getTime();
- const std::vector<ChunkType> expectedChunks = makeChunks(
- expectedChunkRanges, expectedShardIds, currentTime.clusterTime().asTimestamp());
- assertChunkVectorsAreEqual(expectedChunks, shardCollectionConfig.chunks);
+ operationContext(), shardKeyPattern, {UUID::gen(), primaryShard});
+
+ ASSERT_EQ(expectedShardIds.size(), expectedChunkRanges.size());
+ ASSERT_EQ(expectedChunkRanges.size(), shardCollectionConfig.chunks.size());
+
+ ShardId lastHoleShardId("dummy");
+ for (size_t i = 0; i < shardCollectionConfig.chunks.size(); ++i) {
+ // Check the chunk range matches the expected range.
+ ASSERT_EQ(expectedChunkRanges[i], shardCollectionConfig.chunks[i].getRange());
+
+ // Check that the shardId matches the expected.
+ if (expectedShardIds[i]) {
+ ASSERT_EQ(expectedShardIds[i], shardCollectionConfig.chunks[i].getShard());
+ } else {
+ // Boundary/hole chunks are assigned to any shard in a round-robin fashion.
+ // Note this assert is only valid if there's more than one shard.
+ ASSERT_NE(lastHoleShardId, shardCollectionConfig.chunks[i].getShard());
+ lastHoleShardId = shardCollectionConfig.chunks[i].getShard();
+ }
+ }
}
};
@@ -727,7 +826,7 @@ TEST_F(PresplitHashedZonesChunksTest, WithHashedPrefix) {
// numInitialChunks = 0.
std::vector<ChunkRange> expectedChunkRanges =
buildExpectedChunkRanges(tags, shardKeyPattern, {2 * 3});
- std::vector<ShardId> expectedShardIds = {
+ std::vector<boost::optional<ShardId>> expectedShardIds = {
shardId("0"), shardId("0"), shardId("1"), shardId("1"), shardId("2"), shardId("2")};
checkGeneratedInitialZoneChunks(
tags, expectedChunkRanges, expectedShardIds, shardKeyPattern, 0 /* numInitialChunks*/);
@@ -766,20 +865,32 @@ TEST_F(PresplitHashedZonesChunksTest, SingleZone) {
// numInitialChunks = 0.
std::vector<ChunkRange> expectedChunkRanges =
buildExpectedChunkRanges(tags, shardKeyPattern, {2});
- std::vector<ShardId> expectedShardIds = {
- shardId("0"), shardId("0"), shardId("0"), shardId("1")};
+ std::vector<boost::optional<ShardId>> expectedShardIds = {boost::none, // Lower bound.
+ shardId("0"), // Zone 0
+ shardId("0"), // Zone 0
+ boost::none}; // Upper bound.
checkGeneratedInitialZoneChunks(
tags, expectedChunkRanges, expectedShardIds, shardKeyPattern, 0 /* numInitialChunks*/);
// numInitialChunks = 1.
expectedChunkRanges = buildExpectedChunkRanges(tags, shardKeyPattern, {1});
- expectedShardIds = {shardId("0"), shardId("0"), shardId("1")};
+ expectedShardIds = {
+ boost::none, // Lower bound.
+ shardId("0"), // Zone 0
+ boost::none // Upper bound.
+ };
checkGeneratedInitialZoneChunks(
tags, expectedChunkRanges, expectedShardIds, shardKeyPattern, 1 /* numInitialChunks*/);
// numInitialChunks = 3.
expectedChunkRanges = buildExpectedChunkRanges(tags, shardKeyPattern, {3});
- expectedShardIds = {shardId("0"), shardId("0"), shardId("0"), shardId("0"), shardId("1")};
+ expectedShardIds = {
+ boost::none, // Lower bound.
+ shardId("0"), // Zone 0
+ shardId("0"), // Zone 0
+ shardId("0"), // Zone 0
+ boost::none // Upper bound.
+ };
checkGeneratedInitialZoneChunks(
tags, expectedChunkRanges, expectedShardIds, shardKeyPattern, 3 /* numInitialChunks*/);
}
@@ -834,15 +945,15 @@ TEST_F(PresplitHashedZonesChunksTest, WithMultipleZonesContiguous) {
std::vector<ChunkRange> expectedChunkRanges =
buildExpectedChunkRanges(tags, shardKeyPattern, {2, 2, 2});
- std::vector<ShardId> expectedShardForEachChunk = {
- shardId("0"), // Lower bound.
- shardId("0"),
- shardId("0"),
- shardId("1"),
- shardId("1"),
- shardId("2"),
- shardId("2"),
- shardId("1") // Upper bound.
+ std::vector<boost::optional<ShardId>> expectedShardForEachChunk = {
+ boost::none, // Lower bound.
+ shardId("0"), // Zone 0
+ shardId("0"), // Zone 0
+ shardId("1"), // Zone 1
+ shardId("1"), // Zone 1
+ shardId("2"), // Zone 2
+ shardId("2"), // Zone 2
+ boost::none // Upper bound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -855,11 +966,11 @@ TEST_F(PresplitHashedZonesChunksTest, WithMultipleZonesContiguous) {
expectedChunkRanges = buildExpectedChunkRanges(tags, shardKeyPattern, {1, 1, 1});
expectedShardForEachChunk = {
- shardId("0"), // Lower bound.
- shardId("0"),
- shardId("1"),
- shardId("2"),
- shardId("1") // Upper bound.
+ boost::none, // Lower bound.
+ shardId("0"), // Zone 0
+ shardId("1"), // Zone 1
+ shardId("2"), // Zone 2
+ boost::none, // Upper bound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -872,20 +983,20 @@ TEST_F(PresplitHashedZonesChunksTest, WithMultipleZonesContiguous) {
expectedChunkRanges = buildExpectedChunkRanges(tags, shardKeyPattern, {4, 4, 4});
expectedShardForEachChunk = {
- shardId("0"), // Lower bound.
- shardId("0"),
- shardId("0"),
- shardId("0"),
- shardId("0"),
- shardId("1"),
- shardId("1"),
- shardId("1"),
- shardId("1"),
- shardId("2"),
- shardId("2"),
- shardId("2"),
- shardId("2"),
- shardId("1") // Upper bound.
+ boost::none, // Lower bound.
+ shardId("0"), // Zone 0
+ shardId("0"), // Zone 0
+ shardId("0"), // Zone 0
+ shardId("0"), // Zone 0
+ shardId("1"), // Zone 1
+ shardId("1"), // Zone 1
+ shardId("1"), // Zone 1
+ shardId("1"), // Zone 1
+ shardId("2"), // Zone 2
+ shardId("2"), // Zone 2
+ shardId("2"), // Zone 2
+ shardId("2"), // Zone 2
+ boost::none // Upper bound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -937,8 +1048,8 @@ TEST_F(PresplitHashedZonesChunksTest, MultipleContiguousZonesWithEachZoneHavingM
std::vector<ChunkRange> expectedChunkRanges =
buildExpectedChunkRanges(tags, shardKeyPattern, {6, 4});
- std::vector<ShardId> expectedShardForEachChunk = {
- shardId("0"), // Lower bound.
+ std::vector<boost::optional<ShardId>> expectedShardForEachChunk = {
+ boost::none, // Lower bound.
shardId("0"), // zone 0.
shardId("0"), // zone 0.
shardId("3"), // zone 0.
@@ -949,7 +1060,7 @@ TEST_F(PresplitHashedZonesChunksTest, MultipleContiguousZonesWithEachZoneHavingM
shardId("2"), // zone 1.
shardId("4"), // zone 1.
shardId("4"), // zone 1.
- shardId("1") // Upper bound.
+ boost::none // Upper bound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -962,13 +1073,13 @@ TEST_F(PresplitHashedZonesChunksTest, MultipleContiguousZonesWithEachZoneHavingM
expectedChunkRanges = buildExpectedChunkRanges(tags, shardKeyPattern, {3, 2});
expectedShardForEachChunk = {
- shardId("0"), // Lower bound.
+ boost::none, // Lower bound.
shardId("0"), // zone0.
shardId("3"), // zone0.
shardId("5"), // zone0.
shardId("2"), // zone1.
shardId("4"), // zone1.
- shardId("1") // Upper bound.
+ boost::none // Upper bound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -981,13 +1092,13 @@ TEST_F(PresplitHashedZonesChunksTest, MultipleContiguousZonesWithEachZoneHavingM
expectedChunkRanges = buildExpectedChunkRanges(tags, shardKeyPattern, {3, 2});
expectedShardForEachChunk = {
- shardId("0"), // Lower bound.
+ boost::none, // Lower bound.
shardId("0"), // zone0.
shardId("3"), // zone0.
shardId("5"), // zone0.
shardId("2"), // zone1.
shardId("4"), // zone1.
- shardId("1") // Upper bound.
+ boost::none // Upper bound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -1045,17 +1156,17 @@ TEST_F(PresplitHashedZonesChunksTest, MultipleZonesWithGaps) {
std::vector<ChunkRange> expectedChunkRanges =
buildExpectedChunkRanges(tags, shardKeyPattern, {2, 2, 2});
// The holes should use round-robin to choose a shard.
- std::vector<ShardId> expectedShardForEachChunk = {
- shardId("0"), // LowerBound.
+ std::vector<boost::optional<ShardId>> expectedShardForEachChunk = {
+ boost::none, // LowerBound.
shardId("0"),
shardId("0"),
- shardId("1"), // Hole.
+ boost::none, // Hole.
shardId("1"),
shardId("1"),
- shardId("2"), // Hole.
+ boost::none, // Hole.
shardId("2"),
shardId("2"),
- shardId("noZone"), // UpperBound.
+ boost::none, // UpperBound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -1068,13 +1179,13 @@ TEST_F(PresplitHashedZonesChunksTest, MultipleZonesWithGaps) {
expectedChunkRanges = buildExpectedChunkRanges(tags, shardKeyPattern, {1, 1, 1});
// The holes should use round-robin to choose a shard.
expectedShardForEachChunk = {
- shardId("0"), // LowerBound.
- shardId("0"),
- shardId("1"), // Hole.
- shardId("1"),
- shardId("2"), // Hole.
- shardId("2"),
- shardId("noZone"), // UpperBound.
+ boost::none, // LowerBound.
+ shardId("0"), // Zone 0
+ boost::none, // Hole.
+ shardId("1"), // Zone 1
+ boost::none, // Hole.
+ shardId("2"), // Zone 2
+ boost::none, // UpperBound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -1087,22 +1198,22 @@ TEST_F(PresplitHashedZonesChunksTest, MultipleZonesWithGaps) {
expectedChunkRanges = buildExpectedChunkRanges(tags, shardKeyPattern, {4, 4, 4});
// The holes should use round-robin to choose a shard.
expectedShardForEachChunk = {
- shardId("0"), // LowerBound.
- shardId("0"),
- shardId("0"),
- shardId("0"),
- shardId("0"),
- shardId("1"), // Hole.
- shardId("1"),
- shardId("1"),
- shardId("1"),
- shardId("1"),
- shardId("2"), // Hole.
- shardId("2"),
- shardId("2"),
- shardId("2"),
- shardId("2"),
- shardId("noZone"), // UpperBound.
+ boost::none, // LowerBound.
+ shardId("0"), // Zone 0
+ shardId("0"), // Zone 0
+ shardId("0"), // Zone 0
+ shardId("0"), // Zone 0
+ boost::none, // Hole.
+ shardId("1"), // Zone 1
+ shardId("1"), // Zone 1
+ shardId("1"), // Zone 1
+ shardId("1"), // Zone 1
+ boost::none, // Hole.
+ shardId("2"), // Zone 2
+ shardId("2"), // Zone 2
+ shardId("2"), // Zone 2
+ shardId("2"), // Zone 2
+ boost::none, // UpperBound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -1176,14 +1287,14 @@ TEST_F(PresplitHashedZonesChunksTest, MultipleContiguousZonesWithEachShardHaving
std::vector<ChunkRange> expectedChunkRanges =
buildExpectedChunkRanges(tags, shardKeyPattern, {1, 1, 2, 1} /* numChunksPerTag*/);
- std::vector<ShardId> expectedShardForEachChunk = {
- shardId("0"), // Lower bound.
+ std::vector<boost::optional<ShardId>> expectedShardForEachChunk = {
+ boost::none, // Lower bound.
shardId("0"), // zone0.
shardId("1"), // zone1.
shardId("0"), // zone2.
shardId("1"), // zone2.
shardId("1"), // zone3.
- shardId("1") // Upper bound.
+ boost::none // Upper bound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -1197,13 +1308,13 @@ TEST_F(PresplitHashedZonesChunksTest, MultipleContiguousZonesWithEachShardHaving
buildExpectedChunkRanges(tags, shardKeyPattern, {1, 1, 2, 1} /* numChunksPerTag*/);
expectedShardForEachChunk = {
- shardId("0"), // Lower bound.
+ boost::none, // Lower bound.
shardId("0"), // zone0.
shardId("1"), // zone1.
shardId("0"), // zone2.
shardId("1"), // zone2.
shardId("1"), // zone3.
- shardId("1") // Upper bound.
+ boost::none // Upper bound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -1217,7 +1328,7 @@ TEST_F(PresplitHashedZonesChunksTest, MultipleContiguousZonesWithEachShardHaving
buildExpectedChunkRanges(tags, shardKeyPattern, {2, 2, 4, 2} /* numChunksPerTag*/);
expectedShardForEachChunk = {
- shardId("0"), // Lower bound.
+ boost::none, // Lower bound.
shardId("0"), // zone0.
shardId("0"), // zone0.
shardId("1"), // zone1.
@@ -1228,7 +1339,7 @@ TEST_F(PresplitHashedZonesChunksTest, MultipleContiguousZonesWithEachShardHaving
shardId("1"), // zone2.
shardId("1"), // zone3.
shardId("1"), // zone3.
- shardId("1") // Upper bound.
+ boost::none // Upper bound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -1314,14 +1425,14 @@ TEST_F(PresplitHashedZonesChunksTest, OneLargeZoneAndOtherSmallZonesSharingASing
std::vector<ChunkRange> expectedChunkRanges =
buildExpectedChunkRanges(tags, shardKeyPattern, {1, 1, 1, 2 * 5, 1} /* numChunksPerTag*/);
- std::vector<ShardId> expectedShardForEachChunk = {
- shardId("0"), // Lower bound.
+ std::vector<boost::optional<ShardId>> expectedShardForEachChunk = {
+ boost::none, // Lower bound.
shardId("0"), // zone0.
- shardId("1"), // hole.
+ boost::none, // hole.
shardId("0"), // zone1.
- shardId("2"), // hole.
+ boost::none, // hole.
shardId("0"), // zone2.
- shardId("3"), // hole.
+ boost::none, // hole.
shardId("2"), // zone3.
shardId("2"), // zone3.
shardId("3"), // zone3.
@@ -1332,9 +1443,9 @@ TEST_F(PresplitHashedZonesChunksTest, OneLargeZoneAndOtherSmallZonesSharingASing
shardId("5"), // zone3.
shardId("6"), // zone3.
shardId("6"), // zone3.
- shardId("4"), // hole.
+ boost::none, // hole.
shardId("0"), // zone4.
- shardId("5") // Upper bound.
+ boost::none // Upper bound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -1348,21 +1459,21 @@ TEST_F(PresplitHashedZonesChunksTest, OneLargeZoneAndOtherSmallZonesSharingASing
buildExpectedChunkRanges(tags, shardKeyPattern, {1, 1, 1, 5, 1} /* numChunksPerTag*/);
expectedShardForEachChunk = {
- shardId("0"), // Lower bound.
+ boost::none, // Lower bound.
shardId("0"), // zone0.
- shardId("1"), // hole.
+ boost::none, // hole.
shardId("0"), // zone1.
- shardId("2"), // hole.
+ boost::none, // hole.
shardId("0"), // zone2.
- shardId("3"), // hole.
+ boost::none, // hole.
shardId("2"), // zone3.
shardId("3"), // zone3.
shardId("4"), // zone3.
shardId("5"), // zone3.
shardId("6"), // zone3.
- shardId("4"), // hole.
+ boost::none, // hole.
shardId("0"), // zone4.
- shardId("5") // Upper bound.
+ boost::none // Upper bound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -1376,13 +1487,13 @@ TEST_F(PresplitHashedZonesChunksTest, OneLargeZoneAndOtherSmallZonesSharingASing
buildExpectedChunkRanges(tags, shardKeyPattern, {1, 1, 1, 10, 1} /* numChunksPerTag*/);
expectedShardForEachChunk = {
- shardId("0"), // Lower bound.
+ boost::none, // Lower bound.
shardId("0"), // zone0.
- shardId("1"), // hole.
+ boost::none, // hole.
shardId("0"), // zone1.
- shardId("2"), // hole.
+ boost::none, // hole.
shardId("0"), // zone2.
- shardId("3"), // hole.
+ boost::none, // hole.
shardId("2"), // zone3.
shardId("2"), // zone3.
shardId("3"), // zone3.
@@ -1393,9 +1504,9 @@ TEST_F(PresplitHashedZonesChunksTest, OneLargeZoneAndOtherSmallZonesSharingASing
shardId("5"), // zone3.
shardId("6"), // zone3.
shardId("6"), // zone3.
- shardId("4"), // hole.
+ boost::none, // hole.
shardId("0"), // zone4.
- shardId("5") // Upper bound.
+ boost::none // Upper bound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -1476,20 +1587,20 @@ TEST_F(PresplitHashedZonesChunksTest, InterweavingZones) {
std::vector<ChunkRange> expectedChunkRanges = buildExpectedChunkRanges(
tags, shardKeyPattern, {1, 1 * 2, 1, 1, 1 * 2} /* numChunksPerTag*/);
- std::vector<ShardId> expectedShardForEachChunk = {
- shardId("0"), // Lower bound.
+ std::vector<boost::optional<ShardId>> expectedShardForEachChunk = {
+ boost::none, // Lower bound.
shardId("0"), // tag0.
- shardId("1"), // hole.
+ boost::none, // hole.
shardId("2"), // tag1.
shardId("3"), // tag1.
- shardId("2"), // hole.
+ boost::none, // hole.
shardId("0"), // tag2.
- shardId("3"), // hole.
+ boost::none, // hole.
shardId("0"), // tag3.
- shardId("0"), // hole.
+ boost::none, // hole.
shardId("2"), // tag4.
shardId("3"), // tag4.
- shardId("1") // Upper bound.
+ boost::none // Upper bound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -1502,19 +1613,19 @@ TEST_F(PresplitHashedZonesChunksTest, InterweavingZones) {
expectedChunkRanges =
buildExpectedChunkRanges(tags, shardKeyPattern, {1, 2, 1, 1, 2} /* numChunksPerTag*/);
expectedShardForEachChunk = {
- shardId("0"), // Lower bound.
+ boost::none, // Lower bound.
shardId("0"), // tag0.
- shardId("1"), // hole.
+ boost::none, // hole.
shardId("2"), // tag1.
shardId("3"), // tag1.
- shardId("2"), // hole.
+ boost::none, // hole.
shardId("0"), // tag2.
- shardId("3"), // hole.
+ boost::none, // hole.
shardId("0"), // tag3.
- shardId("0"), // hole.
+ boost::none, // hole.
shardId("2"), // tag4.
shardId("3"), // tag4.
- shardId("1") // Upper bound.
+ boost::none // Upper bound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -1528,23 +1639,23 @@ TEST_F(PresplitHashedZonesChunksTest, InterweavingZones) {
tags, shardKeyPattern, {1, 2 * 2, 1, 1, 2 * 2} /* numChunksPerTag*/);
expectedShardForEachChunk = {
- shardId("0"), // Lower bound.
+ boost::none, // Lower bound.
shardId("0"), // tag0.
- shardId("1"), // hole.
+ boost::none, // hole.
shardId("2"), // tag1.
shardId("2"), // tag1.
shardId("3"), // tag1.
shardId("3"), // tag1.
- shardId("2"), // hole.
+ boost::none, // hole.
shardId("0"), // tag2.
- shardId("3"), // hole.
+ boost::none, // hole.
shardId("0"), // tag3.
- shardId("0"), // hole.
+ boost::none, // hole.
shardId("2"), // tag4.
shardId("2"), // tag4.
shardId("3"), // tag4.
shardId("3"), // tag4.
- shardId("1") // Upper bound.
+ boost::none // Upper bound.
};
checkGeneratedInitialZoneChunks(tags,
expectedChunkRanges,
@@ -1750,17 +1861,51 @@ public:
* Calls createFirstChunks() according to the given arguments and asserts that returned chunks
* match with the chunks created using expectedChunkRanges and expectedShardIds.
*/
- void checkGeneratedInitialZoneChunks(SamplingBasedSplitPolicy* splitPolicy,
- const ShardKeyPattern& shardKeyPattern,
- const std::vector<ChunkRange>& expectedChunkRanges,
- const std::vector<ShardId>& expectedShardIds) {
+ void checkGeneratedInitialZoneChunks(
+ SamplingBasedSplitPolicy* splitPolicy,
+ const ShardKeyPattern& shardKeyPattern,
+ const std::vector<ShardType>& shardList,
+ const std::vector<ChunkRange>& expectedChunkRanges,
+ const std::vector<boost::optional<ShardId>>& expectedShardIds) {
+ const ShardId primaryShard("doesntMatter");
+
const auto shardCollectionConfig = splitPolicy->createFirstChunks(
- operationContext(), shardKeyPattern, {UUID::gen(), expectedShardIds.front()});
+ operationContext(), shardKeyPattern, {UUID::gen(), primaryShard});
- const auto currentTime = VectorClock::get(operationContext())->getTime();
- const std::vector<ChunkType> expectedChunks = makeChunks(
- expectedChunkRanges, expectedShardIds, currentTime.clusterTime().asTimestamp());
- assertChunkVectorsAreEqual(expectedChunks, shardCollectionConfig.chunks);
+ ASSERT_EQ(expectedShardIds.size(), expectedChunkRanges.size());
+ ASSERT_EQ(expectedChunkRanges.size(), shardCollectionConfig.chunks.size());
+
+ stdx::unordered_map<ShardId, int> shardToNumChunksMap;
+ for (const auto& shard : shardList) {
+ shardToNumChunksMap[ShardId(shard.getName())] = 0;
+ }
+
+ for (size_t i = 0; i < shardCollectionConfig.chunks.size(); ++i) {
+ // Check the chunk range matches the expected range.
+ ASSERT_EQ(expectedChunkRanges[i], shardCollectionConfig.chunks[i].getRange());
+
+ // Check that the shardId matches the expected.
+ const auto& actualShardId = shardCollectionConfig.chunks[i].getShard();
+ if (expectedShardIds[i]) {
+ ASSERT_EQ(expectedShardIds[i], actualShardId);
+ } else {
+ // If not in a zone, this chunk goes to whatever shard owns the least number of
+ // chunks.
+ int minNumChunks = shardToNumChunksMap.begin()->second;
+ std::set<ShardId> candidateShards;
+ for (const auto& it : shardToNumChunksMap) {
+ if (it.second < minNumChunks) {
+ candidateShards = {it.first};
+ minNumChunks = it.second;
+ } else if (it.second == minNumChunks) {
+ candidateShards.insert(it.first);
+ }
+ }
+ ASSERT_TRUE(candidateShards.find(actualShardId) != candidateShards.end());
+ }
+
+ shardToNumChunksMap[actualShardId]++;
+ }
}
/**
@@ -1769,10 +1914,10 @@ public:
*/
void checkGeneratedInitialSplitPoints(SamplingBasedSplitPolicy* splitPolicy,
const ShardKeyPattern& shardKeyPattern,
- const std::vector<ChunkRange>& expectedChunkRanges,
- const std::vector<ShardId>& expectedShardIds) {
+ const std::vector<ChunkRange>& expectedChunkRanges) {
+ const ShardId primaryShard("doesntMatter");
const auto splitPoints = splitPolicy->createFirstSplitPoints(
- operationContext(), shardKeyPattern, {UUID::gen(), expectedShardIds.front()});
+ operationContext(), shardKeyPattern, {UUID::gen(), primaryShard});
const auto expectedNumSplitPoints = expectedChunkRanges.size() - 1;
ASSERT_EQ(splitPoints.size(), expectedNumSplitPoints);
@@ -1810,19 +1955,20 @@ TEST_F(SamplingBasedInitSplitTest, NoZones) {
ChunkRange(BSON("y" << 20), BSON("y" << 30)),
ChunkRange(BSON("y" << 30), BSON("y" << MAXKEY))};
- std::vector<ShardId> expectedShardForEachChunk = {
- shardId("0"), shardId("1"), shardId("0"), shardId("1")};
+ // No zones. Chunks assigned round-robin.
+ std::vector<boost::optional<ShardId>> expectedShardForEachChunk = {
+ boost::none, boost::none, boost::none, boost::none};
checkGeneratedInitialZoneChunks(
makeInitialSplitPolicy(numInitialChunks, boost::none /* zones */, mockSamples).get(),
shardKey,
+ shardList,
expectedChunkRanges,
expectedShardForEachChunk);
checkGeneratedInitialSplitPoints(
makeInitialSplitPolicy(numInitialChunks, boost::none /* zones */, mockSamples).get(),
shardKey,
- expectedChunkRanges,
- expectedShardForEachChunk);
+ expectedChunkRanges);
}
TEST_F(SamplingBasedInitSplitTest, HashedShardKey) {
@@ -1852,19 +1998,20 @@ TEST_F(SamplingBasedInitSplitTest, HashedShardKey) {
ChunkRange(BSON("y" << -1196399207910989725LL), BSON("y" << 7766103514953448109LL)),
ChunkRange(BSON("y" << 7766103514953448109LL), BSON("y" << MAXKEY))};
- std::vector<ShardId> expectedShardForEachChunk = {
- shardId("0"), shardId("1"), shardId("0"), shardId("1")};
+ // No zones. Chunks assigned round-robin.
+ std::vector<boost::optional<ShardId>> expectedShardForEachChunk = {
+ boost::none, boost::none, boost::none, boost::none};
checkGeneratedInitialZoneChunks(
makeInitialSplitPolicy(numInitialChunks, boost::none /* zones */, mockSamples).get(),
shardKey,
+ shardList,
expectedChunkRanges,
expectedShardForEachChunk);
checkGeneratedInitialSplitPoints(
makeInitialSplitPolicy(numInitialChunks, boost::none /* zones */, mockSamples).get(),
shardKey,
- expectedChunkRanges,
- expectedShardForEachChunk);
+ expectedChunkRanges);
}
TEST_F(SamplingBasedInitSplitTest, SingleInitialChunk) {
@@ -1885,18 +2032,20 @@ TEST_F(SamplingBasedInitSplitTest, SingleInitialChunk) {
std::vector<ChunkRange> expectedChunkRanges = {
ChunkRange(BSON("y" << MINKEY), BSON("y" << MAXKEY))};
- std::vector<ShardId> expectedShardForEachChunk = {shardId("0")};
+ std::vector<boost::optional<ShardId>> expectedShardForEachChunk = {
+ boost::none // Not in any zone. Can go to any shard.
+ };
checkGeneratedInitialZoneChunks(
makeInitialSplitPolicy(numInitialChunks, boost::none /* zones */, {} /* samples */).get(),
shardKey,
+ shardList,
expectedChunkRanges,
expectedShardForEachChunk);
checkGeneratedInitialSplitPoints(
makeInitialSplitPolicy(numInitialChunks, boost::none /* zones */, {} /* samples */).get(),
shardKey,
- expectedChunkRanges,
- expectedShardForEachChunk);
+ expectedChunkRanges);
}
TEST_F(SamplingBasedInitSplitTest, ZonesCoversEntireDomainButInsufficient) {
@@ -1929,19 +2078,19 @@ TEST_F(SamplingBasedInitSplitTest, ZonesCoversEntireDomainButInsufficient) {
ChunkRange(BSON("y" << 10), BSON("y" << 20)),
ChunkRange(BSON("y" << 20), BSON("y" << MAXKEY))};
- std::vector<ShardId> expectedShardForEachChunk = {
+ std::vector<boost::optional<ShardId>> expectedShardForEachChunk = {
shardId("1"), shardId("0"), shardId("0"), shardId("0")};
checkGeneratedInitialZoneChunks(
makeInitialSplitPolicy(numInitialChunks, zones, mockSamples).get(),
shardKey,
+ shardList,
expectedChunkRanges,
expectedShardForEachChunk);
checkGeneratedInitialSplitPoints(
makeInitialSplitPolicy(numInitialChunks, zones, mockSamples).get(),
shardKey,
- expectedChunkRanges,
- expectedShardForEachChunk);
+ expectedChunkRanges);
}
TEST_F(SamplingBasedInitSplitTest, SamplesCoincidingWithZones) {
@@ -1973,23 +2122,23 @@ TEST_F(SamplingBasedInitSplitTest, SamplesCoincidingWithZones) {
ChunkRange(BSON("y" << 20), BSON("y" << 30)),
ChunkRange(BSON("y" << 30), BSON("y" << MAXKEY))};
- std::vector<ShardId> expectedShardForEachChunk = {
- shardId("0"), // hole
+ std::vector<boost::optional<ShardId>> expectedShardForEachChunk = {
+ boost::none, // hole
shardId("0"), // zoneA
- shardId("1"), // hole
- shardId("1"), // hole
+ boost::none, // hole
+ boost::none, // hole
};
checkGeneratedInitialZoneChunks(
makeInitialSplitPolicy(numInitialChunks, zones, mockSamples).get(),
shardKey,
+ shardList,
expectedChunkRanges,
expectedShardForEachChunk);
checkGeneratedInitialSplitPoints(
makeInitialSplitPolicy(numInitialChunks, zones, mockSamples).get(),
shardKey,
- expectedChunkRanges,
- expectedShardForEachChunk);
+ expectedChunkRanges);
}
TEST_F(SamplingBasedInitSplitTest, ZoneWithHoles) {
@@ -2018,24 +2167,24 @@ TEST_F(SamplingBasedInitSplitTest, ZoneWithHoles) {
ChunkRange(BSON("y" << 30), BSON("y" << 40)),
ChunkRange(BSON("y" << 40), BSON("y" << MAXKEY))};
- std::vector<ShardId> expectedShardForEachChunk = {
- shardId("0"), // hole
+ std::vector<boost::optional<ShardId>> expectedShardForEachChunk = {
+ boost::none, // hole
shardId("1"), // zoneB
- shardId("0"), // hole
+ boost::none, // hole
shardId("0"), // zoneA
- shardId("1"), // hole
+ boost::none, // hole
};
checkGeneratedInitialZoneChunks(
makeInitialSplitPolicy(numInitialChunks, zones, {} /* samples */).get(),
shardKey,
+ shardList,
expectedChunkRanges,
expectedShardForEachChunk);
checkGeneratedInitialSplitPoints(
makeInitialSplitPolicy(numInitialChunks, zones, {} /* samples */).get(),
shardKey,
- expectedChunkRanges,
- expectedShardForEachChunk);
+ expectedChunkRanges);
}
TEST_F(SamplingBasedInitSplitTest, UnsortedZoneWithHoles) {
@@ -2064,24 +2213,24 @@ TEST_F(SamplingBasedInitSplitTest, UnsortedZoneWithHoles) {
ChunkRange(BSON("y" << 30), BSON("y" << 40)),
ChunkRange(BSON("y" << 40), BSON("y" << MAXKEY))};
- std::vector<ShardId> expectedShardForEachChunk = {
- shardId("0"), // hole
+ std::vector<boost::optional<ShardId>> expectedShardForEachChunk = {
+ boost::none, // hole
shardId("1"), // zoneB
- shardId("0"), // hole
+ boost::none, // hole
shardId("0"), // zoneA
- shardId("1"), // hole
+ boost::none, // hole
};
checkGeneratedInitialZoneChunks(
makeInitialSplitPolicy(numInitialChunks, zones, {} /* samples */).get(),
shardKey,
+ shardList,
expectedChunkRanges,
expectedShardForEachChunk);
checkGeneratedInitialSplitPoints(
makeInitialSplitPolicy(numInitialChunks, zones, {} /* samples */).get(),
shardKey,
- expectedChunkRanges,
- expectedShardForEachChunk);
+ expectedChunkRanges);
}
TEST_F(SamplingBasedInitSplitTest, ZonesIsPrefixOfShardKey) {
@@ -2109,22 +2258,22 @@ TEST_F(SamplingBasedInitSplitTest, ZonesIsPrefixOfShardKey) {
ChunkRange(BSON("y" << MAXKEY << "z" << MINKEY), BSON("y" << MAXKEY << "z" << MAXKEY)),
};
- std::vector<ShardId> expectedShardForEachChunk = {
- shardId("1"),
- shardId("0"),
- shardId("0"),
+ std::vector<boost::optional<ShardId>> expectedShardForEachChunk = {
+ shardId("1"), // ZoneB
+ shardId("0"), // ZoneA
+ boost::none, // hole
};
checkGeneratedInitialZoneChunks(
makeInitialSplitPolicy(numInitialChunks, zones, {} /* samples */).get(),
shardKey,
+ shardList,
expectedChunkRanges,
expectedShardForEachChunk);
checkGeneratedInitialSplitPoints(
makeInitialSplitPolicy(numInitialChunks, zones, {} /* samples */).get(),
shardKey,
- expectedChunkRanges,
- expectedShardForEachChunk);
+ expectedChunkRanges);
}
TEST_F(SamplingBasedInitSplitTest, ZonesHasIncompatibleShardKey) {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
index 597fbbba219..e3f243eefac 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
@@ -258,8 +258,13 @@ TEST_F(CreateFirstChunksTest, EmptyCollection_WithZones_ManyChunksOnFirstZoneSha
operationContext(), kShardKeyPattern, {UUID::gen(), ShardId("shard1")});
ASSERT_EQ(2U, firstChunks.chunks.size());
+ ASSERT_EQ(ChunkRange(kShardKeyPattern.getKeyPattern().globalMin(), BSON("x" << 0)),
+ firstChunks.chunks[0].getRange());
+ ASSERT_EQ(ChunkRange(BSON("x" << 0), kShardKeyPattern.getKeyPattern().globalMax()),
+ firstChunks.chunks[1].getRange());
+
ASSERT_EQ(kShards[0].getName(), firstChunks.chunks[0].getShard());
- ASSERT_EQ(kShards[0].getName(), firstChunks.chunks[1].getShard());
+    // Chunk 1 (no zone) can go to any shard, selected via the shuffled round-robin assignment.
}
} // namespace