path: root/jstests/sharding
Diffstat (limited to 'jstests/sharding')
-rw-r--r--  jstests/sharding/addshard1.js | 4
-rw-r--r--  jstests/sharding/agg_merge_hashed.js | 3
-rw-r--r--  jstests/sharding/auth.js | 7
-rw-r--r--  jstests/sharding/authCommands.js | 5
-rw-r--r--  jstests/sharding/auto_rebalance_parallel.js | 20
-rw-r--r--  jstests/sharding/autosplit.js | 9
-rw-r--r--  jstests/sharding/autosplit_low_cardinality.js | 3
-rw-r--r--  jstests/sharding/balancer_window.js | 11
-rw-r--r--  jstests/sharding/balancing_sessions_collection.js | 6
-rw-r--r--  jstests/sharding/basic_drop_coll.js | 5
-rw-r--r--  jstests/sharding/basic_merge.js | 11
-rw-r--r--  jstests/sharding/basic_split.js | 43
-rw-r--r--  jstests/sharding/catalog_cache_refresh_counters.js | 3
-rw-r--r--  jstests/sharding/chunk_history_window.js | 9
-rw-r--r--  jstests/sharding/chunk_operations_preserve_uuid.js | 6
-rw-r--r--  jstests/sharding/clear_jumbo.js | 49
-rw-r--r--  jstests/sharding/coll_epoch_test0.js | 6
-rw-r--r--  jstests/sharding/coll_timestamp_test.js | 4
-rw-r--r--  jstests/sharding/compound_hashed_shard_key_presplitting.js | 7
-rw-r--r--  jstests/sharding/compound_hashed_shard_key_sharding_cmds.js | 46
-rw-r--r--  jstests/sharding/compound_hashed_shard_key_targeting.js | 19
-rw-r--r--  jstests/sharding/compound_hashed_shard_key_zoning.js | 15
-rw-r--r--  jstests/sharding/count2.js | 4
-rw-r--r--  jstests/sharding/create_sharded_collection_util_test.js | 3
-rw-r--r--  jstests/sharding/cursor1.js | 6
-rw-r--r--  jstests/sharding/disable_autosplit.js | 4
-rw-r--r--  jstests/sharding/enforce_zone_policy.js | 6
-rw-r--r--  jstests/sharding/findandmodify1.js | 16
-rw-r--r--  jstests/sharding/findandmodify_autosplit.js | 4
-rw-r--r--  jstests/sharding/hash_basic.js | 10
-rw-r--r--  jstests/sharding/hash_crud.js | 3
-rw-r--r--  jstests/sharding/hash_crud_during_migration.js | 5
-rw-r--r--  jstests/sharding/hash_crud_txns_during_migration.js | 5
-rw-r--r--  jstests/sharding/hash_migration_oplog_filter.js | 3
-rw-r--r--  jstests/sharding/hash_shard1.js | 4
-rw-r--r--  jstests/sharding/hash_shard_non_empty.js | 4
-rw-r--r--  jstests/sharding/hash_shard_num_chunks.js | 4
-rw-r--r--  jstests/sharding/hash_single_shard.js | 4
-rw-r--r--  jstests/sharding/initial_split_validate_shard_collections.js | 3
-rw-r--r--  jstests/sharding/jumbo1.js | 4
-rw-r--r--  jstests/sharding/key_many.js | 6
-rw-r--r--  jstests/sharding/key_string.js | 4
-rw-r--r--  jstests/sharding/large_chunk.js | 9
-rw-r--r--  jstests/sharding/libs/find_chunks_util.js | 70
-rw-r--r--  jstests/sharding/libs/zone_changes_util.js | 4
-rw-r--r--  jstests/sharding/limit_push.js | 5
-rw-r--r--  jstests/sharding/merge_chunk_hashed.js | 19
-rw-r--r--  jstests/sharding/merge_chunks_test.js | 18
-rw-r--r--  jstests/sharding/migrateBig_balancer.js | 18
-rw-r--r--  jstests/sharding/move_chunk_allowMigrations.js | 21
-rw-r--r--  jstests/sharding/move_chunk_basic.js | 6
-rw-r--r--  jstests/sharding/move_chunk_hashed.js | 11
-rw-r--r--  jstests/sharding/move_chunk_respects_maxtimems.js | 5
-rw-r--r--  jstests/sharding/move_jumbo_chunk.js | 15
-rw-r--r--  jstests/sharding/move_primary_with_drop_collection.js | 17
-rw-r--r--  jstests/sharding/movechunk_commit_changelog_stats.js | 4
-rw-r--r--  jstests/sharding/movechunk_interrupt_at_primary_stepdown.js | 37
-rw-r--r--  jstests/sharding/movechunk_parallel.js | 55
-rw-r--r--  jstests/sharding/non_transaction_snapshot_errors.js | 13
-rw-r--r--  jstests/sharding/prefix_shard_key.js | 6
-rw-r--r--  jstests/sharding/presplit.js | 4
-rw-r--r--  jstests/sharding/query/geo_near_sharded.js | 4
-rw-r--r--  jstests/sharding/query_config.js | 24
-rw-r--r--  jstests/sharding/refine_collection_shard_key_atomic.js | 21
-rw-r--r--  jstests/sharding/refine_collection_shard_key_basic.js | 14
-rw-r--r--  jstests/sharding/refine_collection_shard_key_jumbo.js | 48
-rw-r--r--  jstests/sharding/remove2.js | 37
-rw-r--r--  jstests/sharding/reshard_collection_basic.js | 3
-rw-r--r--  jstests/sharding/shard1.js | 4
-rw-r--r--  jstests/sharding/shard2.js | 14
-rw-r--r--  jstests/sharding/shard_collection_basic.js | 4
-rw-r--r--  jstests/sharding/shard_collection_existing_zones.js | 8
-rw-r--r--  jstests/sharding/shard_existing.js | 4
-rw-r--r--  jstests/sharding/shard_existing_coll_chunk_count.js | 3
-rw-r--r--  jstests/sharding/sharding_balance1.js | 4
-rw-r--r--  jstests/sharding/sharding_balance2.js | 4
-rw-r--r--  jstests/sharding/sharding_balance3.js | 4
-rw-r--r--  jstests/sharding/sharding_balance4.js | 3
-rw-r--r--  jstests/sharding/sharding_non_transaction_snapshot_aggregate.js | 25
-rw-r--r--  jstests/sharding/sharding_non_transaction_snapshot_read.js | 25
-rw-r--r--  jstests/sharding/snapshot_cursor_commands_mongos.js | 25
-rw-r--r--  jstests/sharding/snapshot_reads_target_at_point_in_time.js | 4
-rw-r--r--  jstests/sharding/sort1.js | 6
-rw-r--r--  jstests/sharding/split_large_key.js | 6
-rw-r--r--  jstests/sharding/tag_auto_split.js | 16
-rw-r--r--  jstests/sharding/tag_auto_split_partial_key.js | 23
-rw-r--r--  jstests/sharding/tag_range.js | 4
-rw-r--r--  jstests/sharding/top_chunk_autosplit.js | 5
-rw-r--r--  jstests/sharding/transactions_reject_writes_for_moved_chunks.js | 5
-rw-r--r--  jstests/sharding/transactions_snapshot_errors_first_statement.js | 13
-rw-r--r--  jstests/sharding/transactions_snapshot_errors_subsequent_statements.js | 13
-rw-r--r--  jstests/sharding/transactions_stale_shard_version_errors.js | 4
-rw-r--r--  jstests/sharding/unowned_doc_filtering.js | 3
-rw-r--r--  jstests/sharding/version1.js | 4
-rw-r--r--  jstests/sharding/version2.js | 5
-rw-r--r--  jstests/sharding/write_cmd_auto_split.js | 25
-rw-r--r--  jstests/sharding/zone_changes_compound.js | 3
-rw-r--r--  jstests/sharding/zone_changes_hashed.js | 5
-rw-r--r--  jstests/sharding/zone_changes_range.js | 3
99 files changed, 786 insertions, 369 deletions
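Every hunk below replaces a direct lookup on config.chunks keyed by the 'ns' field with a call into the new helper library jstests/sharding/libs/find_chunks_util.js (70 lines changed above; its full contents are not shown in this view). The following is only a minimal sketch of what those helpers plausibly do, inferred from the call sites and from the ns-vs-uuid dispatch visible in the chunk_history_window.js, clear_jumbo.js, and compound_hashed_shard_key_targeting.js hunks; it is an illustration, not the committed implementation, and the local names inside it are made up for this sketch.

// Sketch of the helper surface used throughout the diff (assumed behavior).
var findChunksUtil = (function() {
    // Build a config.chunks filter for a namespace. Collections whose
    // config.collections entry carries a 'timestamp' key their chunk documents
    // by collection UUID; older entries key them by the 'ns' string.
    function buildChunksFilter(configDB, ns, extraQuery) {
        const collDoc = configDB.collections.findOne({_id: ns});
        const nsOrUuid =
            (collDoc && collDoc.timestamp) ? {uuid: collDoc.uuid} : {ns: ns};
        return Object.assign({}, nsOrUuid, extraQuery || {});
    }

    return {
        // Cursor over the chunks of 'ns', optionally narrowed by 'extraQuery'.
        findChunksByNs: function(configDB, ns, extraQuery) {
            return configDB.chunks.find(buildChunksFilter(configDB, ns, extraQuery));
        },
        // A single matching chunk document, or null if none exists.
        findOneChunkByNs: function(configDB, ns, extraQuery) {
            return configDB.chunks.findOne(buildChunksFilter(configDB, ns, extraQuery));
        },
        // Number of chunks for 'ns' matching the optional extra filter.
        countChunksForNs: function(configDB, ns, extraQuery) {
            return configDB.chunks.countDocuments(buildChunksFilter(configDB, ns, extraQuery));
        },
    };
})();

Shaped this way, a call such as findChunksUtil.countChunksForNs(s.config, "test.foo", {shard: s.shard0.shardName}) returns the same answer whether the cluster's chunk documents are keyed by namespace string or by collection UUID, which is what lets the tests below stop querying config.chunks by 'ns' directly.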
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
index e7aa1920d6b..2fa38bb0cbf 100644
--- a/jstests/sharding/addshard1.js
+++ b/jstests/sharding/addshard1.js
@@ -1,6 +1,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s = new ShardingTest({name: "add_shard1", shards: 1, useHostname: false});
// Create a shard and add a database; if the database is not duplicated the mongod should accept
@@ -67,7 +69,7 @@ s.adminCommand({enablesharding: "testDB"});
s.adminCommand({shardcollection: "testDB.foo", key: {a: 1}});
s.adminCommand({split: "testDB.foo", middle: {a: Math.floor(numObjs / 2)}});
assert.eq(2,
- s.config.chunks.count({"ns": "testDB.foo"}),
+ findChunksUtil.countChunksForNs(s.config, "testDB.foo"),
"wrong chunk number after splitting collection that existed before");
assert.eq(numObjs, sdb1.foo.count(), "wrong count after splitting collection that existed before");
diff --git a/jstests/sharding/agg_merge_hashed.js b/jstests/sharding/agg_merge_hashed.js
index 586dd65c9c8..9b8772c7b23 100644
--- a/jstests/sharding/agg_merge_hashed.js
+++ b/jstests/sharding/agg_merge_hashed.js
@@ -6,6 +6,7 @@
load("jstests/aggregation/extras/merge_helpers.js");
load("jstests/sharding/libs/chunk_bounds_util.js");
+load("jstests/sharding/libs/find_chunks_util.js");
let st = new ShardingTest({shards: 3});
let dbName = "test";
@@ -19,7 +20,7 @@ let targetNs = targetColl.getFullName();
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
assert.commandWorked(st.s.adminCommand({shardCollection: targetNs, key: {x: 'hashed'}}));
-let chunkDocsForTargetColl = configDB.chunks.find({ns: targetNs}).toArray();
+let chunkDocsForTargetColl = findChunksUtil.findChunksByNs(configDB, targetNs).toArray();
let shardChunkBoundsForTargetColl = chunkBoundsUtil.findShardChunkBounds(chunkDocsForTargetColl);
// Use docs that are expected to go to three different shards.
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index 24273b29884..48351d0a59b 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -9,6 +9,7 @@
(function() {
'use strict';
load("jstests/replsets/rslib.js");
+load("jstests/sharding/libs/find_chunks_util.js");
// Replica set nodes started with --shardsvr do not enable key generation until they are added
// to a sharded cluster and reject commands with gossiped clusterTime from users without the
@@ -184,9 +185,9 @@ assert.commandWorked(bulk.execute());
s.startBalancer(60000);
assert.soon(function() {
- var d1Chunks = s.getDB("config").chunks.count({ns: 'test.foo', shard: "d1"});
- var d2Chunks = s.getDB("config").chunks.count({ns: 'test.foo', shard: "d2"});
- var totalChunks = s.getDB("config").chunks.count({ns: 'test.foo'});
+ var d1Chunks = findChunksUtil.countChunksForNs(s.getDB("config"), 'test.foo', {shard: "d1"});
+ var d2Chunks = findChunksUtil.countChunksForNs(s.getDB("config"), 'test.foo', {shard: "d2"});
+ var totalChunks = findChunksUtil.countChunksForNs(s.getDB("config"), 'test.foo');
print("chunks: " + d1Chunks + " " + d2Chunks + " " + totalChunks);
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index 3c1e6ab9b27..041542face9 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -8,6 +8,7 @@
TestData.disableImplicitSessions = true;
load("jstests/replsets/rslib.js");
+load("jstests/sharding/libs/find_chunks_util.js");
// Replica set nodes started with --shardsvr do not enable key generation until they are added
// to a sharded cluster and reject commands with gossiped clusterTime from users without the
@@ -85,7 +86,7 @@ assert.commandWorked(
st.startBalancer();
// Make sure we've done at least some splitting, so the balancer will work
-assert.gt(configDB.chunks.find({ns: 'test.foo'}).count(), 2);
+assert.gt(findChunksUtil.findChunksByNs(configDB, 'test.foo').count(), 2);
// Make sure we eventually balance all the chunks we've created
assert.soon(function() {
@@ -219,7 +220,7 @@ var checkAdminOps = function(hasAuth) {
checkCommandSucceeded(adminDB, {ismaster: 1});
checkCommandSucceeded(adminDB, {hello: 1});
checkCommandSucceeded(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
- var chunk = configDB.chunks.findOne({ns: 'test.foo', shard: st.rs0.name});
+ var chunk = findChunksUtil.findOneChunkByNs(configDB, 'test.foo', {shard: st.rs0.name});
checkCommandSucceeded(
adminDB,
{moveChunk: 'test.foo', find: chunk.min, to: st.rs1.name, _waitForDelete: true});
diff --git a/jstests/sharding/auto_rebalance_parallel.js b/jstests/sharding/auto_rebalance_parallel.js
index 03e5755ec0c..1e55e8cbc11 100644
--- a/jstests/sharding/auto_rebalance_parallel.js
+++ b/jstests/sharding/auto_rebalance_parallel.js
@@ -5,6 +5,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var st = new ShardingTest({shards: 4});
var config = st.s0.getDB('config');
@@ -30,15 +32,21 @@ function prepareCollectionForBalance(collName) {
assert.commandWorked(st.moveChunk(collName, {Key: 20}, st.shard1.shardName));
assert.commandWorked(st.moveChunk(collName, {Key: 30}, st.shard1.shardName));
- assert.eq(2, config.chunks.find({ns: collName, shard: st.shard0.shardName}).itcount());
- assert.eq(2, config.chunks.find({ns: collName, shard: st.shard1.shardName}).itcount());
+ assert.eq(
+ 2, findChunksUtil.findChunksByNs(config, collName, {shard: st.shard0.shardName}).itcount());
+ assert.eq(
+ 2, findChunksUtil.findChunksByNs(config, collName, {shard: st.shard1.shardName}).itcount());
}
function checkCollectionBalanced(collName) {
- assert.eq(1, config.chunks.find({ns: collName, shard: st.shard0.shardName}).itcount());
- assert.eq(1, config.chunks.find({ns: collName, shard: st.shard1.shardName}).itcount());
- assert.eq(1, config.chunks.find({ns: collName, shard: st.shard2.shardName}).itcount());
- assert.eq(1, config.chunks.find({ns: collName, shard: st.shard3.shardName}).itcount());
+ assert.eq(
+ 1, findChunksUtil.findChunksByNs(config, collName, {shard: st.shard0.shardName}).itcount());
+ assert.eq(
+ 1, findChunksUtil.findChunksByNs(config, collName, {shard: st.shard1.shardName}).itcount());
+ assert.eq(
+ 1, findChunksUtil.findChunksByNs(config, collName, {shard: st.shard2.shardName}).itcount());
+ assert.eq(
+ 1, findChunksUtil.findChunksByNs(config, collName, {shard: st.shard3.shardName}).itcount());
}
function countMoves(collName) {
diff --git a/jstests/sharding/autosplit.js b/jstests/sharding/autosplit.js
index 39e05e9daae..af67d7820cb 100644
--- a/jstests/sharding/autosplit.js
+++ b/jstests/sharding/autosplit.js
@@ -4,6 +4,7 @@
(function() {
'use strict';
load('jstests/sharding/autosplit_include.js');
+load("jstests/sharding/libs/find_chunks_util.js");
var s = new ShardingTest({
name: "auto1",
@@ -48,20 +49,20 @@ function insertDocsAndWaitForSplit(numDocs) {
insertDocsAndWaitForSplit(100);
-counts.push(s.config.chunks.count({"ns": "test.foo"}));
+counts.push(findChunksUtil.countChunksForNs(s.config, "test.foo"));
assert.eq(100, db.foo.find().itcount());
print("datasize: " +
tojson(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"})));
insertDocsAndWaitForSplit(100);
-counts.push(s.config.chunks.count({"ns": "test.foo"}));
+counts.push(findChunksUtil.countChunksForNs(s.config, "test.foo"));
insertDocsAndWaitForSplit(200);
-counts.push(s.config.chunks.count({"ns": "test.foo"}));
+counts.push(findChunksUtil.countChunksForNs(s.config, "test.foo"));
insertDocsAndWaitForSplit(300);
-counts.push(s.config.chunks.count({"ns": "test.foo"}));
+counts.push(findChunksUtil.countChunksForNs(s.config, "test.foo"));
assert(counts[counts.length - 1] > counts[0], "counts 1 : " + tojson(counts));
var sorted = counts.slice(0);
diff --git a/jstests/sharding/autosplit_low_cardinality.js b/jstests/sharding/autosplit_low_cardinality.js
index a8a190a13bb..8997c25b035 100644
--- a/jstests/sharding/autosplit_low_cardinality.js
+++ b/jstests/sharding/autosplit_low_cardinality.js
@@ -5,6 +5,7 @@
(function() {
'use strict';
load('jstests/sharding/autosplit_include.js');
+load("jstests/sharding/libs/find_chunks_util.js");
var st = new ShardingTest({
name: "low_cardinality",
@@ -32,7 +33,7 @@ function insertBigDocsWithKey(key, numDocs) {
}
function numChunks() {
- return st.config.chunks.count({"ns": "test.foo"});
+ return findChunksUtil.countChunksForNs(st.config, "test.foo");
}
// Accumulate ~1MB of documents under -10 and +10
diff --git a/jstests/sharding/balancer_window.js b/jstests/sharding/balancer_window.js
index 50ab8325802..ee48db64844 100644
--- a/jstests/sharding/balancer_window.js
+++ b/jstests/sharding/balancer_window.js
@@ -13,6 +13,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
/**
* Simple representation for wall clock time. Hour and minutes should be integers.
*/
@@ -52,7 +54,8 @@ for (var x = 0; x < 150; x += 10) {
configDB.adminCommand({split: 'test.user', middle: {_id: x}});
}
-var shard0Chunks = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
+var shard0Chunks =
+ findChunksUtil.findChunksByNs(configDB, 'test.user', {shard: st.shard0.shardName}).count();
var startDate = new Date();
var hourMinStart = new HourAndMinute(startDate.getHours(), startDate.getMinutes());
@@ -71,7 +74,8 @@ st.startBalancer();
st.waitForBalancer(true, 60000);
-var shard0ChunksAfter = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
+var shard0ChunksAfter =
+ findChunksUtil.findChunksByNs(configDB, 'test.user', {shard: st.shard0.shardName}).count();
assert.eq(shard0Chunks, shard0ChunksAfter);
assert.commandWorked(configDB.settings.update(
@@ -85,7 +89,8 @@ assert.commandWorked(configDB.settings.update(
st.waitForBalancer(true, 60000);
-shard0ChunksAfter = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
+shard0ChunksAfter =
+ findChunksUtil.findChunksByNs(configDB, 'test.user', {shard: st.shard0.shardName}).count();
assert.neq(shard0Chunks, shard0ChunksAfter);
st.stop();
diff --git a/jstests/sharding/balancing_sessions_collection.js b/jstests/sharding/balancing_sessions_collection.js
index 8313b1ca80d..19b60ec8d09 100644
--- a/jstests/sharding/balancing_sessions_collection.js
+++ b/jstests/sharding/balancing_sessions_collection.js
@@ -6,18 +6,20 @@
(function() {
"use strict";
+load("jstests/sharding/libs/find_chunks_util.js");
+
/*
* Returns the number of chunks for the sessions collection.
*/
function getNumTotalChunks() {
- return configDB.chunks.count({ns: kSessionsNs});
+ return findChunksUtil.countChunksForNs(configDB, kSessionsNs);
}
/*
* Returns the number of chunks for the sessions collection that are the given shard.
*/
function getNumChunksOnShard(shardName) {
- return configDB.chunks.count({ns: kSessionsNs, shard: shardName});
+ return findChunksUtil.countChunksForNs(configDB, kSessionsNs, {shard: shardName});
}
/*
diff --git a/jstests/sharding/basic_drop_coll.js b/jstests/sharding/basic_drop_coll.js
index 523a633ac51..96095cee60b 100644
--- a/jstests/sharding/basic_drop_coll.js
+++ b/jstests/sharding/basic_drop_coll.js
@@ -7,6 +7,8 @@
(function() {
"use strict";
+load("jstests/sharding/libs/find_chunks_util.js");
+
var st = new ShardingTest({shards: 2});
var testDB = st.s.getDB('test');
@@ -45,7 +47,7 @@ assert.neq(null, st.shard1.getDB('test').user.findOne({_id: 10}));
var configDB = st.s.getDB('config');
var collDoc = configDB.collections.findOne({_id: 'test.user'});
-assert.eq(2, configDB.chunks.count({ns: 'test.user'}));
+assert.eq(2, findChunksUtil.countChunksForNs(configDB, 'test.user'));
assert.eq(1, configDB.tags.count({ns: 'test.user'}));
assert.commandWorked(testDB.runCommand({drop: 'user'}));
@@ -73,6 +75,7 @@ if (collEntry.length > 0) {
}
assert.eq(0, configDB.chunks.count({ns: 'test.user'}));
+assert.eq(0, configDB.chunks.count({uuid: collDoc.uuid}));
assert.eq(0, configDB.tags.count({ns: 'test.user'}));
st.stop();
diff --git a/jstests/sharding/basic_merge.js b/jstests/sharding/basic_merge.js
index 9bc75636e5d..b0506f1f2c3 100644
--- a/jstests/sharding/basic_merge.js
+++ b/jstests/sharding/basic_merge.js
@@ -4,6 +4,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var st = new ShardingTest({mongos: 2, shards: 2, other: {chunkSize: 1}});
var mongos = st.s0;
@@ -31,7 +33,7 @@ assert.commandFailed(
assert.eq(0, mongos.getDB('config').chunks.count({ns: ns}));
assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {a: 1}}));
-assert.eq(1, mongos.getDB('config').chunks.count({ns: ns}));
+assert.eq(1, findChunksUtil.countChunksForNs(mongos.getDB('config'), ns));
assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 0}}));
assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: -1}}));
assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 1}}));
@@ -52,13 +54,14 @@ assert.commandWorked(st.s0.adminCommand(
// Validate metadata
// There are four chunks [{$minKey, -1}, {-1, 0}, {0, 1}, {1, $maxKey}]
-assert.eq(4, st.s0.getDB('config').chunks.count({ns: ns}));
+assert.eq(4, findChunksUtil.countChunksForNs(st.s0.getDB('config'), ns));
// Use the second (stale) mongos to invoke the mergeChunks command so we can exercise the stale
// shard version refresh logic
assert.commandWorked(st.s1.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 1}]}));
-assert.eq(3, mongos.getDB('config').chunks.count({ns: ns}));
-assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, min: {a: -1}, max: {a: 1}}));
+assert.eq(3, findChunksUtil.countChunksForNs(mongos.getDB('config'), ns));
+assert.eq(1,
+ findChunksUtil.countChunksForNs(mongos.getDB('config'), ns, {min: {a: -1}, max: {a: 1}}));
st.stop();
})();
diff --git a/jstests/sharding/basic_split.js b/jstests/sharding/basic_split.js
index fcab7365aad..c4a819ae026 100644
--- a/jstests/sharding/basic_split.js
+++ b/jstests/sharding/basic_split.js
@@ -4,6 +4,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var st = new ShardingTest({mongos: 2, shards: 2, other: {chunkSize: 1}});
var configDB = st.s0.getDB('config');
@@ -24,10 +26,10 @@ assert.commandFailed(configDB.adminCommand({split: 'test.user', key: {_id: 1}}))
assert.commandWorked(configDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
-assert.eq(null, configDB.chunks.findOne({ns: 'test.user', min: {_id: 0}}));
+assert.eq(null, findChunksUtil.findOneChunkByNs(configDB, 'test.user', {min: {_id: 0}}));
assert.commandWorked(configDB.adminCommand({split: 'test.user', middle: {_id: 0}}));
-assert.neq(null, configDB.chunks.findOne({ns: 'test.user', min: {_id: 0}}));
+assert.neq(null, findChunksUtil.findOneChunkByNs(configDB, 'test.user', {min: {_id: 0}}));
// Cannot split on existing chunk boundary.
assert.commandFailed(configDB.adminCommand({split: 'test.user', middle: {_id: 0}}));
@@ -48,19 +50,24 @@ for (var x = -1200; x < 1200; x++) {
}
assert.commandWorked(bulk.execute());
-assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount());
+assert.eq(1,
+ findChunksUtil.findChunksByNs(configDB, 'test.user', {min: {$gte: {_id: 0}}}).itcount());
// Errors if bounds do not correspond to existing chunk boundaries.
assert.commandFailed(configDB.adminCommand({split: 'test.user', bounds: [{_id: 0}, {_id: 1000}]}));
-assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount());
+assert.eq(1,
+ findChunksUtil.findChunksByNs(configDB, 'test.user', {min: {$gte: {_id: 0}}}).itcount());
assert.commandWorked(
configDB.adminCommand({split: 'test.user', bounds: [{_id: 0}, {_id: MaxKey}]}));
-assert.gt(configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount(), 1);
+assert.gt(findChunksUtil.findChunksByNs(configDB, 'test.user', {min: {$gte: {_id: 0}}}).itcount(),
+ 1);
-assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$lt: {_id: 0}}}).itcount());
+assert.eq(1,
+ findChunksUtil.findChunksByNs(configDB, 'test.user', {min: {$lt: {_id: 0}}}).itcount());
assert.commandWorked(configDB.adminCommand({split: 'test.user', middle: {_id: -600}}));
-assert.gt(configDB.chunks.find({ns: 'test.user', min: {$lt: {_id: 0}}}).itcount(), 1);
+assert.gt(findChunksUtil.findChunksByNs(configDB, 'test.user', {min: {$lt: {_id: 0}}}).itcount(),
+ 1);
// Mongos must refresh metadata if the chunk version does not match
assert.commandWorked(st.s0.adminCommand(
@@ -70,7 +77,7 @@ assert.commandWorked(st.s1.adminCommand(
{moveChunk: 'test.user', find: {_id: -900}, to: shard0, _waitForDelete: true}));
assert.commandWorked(st.s1.adminCommand(
{moveChunk: 'test.user', find: {_id: -901}, to: shard0, _waitForDelete: true}));
-assert.eq(0, configDB.chunks.find({ns: 'test.user', shard: shard1}).itcount());
+assert.eq(0, findChunksUtil.findChunksByNs(configDB, 'test.user', {shard: shard1}).itcount());
//
// Compound Key
@@ -78,9 +85,9 @@ assert.eq(0, configDB.chunks.find({ns: 'test.user', shard: shard1}).itcount());
assert.commandWorked(configDB.adminCommand({shardCollection: 'test.compound', key: {x: 1, y: 1}}));
-assert.eq(null, configDB.chunks.findOne({ns: 'test.compound', min: {x: 0, y: 0}}));
+assert.eq(null, findChunksUtil.findOneChunkByNs(configDB, 'test.compound', {min: {x: 0, y: 0}}));
assert.commandWorked(configDB.adminCommand({split: 'test.compound', middle: {x: 0, y: 0}}));
-assert.neq(null, configDB.chunks.findOne({ns: 'test.compound', min: {x: 0, y: 0}}));
+assert.neq(null, findChunksUtil.findOneChunkByNs(configDB, 'test.compound', {min: {x: 0, y: 0}}));
// cannot split on existing chunk boundary.
assert.commandFailed(configDB.adminCommand({split: 'test.compound', middle: {x: 0, y: 0}}));
@@ -91,14 +98,22 @@ for (x = -1200; x < 1200; x++) {
}
assert.commandWorked(bulk.execute());
-assert.eq(1, configDB.chunks.find({ns: 'test.compound', min: {$gte: {x: 0, y: 0}}}).itcount());
+assert.eq(1,
+ findChunksUtil.findChunksByNs(configDB, 'test.compound', {min: {$gte: {x: 0, y: 0}}})
+ .itcount());
assert.commandWorked(configDB.adminCommand(
{split: 'test.compound', bounds: [{x: 0, y: 0}, {x: MaxKey, y: MaxKey}]}));
-assert.gt(configDB.chunks.find({ns: 'test.compound', min: {$gte: {x: 0, y: 0}}}).itcount(), 1);
+assert.gt(
+ findChunksUtil.findChunksByNs(configDB, 'test.compound', {min: {$gte: {x: 0, y: 0}}}).itcount(),
+ 1);
-assert.eq(1, configDB.chunks.find({ns: 'test.compound', min: {$lt: {x: 0, y: 0}}}).itcount());
+assert.eq(
+ 1,
+ findChunksUtil.findChunksByNs(configDB, 'test.compound', {min: {$lt: {x: 0, y: 0}}}).itcount());
assert.commandWorked(configDB.adminCommand({split: 'test.compound', find: {x: -1, y: -1}}));
-assert.gt(configDB.chunks.find({ns: 'test.compound', min: {$lt: {x: 0, y: 0}}}).itcount(), 1);
+assert.gt(
+ findChunksUtil.findChunksByNs(configDB, 'test.compound', {min: {$lt: {x: 0, y: 0}}}).itcount(),
+ 1);
st.stop();
})();
diff --git a/jstests/sharding/catalog_cache_refresh_counters.js b/jstests/sharding/catalog_cache_refresh_counters.js
index a29312461fa..f9946e5fb45 100644
--- a/jstests/sharding/catalog_cache_refresh_counters.js
+++ b/jstests/sharding/catalog_cache_refresh_counters.js
@@ -8,6 +8,7 @@
load('jstests/sharding/libs/sharded_transactions_helpers.js');
load("jstests/sharding/libs/chunk_bounds_util.js");
+load("jstests/sharding/libs/find_chunks_util.js");
let st = new ShardingTest({mongos: 2, shards: 2});
const configDB = st.s.getDB('config');
@@ -47,7 +48,7 @@ let verifyBlockedOperationsChange = (oldOperationsCount, increasedOps) => {
};
let getShardToTargetForMoveChunk = () => {
- const chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+ const chunkDocs = findChunksUtil.findChunksByNs(configDB, ns).toArray();
const shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);
const shardThatOwnsChunk = chunkBoundsUtil.findShardForShardKey(st, shardChunkBounds, {x: 100});
return st.getOther(shardThatOwnsChunk).shardName;
diff --git a/jstests/sharding/chunk_history_window.js b/jstests/sharding/chunk_history_window.js
index adc2ca7247a..dc9d0b29cf8 100644
--- a/jstests/sharding/chunk_history_window.js
+++ b/jstests/sharding/chunk_history_window.js
@@ -75,7 +75,14 @@ const getChunkHistory = (query) => {
return configChunks.findOne(query);
};
-const origChunk = getChunkHistory({ns: ns});
+const origChunk = (function() {
+ const coll = st.configRS.getPrimary().getDB("config").collections.findOne({_id: ns});
+ if (coll.timestamp) {
+ return getChunkHistory({uuid: coll.uuid});
+ } else {
+ return getChunkHistory({ns: ns});
+ }
+}());
jsTestLog(`Original chunk: ${tojson(origChunk)}`);
assert.eq(1, origChunk.history.length, tojson(origChunk));
let result = mongosDB.runCommand({insert: "test", documents: [{_id: 0}]});
diff --git a/jstests/sharding/chunk_operations_preserve_uuid.js b/jstests/sharding/chunk_operations_preserve_uuid.js
index ad86ed990fa..90fe57df4fd 100644
--- a/jstests/sharding/chunk_operations_preserve_uuid.js
+++ b/jstests/sharding/chunk_operations_preserve_uuid.js
@@ -8,6 +8,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
let st = new ShardingTest({mongos: 1, shards: 3});
const dbName = "test";
@@ -30,10 +32,10 @@ var collUUID; // Initialized after shardCollection
}
function allChunksWithUUID() {
- var cursor = st.config.chunks.find({"ns": ns});
+ var cursor = findChunksUtil.findChunksByNs(st.config, ns);
do {
var next = cursor.next().uuid;
- assert.eq(collUUID, UUID(next));
+ assert.eq(collUUID, next);
} while (cursor.hasNext());
}
diff --git a/jstests/sharding/clear_jumbo.js b/jstests/sharding/clear_jumbo.js
index 9b14e68db44..2e0f05b871a 100644
--- a/jstests/sharding/clear_jumbo.js
+++ b/jstests/sharding/clear_jumbo.js
@@ -1,6 +1,8 @@
(function() {
"use strict";
+load("jstests/sharding/libs/find_chunks_util.js");
+
let st = new ShardingTest({shards: 2});
assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
@@ -14,21 +16,28 @@ assert.commandWorked(st.s.adminCommand({shardCollection: 'test.range', key: {x:
assert.commandWorked(st.s.adminCommand({split: 'test.range', middle: {x: 0}}));
let chunkColl = st.s.getDB('config').chunks;
-assert.commandWorked(chunkColl.update({ns: 'test.range', min: {x: 0}}, {$set: {jumbo: true}}));
-let jumboChunk = chunkColl.findOne({ns: 'test.range', min: {x: 0}});
+let testRangeColl = st.s.getDB("config").collections.findOne({_id: 'test.range'});
+if (testRangeColl.timestamp) {
+ assert.commandWorked(
+ chunkColl.update({uuid: testRangeColl.uuid, min: {x: 0}}, {$set: {jumbo: true}}));
+} else {
+ assert.commandWorked(chunkColl.update({ns: 'test.range', min: {x: 0}}, {$set: {jumbo: true}}));
+}
+
+let jumboChunk = findChunksUtil.findOneChunkByNs(st.s.getDB('config'), 'test.range', {min: {x: 0}});
assert(jumboChunk.jumbo, tojson(jumboChunk));
let jumboMajorVersionBefore = jumboChunk.lastmod.getTime();
// Target non-jumbo chunk should not affect real jumbo chunk.
assert.commandWorked(st.s.adminCommand({clearJumboFlag: 'test.range', find: {x: -1}}));
-jumboChunk = chunkColl.findOne({ns: 'test.range', min: {x: 0}});
+jumboChunk = findChunksUtil.findOneChunkByNs(st.s.getDB('config'), 'test.range', {min: {x: 0}});
assert(jumboChunk.jumbo, tojson(jumboChunk));
assert.eq(jumboMajorVersionBefore, jumboChunk.lastmod.getTime());
// Target real jumbo chunk should bump version.
assert.commandWorked(st.s.adminCommand({clearJumboFlag: 'test.range', find: {x: 1}}));
-jumboChunk = chunkColl.findOne({ns: 'test.range', min: {x: 0}});
+jumboChunk = findChunksUtil.findOneChunkByNs(st.s.getDB('config'), 'test.range', {min: {x: 0}});
assert(!jumboChunk.jumbo, tojson(jumboChunk));
assert.lt(jumboMajorVersionBefore, jumboChunk.lastmod.getTime());
@@ -36,23 +45,31 @@ assert.lt(jumboMajorVersionBefore, jumboChunk.lastmod.getTime());
// Hashed shard key
assert.commandWorked(
st.s.adminCommand({shardCollection: 'test.hashed', key: {x: 'hashed'}, numInitialChunks: 2}));
-assert.commandWorked(chunkColl.update({ns: 'test.hashed', min: {x: 0}}, {$set: {jumbo: true}}));
-jumboChunk = chunkColl.findOne({ns: 'test.hashed', min: {x: 0}});
+
+let testHashedColl = st.s.getDB("config").collections.findOne({_id: 'test.hashed'});
+if (testHashedColl.timestamp) {
+ assert.commandWorked(
+ chunkColl.update({uuid: testHashedColl.uuid, min: {x: 0}}, {$set: {jumbo: true}}));
+} else {
+ assert.commandWorked(chunkColl.update({ns: 'test.hashed', min: {x: 0}}, {$set: {jumbo: true}}));
+}
+jumboChunk = findChunksUtil.findOneChunkByNs(st.s.getDB("config"), 'test.hashed', {min: {x: 0}});
assert(jumboChunk.jumbo, tojson(jumboChunk));
jumboMajorVersionBefore = jumboChunk.lastmod.getTime();
// Target non-jumbo chunk should not affect real jumbo chunk.
-let unrelatedChunk = chunkColl.findOne({ns: 'test.hashed', min: {x: MinKey}});
+let unrelatedChunk =
+ findChunksUtil.findOneChunkByNs(st.s.getDB("config"), 'test.hashed', {min: {x: MinKey}});
assert.commandWorked(st.s.adminCommand(
{clearJumboFlag: 'test.hashed', bounds: [unrelatedChunk.min, unrelatedChunk.max]}));
-jumboChunk = chunkColl.findOne({ns: 'test.hashed', min: {x: 0}});
+jumboChunk = findChunksUtil.findOneChunkByNs(st.s.getDB("config"), 'test.hashed', {min: {x: 0}});
assert(jumboChunk.jumbo, tojson(jumboChunk));
assert.eq(jumboMajorVersionBefore, jumboChunk.lastmod.getTime());
// Target real jumbo chunk should bump version.
assert.commandWorked(
st.s.adminCommand({clearJumboFlag: 'test.hashed', bounds: [jumboChunk.min, jumboChunk.max]}));
-jumboChunk = chunkColl.findOne({ns: 'test.hashed', min: {x: 0}});
+jumboChunk = findChunksUtil.findOneChunkByNs(st.s.getDB("config"), 'test.hashed', {min: {x: 0}});
assert(!jumboChunk.jumbo, tojson(jumboChunk));
assert.lt(jumboMajorVersionBefore, jumboChunk.lastmod.getTime());
@@ -62,11 +79,17 @@ assert.lt(jumboMajorVersionBefore, jumboChunk.lastmod.getTime());
// jumbo flag is cleared.
st.stopBalancer();
-assert.commandWorked(chunkColl.update({ns: 'test.range', min: {x: 0}}, {$set: {jumbo: true}}));
+
+if (testRangeColl.timestamp) {
+ assert.commandWorked(
+ chunkColl.update({uuid: testRangeColl.uuid, min: {x: 0}}, {$set: {jumbo: true}}));
+} else {
+ assert.commandWorked(chunkColl.update({ns: 'test.range', min: {x: 0}}, {$set: {jumbo: true}}));
+}
assert.commandWorked(st.s.adminCommand(
{updateZoneKeyRange: 'test.range', min: {x: 0}, max: {x: MaxKey}, zone: 'finalDestination'}));
-let chunk = chunkColl.findOne({ns: 'test.range', min: {x: 0}});
+let chunk = findChunksUtil.findOneChunkByNs(st.s.getDB("config"), 'test.range', {min: {x: 0}});
assert(chunk.jumbo, tojson(chunk));
assert.eq(st.shard0.shardName, chunk.shard);
@@ -93,14 +116,14 @@ let waitForBalancerToRun = function() {
waitForBalancerToRun();
-chunk = chunkColl.findOne({ns: 'test.range', min: {x: 0}});
+chunk = findChunksUtil.findOneChunkByNs(st.s.getDB("config"), 'test.range', {min: {x: 0}});
assert.eq(st.shard0.shardName, chunk.shard);
assert.commandWorked(st.s.adminCommand({clearJumboFlag: 'test.range', find: {x: 0}}));
waitForBalancerToRun();
-chunk = chunkColl.findOne({ns: 'test.range', min: {x: 0}});
+chunk = findChunksUtil.findOneChunkByNs(st.s.getDB("config"), 'test.range', {min: {x: 0}});
assert.eq(st.shard1.shardName, chunk.shard);
st.stop();
diff --git a/jstests/sharding/coll_epoch_test0.js b/jstests/sharding/coll_epoch_test0.js
index ba87929cb4e..b95e4b48b6a 100644
--- a/jstests/sharding/coll_epoch_test0.js
+++ b/jstests/sharding/coll_epoch_test0.js
@@ -1,4 +1,6 @@
-// Tests whether a split and a migrate in a sharded cluster preserve the epoch
+// Tests whether a split and a migrate in a sharded cluster preserve the epoch\
+
+load("jstests/sharding/libs/find_chunks_util.js");
var st = new ShardingTest({shards: 2, mongos: 1});
// Balancer is by default stopped, thus it will not interfere
@@ -21,7 +23,7 @@ config.shards.find().forEach(function(doc) {
var createdEpoch = null;
var checkEpochs = function() {
- config.chunks.find({ns: coll + ""}).forEach(function(chunk) {
+ findChunksUtil.findChunksByNs(config, coll + "").forEach(function(chunk) {
// Make sure the epochs exist, are non-zero, and are consistent
assert(chunk.lastmodEpoch);
print(chunk.lastmodEpoch + "");
diff --git a/jstests/sharding/coll_timestamp_test.js b/jstests/sharding/coll_timestamp_test.js
index 7467fa81222..7bac9f1fdf5 100644
--- a/jstests/sharding/coll_timestamp_test.js
+++ b/jstests/sharding/coll_timestamp_test.js
@@ -12,6 +12,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
function checkTimestampConsistencyInPersistentMetadata(nss, timestampInConfig) {
// Checking consistency on local shard collection: config.cache.collections
let timestampInShard =
@@ -20,7 +22,7 @@ function checkTimestampConsistencyInPersistentMetadata(nss, timestampInConfig) {
assert.eq(timestampCmp(timestampInConfig, timestampInShard), 0);
// Checking consistency on config server collection: config.chunks
- var cursor = st.config.chunks.find({ns: nss});
+ var cursor = findChunksUtil.findChunksByNs(st.config, nss);
assert(cursor.hasNext());
assert.eq(timestampInConfig, cursor.next().lastmodTimestamp);
}
diff --git a/jstests/sharding/compound_hashed_shard_key_presplitting.js b/jstests/sharding/compound_hashed_shard_key_presplitting.js
index 57b792e4a37..6fdb8abc001 100644
--- a/jstests/sharding/compound_hashed_shard_key_presplitting.js
+++ b/jstests/sharding/compound_hashed_shard_key_presplitting.js
@@ -8,6 +8,9 @@
*/
(function() {
'use strict';
+
+load("jstests/sharding/libs/find_chunks_util.js");
+
const st = new ShardingTest({name: jsTestName(), shards: 3});
const dbname = "test";
const mongos = st.s0;
@@ -35,7 +38,7 @@ st.ensurePrimaryShard('test', st.shard1.shardName);
assert.commandWorked(
db.adminCommand({shardcollection: db.collWithData.getFullName(), key: shardKey}));
- assert.eq(st.config.chunks.count({ns: db.collWithData.getFullName()}),
+ assert.eq(findChunksUtil.countChunksForNs(st.config, db.collWithData.getFullName()),
1,
"sharding non-empty collection should not pre-split");
});
@@ -45,7 +48,7 @@ st.ensurePrimaryShard('test', st.shard1.shardName);
* has expected number of chunks.
*/
function checkValidChunks(coll, shardKey, expectedChunks) {
- const chunks = st.config.chunks.find({"ns": coll.getFullName()}).toArray();
+ const chunks = findChunksUtil.findChunksByNs(st.config, coll.getFullName()).toArray();
let shardCountsMap =
{[st.shard0.shardName]: 0, [st.shard1.shardName]: 0, [st.shard2.shardName]: 0};
for (let chunk of chunks) {
diff --git a/jstests/sharding/compound_hashed_shard_key_sharding_cmds.js b/jstests/sharding/compound_hashed_shard_key_sharding_cmds.js
index a0c005f282d..4be26f1c18d 100644
--- a/jstests/sharding/compound_hashed_shard_key_sharding_cmds.js
+++ b/jstests/sharding/compound_hashed_shard_key_sharding_cmds.js
@@ -8,6 +8,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
const st = new ShardingTest({shards: 2, other: {chunkSize: 1}});
const configDB = st.s0.getDB('config');
assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
@@ -17,9 +19,10 @@ st.ensurePrimaryShard('test', shard0);
const testDBOnPrimary = st.rs0.getPrimary().getDB('test');
function verifyChunkSplitIntoTwo(namespace, chunk) {
- assert.eq(0, configDB.chunks.count({ns: namespace, min: chunk.min, max: chunk.max}));
- assert.eq(1, configDB.chunks.count({ns: namespace, min: chunk.min}));
- assert.eq(1, configDB.chunks.count({ns: namespace, max: chunk.max}));
+ assert.eq(
+ 0, findChunksUtil.countChunksForNs(configDB, namespace, {min: chunk.min, max: chunk.max}));
+ assert.eq(1, findChunksUtil.countChunksForNs(configDB, namespace, {min: chunk.min}));
+ assert.eq(1, findChunksUtil.countChunksForNs(configDB, namespace, {max: chunk.max}));
}
const nonHashedFieldValue = 111;
@@ -61,13 +64,14 @@ function testSplit(shardKey, collName) {
assert.commandFailed(configDB.adminCommand(
{split: namespace, bounds: [{someField: MinKey}, {someField: MaxKey}]}));
- let totalChunksBefore = configDB.chunks.count({ns: namespace});
- const lowestChunk = configDB.chunks.find({ns: namespace}).sort({min: 1}).limit(1).next();
+ let totalChunksBefore = findChunksUtil.countChunksForNs(configDB, namespace);
+ const lowestChunk =
+ findChunksUtil.findChunksByNs(configDB, namespace).sort({min: 1}).limit(1).next();
assert(lowestChunk);
// Split the chunk based on 'bounds' and verify total chunks increased by one.
assert.commandWorked(
configDB.adminCommand({split: namespace, bounds: [lowestChunk.min, lowestChunk.max]}));
- assert.eq(++totalChunksBefore, configDB.chunks.count({ns: namespace}));
+ assert.eq(++totalChunksBefore, findChunksUtil.countChunksForNs(configDB, namespace));
// Verify that a single chunk with the previous bounds no longer exists but split into two.
verifyChunkSplitIntoTwo(namespace, lowestChunk);
@@ -79,28 +83,33 @@ function testSplit(shardKey, collName) {
const splitObjWithHashedValue = buildObjWithAllShardKeyFields(shardKey, hashedFieldValue);
// Find the chunk to which 'splitObjWithHashedValue' belongs to.
- let chunkToBeSplit = configDB.chunks.findOne(
- {ns: namespace, min: {$lte: splitObjWithHashedValue}, max: {$gt: splitObjWithHashedValue}});
+ let chunkToBeSplit = findChunksUtil.findChunksByNs(
+ configDB,
+ namespace,
+ {min: {$lte: splitObjWithHashedValue}, max: {$gt: splitObjWithHashedValue}})[0];
assert(chunkToBeSplit);
// Split the 'chunkToBeSplit' using 'find'. Note that the object specified for 'find' is not a
// split point.
const splitObj = buildObjWithAllShardKeyFields(shardKey, nonHashedFieldValue);
assert.commandWorked(configDB.adminCommand({split: namespace, find: splitObj}));
- assert.eq(++totalChunksBefore, configDB.chunks.count({ns: namespace}));
+ assert.eq(++totalChunksBefore, findChunksUtil.countChunksForNs(configDB, namespace));
// Verify that a single chunk with the previous bounds no longer exists but split into two.
verifyChunkSplitIntoTwo(namespace, chunkToBeSplit);
- assert.eq(0, configDB.chunks.count({ns: namespace, min: splitObjWithHashedValue}));
+ assert.eq(0,
+ findChunksUtil.countChunksForNs(configDB, namespace, {min: splitObjWithHashedValue}));
// Get the new chunk in which 'splitObj' belongs.
- chunkToBeSplit = configDB.chunks.findOne(
- {ns: namespace, min: {$lte: splitObjWithHashedValue}, max: {$gt: splitObjWithHashedValue}});
+ chunkToBeSplit = findChunksUtil.findChunksByNs(
+ configDB,
+ namespace,
+ {min: {$lte: splitObjWithHashedValue}, max: {$gt: splitObjWithHashedValue}})[0];
// Use 'splitObj' as the middle point.
assert.commandWorked(
configDB.adminCommand({split: namespace, middle: splitObjWithHashedValue}));
- assert.eq(++totalChunksBefore, configDB.chunks.count({ns: namespace}));
+ assert.eq(++totalChunksBefore, findChunksUtil.countChunksForNs(configDB, namespace));
verifyChunkSplitIntoTwo(namespace, chunkToBeSplit);
// Cannot split on existing chunk boundary with 'middle'.
@@ -119,7 +128,7 @@ function testMoveChunk(shardKey) {
assert.commandWorked(st.s0.adminCommand({shardCollection: ns, key: shardKey}));
// Fetch a chunk from 'shard0'.
- const aChunk = configDB.chunks.findOne({ns: ns, shard: shard0});
+ const aChunk = findChunksUtil.findOneChunkByNs(configDB, ns, {shard: shard0});
assert(aChunk);
// Error if either of the bounds is not a valid shard key.
@@ -139,15 +148,18 @@ function testMoveChunk(shardKey) {
// Find the chunk to which 'moveObjWithHashedValue' belongs to.
const moveObjWithHashedValue = buildObjWithAllShardKeyFields(shardKey, hashedFieldValue);
- const chunk = st.config.chunks.findOne(
- {ns: ns, min: {$lte: moveObjWithHashedValue}, max: {$gt: moveObjWithHashedValue}});
+ const chunk = findChunksUtil.findChunksByNs(
+ st.config,
+ ns,
+ {min: {$lte: moveObjWithHashedValue}, max: {$gt: moveObjWithHashedValue}})[0];
assert(chunk);
// Verify that 'moveChunk' with 'find' works with pre-hashed value.
const otherShard = (chunk.shard === shard1) ? shard0 : shard1;
const moveObj = buildObjWithAllShardKeyFields(shardKey, nonHashedFieldValue);
assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: moveObj, to: otherShard}));
- assert.eq(st.config.chunks.count({ns: ns, min: chunk.min, shard: otherShard}), 1);
+ assert.eq(findChunksUtil.countChunksForNs(st.config, ns, {min: chunk.min, shard: otherShard}),
+ 1);
// Fail if 'find' and 'bounds' are both set.
assert.commandFailed(st.s0.adminCommand({
diff --git a/jstests/sharding/compound_hashed_shard_key_targeting.js b/jstests/sharding/compound_hashed_shard_key_targeting.js
index 6066cb5a863..9e4858ee462 100644
--- a/jstests/sharding/compound_hashed_shard_key_targeting.js
+++ b/jstests/sharding/compound_hashed_shard_key_targeting.js
@@ -13,6 +13,7 @@
load("jstests/aggregation/extras/utils.js"); // For arrayEq().
load("jstests/libs/analyze_plan.js"); // For assertStagesForExplainOfCommand().
load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions.
+load("jstests/sharding/libs/find_chunks_util.js");
const st = new ShardingTest({shards: 2});
const kDbName = jsTestName();
@@ -186,16 +187,18 @@ function verifyProfilerEntryOnCorrectShard(fieldValue, filter) {
// Find the chunk to which 'hashedValue' belongs to. We use $expr here so that the $lte and $gt
// comparisons occurs across data types.
const hashedValue = convertShardKeyToHashed(fieldValue);
- const chunk = st.s.getDB('config').chunks.findOne({
- $expr: {
- $and: [
- {$lte: ['$min.a', hashedValue]},
- {$gt: ['$max.a', hashedValue]},
- {$eq: ['$ns', ns]}
- ]
+ const nsOrUUID = (function() {
+ const coll = st.s.getDB('config').collections.findOne({_id: ns});
+ if (coll.timestamp) {
+ return {$eq: ['$uuid', coll.uuid]};
+ } else {
+ return {$eq: ['$ns', ns]};
}
+ }());
+ const chunk = st.s.getDB('config').chunks.findOne({
+ $expr: {$and: [{$lte: ['$min.a', hashedValue]}, {$gt: ['$max.a', hashedValue]}, nsOrUUID]}
});
- assert(chunk, st.s.getDB('config').chunks.find({ns: ns}).toArray());
+ assert(chunk, findChunksUtil.findChunksByNs(st.s.getDB('config'), ns).toArray());
const [targetShard, otherShard] =
(chunk.shard == st.shard0.shardName) ? [st.shard0, st.shard1] : [st.shard1, st.shard0];
profilerHasSingleMatchingEntryOrThrow({profileDB: targetShard.getDB(kDbName), filter: filter});
diff --git a/jstests/sharding/compound_hashed_shard_key_zoning.js b/jstests/sharding/compound_hashed_shard_key_zoning.js
index 7bfa6f74f88..bb389578519 100644
--- a/jstests/sharding/compound_hashed_shard_key_zoning.js
+++ b/jstests/sharding/compound_hashed_shard_key_zoning.js
@@ -12,6 +12,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
const st = new ShardingTest({shards: 3});
const kDbName = 'test';
const kCollName = 'foo';
@@ -217,14 +219,13 @@ function testChunkSplits({collectionExists, shardKey, zoneRanges, expectedNumChu
// Shard the collection and validate the resulting chunks.
assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: shardKey}));
- const chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+ const chunkDocs = findChunksUtil.findChunksByNs(configDB, ns).toArray();
assert.eq(chunkDocs.length, expectedNumChunks, chunkDocs);
// Verify that each of the chunks corresponding to zones are in the right shard.
for (let i = 0; i < zoneRanges.length; i++) {
assert.eq(1,
- configDB.chunks.count({
- ns: ns,
+ findChunksUtil.countChunksForNs(configDB, ns, {
min: zoneRanges[i][0],
max: zoneRanges[i][1],
shard: shards[i % shards.length]._id
@@ -299,13 +300,15 @@ function testNonemptyZonedCollection() {
assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: shardKey}));
// Check that there is initially 1 chunk.
- assert.eq(1, configDB.chunks.count({ns: ns}));
+ assert.eq(1, findChunksUtil.countChunksForNs(configDB, ns));
st.startBalancer();
// Check that the chunks were moved properly.
- assert.soon(
- () => configDB.chunks.count({ns: ns}) === 5, 'balancer never ran', 5 * 60 * 1000, 1000);
+ assert.soon(() => findChunksUtil.countChunksForNs(configDB, ns) === 5,
+ 'balancer never ran',
+ 5 * 60 * 1000,
+ 1000);
assert.commandWorked(testDB.runCommand({drop: kCollName}));
}
diff --git a/jstests/sharding/count2.js b/jstests/sharding/count2.js
index b1d6bad4bf3..d92cdbeebdd 100644
--- a/jstests/sharding/count2.js
+++ b/jstests/sharding/count2.js
@@ -1,5 +1,7 @@
(function() {
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s1 = new ShardingTest({name: "count2", shards: 2, mongos: 2});
var s2 = s1._mongos[1];
@@ -10,7 +12,7 @@ s1.adminCommand({shardcollection: "test.foo", key: {name: 1}});
var db1 = s1.getDB("test").foo;
var db2 = s2.getDB("test").foo;
-assert.eq(1, s1.config.chunks.count({"ns": "test.foo"}), "sanity check A");
+assert.eq(1, findChunksUtil.countChunksForNs(s1.config, "test.foo"), "sanity check A");
db1.save({name: "aaa"});
db1.save({name: "bbb"});
diff --git a/jstests/sharding/create_sharded_collection_util_test.js b/jstests/sharding/create_sharded_collection_util_test.js
index b97bc6edd20..d73f1cb6fc8 100644
--- a/jstests/sharding/create_sharded_collection_util_test.js
+++ b/jstests/sharding/create_sharded_collection_util_test.js
@@ -5,6 +5,7 @@
"use strict";
load("jstests/sharding/libs/create_sharded_collection_util.js");
+load("jstests/sharding/libs/find_chunks_util.js");
const st = new ShardingTest({mongos: 1, config: 1, shards: 3, rs: {nodes: 1}});
const collection = st.s.getCollection("test.create_sharded_collection_util");
@@ -15,7 +16,7 @@ function assertCreatedWithChunks(shardKey, chunks) {
const configDB = st.s.getDB("config");
const actualChunks =
- configDB.chunks.find({ns: collection.getFullName()}).sort({min: 1}).toArray();
+ findChunksUtil.findChunksByNs(configDB, collection.getFullName()).sort({min: 1}).toArray();
assert.eq(chunks.slice().sort((a, b) => bsonWoCompare(a.min, b.min)),
actualChunks.map(chunk => ({min: chunk.min, max: chunk.max, shard: chunk.shard})));
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index 07497051b44..45af3d6f6dc 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -2,6 +2,8 @@
// checks that cursors survive a chunk's move
(function() {
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s = new ShardingTest({name: "sharding_cursor1", shards: 2});
s.config.settings.find().forEach(printjson);
@@ -22,7 +24,7 @@ for (i = 0; i < numObjs; i++) {
}
assert.commandWorked(bulk.execute());
assert.eq(1,
- s.config.chunks.count({"ns": "test.foo"}),
+ findChunksUtil.countChunksForNs(s.config, "test.foo"),
"test requires collection to have one chunk initially");
// Cursor timeout only occurs outside of sessions. Otherwise we rely on the session timeout
@@ -42,7 +44,7 @@ assert.eq(7, cursor3.objsLeftInBatch());
s.adminCommand({split: "test.foo", middle: {_id: 5}});
s.adminCommand({movechunk: "test.foo", find: {_id: 5}, to: secondary.getMongo().name});
-assert.eq(2, s.config.chunks.count({"ns": "test.foo"}));
+assert.eq(2, findChunksUtil.countChunksForNs(s.config, "test.foo"));
// the cursors should not have been affected
assert.eq(numObjs, cursor1.itcount(), "c1");
diff --git a/jstests/sharding/disable_autosplit.js b/jstests/sharding/disable_autosplit.js
index 03d14cc970c..9b33623e9ec 100644
--- a/jstests/sharding/disable_autosplit.js
+++ b/jstests/sharding/disable_autosplit.js
@@ -2,6 +2,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js"); // for findChunksByNs
+
var chunkSizeMB = 1;
// Autosplit is disabled by default, but specify it anyway in case the default changes,
@@ -27,7 +29,7 @@ for (var i = 0; i < 20; i++) {
}
// Make sure we haven't split
-assert.eq(1, config.chunks.find({ns: coll + ""}).count());
+assert.eq(1, findChunksUtil.findChunksByNs(config, coll + "").count());
st.stop();
})();
diff --git a/jstests/sharding/enforce_zone_policy.js b/jstests/sharding/enforce_zone_policy.js
index 259b8443fee..11a43d2572d 100644
--- a/jstests/sharding/enforce_zone_policy.js
+++ b/jstests/sharding/enforce_zone_policy.js
@@ -3,6 +3,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var st = new ShardingTest({shards: 3, mongos: 1});
assert.commandWorked(st.s0.adminCommand({enablesharding: 'test'}));
@@ -65,7 +67,9 @@ st.addTagRange('test.foo', {_id: 100}, {_id: MaxKey}, 'b');
assertBalanceCompleteAndStable(function() {
var chunksOnShard2 =
- configDB.chunks.find({ns: 'test.foo', shard: st.shard2.shardName}).sort({min: 1}).toArray();
+ findChunksUtil.findChunksByNs(configDB, 'test.foo', {shard: st.shard2.shardName})
+ .sort({min: 1})
+ .toArray();
jsTestLog('Chunks on shard2: ' + tojson(chunksOnShard2));
diff --git a/jstests/sharding/findandmodify1.js b/jstests/sharding/findandmodify1.js
index 75698ec45a0..e4edc3edc15 100644
--- a/jstests/sharding/findandmodify1.js
+++ b/jstests/sharding/findandmodify1.js
@@ -1,6 +1,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s = new ShardingTest({shards: 2});
// Make sure that findAndModify with upsert against a non-existent database and collection will
@@ -46,11 +48,15 @@ for (var i = 2; i < numObjs; i += 2) {
}
s.printChunks();
-assert.eq(numObjs / 2, s.config.chunks.count({"ns": "test.sharded_coll"}), 'Split was incorrect');
-assert.eq(numObjs / 4,
- s.config.chunks.count({shard: s.shard0.shardName, "ns": "test.sharded_coll"}));
-assert.eq(numObjs / 4,
- s.config.chunks.count({shard: s.shard1.shardName, "ns": "test.sharded_coll"}));
+assert.eq(numObjs / 2,
+ findChunksUtil.countChunksForNs(s.config, "test.sharded_coll"),
+ 'Split was incorrect');
+assert.eq(
+ numObjs / 4,
+ findChunksUtil.countChunksForNs(s.config, "test.sharded_coll", {shard: s.shard0.shardName}));
+assert.eq(
+ numObjs / 4,
+ findChunksUtil.countChunksForNs(s.config, "test.sharded_coll", {shard: s.shard1.shardName}));
// update
for (var i = 0; i < numObjs; i++) {
diff --git a/jstests/sharding/findandmodify_autosplit.js b/jstests/sharding/findandmodify_autosplit.js
index bfe6fd82584..f73a1cfa029 100644
--- a/jstests/sharding/findandmodify_autosplit.js
+++ b/jstests/sharding/findandmodify_autosplit.js
@@ -4,13 +4,13 @@
(function() {
'use strict';
load('jstests/sharding/autosplit_include.js');
+load("jstests/sharding/libs/find_chunks_util.js");
var st = new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
/* Return total number of chunks for a specific collection */
function getNumChunksForColl(coll) {
- const chunks = st.getDB('config').getCollection('chunks');
- return chunks.countDocuments({ns: coll.getFullName()});
+ return findChunksUtil.countChunksForNs(st.getDB('config'), coll.getFullName());
}
/* Return a collection named @collName sharded on `_id` */
diff --git a/jstests/sharding/hash_basic.js b/jstests/sharding/hash_basic.js
index 1435611e7fb..1d7fad47b30 100644
--- a/jstests/sharding/hash_basic.js
+++ b/jstests/sharding/hash_basic.js
@@ -1,6 +1,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var st = new ShardingTest({shards: 2, chunkSize: 1});
assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
@@ -8,7 +10,7 @@ st.ensurePrimaryShard('test', st.shard1.shardName);
assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 'hashed'}}));
var configDB = st.s0.getDB('config');
-var chunkCountBefore = configDB.chunks.count({ns: 'test.user'});
+var chunkCountBefore = findChunksUtil.countChunksForNs(configDB, 'test.user');
assert.gt(chunkCountBefore, 1);
var testDB = st.s0.getDB('test');
@@ -16,7 +18,7 @@ for (var x = 0; x < 1000; x++) {
testDB.user.insert({x: x});
}
-var chunkDoc = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).next();
+var chunkDoc = findChunksUtil.findChunksByNs(configDB, 'test.user').sort({min: 1}).next();
var min = chunkDoc.min;
var max = chunkDoc.max;
@@ -26,7 +28,7 @@ var max = chunkDoc.max;
var cmdRes = assert.commandWorked(st.s0.adminCommand({split: 'test.user', bounds: [min, max]}),
'Split on bounds failed for chunk [' + tojson(chunkDoc) + ']');
-chunkDoc = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).skip(1).next();
+chunkDoc = findChunksUtil.findChunksByNs(configDB, 'test.user').sort({min: 1}).skip(1).next();
var middle = NumberLong(chunkDoc.min.x + 1000000);
cmdRes = assert.commandWorked(st.s0.adminCommand({split: 'test.user', middle: {x: middle}}),
@@ -35,7 +37,7 @@ cmdRes = assert.commandWorked(st.s0.adminCommand({split: 'test.user', middle: {x
cmdRes = assert.commandWorked(st.s0.adminCommand({split: 'test.user', find: {x: 7}}),
'Split failed with find.');
-var chunkList = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).toArray();
+var chunkList = findChunksUtil.findChunksByNs(configDB, 'test.user').sort({min: 1}).toArray();
assert.eq(chunkCountBefore + 3, chunkList.length);
chunkList.forEach(function(chunkToMove) {
diff --git a/jstests/sharding/hash_crud.js b/jstests/sharding/hash_crud.js
index 85b9cf52ec0..e88c4fd0491 100644
--- a/jstests/sharding/hash_crud.js
+++ b/jstests/sharding/hash_crud.js
@@ -5,6 +5,7 @@
'use strict';
load("jstests/sharding/libs/chunk_bounds_util.js");
+load("jstests/sharding/libs/find_chunks_util.js");
let st = new ShardingTest({shards: 3});
let dbName = "test";
@@ -17,7 +18,7 @@ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard1.shardName);
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 'hashed'}}));
-let chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+let chunkDocs = findChunksUtil.findChunksByNs(configDB, ns).toArray();
let shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);
jsTest.log("Test 'insert'");
diff --git a/jstests/sharding/hash_crud_during_migration.js b/jstests/sharding/hash_crud_during_migration.js
index fe1982552d3..321011e607c 100644
--- a/jstests/sharding/hash_crud_during_migration.js
+++ b/jstests/sharding/hash_crud_during_migration.js
@@ -7,6 +7,7 @@
load('jstests/libs/chunk_manipulation_util.js');
load("jstests/sharding/libs/chunk_bounds_util.js");
+load("jstests/sharding/libs/find_chunks_util.js");
let st = new ShardingTest({shards: 3});
let dbName = "test";
@@ -22,7 +23,7 @@ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard1.shardName);
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 'hashed'}}));
-let chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+let chunkDocs = findChunksUtil.findChunksByNs(configDB, ns).toArray();
let shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);
jsTest.log("Test 'insert'");
@@ -44,7 +45,7 @@ assert.eq(1, toShard.getCollection(ns).find(doc).count());
// Clean up.
assert.commandWorked(testDB.user.remove({}));
-chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+chunkDocs = findChunksUtil.findChunksByNs(configDB, ns).toArray();
shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);
// Insert docs that are expected to go to three different shards, check that the docs
diff --git a/jstests/sharding/hash_crud_txns_during_migration.js b/jstests/sharding/hash_crud_txns_during_migration.js
index 1bcafad854d..2da8dfcbb85 100644
--- a/jstests/sharding/hash_crud_txns_during_migration.js
+++ b/jstests/sharding/hash_crud_txns_during_migration.js
@@ -7,6 +7,7 @@
load('jstests/libs/chunk_manipulation_util.js');
load("jstests/sharding/libs/chunk_bounds_util.js");
+load("jstests/sharding/libs/find_chunks_util.js"); // for findChunksForNs
function runCommandInTxn(cmdFunc) {
let session = st.s.startSession();
@@ -30,7 +31,7 @@ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard1.shardName);
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 'hashed'}}));
-let chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+let chunkDocs = findChunksUtil.findChunksByNs(configDB, ns).toArray();
let shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);
jsTest.log("Test 'insert'");
@@ -55,7 +56,7 @@ assert.eq(1, toShard.getCollection(ns).find(doc).count());
// Clean up.
assert.commandWorked(testDB.user.remove({}));
-chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+chunkDocs = findChunksUtil.findChunksByNs(configDB, ns).toArray();
shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);
// Insert docs that are expected to go to three different shards, check that the docs
diff --git a/jstests/sharding/hash_migration_oplog_filter.js b/jstests/sharding/hash_migration_oplog_filter.js
index 18a2dc88841..db75bdae39f 100644
--- a/jstests/sharding/hash_migration_oplog_filter.js
+++ b/jstests/sharding/hash_migration_oplog_filter.js
@@ -7,6 +7,7 @@
load('jstests/libs/chunk_manipulation_util.js');
load("jstests/sharding/libs/chunk_bounds_util.js");
+load("jstests/sharding/libs/find_chunks_util.js");
/*
* Returns the oplog entry on the shard that matches the query. Asserts
@@ -29,7 +30,7 @@ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard1.shardName);
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 'hashed'}}));
-let chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+let chunkDocs = findChunksUtil.findChunksByNs(configDB, ns).toArray();
let shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);
// Use docs that are expected to go to the same shards but different chunks.
diff --git a/jstests/sharding/hash_shard1.js b/jstests/sharding/hash_shard1.js
index 4939cc53c99..e69ff07f226 100644
--- a/jstests/sharding/hash_shard1.js
+++ b/jstests/sharding/hash_shard1.js
@@ -1,6 +1,8 @@
// Basic test of sharding with a hashed shard key
// - Test basic migrations with moveChunk, using different chunk specification methods
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s = new ShardingTest({name: jsTestName(), shards: 3, mongos: 1, verbose: 1});
var dbname = "test";
var coll = "foo";
@@ -16,7 +18,7 @@ s.stopBalancer();
// shard a fresh collection using a hashed shard key
t.drop();
var res = db.adminCommand({shardcollection: ns, key: {a: "hashed"}});
-assert.gt(s.config.chunks.count({ns: ns}), 3);
+assert.gt(findChunksUtil.countChunksForNs(s.config, ns), 3);
assert.eq(res.ok, 1, "shardcollection didn't work");
s.printShardingStatus();
diff --git a/jstests/sharding/hash_shard_non_empty.js b/jstests/sharding/hash_shard_non_empty.js
index 01692518b66..1ea2f7a1bd4 100644
--- a/jstests/sharding/hash_shard_non_empty.js
+++ b/jstests/sharding/hash_shard_non_empty.js
@@ -1,5 +1,7 @@
// Hash sharding on a non empty collection should not pre-split.
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s = new ShardingTest({name: jsTestName(), shards: 3, mongos: 1, verbose: 1});
var dbname = "test";
var coll = "foo";
@@ -16,7 +18,7 @@ db.getCollection(coll).createIndex({a: "hashed"});
var res = db.adminCommand({shardcollection: dbname + "." + coll, key: {a: "hashed"}});
assert.eq(res.ok, 1, "shardcollection didn't work");
s.printShardingStatus();
-var numChunks = s.config.chunks.count({"ns": "test.foo"});
+var numChunks = findChunksUtil.countChunksForNs(s.config, "test.foo");
assert.eq(numChunks, 1, "sharding non-empty collection should not pre-split");
s.stop();
diff --git a/jstests/sharding/hash_shard_num_chunks.js b/jstests/sharding/hash_shard_num_chunks.js
index 03bd2da845d..69052235099 100644
--- a/jstests/sharding/hash_shard_num_chunks.js
+++ b/jstests/sharding/hash_shard_num_chunks.js
@@ -2,6 +2,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s = new ShardingTest({shards: 3});
var dbname = "test";
@@ -16,7 +18,7 @@ assert.commandWorked(db.adminCommand(
s.printShardingStatus();
-var numChunks = s.config.chunks.count({"ns": "test.foo"});
+var numChunks = findChunksUtil.countChunksForNs(s.config, "test.foo");
assert.eq(numChunks, 500, "should be exactly 500 chunks");
s.config.shards.find().forEach(
diff --git a/jstests/sharding/hash_single_shard.js b/jstests/sharding/hash_single_shard.js
index f208731a71b..5af4ceddf88 100644
--- a/jstests/sharding/hash_single_shard.js
+++ b/jstests/sharding/hash_single_shard.js
@@ -1,5 +1,7 @@
// Test hashed presplit with 1 shard.
+load("jstests/sharding/libs/find_chunks_util.js");
+
var st = new ShardingTest({shards: 1});
var testDB = st.getDB('test');
@@ -9,6 +11,6 @@ testDB.adminCommand({shardCollection: "test.collection", key: {a: "hashed"}});
// check the number of initial chunks.
assert.eq(2,
- st.getDB('config').chunks.count({"ns": "test.collection"}),
+ findChunksUtil.countChunksForNs(st.getDB('config'), "test.collection"),
'Using hashed shard key but failing to do correct presplitting');
st.stop();
diff --git a/jstests/sharding/initial_split_validate_shard_collections.js b/jstests/sharding/initial_split_validate_shard_collections.js
index 9cbedf06053..a52dc4f1f7f 100644
--- a/jstests/sharding/initial_split_validate_shard_collections.js
+++ b/jstests/sharding/initial_split_validate_shard_collections.js
@@ -4,6 +4,7 @@
*/
load("jstests/libs/uuid_util.js");
+load("jstests/sharding/libs/find_chunks_util.js");
(function() {
'use strict';
@@ -57,7 +58,7 @@ assert.commandWorked(
assert.commandWorked(
st.shard1.adminCommand({_flushRoutingTableCacheUpdates: 'test.user', syncFromConfig: false}));
-const chunksOnConfigCount = st.config.chunks.count({ns: 'test.user'});
+const chunksOnConfigCount = findChunksUtil.countChunksForNs(st.config, 'test.user');
assert.eq(2, chunksOnConfigCount);
const cacheChunksOnShard0 =
diff --git a/jstests/sharding/jumbo1.js b/jstests/sharding/jumbo1.js
index 8946193861b..94803227a28 100644
--- a/jstests/sharding/jumbo1.js
+++ b/jstests/sharding/jumbo1.js
@@ -1,6 +1,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s = new ShardingTest({shards: 2, other: {chunkSize: 1}});
assert.commandWorked(s.s.adminCommand({enablesharding: "test"}));
@@ -31,7 +33,7 @@ s.startBalancer();
// Wait for the balancer to try to move the chunk and mark it as jumbo.
assert.soon(() => {
- let chunk = s.getDB('config').chunks.findOne({ns: 'test.foo', min: {x: 0}});
+ let chunk = findChunksUtil.findOneChunkByNs(s.getDB('config'), 'test.foo', {min: {x: 0}});
if (chunk == null) {
// Balancer hasn't run and enforced the zone boundaries yet.
return false;
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
index e3ef862e8e8..f504383abab 100644
--- a/jstests/sharding/key_many.js
+++ b/jstests/sharding/key_many.js
@@ -1,6 +1,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
// Values have to be sorted - you must have exactly 6 values in each array
var types = [
{name: "string", values: ["allan", "bob", "eliot", "joe", "mark", "sara"], keyfield: "k"},
@@ -134,7 +136,9 @@ for (var i = 0; i < types.length; i++) {
var c = db[shortName];
s.adminCommand({shardcollection: longName, key: makeObjectDotted(1)});
- assert.eq(1, s.config.chunks.find({ns: longName}).count(), curT.name + " sanity check A");
+ assert.eq(1,
+ findChunksUtil.findChunksByNs(s.config, longName).count(),
+ curT.name + " sanity check A");
var unsorted = Array.shuffle(Object.extend([], curT.values));
c.insert(makeObject(unsorted[0]));
diff --git a/jstests/sharding/key_string.js b/jstests/sharding/key_string.js
index 78fa4c4d5dc..b3416158d41 100644
--- a/jstests/sharding/key_string.js
+++ b/jstests/sharding/key_string.js
@@ -1,5 +1,7 @@
(function() {
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s = new ShardingTest({name: "keystring", shards: 2});
s.adminCommand({enablesharding: "test"});
@@ -9,7 +11,7 @@ s.adminCommand({shardcollection: "test.foo", key: {name: 1}});
primary = s.getPrimaryShard("test").getDB("test");
seconday = s.getOther(primary).getDB("test");
-assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check A");
+assert.eq(1, findChunksUtil.countChunksForNs(s.config, "test.foo"), "sanity check A");
var db = s.getDB("test");
diff --git a/jstests/sharding/large_chunk.js b/jstests/sharding/large_chunk.js
index eb5bc916196..11432b2d4c8 100644
--- a/jstests/sharding/large_chunk.js
+++ b/jstests/sharding/large_chunk.js
@@ -8,6 +8,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
// Starts a new sharding environment limiting the chunk size to 1GB (highest value allowed).
// Note that early splitting will start with a 1/4 of max size currently.
var s = new ShardingTest({name: 'large_chunk', shards: 2, other: {chunkSize: 1024}});
@@ -37,7 +39,8 @@ assert.commandWorked(bulk.execute());
assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
-assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "step 1 - need one large chunk");
+assert.eq(
+ 1, findChunksUtil.countChunksForNs(s.config, "test.foo"), "step 1 - need one large chunk");
var primary = s.getPrimaryShard("test").getDB("test");
var secondary = s.getOther(primary).getDB("test");
@@ -57,11 +60,11 @@ assert.throws(function() {
// Move the chunk
print("checkpoint 1b");
-var before = s.config.chunks.find({ns: 'test.foo'}).toArray();
+var before = findChunksUtil.findChunksByNs(s.config, 'test.foo').toArray();
assert.commandWorked(
s.s0.adminCommand({movechunk: "test.foo", find: {_id: 1}, to: secondary.getMongo().name}));
-var after = s.config.chunks.find({ns: 'test.foo'}).toArray();
+var after = findChunksUtil.findChunksByNs(s.config, 'test.foo').toArray();
assert.neq(before[0].shard, after[0].shard, "move chunk did not work");
s.config.changelog.find().forEach(printjson);
diff --git a/jstests/sharding/libs/find_chunks_util.js b/jstests/sharding/libs/find_chunks_util.js
new file mode 100644
index 00000000000..a28ea755418
--- /dev/null
+++ b/jstests/sharding/libs/find_chunks_util.js
@@ -0,0 +1,70 @@
+/*
+ * Utilities for looking up chunk metadata
+ */
+var findChunksUtil = (function() {
+ /**
+ * Performs a find() on config.chunks on 'configDB', targeting chunks for the collection 'ns',
+ * and the optional 'extraQuery' and 'projection'.
+ * Queries chunks by their 'ns' or 'uuid' field depending on whether the collection's
+ * config.collections entry has a 'timestamp' field.
+ */
+ let findChunksByNs = function(configDB, ns, extraQuery = null, projection = null) {
+ const collection = configDB.collections.findOne({_id: ns});
+ if (collection.timestamp) {
+ const collectionUUID = collection.uuid;
+ assert.neq(collectionUUID, null);
+ const chunksQuery = Object.assign({uuid: collectionUUID}, extraQuery);
+ return configDB.chunks.find(chunksQuery, projection);
+ } else {
+ const chunksQuery = Object.assign({ns: ns}, extraQuery);
+ return configDB.chunks.find(chunksQuery, projection);
+ }
+ };
+
+ /**
+ * Performs a findOne() on config.chunks on 'configDB', targeting chunks for the collection
+ * 'ns', and the optional 'extraQuery' and 'projection'. Queries chunks by their 'ns' or
+ * 'uuid' field depending on whether the collection's config.collections entry has a
+ * 'timestamp' field.
+ */
+ let findOneChunkByNs = function(configDB, ns, extraQuery = null, projection = null) {
+ const collection = configDB.collections.findOne({_id: ns});
+ if (collection.timestamp) {
+ const collectionUUID = collection.uuid;
+ assert.neq(collectionUUID, null);
+ const chunksQuery = Object.assign({uuid: collectionUUID}, extraQuery);
+ return configDB.chunks.findOne(chunksQuery, projection);
+ } else {
+ const chunksQuery = Object.assign({ns: ns}, extraQuery);
+ return configDB.chunks.findOne(chunksQuery, projection);
+ }
+ };
+
+ /**
+ * Performs a count() on config.chunks on 'configDB', targeting chunks for the collection 'ns',
+ * and the optional 'extraQuery'.
+ * Queries chunks by their 'ns' or 'uuid' field depending on whether the collection's
+ * config.collections entry has a 'timestamp' field.
+ */
+ let countChunksForNs = function(configDB, ns, extraQuery = null) {
+ return findChunksByNs(configDB, ns, extraQuery).count();
+ };
+
+ /**
+ * Returns the appropriate chunks join clause for collection 'ns'.
+ */
+ let getChunksJoinClause = function(configDB, ns) {
+ const collMetadata = configDB.collections.findOne({_id: ns});
+ if (collMetadata.timestamp) {
+ return {uuid: collMetadata.uuid};
+ } else {
+ return {ns: collMetadata._id};
+ }
+ };
+
+ return {
+ findChunksByNs,
+ findOneChunkByNs,
+ countChunksForNs,
+ getChunksJoinClause,
+ };
+})();
\ No newline at end of file
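For orientation, here is a minimal sketch of how a test consumes the new helper. It assumes a running ShardingTest assigned to `st` and an already-sharded namespace 'test.foo'; both names are illustrative and not taken from the commit:

load("jstests/sharding/libs/find_chunks_util.js");

const configDB = st.s.getDB('config');

// Cursor over every chunk of the collection, with an optional extra filter and projection.
// The helper transparently matches on 'ns' or on the collection 'uuid', whichever applies.
const allChunks = findChunksUtil.findChunksByNs(configDB, 'test.foo').toArray();

// Count and single-document variants take the same optional extra query.
const chunksOnShard0 =
    findChunksUtil.countChunksForNs(configDB, 'test.foo', {shard: st.shard0.shardName});
const lowestChunk = findChunksUtil.findOneChunkByNs(configDB, 'test.foo', {min: {x: MinKey}});

// For aggregations on config.chunks, build the $match stage from the join clause.
const chunksPerShard =
    configDB.chunks
        .aggregate([
            {$match: findChunksUtil.getChunksJoinClause(configDB, 'test.foo')},
            {$group: {_id: "$shard", nChunks: {$sum: 1}}}
        ])
        .toArray();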
diff --git a/jstests/sharding/libs/zone_changes_util.js b/jstests/sharding/libs/zone_changes_util.js
index f45a5e28ef7..7d28f482cae 100644
--- a/jstests/sharding/libs/zone_changes_util.js
+++ b/jstests/sharding/libs/zone_changes_util.js
@@ -1,4 +1,5 @@
load("jstests/sharding/libs/chunk_bounds_util.js");
+load("jstests/sharding/libs/find_chunks_util.js");
/**
* Asserts that the given shards have the given chunks.
@@ -12,7 +13,8 @@ function assertChunksOnShards(configDB, ns, shardChunkBounds) {
for (let bounds of chunkBounds) {
assert.eq(
shardName,
- configDB.chunks.findOne({ns: ns, min: bounds[0], max: bounds[1]}).shard,
+ findChunksUtil.findOneChunkByNs(configDB, ns, {min: bounds[0], max: bounds[1]})
+ .shard,
"expected to find chunk " + tojson(bounds) + " on shard \"" + shardName + "\"");
}
}
diff --git a/jstests/sharding/limit_push.js b/jstests/sharding/limit_push.js
index a6773f1263e..f3cd1245a33 100644
--- a/jstests/sharding/limit_push.js
+++ b/jstests/sharding/limit_push.js
@@ -2,6 +2,8 @@
// See: http://jira.mongodb.org/browse/SERVER-1896
(function() {
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s = new ShardingTest({name: "limit_push", shards: 2, mongos: 1});
var db = s.getDB("test");
@@ -27,7 +29,8 @@ s.adminCommand({
});
// Check that the chunks have split correctly
-assert.eq(2, s.config.chunks.count({"ns": "test.limit_push"}), "wrong number of chunks");
+assert.eq(
+ 2, findChunksUtil.countChunksForNs(s.config, "test.limit_push"), "wrong number of chunks");
// The query is asking for the maximum value below a given value
// db.limit_push.find( { x : { $lt : 60} } ).sort( { x:-1} ).limit(1)
diff --git a/jstests/sharding/merge_chunk_hashed.js b/jstests/sharding/merge_chunk_hashed.js
index cd3d184f474..9be2f68a4c6 100644
--- a/jstests/sharding/merge_chunk_hashed.js
+++ b/jstests/sharding/merge_chunk_hashed.js
@@ -6,6 +6,7 @@
'use strict';
load("jstests/sharding/libs/chunk_bounds_util.js");
+load("jstests/sharding/libs/find_chunks_util.js");
let st = new ShardingTest({shards: 2, mongos: 2});
// , configOptions: {verbose: 3}
@@ -30,8 +31,8 @@ assert.commandWorked(admin.runCommand({shardCollection: ns, key: {x: 'hashed'}})
// 4611686018427387902 -> MAX
// Get the chunk -4611686018427387902 -> 0 on shard0.
-let chunkToSplit =
- configDB.chunks.findOne({ns: ns, min: {$ne: {x: MinKey}}, shard: st.shard0.shardName});
+let chunkToSplit = findChunksUtil.findOneChunkByNs(
+ configDB, ns, {min: {$ne: {x: MinKey}}, shard: st.shard0.shardName});
// Create chunks from that chunk and move some chunks to create holes.
// shard0: MIN -> chunkToSplit.min,
@@ -74,7 +75,7 @@ assert.commandWorked(admin.runCommand({
_waitForDelete: true
}));
-let chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+let chunkDocs = findChunksUtil.findChunksByNs(configDB, ns).toArray();
let shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);
jsTest.log("Inserting docs...");
@@ -213,20 +214,18 @@ assert.commandWorked(admin.runCommand(
// shard0: MIN -> -2500000000000000000,
// shard1: -2500000000000000000 -> MAX
-assert.eq(2, configDB.chunks.find({ns: ns}).itcount());
+assert.eq(2, findChunksUtil.findChunksByNs(configDB, ns).itcount());
assert.eq(1,
- configDB.chunks
- .find({
- ns: ns,
+ findChunksUtil
+ .findChunksByNs(configDB, ns, {
min: {x: MinKey},
max: {x: NumberLong(-2500000000000000000)},
shard: st.shard0.shardName
})
.count());
assert.eq(1,
- configDB.chunks
- .find({
- ns: ns,
+ findChunksUtil
+ .findChunksByNs(configDB, ns, {
min: {x: NumberLong(-2500000000000000000)},
max: {x: MaxKey},
shard: st.shard1.shardName
diff --git a/jstests/sharding/merge_chunks_test.js b/jstests/sharding/merge_chunks_test.js
index d4f74c26fb6..e801a430061 100644
--- a/jstests/sharding/merge_chunks_test.js
+++ b/jstests/sharding/merge_chunks_test.js
@@ -4,6 +4,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var st = new ShardingTest({shards: 2, mongos: 2});
var mongos = st.s0;
@@ -118,16 +120,18 @@ assert.commandWorked(
st.printShardingStatus(true);
-assert.eq(2, st.s0.getDB('config').chunks.find({'ns': 'foo.bar'}).itcount());
+assert.eq(2, findChunksUtil.findChunksByNs(st.s0.getDB('config'), 'foo.bar').itcount());
assert.eq(1,
- st.s0.getDB('config')
- .chunks
- .find({'ns': 'foo.bar', 'min._id': MinKey, 'max._id': 90, shard: st.shard0.shardName})
+ findChunksUtil
+ .findChunksByNs(st.s0.getDB('config'),
+ 'foo.bar',
+ {'min._id': MinKey, 'max._id': 90, shard: st.shard0.shardName})
.itcount());
assert.eq(1,
- st.s0.getDB('config')
- .chunks
- .find({'ns': 'foo.bar', 'min._id': 90, 'max._id': MaxKey, shard: st.shard1.shardName})
+ findChunksUtil
+ .findChunksByNs(st.s0.getDB('config'),
+ 'foo.bar',
+ {'min._id': 90, 'max._id': MaxKey, shard: st.shard1.shardName})
.itcount());
st.stop();
diff --git a/jstests/sharding/migrateBig_balancer.js b/jstests/sharding/migrateBig_balancer.js
index 465e5b40ce8..0c323b78e42 100644
--- a/jstests/sharding/migrateBig_balancer.js
+++ b/jstests/sharding/migrateBig_balancer.js
@@ -6,6 +6,8 @@
(function() {
"use strict";
+load("jstests/sharding/libs/find_chunks_util.js");
+
var st = new ShardingTest({name: 'migrateBig_balancer', shards: 2, other: {enableBalancer: true}});
var mongos = st.s;
var admin = mongos.getDB("admin");
@@ -37,13 +39,21 @@ assert.eq(40, coll.count(), "prep1");
assert.commandWorked(admin.runCommand({shardcollection: "" + coll, key: {_id: 1}}));
st.printShardingStatus();
-assert.lt(5, mongos.getDB("config").chunks.find({ns: "test.stuff"}).count(), "not enough chunks");
+assert.lt(5,
+ findChunksUtil.findChunksByNs(mongos.getDB("config"), "test.stuff").count(),
+ "not enough chunks");
assert.soon(() => {
+ const aggMatch = (function() {
+ const collMetadata = mongos.getDB("config").collections.findOne({_id: "test.stuff"});
+ if (collMetadata.timestamp) {
+ return {$match: {uuid: collMetadata.uuid}};
+ } else {
+ return {$match: {ns: "test.stuff"}};
+ }
+ }());
let res = mongos.getDB("config")
- .chunks
- .aggregate(
- [{$match: {ns: "test.stuff"}}, {$group: {_id: "$shard", nChunks: {$sum: 1}}}])
+ .chunks.aggregate([aggMatch, {$group: {_id: "$shard", nChunks: {$sum: 1}}}])
.toArray();
printjson(res);
return res.length > 1 && Math.abs(res[0].nChunks - res[1].nChunks) <= 3;
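The IIFE above rebuilds, by hand, the same {uuid: ...}-or-{ns: ...} predicate that find_chunks_util.js exposes as getChunksJoinClause(); an equivalent formulation of that aggregation, shown only as a sketch and not as part of the commit, would be:

let res = mongos.getDB("config")
              .chunks
              .aggregate([
                  {$match: findChunksUtil.getChunksJoinClause(mongos.getDB("config"), "test.stuff")},
                  {$group: {_id: "$shard", nChunks: {$sum: 1}}}
              ])
              .toArray();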
diff --git a/jstests/sharding/move_chunk_allowMigrations.js b/jstests/sharding/move_chunk_allowMigrations.js
index cc10fa77a7e..e9ef4fe1a40 100644
--- a/jstests/sharding/move_chunk_allowMigrations.js
+++ b/jstests/sharding/move_chunk_allowMigrations.js
@@ -11,6 +11,7 @@
load('jstests/libs/fail_point_util.js');
load('jstests/libs/parallel_shell_helpers.js');
+load("jstests/sharding/libs/find_chunks_util.js");
const st = new ShardingTest({config: 1, shards: 2});
const configDB = st.s.getDB("config");
@@ -90,8 +91,10 @@ const testBalancer = function testAllowMigrationsFalseDisablesBalancer(collBSetP
st.s0.adminCommand({balancerCollectionStatus: coll.getFullName()}));
assert.eq(balancerStatus.balancerCompliant, false);
assert.eq(balancerStatus.firstComplianceViolation, 'chunksImbalance');
- assert.eq(
- 4, configDB.chunks.find({ns: coll.getFullName(), shard: st.shard0.shardName}).count());
+ assert.eq(4,
+ findChunksUtil
+ .findChunksByNs(configDB, coll.getFullName(), {shard: st.shard0.shardName})
+ .count());
}
jsTestLog(
@@ -103,9 +106,13 @@ const testBalancer = function testAllowMigrationsFalseDisablesBalancer(collBSetP
assert.soon(() => {
st.awaitBalancerRound();
const shard0Chunks =
- configDB.chunks.find({ns: collA.getFullName(), shard: st.shard0.shardName}).itcount();
+ findChunksUtil
+ .findChunksByNs(configDB, collA.getFullName(), {shard: st.shard0.shardName})
+ .itcount();
const shard1Chunks =
- configDB.chunks.find({ns: collA.getFullName(), shard: st.shard1.shardName}).itcount();
+ findChunksUtil
+ .findChunksByNs(configDB, collA.getFullName(), {shard: st.shard1.shardName})
+ .itcount();
jsTestLog(`shard0 chunks ${shard0Chunks}, shard1 chunks ${shard1Chunks}`);
return shard0Chunks == 2 && shard1Chunks == 2;
}, `Balancer failed to balance ${collA.getFullName()}`, 1000 * 60 * 10);
@@ -120,8 +127,10 @@ const testBalancer = function testAllowMigrationsFalseDisablesBalancer(collBSetP
assert.commandWorked(st.s.adminCommand({balancerCollectionStatus: collB.getFullName()}));
assert.eq(collBBalanceStatus.balancerCompliant, false);
assert.eq(collBBalanceStatus.firstComplianceViolation, 'chunksImbalance');
- assert.eq(4,
- configDB.chunks.find({ns: collB.getFullName(), shard: st.shard0.shardName}).count());
+ assert.eq(
+ 4,
+ findChunksUtil.findChunksByNs(configDB, collB.getFullName(), {shard: st.shard0.shardName})
+ .count());
};
// Test cases that should disable the balancer.
diff --git a/jstests/sharding/move_chunk_basic.js b/jstests/sharding/move_chunk_basic.js
index bea97ab9ed3..cddec5c18aa 100644
--- a/jstests/sharding/move_chunk_basic.js
+++ b/jstests/sharding/move_chunk_basic.js
@@ -5,6 +5,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var st = new ShardingTest({mongos: 1, shards: 2});
var kDbName = 'db';
@@ -29,7 +31,7 @@ function testHashed() {
var ns = kDbName + '.fooHashed';
assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 'hashed'}}));
- var aChunk = mongos.getDB('config').chunks.findOne({ns: ns, shard: shard0});
+ var aChunk = findChunksUtil.findOneChunkByNs(mongos.getDB('config'), ns, {shard: shard0});
assert(aChunk);
// Error if either of the bounds is not a valid shard key (BSON object - 1 yields a NaN)
@@ -57,7 +59,7 @@ function testNotHashed(keyDoc) {
// Fail if find is not a valid shard key.
assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
- var chunkId = mongos.getDB('config').chunks.findOne({ns: ns, shard: shard0})._id;
+ var chunkId = findChunksUtil.findOneChunkByNs(mongos.getDB('config'), ns, {shard: shard0})._id;
assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: {xxx: 1}, to: shard1}));
assert.eq(shard0, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
diff --git a/jstests/sharding/move_chunk_hashed.js b/jstests/sharding/move_chunk_hashed.js
index e2e35cf0fba..554b33e5a9c 100644
--- a/jstests/sharding/move_chunk_hashed.js
+++ b/jstests/sharding/move_chunk_hashed.js
@@ -7,6 +7,7 @@
load('jstests/libs/chunk_manipulation_util.js');
load("jstests/sharding/libs/chunk_bounds_util.js");
+load("jstests/sharding/libs/find_chunks_util.js");
/*
* Returns the shard with the given shard name.
@@ -47,7 +48,7 @@ let docs = [{x: -10}, {x: -1}, {x: 0}, {x: 1}, {x: 10}];
assert.commandWorked(testDB.user.insert(docs));
// Find the chunks with docs.
-let chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+let chunkDocs = findChunksUtil.findChunksByNs(configDB, ns).toArray();
let chunksWithDocs = [];
let numChunksWithMultipleDocs = 0;
@@ -78,8 +79,12 @@ for (let chunk of chunksWithDocs) {
{moveChunk: ns, bounds: chunk.bounds, to: toShard.shardName, _waitForDelete: true}));
// Check that the config database is updated correctly.
- assert.eq(0, configDB.chunks.count({ns: ns, _id: chunk.id, shard: chunk.shard.shardName}));
- assert.eq(1, configDB.chunks.count({ns: ns, _id: chunk.id, shard: toShard.shardName}));
+ assert.eq(0,
+ findChunksUtil.countChunksForNs(
+ configDB, ns, {_id: chunk.id, shard: chunk.shard.shardName}));
+ assert.eq(
+ 1,
+ findChunksUtil.countChunksForNs(configDB, ns, {_id: chunk.id, shard: toShard.shardName}));
// Check that the docs in the donated chunk are transferred to the recipient, and the
// other docs remain on the donor.
diff --git a/jstests/sharding/move_chunk_respects_maxtimems.js b/jstests/sharding/move_chunk_respects_maxtimems.js
index c1f331f2437..570a6817f85 100644
--- a/jstests/sharding/move_chunk_respects_maxtimems.js
+++ b/jstests/sharding/move_chunk_respects_maxtimems.js
@@ -9,6 +9,7 @@
load("jstests/libs/fail_point_util.js");
load('jstests/libs/parallel_shell_helpers.js');
+load("jstests/sharding/libs/find_chunks_util.js");
var st = new ShardingTest({shards: 2});
@@ -45,9 +46,9 @@ jsTestLog("Waiting for moveChunk to succeed in the background");
// interrupted.
assert.soon(() => {
var numChunksOnShard0 =
- st.config.chunks.find({"ns": ns, "shard": st.shard0.shardName}).itcount();
+ findChunksUtil.findChunksByNs(st.config, ns, {shard: st.shard0.shardName}).itcount();
var numChunksOnShard1 =
- st.config.chunks.find({"ns": ns, "shard": st.shard1.shardName}).itcount();
+ findChunksUtil.findChunksByNs(st.config, ns, {shard: st.shard1.shardName}).itcount();
return numChunksOnShard0 == 0 && numChunksOnShard1 == 1;
});
diff --git a/jstests/sharding/move_jumbo_chunk.js b/jstests/sharding/move_jumbo_chunk.js
index c4143c817d2..31d533f5e32 100644
--- a/jstests/sharding/move_jumbo_chunk.js
+++ b/jstests/sharding/move_jumbo_chunk.js
@@ -9,6 +9,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
let st = new ShardingTest({
shards: 2,
mongos: 1,
@@ -51,8 +53,9 @@ function migrateJumboChunk(failpointOn) {
forceJumbo: true,
waitForDelete: true
}));
- assert.eq(st.s.getDB("config").chunks.find({ns: kDbName + ".foo"}).toArray()[0].shard,
- st.shard1.shardName);
+ assert.eq(
+ findChunksUtil.findChunksByNs(st.s.getDB("config"), kDbName + ".foo").toArray()[0].shard,
+ st.shard1.shardName);
assert.eq(st.shard1.getDB(kDbName).foo.find().itcount(), 2000);
assert.eq(st.shard0.getDB(kDbName).foo.find().itcount(), 0);
assert.eq(
@@ -91,8 +94,8 @@ function migrateJumboChunk(failpointOn) {
}, 'Balancer failed to run for 3 rounds', 1000 * 60 * 10);
st.stopBalancer();
- let jumboChunk = st.getDB('config').chunks.findOne(
- {ns: 'test.foo', min: {$lte: {x: 0}}, max: {$gt: {x: 0}}});
+ let jumboChunk = findChunksUtil.findOneChunkByNs(
+ st.getDB('config'), 'test.foo', {min: {$lte: {x: 0}}, max: {$gt: {x: 0}}});
if (!failpointOn) {
assert.lte(diff(), 5);
@@ -154,8 +157,8 @@ assert.soon(function() {
return ("completed" == res.state);
}, "failed to remove shard");
-let jumboChunk =
- st.getDB('config').chunks.findOne({ns: 'test.foo', min: {$lte: {x: 0}}, max: {$gt: {x: 0}}});
+let jumboChunk = findChunksUtil.findOneChunkByNs(
+ st.getDB('config'), 'test.foo', {min: {$lte: {x: 0}}, max: {$gt: {x: 0}}});
assert.eq(
st.shard0.shardName, jumboChunk.shard, 'jumbo chunk ' + tojson(jumboChunk) + ' was not moved');
diff --git a/jstests/sharding/move_primary_with_drop_collection.js b/jstests/sharding/move_primary_with_drop_collection.js
index 9f5eb9aafad..3b16f3edc82 100644
--- a/jstests/sharding/move_primary_with_drop_collection.js
+++ b/jstests/sharding/move_primary_with_drop_collection.js
@@ -1,6 +1,8 @@
(function() {
"use strict";
+load("jstests/sharding/libs/find_chunks_util.js");
+
var st = new ShardingTest({shards: 2});
var configDB = st.s.getDB('config');
@@ -46,8 +48,10 @@ jsTest.log("Create sharded collection with two chunks on each shard");
st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
st.shardColl(coll, {skey: 1}, {skey: 0}, {skey: 0});
-assert.eq(1, configDB.chunks.count({ns: coll.getFullName(), shard: st.shard0.shardName}));
-assert.eq(1, configDB.chunks.count({ns: coll.getFullName(), shard: st.shard1.shardName}));
+assert.eq(
+ 1, findChunksUtil.countChunksForNs(configDB, coll.getFullName(), {shard: st.shard0.shardName}));
+assert.eq(
+ 1, findChunksUtil.countChunksForNs(configDB, coll.getFullName(), {shard: st.shard1.shardName}));
jsTest.log("Move all chunks to shard 0");
assert.commandWorked(st.s.adminCommand({
moveChunk: coll.getFullName(),
@@ -55,15 +59,18 @@ assert.commandWorked(st.s.adminCommand({
to: st.shard0.shardName,
_waitForDelete: true
}));
-assert.eq(2, configDB.chunks.count({ns: coll.getFullName(), shard: st.shard0.shardName}));
-assert.eq(0, configDB.chunks.count({ns: coll.getFullName(), shard: st.shard1.shardName}));
+assert.eq(
+ 2, findChunksUtil.countChunksForNs(configDB, coll.getFullName(), {shard: st.shard0.shardName}));
+assert.eq(
+ 0, findChunksUtil.countChunksForNs(configDB, coll.getFullName(), {shard: st.shard1.shardName}));
jsTest.log("Drop sharded collection");
coll.drop();
jsTest.log("Re-Create sharded collection with one chunk on shard 0");
st.shardColl(coll, {skey: 1}, false, false);
-assert.eq(1, configDB.chunks.count({ns: coll.getFullName(), shard: st.shard0.shardName}));
+assert.eq(
+ 1, findChunksUtil.countChunksForNs(configDB, coll.getFullName(), {shard: st.shard0.shardName}));
jsTest.log("Move primary of DB to shard 1");
st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName);
diff --git a/jstests/sharding/movechunk_commit_changelog_stats.js b/jstests/sharding/movechunk_commit_changelog_stats.js
index a412077cf71..2bfb9160fba 100644
--- a/jstests/sharding/movechunk_commit_changelog_stats.js
+++ b/jstests/sharding/movechunk_commit_changelog_stats.js
@@ -5,6 +5,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var st = new ShardingTest({mongos: 1, shards: 2});
var kDbName = 'db';
@@ -26,7 +28,7 @@ function assertCountsInChangelog() {
var ns = kDbName + '.fooHashed';
assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 'hashed'}}));
-var aChunk = mongos.getDB('config').chunks.findOne({ns: ns, shard: shard0});
+var aChunk = findChunksUtil.findOneChunkByNs(mongos.getDB('config'), ns, {shard: shard0});
assert(aChunk);
// Assert counts field exists in the changelog entry for moveChunk.commit
diff --git a/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js b/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
index 67dac54fdc7..55785113b13 100644
--- a/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
+++ b/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
@@ -7,6 +7,7 @@
// distributed lock.
load('./jstests/libs/chunk_manipulation_util.js');
+load("jstests/sharding/libs/find_chunks_util.js");
(function() {
'use strict';
@@ -46,14 +47,16 @@ function interruptMoveChunkAndRecover(fromShard, toShard, isJumbo) {
// Ensure a new primary is found promptly
st.configRS.getPrimary(30000);
- assert.eq(1,
- mongos.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: fromShard.shardName})
- .itcount());
- assert.eq(0,
- mongos.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: toShard.shardName})
- .itcount());
+ assert.eq(
+ 1,
+ findChunksUtil
+ .findChunksByNs(mongos.getDB('config'), 'TestDB.TestColl', {shard: fromShard.shardName})
+ .itcount());
+ assert.eq(
+ 0,
+ findChunksUtil
+ .findChunksByNs(mongos.getDB('config'), 'TestDB.TestColl', {shard: toShard.shardName})
+ .itcount());
// At this point, the balancer is in recovery mode. Ensure that stepdown can be done again and
// the recovery mode interrupted.
@@ -67,14 +70,16 @@ function interruptMoveChunkAndRecover(fromShard, toShard, isJumbo) {
// Ensure that migration succeeded
joinMoveChunk();
- assert.eq(0,
- mongos.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: fromShard.shardName})
- .itcount());
- assert.eq(1,
- mongos.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: toShard.shardName})
- .itcount());
+ assert.eq(
+ 0,
+ findChunksUtil
+ .findChunksByNs(mongos.getDB('config'), 'TestDB.TestColl', {shard: fromShard.shardName})
+ .itcount());
+ assert.eq(
+ 1,
+ findChunksUtil
+ .findChunksByNs(mongos.getDB('config'), 'TestDB.TestColl', {shard: toShard.shardName})
+ .itcount());
}
// We have one non-jumbo chunk initially
diff --git a/jstests/sharding/movechunk_parallel.js b/jstests/sharding/movechunk_parallel.js
index ca16d4caa8b..82e55501877 100644
--- a/jstests/sharding/movechunk_parallel.js
+++ b/jstests/sharding/movechunk_parallel.js
@@ -2,6 +2,7 @@
// long as they do not touch the same shards
load('./jstests/libs/chunk_manipulation_util.js');
+load("jstests/sharding/libs/find_chunks_util.js");
(function() {
'use strict';
@@ -31,14 +32,16 @@ assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 30}));
assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 20}, st.shard1.shardName));
assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 30}, st.shard1.shardName));
-assert.eq(2,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
- .itcount());
-assert.eq(2,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
- .itcount());
+assert.eq(
+ 2,
+ findChunksUtil
+ .findChunksByNs(st.s0.getDB('config'), 'TestDB.TestColl', {shard: st.shard0.shardName})
+ .itcount());
+assert.eq(
+ 2,
+ findChunksUtil
+ .findChunksByNs(st.s0.getDB('config'), 'TestDB.TestColl', {shard: st.shard1.shardName})
+ .itcount());
// Pause migrations at shards 2 and 3
pauseMigrateAtStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
@@ -59,22 +62,26 @@ unpauseMigrateAtStep(st.shard3, migrateStepNames.deletedPriorDataInRange);
joinMoveChunk1();
joinMoveChunk2();
-assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
- .itcount());
-assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
- .itcount());
-assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard2.shardName})
- .itcount());
-assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard3.shardName})
- .itcount());
+assert.eq(
+ 1,
+ findChunksUtil
+ .findChunksByNs(st.s0.getDB('config'), 'TestDB.TestColl', {shard: st.shard0.shardName})
+ .itcount());
+assert.eq(
+ 1,
+ findChunksUtil
+ .findChunksByNs(st.s0.getDB('config'), 'TestDB.TestColl', {shard: st.shard1.shardName})
+ .itcount());
+assert.eq(
+ 1,
+ findChunksUtil
+ .findChunksByNs(st.s0.getDB('config'), 'TestDB.TestColl', {shard: st.shard2.shardName})
+ .itcount());
+assert.eq(
+ 1,
+ findChunksUtil
+ .findChunksByNs(st.s0.getDB('config'), 'TestDB.TestColl', {shard: st.shard3.shardName})
+ .itcount());
st.stop();
MongoRunner.stopMongod(staticMongod);
diff --git a/jstests/sharding/non_transaction_snapshot_errors.js b/jstests/sharding/non_transaction_snapshot_errors.js
index 875bea0f856..f7d02fcd109 100644
--- a/jstests/sharding/non_transaction_snapshot_errors.js
+++ b/jstests/sharding/non_transaction_snapshot_errors.js
@@ -19,6 +19,7 @@
"use strict";
load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/find_chunks_util.js");
const dbName = "test";
const collName = "foo";
@@ -113,8 +114,10 @@ assert.commandWorked(
jsTestLog("One shard snapshot read");
-assert.eq(2, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
-assert.eq(0, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+assert.eq(2,
+ findChunksUtil.countChunksForNs(st.s.getDB('config'), ns, {shard: st.shard0.shardName}));
+assert.eq(0,
+ findChunksUtil.countChunksForNs(st.s.getDB('config'), ns, {shard: st.shard1.shardName}));
for (let errorCode of kSnapshotErrors) {
runTest(st, 1, errorCode, true /* isSharded */);
@@ -123,8 +126,10 @@ for (let errorCode of kSnapshotErrors) {
jsTestLog("Two shard snapshot read");
assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 15}, to: st.shard1.shardName}));
-assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
-assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+assert.eq(1,
+ findChunksUtil.countChunksForNs(st.s.getDB('config'), ns, {shard: st.shard0.shardName}));
+assert.eq(1,
+ findChunksUtil.countChunksForNs(st.s.getDB('config'), ns, {shard: st.shard1.shardName}));
for (let errorCode of kSnapshotErrors) {
runTest(st, 2, errorCode, true /* isSharded */);
diff --git a/jstests/sharding/prefix_shard_key.js b/jstests/sharding/prefix_shard_key.js
index b25df2d838b..20b8224b790 100644
--- a/jstests/sharding/prefix_shard_key.js
+++ b/jstests/sharding/prefix_shard_key.js
@@ -9,6 +9,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
// Shard key index does not exactly match shard key, so it is not compatible with $min/$max.
TestData.skipCheckOrphans = true;
@@ -111,7 +113,7 @@ assert.commandWorked(admin.runCommand({
}));
var expectedShardCount = {};
-config.chunks.find({ns: 'test.user'}).forEach(function(chunkDoc) {
+findChunksUtil.findChunksByNs(config, 'test.user').forEach(function(chunkDoc) {
var min = chunkDoc.min.num;
var max = chunkDoc.max.num;
@@ -175,7 +177,7 @@ for (i = 0; i < 3; i++) {
// split on that key, and check it makes 2 chunks
var splitRes = admin.runCommand({split: coll2 + "", middle: {skey: 0}});
assert.eq(splitRes.ok, 1, "split didn't work");
- assert.eq(config.chunks.find({ns: coll2.getFullName()}).count(), 2);
+ assert.eq(findChunksUtil.findChunksByNs(config, coll2.getFullName()).count(), 2);
// movechunk should move ALL docs since they have same value for skey
moveRes = admin.runCommand(
diff --git a/jstests/sharding/presplit.js b/jstests/sharding/presplit.js
index 87d4e81a315..e7f340ae451 100644
--- a/jstests/sharding/presplit.js
+++ b/jstests/sharding/presplit.js
@@ -1,5 +1,7 @@
(function() {
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s = new ShardingTest({name: "presplit", shards: 2, mongos: 1, other: {chunkSize: 1}});
s.adminCommand({enablesharding: "test"});
@@ -31,7 +33,7 @@ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
// Make sure the collection's original chunk got split
s.printChunks();
-assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "many chunks assertion");
+assert.lt(20, findChunksUtil.countChunksForNs(s.config, "test.foo"), "many chunks assertion");
assert.eq(num, primary.foo.count());
s.printChangeLog();
diff --git a/jstests/sharding/query/geo_near_sharded.js b/jstests/sharding/query/geo_near_sharded.js
index d68dcdec140..cdf222d5b4e 100644
--- a/jstests/sharding/query/geo_near_sharded.js
+++ b/jstests/sharding/query/geo_near_sharded.js
@@ -2,6 +2,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var coll = 'points';
function test(st, db, sharded, indexType) {
@@ -24,7 +26,7 @@ function test(st, db, sharded, indexType) {
}
var config = db.getSiblingDB("config");
- assert.eq(config.chunks.count({'ns': db[coll].getFullName()}), 10);
+ assert.eq(findChunksUtil.countChunksForNs(config, db[coll].getFullName()), 10);
}
Random.setRandomSeed();
diff --git a/jstests/sharding/query_config.js b/jstests/sharding/query_config.js
index 2d92af83622..57c74010012 100644
--- a/jstests/sharding/query_config.js
+++ b/jstests/sharding/query_config.js
@@ -2,6 +2,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var getListCollectionsCursor = function(database, options, subsequentBatchSize) {
return new DBCommandCursor(
database, database.runCommand("listCollections", options), subsequentBatchSize);
@@ -207,7 +209,9 @@ var queryConfigChunks = function(st) {
st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 12}, to: shard2}));
// Find query.
- cursor = configDB.chunks.find({ns: testColl.getFullName()}, {_id: 0, min: 1, max: 1, shard: 1})
+ cursor = findChunksUtil
+ .findChunksByNs(
+ configDB, testColl.getFullName(), null, {_id: 0, min: 1, max: 1, shard: 1})
.sort({"min.e": 1});
assert.eq(cursor.next(), {min: {e: {"$minKey": 1}}, "max": {"e": 2}, shard: shard2});
assert.eq(cursor.next(), {min: {e: 2}, max: {e: 6}, shard: shard1});
@@ -217,21 +221,31 @@ var queryConfigChunks = function(st) {
assert(!cursor.hasNext());
// Count query with filter.
- assert.eq(configDB.chunks.count({ns: testColl.getFullName()}), 5);
+ assert.eq(findChunksUtil.countChunksForNs(configDB, testColl.getFullName()), 5);
// Distinct query.
assert.eq(configDB.chunks.distinct("shard").sort(), [shard1, shard2]);
// Map reduce query.
+ const coll = configDB.collections.findOne({_id: testColl.getFullName()});
var mapFunction = function() {
- if (this.ns == "test2.testColl") {
- emit(this.shard, 1);
+ if (xx.timestamp) {
+ if (this.uuid.toString() == xx.uuid.toString()) {
+ emit(this.shard, 1);
+ }
+ } else {
+ if (this.ns == "test2.testColl") {
+ emit(this.shard, 1);
+ }
}
};
var reduceFunction = function(key, values) {
return {chunks: values.length};
};
- result = configDB.chunks.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
+ result = configDB.chunks.mapReduce(
+ mapFunction,
+ reduceFunction,
+ {out: {inline: 1}, scope: {xx: {timestamp: coll.timestamp, uuid: coll.uuid}}});
assert.eq(result.ok, 1);
assert.eq(sortArrayById(result.results),
[{_id: shard1, value: {chunks: 2}}, {_id: shard2, value: {chunks: 3}}]);
diff --git a/jstests/sharding/refine_collection_shard_key_atomic.js b/jstests/sharding/refine_collection_shard_key_atomic.js
index 19073086e88..2b099cf9386 100644
--- a/jstests/sharding/refine_collection_shard_key_atomic.js
+++ b/jstests/sharding/refine_collection_shard_key_atomic.js
@@ -6,6 +6,7 @@
(function() {
'use strict';
load('jstests/libs/fail_point_util.js');
+load("jstests/sharding/libs/find_chunks_util.js");
const st = new ShardingTest({shards: 1});
const mongos = st.s0;
@@ -58,7 +59,7 @@ assert.eq(oldKeyDoc, oldCollArr[0].key);
// Verify that 'config.chunks' is as expected before refineCollectionShardKey.
const oldChunkArr =
- mongos.getCollection(kConfigChunks).find({ns: kNsName}).sort({min: 1}).toArray();
+ findChunksUtil.findChunksByNs(mongos.getDB('config'), kNsName).sort({min: 1}).toArray();
assert.eq(3, oldChunkArr.length);
assert.eq({a: MinKey, b: MinKey}, oldChunkArr[0].min);
assert.eq({a: 0, b: 0}, oldChunkArr[0].max);
@@ -91,7 +92,8 @@ let newCollArr = mongos.getCollection(kConfigCollections).find({_id: kNsName}).t
assert.sameMembers(oldCollArr, newCollArr);
// Verify that 'config.chunks' has not been updated since we haven't committed the transaction.
-let newChunkArr = mongos.getCollection(kConfigChunks).find({ns: kNsName}).sort({min: 1}).toArray();
+let newChunkArr =
+ findChunksUtil.findChunksByNs(mongos.getDB('config'), kNsName).sort({min: 1}).toArray();
assert.sameMembers(oldChunkArr, newChunkArr);
// Verify that 'config.tags' has not been updated since we haven't committed the transaction.
@@ -108,7 +110,8 @@ assert.eq(1, newCollArr.length);
assert.eq(newKeyDoc, newCollArr[0].key);
// Verify that 'config.chunks' is as expected after refineCollectionShardKey.
-newChunkArr = mongos.getCollection(kConfigChunks).find({ns: kNsName}).sort({min: 1}).toArray();
+newChunkArr =
+ findChunksUtil.findChunksByNs(mongos.getDB('config'), kNsName).sort({min: 1}).toArray();
assert.eq(3, newChunkArr.length);
assert.eq({a: MinKey, b: MinKey, c: MinKey, d: MinKey}, newChunkArr[0].min);
assert.eq({a: 0, b: 0, c: MinKey, d: MinKey}, newChunkArr[0].max);
@@ -150,7 +153,14 @@ hangBeforeUpdatingChunksFailPoint.wait();
// Manually write to 'config.chunks' to force refineCollectionShardKey to throw a WriteConflict
// exception.
-assert.writeOK(mongos.getCollection(kConfigChunks).update({ns: kNsName}, {$set: {jumbo: true}}));
+const coll = mongos.getCollection(kConfigCollections).findOne({_id: kNsName});
+if (coll.timestamp) {
+ assert.writeOK(
+ mongos.getCollection(kConfigChunks).update({uuid: coll.uuid}, {$set: {jumbo: true}}));
+} else {
+ assert.writeOK(
+ mongos.getCollection(kConfigChunks).update({ns: kNsName}, {$set: {jumbo: true}}));
+}
// Disable failpoint 'hangRefineCollectionShardKeyBeforeUpdatingChunks' and await parallel shell.
hangBeforeUpdatingChunksFailPoint.off();
@@ -162,7 +172,8 @@ assert.eq(1, newCollArr.length);
assert.eq(newKeyDoc, newCollArr[0].key);
// Verify that 'config.chunks' is as expected after refineCollectionShardKey.
-newChunkArr = mongos.getCollection(kConfigChunks).find({ns: kNsName}).sort({min: 1}).toArray();
+newChunkArr =
+ findChunksUtil.findChunksByNs(mongos.getDB('config'), kNsName).sort({min: 1}).toArray();
assert.eq(1, newChunkArr.length);
assert.eq({a: MinKey, b: MinKey, c: MinKey, d: MinKey}, newChunkArr[0].min);
assert.eq({a: MaxKey, b: MaxKey, c: MaxKey, d: MaxKey}, newChunkArr[0].max);
diff --git a/jstests/sharding/refine_collection_shard_key_basic.js b/jstests/sharding/refine_collection_shard_key_basic.js
index 64f3491c182..d753d1da061 100644
--- a/jstests/sharding/refine_collection_shard_key_basic.js
+++ b/jstests/sharding/refine_collection_shard_key_basic.js
@@ -13,6 +13,7 @@ load('jstests/libs/fail_point_util.js');
load('jstests/libs/profiler.js');
load('jstests/sharding/libs/shard_versioning_util.js');
load('jstests/sharding/libs/sharded_transactions_helpers.js');
+load("jstests/sharding/libs/find_chunks_util.js");
const st = new ShardingTest({mongos: 2, shards: 2, rs: {nodes: 3}});
const mongos = st.s0;
@@ -178,12 +179,12 @@ function setupConfigChunksBeforeRefine() {
assert.commandWorked(mongos.adminCommand({split: kNsName, middle: {a: 0, b: 0}}));
assert.commandWorked(mongos.adminCommand({split: kNsName, middle: {a: 5, b: 5}}));
- return mongos.getCollection(kConfigChunks).findOne({ns: kNsName}).lastmodEpoch;
+ return findChunksUtil.findOneChunkByNs(mongos.getDB('config'), kNsName).lastmodEpoch;
}
function validateConfigChunksAfterRefine(oldEpoch) {
const chunkArr =
- mongos.getCollection(kConfigChunks).find({ns: kNsName}).sort({min: 1}).toArray();
+ findChunksUtil.findChunksByNs(mongos.getDB('config'), kNsName).sort({min: 1}).toArray();
assert.eq(3, chunkArr.length);
assert.eq({a: MinKey, b: MinKey, c: MinKey, d: MinKey}, chunkArr[0].min);
assert.eq({a: 0, b: 0, c: MinKey, d: MinKey}, chunkArr[0].max);
@@ -234,7 +235,8 @@ function validateUnrelatedCollAfterRefine(oldCollArr, oldChunkArr, oldTagsArr) {
assert.eq(1, collArr.length);
assert.sameMembers(oldCollArr, collArr);
- const chunkArr = mongos.getCollection(kConfigChunks).find({ns: kUnrelatedName}).toArray();
+ const chunkArr =
+ findChunksUtil.findChunksByNs(mongos.getDB('config'), kUnrelatedName).toArray();
assert.eq(3, chunkArr.length);
assert.sameMembers(oldChunkArr, chunkArr);
@@ -608,7 +610,7 @@ assert.commandWorked(mongos.adminCommand({
}));
const oldCollArr = mongos.getCollection(kConfigCollections).find({_id: kUnrelatedName}).toArray();
-const oldChunkArr = mongos.getCollection(kConfigChunks).find({ns: kUnrelatedName}).toArray();
+const oldChunkArr = findChunksUtil.findChunksByNs(mongos.getDB('config'), kUnrelatedName).toArray();
const oldTagsArr = mongos.getCollection(kConfigTags).find({ns: kUnrelatedName}).toArray();
assert.eq(1, oldCollArr.length);
assert.eq(3, oldChunkArr.length);
@@ -754,9 +756,9 @@ function compareMinAndMaxFields(shardedArr, refinedArr) {
function compareBoundaries(conn, shardedNs, refinedNs) {
// Compare chunks.
const shardedChunks =
- conn.getDB("config").chunks.find({ns: shardedNs}).sort({max: 1}).toArray();
+ findChunksUtil.findChunksByNs(conn.getDB("config"), shardedNs).sort({max: 1}).toArray();
const refinedChunks =
- conn.getDB("config").chunks.find({ns: refinedNs}).sort({max: 1}).toArray();
+ findChunksUtil.findChunksByNs(conn.getDB("config"), refinedNs).sort({max: 1}).toArray();
compareMinAndMaxFields(shardedChunks, refinedChunks);
// Compare tags.
diff --git a/jstests/sharding/refine_collection_shard_key_jumbo.js b/jstests/sharding/refine_collection_shard_key_jumbo.js
index 82f882f34ca..8485046d2b3 100644
--- a/jstests/sharding/refine_collection_shard_key_jumbo.js
+++ b/jstests/sharding/refine_collection_shard_key_jumbo.js
@@ -5,6 +5,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
const st = new ShardingTest({mongos: 1, shards: 2, other: {chunkSize: 1, enableAutoSplit: true}});
const primaryShard = st.shard0.shardName;
const secondaryShard = st.shard1.shardName;
@@ -53,7 +55,7 @@ function validateBalancerBeforeRefine(ns) {
runBalancer();
// Confirm that the jumbo chunk has not been split or moved from the primary shard.
- const jumboChunk = st.s.getCollection(kConfigChunks).find({ns: ns}).toArray();
+ const jumboChunk = findChunksUtil.findChunksByNs(st.s.getDB('config'), ns).toArray();
assert.eq(1, jumboChunk.length);
assert.eq(true, jumboChunk[0].jumbo);
assert.eq(primaryShard, jumboChunk[0].shard);
@@ -63,13 +65,13 @@ function validateBalancerAfterRefine(ns, newField) {
runBalancer();
// Confirm that the jumbo chunk has been split and some chunks moved to the secondary shard.
- const chunks = st.s.getCollection(kConfigChunks)
- .find({
- ns: ns,
- min: {$lte: {x: 0, [newField]: MaxKey}},
- max: {$gt: {x: 0, [newField]: MinKey}}
- })
- .toArray();
+ const chunks =
+ findChunksUtil
+ .findChunksByNs(
+ st.s.getDB('config'),
+ ns,
+ {min: {$lte: {x: 0, [newField]: MaxKey}}, max: {$gt: {x: 0, [newField]: MinKey}}})
+ .toArray();
assert.lt(1, chunks.length);
assert.eq(true, chunks.some((chunk) => {
return (chunk.shard === secondaryShard);
@@ -82,7 +84,7 @@ function validateMoveChunkBeforeRefine(ns) {
ErrorCodes.ChunkTooBig);
// Confirm that the jumbo chunk has not been split or moved from the primary shard.
- const jumboChunk = st.s.getCollection(kConfigChunks).find({ns: ns}).toArray();
+ const jumboChunk = findChunksUtil.findChunksByNs(st.s.getDB('config'), ns).toArray();
assert.eq(1, jumboChunk.length);
assert.eq(primaryShard, jumboChunk[0].shard);
}
@@ -95,26 +97,26 @@ function validateMoveChunkAfterRefine(ns, newField) {
assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 0, [newField]: i * 125}}));
}
- const chunksToMove = st.s.getCollection(kConfigChunks)
- .find({
- ns: ns,
- min: {$lte: {x: 0, [newField]: MaxKey}},
- max: {$gt: {x: 0, [newField]: MinKey}}
- })
- .toArray();
+ const chunksToMove =
+ findChunksUtil
+ .findChunksByNs(
+ st.s.getDB('config'),
+ ns,
+ {min: {$lte: {x: 0, [newField]: MaxKey}}, max: {$gt: {x: 0, [newField]: MinKey}}})
+ .toArray();
chunksToMove.forEach((chunk) => {
assert.commandWorked(st.s.adminCommand(
{moveChunk: ns, find: {x: 0, [newField]: chunk.min[newField]}, to: secondaryShard}));
});
// Confirm that the jumbo chunk has been split and all chunks moved to the secondary shard.
- const chunks = st.s.getCollection(kConfigChunks)
- .find({
- ns: ns,
- min: {$lte: {x: 0, [newField]: MaxKey}},
- max: {$gt: {x: 0, [newField]: MinKey}}
- })
- .toArray();
+ const chunks =
+ findChunksUtil
+ .findChunksByNs(
+ st.s.getDB('config'),
+ ns,
+ {min: {$lte: {x: 0, [newField]: MaxKey}}, max: {$gt: {x: 0, [newField]: MinKey}}})
+ .toArray();
assert.lt(1, chunks.length);
chunks.forEach((chunk) => {
assert.eq(secondaryShard, chunk.shard);
diff --git a/jstests/sharding/remove2.js b/jstests/sharding/remove2.js
index 0257fc79352..b15d7ada11e 100644
--- a/jstests/sharding/remove2.js
+++ b/jstests/sharding/remove2.js
@@ -13,6 +13,7 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
'use strict';
load("jstests/replsets/rslib.js");
+load("jstests/sharding/libs/find_chunks_util.js");
function seedString(replTest) {
var members = replTest.getReplSetConfig().members.map(function(elem) {
@@ -45,12 +46,12 @@ function setupInitialData(st, coll) {
assert.commandWorked(st.s0.adminCommand({shardCollection: coll.getFullName(), key: {i: 1}}));
assert.commandWorked(st.splitAt(coll.getFullName(), {i: 5}));
assert.commandWorked(st.moveChunk(coll.getFullName(), {i: 6}, st.shard1.shardName));
- assert.eq(
- 1,
- st.s0.getDB('config').chunks.count({ns: coll.getFullName(), shard: st.shard0.shardName}));
- assert.eq(
- 1,
- st.s0.getDB('config').chunks.count({ns: coll.getFullName(), shard: st.shard1.shardName}));
+ assert.eq(1,
+ findChunksUtil.countChunksForNs(
+ st.s0.getDB('config'), coll.getFullName(), {shard: st.shard0.shardName}));
+ assert.eq(1,
+ findChunksUtil.countChunksForNs(
+ st.s0.getDB('config'), coll.getFullName(), {shard: st.shard1.shardName}));
let str = 'a';
while (str.length < 1024 * 16) {
@@ -71,12 +72,12 @@ function removeShard(st, coll, replTest) {
jsTest.log("Moving chunk from shard1 to shard0");
assert.commandWorked(st.moveChunk(coll.getFullName(), {i: 6}, st.shard0.shardName));
- assert.eq(
- 2,
- st.s0.getDB('config').chunks.count({ns: coll.getFullName(), shard: st.shard0.shardName}));
- assert.eq(
- 0,
- st.s0.getDB('config').chunks.count({ns: coll.getFullName(), shard: st.shard1.shardName}));
+ assert.eq(2,
+ findChunksUtil.countChunksForNs(
+ st.s0.getDB('config'), coll.getFullName(), {shard: st.shard0.shardName}));
+ assert.eq(0,
+ findChunksUtil.countChunksForNs(
+ st.s0.getDB('config'), coll.getFullName(), {shard: st.shard1.shardName}));
jsTest.log("Removing shard with name: " + replTest.name);
var res = st.s.adminCommand({removeShard: replTest.name});
@@ -101,12 +102,12 @@ function addShard(st, coll, replTest) {
jsTest.log("Moving chunk from shard0 to shard1");
assert.commandWorked(st.moveChunk(coll.getFullName(), {i: 6}, st.shard1.shardName));
- assert.eq(
- 1,
- st.s0.getDB('config').chunks.count({ns: coll.getFullName(), shard: st.shard0.shardName}));
- assert.eq(
- 1,
- st.s0.getDB('config').chunks.count({ns: coll.getFullName(), shard: st.shard1.shardName}));
+ assert.eq(1,
+ findChunksUtil.countChunksForNs(
+ st.s0.getDB('config'), coll.getFullName(), {shard: st.shard0.shardName}));
+ assert.eq(1,
+ findChunksUtil.countChunksForNs(
+ st.s0.getDB('config'), coll.getFullName(), {shard: st.shard1.shardName}));
assert.eq(300, coll.find().itcount());
jsTest.log("Shard added successfully");
diff --git a/jstests/sharding/reshard_collection_basic.js b/jstests/sharding/reshard_collection_basic.js
index 495ac9d79c2..02668400626 100644
--- a/jstests/sharding/reshard_collection_basic.js
+++ b/jstests/sharding/reshard_collection_basic.js
@@ -7,6 +7,7 @@
//
load("jstests/libs/uuid_util.js");
+load("jstests/sharding/libs/find_chunks_util.js");
(function() {
'use strict';
@@ -119,7 +120,7 @@ let assertSuccessfulReshardCollection = (commandObj, presetReshardedChunks) => {
commandObj._presetReshardedChunks = presetReshardedChunks;
} else {
assert.eq(commandObj._presetReshardedChunks, null);
- const configChunksArray = mongosConfig.chunks.find({'ns': ns});
+ const configChunksArray = findChunksUtil.findChunksByNs(mongosConfig, ns);
presetReshardedChunks = [];
configChunksArray.forEach(chunk => {
presetReshardedChunks.push(
diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js
index 12f326a32f6..c8bbb9cbcc9 100644
--- a/jstests/sharding/shard1.js
+++ b/jstests/sharding/shard1.js
@@ -4,6 +4,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s = new ShardingTest({shards: 2});
var db = s.getDB("test");
@@ -34,7 +36,7 @@ assert.commandFailed(s.s0.adminCommand({shardCollection: '.foo', key: {x: 1}}));
assert(s.config.collections.findOne({_id: "test.foo"}), "No collection entry found for test.foo");
s.config.collections.find().forEach(printjson);
-assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "num chunks A");
+assert.eq(1, findChunksUtil.countChunksForNs(s.config, "test.foo"), "num chunks A");
assert.eq(3, db.foo.find().length(), "after sharding, no split count failed");
s.stop();
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index 79b473de29d..3882f666562 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -1,6 +1,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
function placeCheck(num) {
print("shard2 step: " + num);
}
@@ -21,11 +23,11 @@ var db = s.getDB("test");
assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', s.shard1.shardName);
assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
-assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check 1");
+assert.eq(1, findChunksUtil.countChunksForNs(s.config, "test.foo"), "sanity check 1");
assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 0}}));
-assert.eq(2, s.config.chunks.count({"ns": "test.foo"}), "should be 2 shards");
-var chunks = s.config.chunks.find({"ns": "test.foo"}).toArray();
+assert.eq(2, findChunksUtil.countChunksForNs(s.config, "test.foo"), "should be 2 shards");
+var chunks = findChunksUtil.findChunksByNs(s.config, "test.foo").toArray();
assert.eq(chunks[0].shard, chunks[1].shard, "server should be the same after a split");
assert.commandWorked(db.foo.save({num: 1, name: "eliot"}));
@@ -56,9 +58,9 @@ assert.eq(2, secondary.foo.find().length(), "secondary should have 2 after move
assert.eq(1, primary.foo.find().length(), "primary should only have 1 after move shard");
assert.eq(2,
- s.config.chunks.count({"ns": "test.foo"}),
+ findChunksUtil.countChunksForNs(s.config, "test.foo"),
"still should have 2 shards after move not:" + s.getChunksString());
-var chunks = s.config.chunks.find({"ns": "test.foo"}).toArray();
+var chunks = findChunksUtil.findChunksByNs(s.config, "test.foo").toArray();
assert.neq(chunks[0].shard, chunks[1].shard, "servers should NOT be the same after the move");
placeCheck(3);
@@ -217,7 +219,7 @@ assert.eq(1, s.onNumShards("test", "foo"), "on 1 shards");
assert.commandWorked(s.s0.adminCommand(
{movechunk: "test.foo", find: {num: -2}, to: primary.getMongo().name, _waitForDelete: true}));
assert.eq(2, s.onNumShards("test", "foo"), "on 2 shards again");
-assert.eq(3, s.config.chunks.count({"ns": "test.foo"}), "only 3 chunks");
+assert.eq(3, findChunksUtil.countChunksForNs(s.config, "test.foo"), "only 3 chunks");
print("YO : " + tojson(db.runCommand("serverStatus")));
diff --git a/jstests/sharding/shard_collection_basic.js b/jstests/sharding/shard_collection_basic.js
index f9d589ae4b5..8341a84ca87 100644
--- a/jstests/sharding/shard_collection_basic.js
+++ b/jstests/sharding/shard_collection_basic.js
@@ -2,6 +2,8 @@
// Basic tests for shardCollection.
//
+load("jstests/sharding/libs/find_chunks_util.js");
+
(function() {
'use strict';
@@ -346,7 +348,7 @@ assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
assert.commandWorked(
sh.shardCollection(kDbName + '.foo', {a: "hashed"}, false, {numInitialChunks: 5}));
st.printShardingStatus();
-var numChunks = st.config.chunks.find({ns: kDbName + '.foo'}).count();
+var numChunks = findChunksUtil.findChunksByNs(st.config, kDbName + '.foo').count();
assert.eq(numChunks, 5, "unexpected number of chunks");
st.stop();
diff --git a/jstests/sharding/shard_collection_existing_zones.js b/jstests/sharding/shard_collection_existing_zones.js
index eb7343f221c..cc52b416bc2 100644
--- a/jstests/sharding/shard_collection_existing_zones.js
+++ b/jstests/sharding/shard_collection_existing_zones.js
@@ -3,6 +3,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var st = new ShardingTest({mongos: 1, shards: 3});
var kDbName = 'test';
var kCollName = 'foo';
@@ -104,7 +106,7 @@ function testChunkSplits(collectionExists) {
{range: [{x: 30}, {x: 40}], shardId: st.shard2.shardName}, // pre-defined
{range: [{x: 40}, {x: {"$maxKey": 1}}], shardId: st.shard2.shardName}
];
- var chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+ var chunkDocs = findChunksUtil.findChunksByNs(configDB, ns).sort({min: 1}).toArray();
assert.eq(chunkDocs.length,
expectedChunks.length,
"shardCollection failed to create chunk documents correctly");
@@ -145,13 +147,13 @@ function testNonemptyZonedCollection() {
assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: shardKey}));
// Check that there is initially 1 chunk.
- assert.eq(1, configDB.chunks.count({ns: ns}));
+ assert.eq(1, findChunksUtil.countChunksForNs(configDB, ns));
st.startBalancer();
// Check that the chunks were moved properly.
assert.soon(() => {
- let res = configDB.chunks.count({ns: ns});
+ let res = findChunksUtil.countChunksForNs(configDB, ns);
return res === 5;
}, 'balancer never ran', 10 * 60 * 1000, 1000);
diff --git a/jstests/sharding/shard_existing.js b/jstests/sharding/shard_existing.js
index 6e3242647f8..7e2b1028bab 100644
--- a/jstests/sharding/shard_existing.js
+++ b/jstests/sharding/shard_existing.js
@@ -1,6 +1,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s = new ShardingTest({name: "shard_existing", shards: 2, mongos: 1, other: {chunkSize: 1}});
var db = s.getDB("test");
@@ -29,7 +31,7 @@ var res = s.adminCommand({shardcollection: "test.data", key: {_id: 1}});
printjson(res);
// number of chunks should be approx equal to the total data size / half the chunk size
-var numChunks = s.config.chunks.find({ns: 'test.data'}).itcount();
+var numChunks = findChunksUtil.findChunksByNs(s.config, 'test.data').itcount();
var guess = Math.ceil(dataSize / (512 * 1024 + avgObjSize));
assert(Math.abs(numChunks - guess) < 2, "not right number of chunks");
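The comment above reasons that with the 1MB chunk size configured for this test the auto-splitter leaves roughly half a chunk (512KB) of data per chunk, so the expected chunk count is the data size divided by 512KB plus one average document. A worked example with assumed numbers (not the test's actual values):

// Illustrative arithmetic only; dataSize and avgObjSize here are made up.
var exampleDataSize = 20 * 1024 * 1024;   // ~20MB of inserted data (assumed)
var exampleAvgObjSize = 10 * 1024;        // ~10KB per document (assumed)
var exampleGuess = Math.ceil(exampleDataSize / (512 * 1024 + exampleAvgObjSize));
print(exampleGuess);  // 40 -- the assertion then tolerates being off by one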
diff --git a/jstests/sharding/shard_existing_coll_chunk_count.js b/jstests/sharding/shard_existing_coll_chunk_count.js
index 46e2cdba748..e5bdfde6fb0 100644
--- a/jstests/sharding/shard_existing_coll_chunk_count.js
+++ b/jstests/sharding/shard_existing_coll_chunk_count.js
@@ -7,6 +7,7 @@
(function() {
'use strict';
load('jstests/sharding/autosplit_include.js');
+load("jstests/sharding/libs/find_chunks_util.js");
var s = new ShardingTest({
name: "shard_existing_coll_chunk_count",
@@ -21,7 +22,7 @@ var collNum = 0;
var overhead = Object.bsonsize({_id: ObjectId(), i: 1, pad: ""});
var getNumberChunks = function(ns) {
- return s.getDB("config").getCollection("chunks").count({ns});
+ return findChunksUtil.countChunksForNs(s.getDB("config"), ns);
};
var runCase = function(opts) {
diff --git a/jstests/sharding/sharding_balance1.js b/jstests/sharding/sharding_balance1.js
index 18a86e8b76d..b3332dd32b3 100644
--- a/jstests/sharding/sharding_balance1.js
+++ b/jstests/sharding/sharding_balance1.js
@@ -1,6 +1,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var st = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
const dbName = 'ShardingBalanceTest';
@@ -27,7 +29,7 @@ assert.commandWorked(bulk.execute());
assert.commandWorked(st.s.adminCommand({shardcollection: coll.getFullName(), key: {_id: 1}}));
jsTest.log("Checking initial chunk distribution: " + st.chunkCounts(collName, dbName));
assert.lt(minChunkNum,
- st.config.chunks.count({ns: coll.getFullName()}),
+ findChunksUtil.countChunksForNs(st.config, coll.getFullName()),
"Number of initial chunks is less then expected");
assert.lt(minChunkNum,
st.chunkDiff(collName, dbName),
diff --git a/jstests/sharding/sharding_balance2.js b/jstests/sharding/sharding_balance2.js
index a8eaad6ad71..4aa528ff6f7 100644
--- a/jstests/sharding/sharding_balance2.js
+++ b/jstests/sharding/sharding_balance2.js
@@ -4,6 +4,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var MaxSizeMB = 1;
var s = new ShardingTest({
@@ -38,7 +40,7 @@ while (inserted < (40 * 1024 * 1024)) {
assert.commandWorked(bulk.execute());
assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
-assert.gt(s.config.chunks.count({"ns": "test.foo"}), 10);
+assert.gt(findChunksUtil.countChunksForNs(s.config, "test.foo"), 10);
var getShardSize = function(conn) {
var listDatabases = conn.getDB('admin').runCommand({listDatabases: 1});
diff --git a/jstests/sharding/sharding_balance3.js b/jstests/sharding/sharding_balance3.js
index 7639dbb8762..c40699d0f82 100644
--- a/jstests/sharding/sharding_balance3.js
+++ b/jstests/sharding/sharding_balance3.js
@@ -2,6 +2,8 @@
(function() {
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s = new ShardingTest({
name: "slow_sharding_balance3",
shards: 2,
@@ -35,7 +37,7 @@ while (inserted < (40 * 1024 * 1024)) {
assert.commandWorked(bulk.execute());
s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
+assert.lt(20, findChunksUtil.countChunksForNs(s.config, "test.foo"), "setup2");
function diff1() {
var x = s.chunkCounts("foo");
diff --git a/jstests/sharding/sharding_balance4.js b/jstests/sharding/sharding_balance4.js
index 99c005bd09e..464a34b0658 100644
--- a/jstests/sharding/sharding_balance4.js
+++ b/jstests/sharding/sharding_balance4.js
@@ -7,6 +7,7 @@
*/
(function() {
load('jstests/sharding/autosplit_include.js');
+load("jstests/sharding/libs/find_chunks_util.js");
var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
@@ -87,7 +88,7 @@ for (var i = 0; i < 50; i++) {
}
check("initial at end");
-assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
+assert.lt(20, findChunksUtil.countChunksForNs(s.config, "test.foo"), "setup2");
function check(msg, dontAssert) {
for (var x in counts) {
diff --git a/jstests/sharding/sharding_non_transaction_snapshot_aggregate.js b/jstests/sharding/sharding_non_transaction_snapshot_aggregate.js
index 2a7ce0b3a60..f8f32541047 100644
--- a/jstests/sharding/sharding_non_transaction_snapshot_aggregate.js
+++ b/jstests/sharding/sharding_non_transaction_snapshot_aggregate.js
@@ -15,6 +15,7 @@
load("jstests/libs/global_snapshot_reads_util.js");
load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/find_chunks_util.js");
const nodeOptions = {
// Set a large snapshot window of 10 minutes for the test.
@@ -72,9 +73,15 @@ const setupSomeShardedColl = (collName) => {
assert.commandWorked(
mongos.adminCommand({moveChunk: ns, find: {_id: 7}, to: st.shard2.shardName}));
- assert.eq(0, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard2.shardName}));
+ assert.eq(
+ 0,
+ findChunksUtil.countChunksForNs(mongos.getDB('config'), ns, {shard: st.shard0.shardName}));
+ assert.eq(
+ 1,
+ findChunksUtil.countChunksForNs(mongos.getDB('config'), ns, {shard: st.shard1.shardName}));
+ assert.eq(
+ 1,
+ findChunksUtil.countChunksForNs(mongos.getDB('config'), ns, {shard: st.shard2.shardName}));
flushRoutersAndRefreshShardMetadata(st, {ns});
};
setupSomeShardedColl(someShardedColl1);
@@ -94,9 +101,15 @@ const setupAllShardedColl = (collName) => {
assert.commandWorked(
mongos.adminCommand({moveChunk: ns, find: {_id: 7}, to: st.shard2.shardName}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard2.shardName}));
+ assert.eq(
+ 1,
+ findChunksUtil.countChunksForNs(mongos.getDB('config'), ns, {shard: st.shard0.shardName}));
+ assert.eq(
+ 1,
+ findChunksUtil.countChunksForNs(mongos.getDB('config'), ns, {shard: st.shard1.shardName}));
+ assert.eq(
+ 1,
+ findChunksUtil.countChunksForNs(mongos.getDB('config'), ns, {shard: st.shard2.shardName}));
flushRoutersAndRefreshShardMetadata(st, {ns});
};
setupAllShardedColl(allShardedColl1);
diff --git a/jstests/sharding/sharding_non_transaction_snapshot_read.js b/jstests/sharding/sharding_non_transaction_snapshot_read.js
index dc11c46b0f3..f68cffc248f 100644
--- a/jstests/sharding/sharding_non_transaction_snapshot_read.js
+++ b/jstests/sharding/sharding_non_transaction_snapshot_read.js
@@ -14,6 +14,7 @@
load("jstests/libs/global_snapshot_reads_util.js");
load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/find_chunks_util.js");
const nodeOptions = {
// Set a large snapshot window of 10 minutes for the test.
@@ -72,9 +73,15 @@ let shardingScenarios = {
assert.commandWorked(
mongos.adminCommand({moveChunk: ns, find: {_id: 7}, to: st.shard2.shardName}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard2.shardName}));
+ assert.eq(1,
+ findChunksUtil.countChunksForNs(
+ mongos.getDB('config'), ns, {shard: st.shard0.shardName}));
+ assert.eq(1,
+ findChunksUtil.countChunksForNs(
+ mongos.getDB('config'), ns, {shard: st.shard1.shardName}));
+ assert.eq(1,
+ findChunksUtil.countChunksForNs(
+ mongos.getDB('config'), ns, {shard: st.shard2.shardName}));
flushRoutersAndRefreshShardMetadata(st, {ns});
@@ -106,9 +113,15 @@ let shardingScenarios = {
assert.commandWorked(
mongos.adminCommand({moveChunk: ns, find: {_id: 7}, to: st.shard2.shardName}));
- assert.eq(0, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard2.shardName}));
+ assert.eq(0,
+ findChunksUtil.countChunksForNs(
+ mongos.getDB('config'), ns, {shard: st.shard0.shardName}));
+ assert.eq(1,
+ findChunksUtil.countChunksForNs(
+ mongos.getDB('config'), ns, {shard: st.shard1.shardName}));
+ assert.eq(1,
+ findChunksUtil.countChunksForNs(
+ mongos.getDB('config'), ns, {shard: st.shard2.shardName}));
flushRoutersAndRefreshShardMetadata(st, {ns});
diff --git a/jstests/sharding/snapshot_cursor_commands_mongos.js b/jstests/sharding/snapshot_cursor_commands_mongos.js
index e71fffdfdc4..95579edaf7b 100644
--- a/jstests/sharding/snapshot_cursor_commands_mongos.js
+++ b/jstests/sharding/snapshot_cursor_commands_mongos.js
@@ -9,6 +9,7 @@ TestData.disableImplicitSessions = true;
load("jstests/libs/global_snapshot_reads_util.js");
load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/find_chunks_util.js");
const dbName = "test";
const shardedCollName = "shardedColl";
@@ -94,9 +95,15 @@ let shardingScenarios = {
assert.commandWorked(
mongos.adminCommand({moveChunk: ns, find: {_id: 7}, to: st.shard2.shardName}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard2.shardName}));
+ assert.eq(1,
+ findChunksUtil.countChunksForNs(
+ mongos.getDB('config'), ns, {shard: st.shard0.shardName}));
+ assert.eq(1,
+ findChunksUtil.countChunksForNs(
+ mongos.getDB('config'), ns, {shard: st.shard1.shardName}));
+ assert.eq(1,
+ findChunksUtil.countChunksForNs(
+ mongos.getDB('config'), ns, {shard: st.shard2.shardName}));
flushRoutersAndRefreshShardMetadata(st, {ns});
@@ -128,9 +135,15 @@ let shardingScenarios = {
assert.commandWorked(
mongos.adminCommand({moveChunk: ns, find: {_id: 7}, to: st.shard2.shardName}));
- assert.eq(0, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard2.shardName}));
+ assert.eq(0,
+ findChunksUtil.countChunksForNs(
+ mongos.getDB('config'), ns, {shard: st.shard0.shardName}));
+ assert.eq(1,
+ findChunksUtil.countChunksForNs(
+ mongos.getDB('config'), ns, {shard: st.shard1.shardName}));
+ assert.eq(1,
+ findChunksUtil.countChunksForNs(
+ mongos.getDB('config'), ns, {shard: st.shard2.shardName}));
flushRoutersAndRefreshShardMetadata(st, {ns});
diff --git a/jstests/sharding/snapshot_reads_target_at_point_in_time.js b/jstests/sharding/snapshot_reads_target_at_point_in_time.js
index 08fb104cf08..2185cb83147 100644
--- a/jstests/sharding/snapshot_reads_target_at_point_in_time.js
+++ b/jstests/sharding/snapshot_reads_target_at_point_in_time.js
@@ -12,11 +12,13 @@
"use strict";
load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/find_chunks_util.js");
function expectChunks(st, ns, chunks) {
for (let i = 0; i < chunks.length; i++) {
assert.eq(chunks[i],
- st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
+ findChunksUtil.countChunksForNs(
+ st.s.getDB("config"), ns, {shard: st["shard" + i].shardName}),
"unexpected number of chunks on shard " + i);
}
}
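The expectChunks helper asserts the chunk count per shard, with the i-th array entry checked against shard i. An illustrative call (not a line from this patch), matching the one-chunk-per-shard placements asserted elsewhere in this series:

// Hypothetical usage: expect one chunk on each of shard0, shard1 and shard2.
expectChunks(st, ns, [1, 1, 1]);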
diff --git a/jstests/sharding/sort1.js b/jstests/sharding/sort1.js
index ab73928836e..8d81394c936 100644
--- a/jstests/sharding/sort1.js
+++ b/jstests/sharding/sort1.js
@@ -1,6 +1,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s = new ShardingTest({name: "sort1", shards: 2, mongos: 2});
s.adminCommand({enablesharding: "test"});
@@ -29,9 +31,9 @@ s.adminCommand({
_waitForDelete: true
});
-assert.lte(3, s.config.chunks.find({ns: 'test.data'}).itcount(), "A1");
+assert.lte(3, findChunksUtil.findChunksByNs(s.config, 'test.data').itcount(), "A1");
-var temp = s.config.chunks.find({ns: 'test.data'}).sort({min: 1}).toArray();
+var temp = findChunksUtil.findChunksByNs(s.config, 'test.data').sort({min: 1}).toArray();
temp.forEach(printjsononeline);
var z = 0;
diff --git a/jstests/sharding/split_large_key.js b/jstests/sharding/split_large_key.js
index c90f792e189..e912a25f588 100644
--- a/jstests/sharding/split_large_key.js
+++ b/jstests/sharding/split_large_key.js
@@ -2,6 +2,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
// Tests
// - name: Name of test, used in collection name
// - key: key to test
@@ -38,7 +40,9 @@ tests.forEach(function(test) {
var res = configDB.adminCommand({split: "test." + collName, middle: midKey});
assert(res.ok, "Split: " + collName + " " + res.errmsg);
- assert.eq(2, configDB.chunks.find({"ns": "test." + collName}).count(), "Chunks count split");
+ assert.eq(2,
+ findChunksUtil.findChunksByNs(configDB, "test." + collName).count(),
+ "Chunks count split");
st.s0.getCollection("test." + collName).drop();
});
diff --git a/jstests/sharding/tag_auto_split.js b/jstests/sharding/tag_auto_split.js
index 46e9325052c..b4f9a7e03c9 100644
--- a/jstests/sharding/tag_auto_split.js
+++ b/jstests/sharding/tag_auto_split.js
@@ -2,13 +2,15 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s = new ShardingTest({shards: 2, mongos: 1});
assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', s.shard1.shardName);
assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
-assert.eq(1, s.config.chunks.find({"ns": "test.foo"}).itcount());
+assert.eq(1, findChunksUtil.findChunksByNs(s.config, "test.foo").itcount());
s.addShardTag(s.shard0.shardName, "a");
s.addShardTag(s.shard0.shardName, "b");
@@ -19,17 +21,17 @@ s.addTagRange("test.foo", {_id: 10}, {_id: 15}, "b");
s.startBalancer();
assert.soon(function() {
- return s.config.chunks.find({"ns": "test.foo"}).itcount() == 4;
+ return findChunksUtil.findChunksByNs(s.config, "test.foo").itcount() == 4;
}, 'Split did not occur', 3 * 60 * 1000);
s.awaitBalancerRound();
s.printShardingStatus(true);
-assert.eq(4, s.config.chunks.find({"ns": "test.foo"}).itcount(), 'Split points changed');
+assert.eq(4, findChunksUtil.findChunksByNs(s.config, "test.foo").itcount(), 'Split points changed');
-assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: MinKey}}).itcount());
-assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 5}}).itcount());
-assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 10}}).itcount());
-assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 15}}).itcount());
+assert.eq(1, findChunksUtil.findChunksByNs(s.config, "test.foo", {min: {_id: MinKey}}).itcount());
+assert.eq(1, findChunksUtil.findChunksByNs(s.config, "test.foo", {min: {_id: 5}}).itcount());
+assert.eq(1, findChunksUtil.findChunksByNs(s.config, "test.foo", {min: {_id: 10}}).itcount());
+assert.eq(1, findChunksUtil.findChunksByNs(s.config, "test.foo", {min: {_id: 15}}).itcount());
s.stop();
})();
diff --git a/jstests/sharding/tag_auto_split_partial_key.js b/jstests/sharding/tag_auto_split_partial_key.js
index dc19059b726..7215ec396ad 100644
--- a/jstests/sharding/tag_auto_split_partial_key.js
+++ b/jstests/sharding/tag_auto_split_partial_key.js
@@ -2,13 +2,15 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s = new ShardingTest({shards: 2, mongos: 1});
assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', s.shard1.shardName);
assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1, a: 1}}));
-assert.eq(1, s.config.chunks.find({"ns": "test.foo"}).itcount());
+assert.eq(1, findChunksUtil.findChunksByNs(s.config, "test.foo").itcount());
s.addShardTag(s.shard0.shardName, "a");
s.addShardTag(s.shard0.shardName, "b");
@@ -19,14 +21,14 @@ s.addTagRange("test.foo", {_id: 10}, {_id: 15}, "b");
s.startBalancer();
assert.soon(function() {
- return s.config.chunks.find({"ns": "test.foo"}).itcount() == 4;
+ return findChunksUtil.findChunksByNs(s.config, "test.foo").itcount() == 4;
}, 'Split did not occur', 3 * 60 * 1000);
s.awaitBalancerRound();
s.printShardingStatus(true);
-assert.eq(4, s.config.chunks.find({"ns": "test.foo"}).itcount(), 'Split points changed');
+assert.eq(4, findChunksUtil.findChunksByNs(s.config, "test.foo").itcount(), 'Split points changed');
-s.config.chunks.find({"ns": "test.foo"}).forEach(function(chunk) {
+findChunksUtil.findChunksByNs(s.config, "test.foo").forEach(function(chunk) {
var numFields = 0;
for (var x in chunk.min) {
numFields++;
@@ -36,10 +38,15 @@ s.config.chunks.find({"ns": "test.foo"}).forEach(function(chunk) {
});
// Check chunk mins correspond exactly to tag range boundaries, extended to match shard key
-assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: MinKey, a: MinKey}}).itcount());
-assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 5, a: MinKey}}).itcount());
-assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 10, a: MinKey}}).itcount());
-assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 15, a: MinKey}}).itcount());
+assert.eq(
+ 1,
+ findChunksUtil.findChunksByNs(s.config, "test.foo", {min: {_id: MinKey, a: MinKey}}).itcount());
+assert.eq(
+ 1, findChunksUtil.findChunksByNs(s.config, "test.foo", {min: {_id: 5, a: MinKey}}).itcount());
+assert.eq(
+ 1, findChunksUtil.findChunksByNs(s.config, "test.foo", {min: {_id: 10, a: MinKey}}).itcount());
+assert.eq(
+ 1, findChunksUtil.findChunksByNs(s.config, "test.foo", {min: {_id: 15, a: MinKey}}).itcount());
s.stop();
})();
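The assertions above rely on tag range boundaries declared on the {_id} prefix being extended to the full {_id: 1, a: 1} shard key with MinKey in the trailing position, which is why the chunk minimums come out as {_id: 5, a: MinKey} and so on. A minimal sketch of that extension (hypothetical helper, not part of the patch):

// Pad a boundary declared on a shard-key prefix out to the full shard key,
// filling the missing trailing fields with MinKey.
function extendBoundToShardKey(bound, shardKeyPattern) {
    var extended = {};
    for (var field in shardKeyPattern) {
        extended[field] = bound.hasOwnProperty(field) ? bound[field] : MinKey;
    }
    return extended;
}
// extendBoundToShardKey({_id: 5}, {_id: 1, a: 1}) yields {_id: 5, a: MinKey}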
diff --git a/jstests/sharding/tag_range.js b/jstests/sharding/tag_range.js
index 0cfb3cd35a1..1e497a11b8c 100644
--- a/jstests/sharding/tag_range.js
+++ b/jstests/sharding/tag_range.js
@@ -2,6 +2,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
const st = new ShardingTest({shards: 2, mongos: 1});
assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
@@ -12,7 +14,7 @@ function countTags(num, message) {
assert.eq(st.config.tags.count(), num, message);
}
-assert.eq(1, st.config.chunks.count({"ns": "test.tag_range"}));
+assert.eq(1, findChunksUtil.countChunksForNs(st.config, "test.tag_range"));
st.addShardTag(st.shard0.shardName, 'a');
st.addShardTag(st.shard0.shardName, 'b');
diff --git a/jstests/sharding/top_chunk_autosplit.js b/jstests/sharding/top_chunk_autosplit.js
index b648e2afcab..7d969aed0c8 100644
--- a/jstests/sharding/top_chunk_autosplit.js
+++ b/jstests/sharding/top_chunk_autosplit.js
@@ -4,6 +4,7 @@
* @tags: [resource_intensive]
*/
load('jstests/sharding/autosplit_include.js');
+load("jstests/sharding/libs/find_chunks_util.js");
function shardSetup(shardConfig, dbName, collName) {
var st = new ShardingTest(shardConfig);
@@ -20,11 +21,11 @@ function shardSetup(shardConfig, dbName, collName) {
function getShardWithTopChunk(configDB, lowOrHigh, ns) {
// lowOrHigh: 1 low "top chunk", -1 high "top chunk"
print(ns);
- print(configDB.chunks.count({"ns": ns}));
+ print(findChunksUtil.countChunksForNs(configDB, ns));
print(configDB.chunks.count());
print(JSON.stringify(configDB.chunks.findOne()));
print(JSON.stringify(configDB.chunks.findOne({"ns": {$ne: "config.system.sessions"}})));
- return configDB.chunks.find({"ns": ns}).sort({min: lowOrHigh}).limit(1).next().shard;
+ return findChunksUtil.findChunksByNs(configDB, ns).sort({min: lowOrHigh}).limit(1).next().shard;
}
function getNumberOfChunks(configDB) {
diff --git a/jstests/sharding/transactions_reject_writes_for_moved_chunks.js b/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
index d15d1d0e241..2abf64fef70 100644
--- a/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
+++ b/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
@@ -10,10 +10,13 @@
(function() {
"use strict";
+load("jstests/sharding/libs/find_chunks_util.js");
+
function expectChunks(st, ns, chunks) {
for (let i = 0; i < chunks.length; i++) {
assert.eq(chunks[i],
- st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
+ findChunksUtil.countChunksForNs(
+ st.s.getDB("config"), ns, {shard: st["shard" + i].shardName}),
"unexpected number of chunks on shard " + i);
}
}
diff --git a/jstests/sharding/transactions_snapshot_errors_first_statement.js b/jstests/sharding/transactions_snapshot_errors_first_statement.js
index 6ffb295d2a3..bf82d041acb 100644
--- a/jstests/sharding/transactions_snapshot_errors_first_statement.js
+++ b/jstests/sharding/transactions_snapshot_errors_first_statement.js
@@ -11,6 +11,7 @@
"use strict";
load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/find_chunks_util.js");
const dbName = "test";
const collName = "foo";
@@ -138,8 +139,10 @@ assert.commandWorked(
jsTestLog("One shard sharded transaction");
-assert.eq(2, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
-assert.eq(0, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+assert.eq(2,
+ findChunksUtil.countChunksForNs(st.s.getDB('config'), ns, {shard: st.shard0.shardName}));
+assert.eq(0,
+ findChunksUtil.countChunksForNs(st.s.getDB('config'), ns, {shard: st.shard1.shardName}));
for (let errorCode of kSnapshotErrors) {
runTest(st, collName, 1, errorCode, true /* isSharded */);
@@ -148,8 +151,10 @@ for (let errorCode of kSnapshotErrors) {
jsTestLog("Two shard sharded transaction");
assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 15}, to: st.shard1.shardName}));
-assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
-assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+assert.eq(1,
+ findChunksUtil.countChunksForNs(st.s.getDB('config'), ns, {shard: st.shard0.shardName}));
+assert.eq(1,
+ findChunksUtil.countChunksForNs(st.s.getDB('config'), ns, {shard: st.shard1.shardName}));
for (let errorCode of kSnapshotErrors) {
runTest(st, collName, 2, errorCode, true /* isSharded */);
diff --git a/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js b/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js
index 87a29e271fa..77e5771c542 100644
--- a/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js
+++ b/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js
@@ -10,6 +10,7 @@
"use strict";
load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/find_chunks_util.js");
const dbName = "test";
const collName = "foo";
@@ -97,8 +98,10 @@ assert.commandWorked(
jsTestLog("One shard transaction");
-assert.eq(2, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
-assert.eq(0, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+assert.eq(2,
+ findChunksUtil.countChunksForNs(st.s.getDB('config'), ns, {shard: st.shard0.shardName}));
+assert.eq(0,
+ findChunksUtil.countChunksForNs(st.s.getDB('config'), ns, {shard: st.shard1.shardName}));
for (let errorCode of kSnapshotErrors) {
runTest(st, collName, errorCode, true /* isSharded */);
@@ -107,8 +110,10 @@ for (let errorCode of kSnapshotErrors) {
jsTestLog("Two shard transaction");
assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 15}, to: st.shard1.shardName}));
-assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
-assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+assert.eq(1,
+ findChunksUtil.countChunksForNs(st.s.getDB('config'), ns, {shard: st.shard0.shardName}));
+assert.eq(1,
+ findChunksUtil.countChunksForNs(st.s.getDB('config'), ns, {shard: st.shard1.shardName}));
// Multi shard case simulates adding a new participant that can no longer support the already
// chosen read timestamp.
diff --git a/jstests/sharding/transactions_stale_shard_version_errors.js b/jstests/sharding/transactions_stale_shard_version_errors.js
index 12debac39ab..172532dfe43 100644
--- a/jstests/sharding/transactions_stale_shard_version_errors.js
+++ b/jstests/sharding/transactions_stale_shard_version_errors.js
@@ -10,11 +10,13 @@
load("jstests/sharding/libs/sharded_transactions_helpers.js");
load("jstests/multiVersion/libs/verify_versions.js");
+load("jstests/sharding/libs/find_chunks_util.js");
function expectChunks(st, ns, chunks) {
for (let i = 0; i < chunks.length; i++) {
assert.eq(chunks[i],
- st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
+ findChunksUtil.countChunksForNs(
+ st.s.getDB("config"), ns, {shard: st["shard" + i].shardName}),
"unexpected number of chunks on shard " + i);
}
}
diff --git a/jstests/sharding/unowned_doc_filtering.js b/jstests/sharding/unowned_doc_filtering.js
index 07c42453ca9..422695a35dc 100644
--- a/jstests/sharding/unowned_doc_filtering.js
+++ b/jstests/sharding/unowned_doc_filtering.js
@@ -17,6 +17,7 @@ TestData.skipCheckOrphans = true;
"use strict";
load("jstests/sharding/libs/chunk_bounds_util.js");
+load("jstests/sharding/libs/find_chunks_util.js");
/*
* Asserts that find and count command filter out unowned documents.
@@ -35,7 +36,7 @@ function assertOrphanedDocsFiltered(coll, ownedDocs, unownedDocs, countFilters)
function runTest(st, coll, ownedDocs, unownedDocs, isHashed) {
let ns = coll.getFullName();
- let chunkDocs = st.s.getDB('config').chunks.find({ns: ns}).toArray();
+ let chunkDocs = findChunksUtil.findChunksByNs(st.s.getDB('config'), ns).toArray();
let shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);
// Do regular inserts.
diff --git a/jstests/sharding/version1.js b/jstests/sharding/version1.js
index 948ef8740ac..c03de0b2040 100644
--- a/jstests/sharding/version1.js
+++ b/jstests/sharding/version1.js
@@ -1,6 +1,8 @@
(function() {
'use strict';
+load("jstests/sharding/libs/find_chunks_util.js");
+
var s = new ShardingTest({name: "version1", shards: 1});
assert.commandWorked(s.s0.adminCommand({enablesharding: "alleyinsider"}));
@@ -39,7 +41,7 @@ assert.commandFailed(a.runCommand({
}),
"should have failed because version is config is 1|0");
-var epoch = s.getDB('config').chunks.findOne({"ns": "alleyinsider.foo"}).lastmodEpoch;
+var epoch = findChunksUtil.findOneChunkByNs(s.getDB('config'), "alleyinsider.foo").lastmodEpoch;
assert.commandWorked(a.runCommand({
setShardVersion: "alleyinsider.foo",
configdb: s._configDB,
diff --git a/jstests/sharding/version2.js b/jstests/sharding/version2.js
index 94f82e8463f..0e2be7375d9 100644
--- a/jstests/sharding/version2.js
+++ b/jstests/sharding/version2.js
@@ -1,5 +1,8 @@
(function() {
'use strict';
+
+load("jstests/sharding/libs/find_chunks_util.js");
+
/**
* One-shard cluster test do not need to be tested in the multiversion suites.
* @tags: [multiversion_incompatible]
@@ -16,7 +19,7 @@ var a = s.shard0.getDB("admin");
assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.i, 0);
assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.i, 0);
-var fooEpoch = s.getDB('config').chunks.findOne({ns: 'alleyinsider.foo'}).lastmodEpoch;
+var fooEpoch = findChunksUtil.findOneChunkByNs(s.getDB('config'), 'alleyinsider.foo').lastmodEpoch;
assert.commandWorked(a.runCommand({
setShardVersion: "alleyinsider.foo",
configdb: s._configDB,
diff --git a/jstests/sharding/write_cmd_auto_split.js b/jstests/sharding/write_cmd_auto_split.js
index 197d29ccc90..8dc25082002 100644
--- a/jstests/sharding/write_cmd_auto_split.js
+++ b/jstests/sharding/write_cmd_auto_split.js
@@ -4,6 +4,7 @@
(function() {
'use strict';
load('jstests/sharding/autosplit_include.js');
+load("jstests/sharding/libs/find_chunks_util.js");
var st = new ShardingTest({shards: 1, other: {chunkSize: 1, enableAutoSplit: true}});
@@ -16,7 +17,7 @@ var testDB = st.s.getDB('test');
jsTest.log('Test single batch insert should auto-split');
-assert.eq(1, configDB.chunks.find({"ns": "test.insert"}).itcount());
+assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.insert").itcount());
// This should result in a little over 3MB inserted into the chunk, so with
// a max chunk size of 1MB we'd expect the autosplitter to split this into
@@ -30,7 +31,7 @@ waitForOngoingChunkSplits(st);
// Inserted batch is a multiple of the chunkSize, expect the chunks to split into
// more than 2.
-assert.gt(configDB.chunks.find({"ns": "test.insert"}).itcount(), 2);
+assert.gt(findChunksUtil.findChunksByNs(configDB, "test.insert").itcount(), 2);
testDB.dropDatabase();
jsTest.log('Test single batch update should auto-split');
@@ -38,7 +39,7 @@ jsTest.log('Test single batch update should auto-split');
assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));
-assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());
+assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.update").itcount());
for (var x = 0; x < 2100; x++) {
assert.commandWorked(testDB.runCommand({
@@ -51,7 +52,7 @@ for (var x = 0; x < 2100; x++) {
waitForOngoingChunkSplits(st);
-assert.gt(configDB.chunks.find({"ns": "test.update"}).itcount(), 1);
+assert.gt(findChunksUtil.findChunksByNs(configDB, "test.update").itcount(), 1);
testDB.dropDatabase();
jsTest.log('Test single delete should not auto-split');
@@ -59,7 +60,7 @@ jsTest.log('Test single delete should not auto-split');
assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));
-assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
+assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.delete").itcount());
for (var x = 0; x < 1100; x++) {
assert.commandWorked(testDB.runCommand({
@@ -75,7 +76,7 @@ for (var x = 0; x < 1100; x++) {
// done when really it was just in progress.
waitForOngoingChunkSplits(st);
-assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
+assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.delete").itcount());
testDB.dropDatabase();
jsTest.log('Test batched insert should auto-split');
@@ -83,7 +84,7 @@ jsTest.log('Test batched insert should auto-split');
assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));
-assert.eq(1, configDB.chunks.find({"ns": "test.insert"}).itcount());
+assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.insert").itcount());
// Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
// we are going to be conservative.
@@ -100,7 +101,7 @@ for (var x = 0; x < 2100; x += 400) {
waitForOngoingChunkSplits(st);
-assert.gt(configDB.chunks.find({"ns": "test.insert"}).itcount(), 1);
+assert.gt(findChunksUtil.findChunksByNs(configDB, "test.insert").itcount(), 1);
testDB.dropDatabase();
jsTest.log('Test batched update should auto-split');
@@ -108,7 +109,7 @@ jsTest.log('Test batched update should auto-split');
assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));
-assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());
+assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.update").itcount());
for (var x = 0; x < 2100; x += 400) {
var docs = [];
@@ -124,7 +125,7 @@ for (var x = 0; x < 2100; x += 400) {
waitForOngoingChunkSplits(st);
-assert.gt(configDB.chunks.find({"ns": "test.update"}).itcount(), 1);
+assert.gt(findChunksUtil.findChunksByNs(configDB, "test.update").itcount(), 1);
testDB.dropDatabase();
jsTest.log('Test batched delete should not auto-split');
@@ -132,7 +133,7 @@ jsTest.log('Test batched delete should not auto-split');
assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));
-assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
+assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.delete").itcount());
for (var x = 0; x < 2100; x += 400) {
var docs = [];
@@ -155,7 +156,7 @@ for (var x = 0; x < 2100; x += 400) {
// done when really it was just in progress.
waitForOngoingChunkSplits(st);
-assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
+assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.delete").itcount());
st.stop();
})();
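The single-batch insert case above leans on simple size arithmetic: a little over 3MB written into a collection with a 1MB max chunk size should leave several chunks once the auto-splitter catches up, so the test only asserts a count strictly greater than 2. A back-of-the-envelope check with assumed figures:

// Illustrative numbers only; the exact bytes inserted are not shown here.
var assumedBytesInserted = 3 * 1024 * 1024 + 128 * 1024;  // "a little over 3MB"
var maxChunkSizeBytes = 1024 * 1024;                      // chunkSize: 1 in the ShardingTest options
var roughChunkCount = Math.ceil(assumedBytesInserted / maxChunkSizeBytes);
print(roughChunkCount);  // 4 -- comfortably above the > 2 the assertion requires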
diff --git a/jstests/sharding/zone_changes_compound.js b/jstests/sharding/zone_changes_compound.js
index f843ff467de..f351a736689 100644
--- a/jstests/sharding/zone_changes_compound.js
+++ b/jstests/sharding/zone_changes_compound.js
@@ -5,6 +5,7 @@
'use strict';
load("jstests/sharding/libs/zone_changes_util.js");
+load("jstests/sharding/libs/find_chunks_util.js");
let st = new ShardingTest({shards: 3});
let primaryShard = st.shard0;
@@ -22,7 +23,7 @@ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: shardKey}));
jsTest.log("Insert docs and check that they end up on the primary shard.");
let docs = [{x: -10, y: -10}, {x: -1, y: -1}, {x: 0, y: 0}];
assert.commandWorked(coll.insert(docs));
-assert.eq(1, configDB.chunks.count({ns: ns}));
+assert.eq(1, findChunksUtil.countChunksForNs(configDB, ns));
assert.eq(docs.length, primaryShard.getCollection(ns).count());
jsTest.log("Add shards to zones and assign zone key ranges.");
diff --git a/jstests/sharding/zone_changes_hashed.js b/jstests/sharding/zone_changes_hashed.js
index ee74a293a4d..83265fac92f 100644
--- a/jstests/sharding/zone_changes_hashed.js
+++ b/jstests/sharding/zone_changes_hashed.js
@@ -5,6 +5,7 @@
'use strict';
load("jstests/sharding/libs/zone_changes_util.js");
+load("jstests/sharding/libs/find_chunks_util.js");
/**
* Adds each shard to the corresponding zone in zoneTags, and makes the zone range equal
@@ -61,7 +62,7 @@ st.ensurePrimaryShard(dbName, primaryShard.shardName);
jsTest.log(
"Shard the collection. The command creates two chunks on each of the shards by default.");
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: shardKey}));
-let chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+let chunkDocs = findChunksUtil.findChunksByNs(configDB, ns).sort({min: 1}).toArray();
let shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);
jsTest.log("Insert docs (one for each chunk) and check that they end up on the right shards.");
@@ -81,7 +82,7 @@ docs.forEach(function(doc) {
}
});
assert.eq(docs.length, (new Set(docChunkBounds)).size);
-assert.eq(docs.length, configDB.chunks.count({ns: ns}));
+assert.eq(docs.length, findChunksUtil.countChunksForNs(configDB, ns));
jsTest.log(
"Assign each shard a zone, make each zone range equal to the chunk range for the shard, " +
diff --git a/jstests/sharding/zone_changes_range.js b/jstests/sharding/zone_changes_range.js
index 49710624f47..2f2963da220 100644
--- a/jstests/sharding/zone_changes_range.js
+++ b/jstests/sharding/zone_changes_range.js
@@ -5,6 +5,7 @@
'use strict';
load("jstests/sharding/libs/zone_changes_util.js");
+load("jstests/sharding/libs/find_chunks_util.js");
let st = new ShardingTest({shards: 3});
let primaryShard = st.shard0;
@@ -27,7 +28,7 @@ assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 20}}));
jsTest.log("Insert docs (one for each chunk) and check that they end up on the primary shard.");
let docs = [{x: -15}, {x: -5}, {x: 5}, {x: 15}, {x: 25}];
-assert.eq(docs.length, configDB.chunks.count({ns: ns}));
+assert.eq(docs.length, findChunksUtil.countChunksForNs(configDB, ns));
assert.commandWorked(coll.insert(docs));
assert.eq(docs.length, primaryShard.getCollection(ns).count());