41 files changed, 269 insertions, 196 deletions
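The pattern repeated across the 41 test files below is the same one-line change: every lookup against the config.chunks collection gains an ns filter, so a test only counts or fetches the chunks belonging to the collection it created instead of every chunk in the cluster. A minimal sketch of the before/after query shapes, using the test.foo namespace that most of these tests use (the variable names here are illustrative, not from the patch):

    // Before: counts chunks for every sharded collection in the cluster, which
    // over-counts as soon as unrelated namespaces (e.g. config.system.sessions)
    // also have chunks.
    var total = s.config.chunks.count();

    // After: scope the query to the namespace under test.
    var scoped = s.config.chunks.count({ns: "test.foo"});
    var ordered = s.config.chunks.find({ns: "test.foo"}).sort({min: 1}).toArray();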
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
index 0c9a5236541..4fcfde18f83 100644
--- a/jstests/sharding/addshard1.js
+++ b/jstests/sharding/addshard1.js
@@ -67,7 +67,7 @@
 s.adminCommand({shardcollection: "testDB.foo", key: {a: 1}});
 s.adminCommand({split: "testDB.foo", middle: {a: Math.floor(numObjs / 2)}});
 assert.eq(2,
-          s.config.chunks.count(),
+          s.config.chunks.count({"ns": "testDB.foo"}),
           "wrong chunk number after splitting collection that existed before");
 assert.eq(
     numObjs, sdb1.foo.count(), "wrong count after splitting collection that existed before");
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index c2cb0cbcf91..f598c43da8a 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -178,9 +178,9 @@
 s.startBalancer(60000);
 
 assert.soon(function() {
-    var d1Chunks = s.getDB("config").chunks.count({shard: "d1"});
-    var d2Chunks = s.getDB("config").chunks.count({shard: "d2"});
-    var totalChunks = s.getDB("config").chunks.count({ns: "test.foo"});
+    var d1Chunks = s.getDB("config").chunks.count({ns: 'test.foo', shard: "d1"});
+    var d2Chunks = s.getDB("config").chunks.count({ns: 'test.foo', shard: "d2"});
+    var totalChunks = s.getDB("config").chunks.count({ns: 'test.foo'});
 
     print("chunks: " + d1Chunks + " " + d2Chunks + " " + totalChunks);
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index 840241bd410..497a5748466 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -211,7 +211,7 @@
     checkCommandSucceeded(adminDB, {isdbgrid: 1});
     checkCommandSucceeded(adminDB, {ismaster: 1});
     checkCommandSucceeded(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
-    var chunk = configDB.chunks.findOne({shard: st.rs0.name});
+    var chunk = configDB.chunks.findOne({ns: 'test.foo', shard: st.rs0.name});
     checkCommandSucceeded(
         adminDB, {moveChunk: 'test.foo', find: chunk.min, to: st.rs1.name, _waitForDelete: true});
diff --git a/jstests/sharding/auto_rebalance_parallel.js b/jstests/sharding/auto_rebalance_parallel.js
index 955319b8c5d..4971fc19f7f 100644
--- a/jstests/sharding/auto_rebalance_parallel.js
+++ b/jstests/sharding/auto_rebalance_parallel.js
@@ -26,18 +26,36 @@
     assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 20}, st.shard1.shardName));
     assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 30}, st.shard1.shardName));
 
-    assert.eq(2, st.s0.getDB('config').chunks.find({shard: st.shard0.shardName}).itcount());
-    assert.eq(2, st.s0.getDB('config').chunks.find({shard: st.shard1.shardName}).itcount());
+    assert.eq(2,
+              st.s0.getDB('config')
+                  .chunks.find({ns: "TestDB.TestColl", shard: st.shard0.shardName})
+                  .itcount());
+    assert.eq(2,
+              st.s0.getDB('config')
+                  .chunks.find({ns: "TestDB.TestColl", shard: st.shard1.shardName})
+                  .itcount());
 
     // Do enable the balancer and wait for a single balancer round
     st.startBalancer();
     st.awaitBalancerRound();
     st.stopBalancer();
 
-    assert.eq(1, st.s0.getDB('config').chunks.find({shard: st.shard0.shardName}).itcount());
-    assert.eq(1, st.s0.getDB('config').chunks.find({shard: st.shard1.shardName}).itcount());
-    assert.eq(1, st.s0.getDB('config').chunks.find({shard: st.shard2.shardName}).itcount());
-    assert.eq(1, st.s0.getDB('config').chunks.find({shard: st.shard3.shardName}).itcount());
+    assert.eq(1,
+              st.s0.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+                  .itcount());
+    assert.eq(1,
+              st.s0.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+                  .itcount());
+    assert.eq(1,
+              st.s0.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard2.shardName})
+                  .itcount());
+    assert.eq(1,
+              st.s0.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard3.shardName})
+                  .itcount());
 
     st.stop();
 })();
diff --git a/jstests/sharding/auto_rebalance_parallel_replica_sets.js b/jstests/sharding/auto_rebalance_parallel_replica_sets.js
index ddc533b843b..a5a6d1bde09 100644
--- a/jstests/sharding/auto_rebalance_parallel_replica_sets.js
+++ b/jstests/sharding/auto_rebalance_parallel_replica_sets.js
@@ -26,18 +26,36 @@
     assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 20}, st.shard1.shardName));
     assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 30}, st.shard1.shardName));
 
-    assert.eq(2, st.s0.getDB('config').chunks.find({shard: st.shard0.shardName}).itcount());
-    assert.eq(2, st.s0.getDB('config').chunks.find({shard: st.shard1.shardName}).itcount());
+    assert.eq(2,
+              st.s0.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+                  .itcount());
+    assert.eq(2,
+              st.s0.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+                  .itcount());
 
     // Do enable the balancer and wait for a single balancer round
     st.startBalancer();
    st.awaitBalancerRound();
     st.stopBalancer();
 
-    assert.eq(1, st.s0.getDB('config').chunks.find({shard: st.shard0.shardName}).itcount());
-    assert.eq(1, st.s0.getDB('config').chunks.find({shard: st.shard1.shardName}).itcount());
-    assert.eq(1, st.s0.getDB('config').chunks.find({shard: st.shard2.shardName}).itcount());
-    assert.eq(1, st.s0.getDB('config').chunks.find({shard: st.shard3.shardName}).itcount());
+    assert.eq(1,
+              st.s0.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+                  .itcount());
+    assert.eq(1,
+              st.s0.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+                  .itcount());
+    assert.eq(1,
+              st.s0.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard2.shardName})
+                  .itcount());
+    assert.eq(1,
+              st.s0.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard3.shardName})
+                  .itcount());
 
     // Ensure the range deleter quiesces
     st.rs0.awaitReplication();
diff --git a/jstests/sharding/autosplit.js b/jstests/sharding/autosplit.js
index 0eba386b6a3..52cec4ac859 100644
--- a/jstests/sharding/autosplit.js
+++ b/jstests/sharding/autosplit.js
@@ -35,7 +35,7 @@
     var counts = [];
 
     s.printChunks();
-    counts.push(s.config.chunks.count());
+    counts.push(s.config.chunks.count({"ns": "test.foo"}));
     assert.eq(100, db.foo.find().itcount());
 
     print("datasize: " +
@@ -49,7 +49,7 @@
     s.printChunks();
     s.printChangeLog();
-    counts.push(s.config.chunks.count());
+    counts.push(s.config.chunks.count({"ns": "test.foo"}));
 
     bulk = coll.initializeUnorderedBulkOp();
     for (; i < 400; i++) {
@@ -59,7 +59,7 @@
     s.printChunks();
     s.printChangeLog();
-    counts.push(s.config.chunks.count());
+    counts.push(s.config.chunks.count({"ns": "test.foo"}));
 
     bulk = coll.initializeUnorderedBulkOp();
     for (; i < 700; i++) {
@@ -69,7 +69,7 @@
     s.printChunks();
     s.printChangeLog();
-    counts.push(s.config.chunks.count());
+    counts.push(s.config.chunks.count({"ns": "test.foo"}));
 
     assert(counts[counts.length - 1] > counts[0], "counts 1 : " + tojson(counts));
     var sorted = counts.slice(0);
diff --git a/jstests/sharding/autosplit_heuristics.js b/jstests/sharding/autosplit_heuristics.js
index 659f74725b0..6b7f0e2b290 100644
--- a/jstests/sharding/autosplit_heuristics.js
+++ b/jstests/sharding/autosplit_heuristics.js
@@ -73,7 +73,7 @@
     // Check that all chunks (except the two extreme chunks)
     // have been split at least once + 1 extra chunk as reload buffer
-    assert.gte(config.chunks.count(), numChunks * 2 + 3);
+    assert.gte(config.chunks.count({"ns": "foo.hashBar"}), numChunks * 2 + 3);
 
     jsTest.log("DONE!");
diff --git a/jstests/sharding/autosplit_with_balancer.js b/jstests/sharding/autosplit_with_balancer.js
index f1853211e4c..7130925edf2 100644
--- a/jstests/sharding/autosplit_with_balancer.js
+++ b/jstests/sharding/autosplit_with_balancer.js
@@ -1,6 +1,7 @@
 (function() {
+    'use strict';
 
-    var s = new ShardingTest({name: "auto2", shards: 2, mongos: 2, other: {enableAutoSplit: true}});
+    var s = new ShardingTest({shards: 2, mongos: 2, other: {enableAutoSplit: true}});
 
     s.adminCommand({enablesharding: "test"});
     s.ensurePrimaryShard('test', 'shard0001');
@@ -39,6 +40,8 @@
         tojson(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"})));
     s.printChunks();
 
+    var counta, countb;
+
     function doCountsGlobal() {
         counta = s._connections[0].getDB("test").foo.count();
         countb = s._connections[1].getDB("test").foo.count();
@@ -90,7 +93,7 @@
     print("checkpoint C");
 
-    assert(Array.unique(s.config.chunks.find().toArray().map(function(z) {
+    assert(Array.unique(s.config.chunks.find({ns: 'test.foo'}).toArray().map(function(z) {
               return z.shard;
           })).length == 2,
           "should be using both servers");
@@ -103,9 +106,9 @@
     print("checkpoint D");
 
-    // test not-sharded cursors
+    // Test non-sharded cursors
     db = s.getDB("test2");
-    t = db.foobar;
+    var t = db.foobar;
     for (i = 0; i < 100; i++)
         t.save({_id: i});
     for (i = 0; i < 100; i++) {
@@ -126,7 +129,7 @@
     for (i = 0; i < 20; i++) {
         var conn = new Mongo(db.getMongo().host);
-        temp2 = conn.getDB("test2").foobar;
+        var temp2 = conn.getDB("test2").foobar;
         assert.eq(conn._fullNameSpace, t._fullNameSpace, "check close 1");
         assert(temp2.findOne(), "check close 2");
         conn.close();
@@ -143,5 +146,4 @@
     print("checkpoint G");
 
     s.stop();
-
 })();
diff --git a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
index 499df7c74dc..1468fb79f1e 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
@@ -22,7 +22,7 @@ load('./jstests/libs/cleanup_orphaned_util.js');
     assert.commandWorked(admin.runCommand({shardCollection: ns, key: {key: 'hashed'}}));
 
     // Makes four chunks by default, two on each shard.
-    var chunks = st.config.chunks.find().sort({min: 1}).toArray();
+    var chunks = st.config.chunks.find({ns: ns}).sort({min: 1}).toArray();
     assert.eq(4, chunks.length);
 
     var chunkWithDoc = chunks[1];
diff --git a/jstests/sharding/count1.js b/jstests/sharding/count1.js
index 4686d317f6d..3aaa1b25162 100644
--- a/jstests/sharding/count1.js
+++ b/jstests/sharding/count1.js
@@ -35,7 +35,7 @@
     primary = s.getPrimaryShard("test").getDB("test");
     secondary = s.getOther(primary).getDB("test");
 
-    assert.eq(1, s.config.chunks.count(), "sanity check A");
+    assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check A");
 
     db.foo.save({_id: 1, name: "eliot"});
     db.foo.save({_id: 2, name: "sara"});
diff --git a/jstests/sharding/count2.js b/jstests/sharding/count2.js
index 8b1346fd0d4..22d4a80a175 100644
--- a/jstests/sharding/count2.js
+++ b/jstests/sharding/count2.js
@@ -10,7 +10,7 @@
     var db1 = s1.getDB("test").foo;
     var db2 = s2.getDB("test").foo;
 
-    assert.eq(1, s1.config.chunks.count(), "sanity check A");
+    assert.eq(1, s1.config.chunks.count({"ns": "test.foo"}), "sanity check A");
 
     db1.save({name: "aaa"});
     db1.save({name: "bbb"});
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index 0644aee1dca..cf5bfb27c1e 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -21,7 +21,9 @@
         bulk.insert({_id: i});
     }
     assert.writeOK(bulk.execute());
-    assert.eq(1, s.config.chunks.count(), "test requires collection to have one chunk initially");
+    assert.eq(1,
+              s.config.chunks.count({"ns": "test.foo"}),
+              "test requires collection to have one chunk initially");
 
     // we'll split the collection in two and move the second chunk while three cursors are open
     // cursor1 still has more data in the first chunk, the one that didn't move
@@ -36,7 +38,7 @@
     s.adminCommand({split: "test.foo", middle: {_id: 5}});
     s.adminCommand({movechunk: "test.foo", find: {_id: 5}, to: secondary.getMongo().name});
-    assert.eq(2, s.config.chunks.count());
+    assert.eq(2, s.config.chunks.count({"ns": "test.foo"}));
 
     // the cursors should not have been affected
     assert.eq(numObjs, cursor1.itcount(), "c1");
diff --git a/jstests/sharding/findandmodify1.js b/jstests/sharding/findandmodify1.js
index d1b707b0b01..59d2745861f 100644
--- a/jstests/sharding/findandmodify1.js
+++ b/jstests/sharding/findandmodify1.js
@@ -47,9 +47,12 @@
     }
 
     s.printChunks();
-    assert.eq(numObjs / 2, s.config.chunks.count(), 'Split was incorrect');
-    assert.eq(numObjs / 4, s.config.chunks.count({shard: s.shard0.shardName}));
-    assert.eq(numObjs / 4, s.config.chunks.count({shard: s.shard1.shardName}));
+    assert.eq(
+        numObjs / 2, s.config.chunks.count({"ns": "test.sharded_coll"}), 'Split was incorrect');
+    assert.eq(numObjs / 4,
+              s.config.chunks.count({shard: s.shard0.shardName, "ns": "test.sharded_coll"}));
+    assert.eq(numObjs / 4,
+              s.config.chunks.count({shard: s.shard1.shardName, "ns": "test.sharded_coll"}));
 
     // update
     for (var i = 0; i < numObjs; i++) {
diff --git a/jstests/sharding/hash_basic.js b/jstests/sharding/hash_basic.js
index 1bd3376eecb..fa4947474a8 100644
--- a/jstests/sharding/hash_basic.js
+++ b/jstests/sharding/hash_basic.js
@@ -3,55 +3,54 @@
     var st = new ShardingTest({shards: 2, chunkSize: 1});
 
-    var testDB = st.s.getDB('test');
-    assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+    assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
     st.ensurePrimaryShard('test', 'shard0001');
-    assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 'hashed'}}));
+    assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 'hashed'}}));
 
-    var configDB = st.s.getDB('config');
-    var chunkCountBefore = configDB.chunks.count();
+    var configDB = st.s0.getDB('config');
+    var chunkCountBefore = configDB.chunks.count({ns: 'test.user'});
     assert.gt(chunkCountBefore, 1);
 
+    var testDB = st.s0.getDB('test');
     for (var x = 0; x < 1000; x++) {
         testDB.user.insert({x: x});
     }
 
-    var chunkDoc = configDB.chunks.find().sort({min: 1}).next();
+    var chunkDoc = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).next();
     var min = chunkDoc.min;
     var max = chunkDoc.max;
 
-    // Assumption: There are documents in the MinKey chunk, otherwise, splitVector will
-    // fail. Note: This chunk will have 267 documents if collection was presplit to 4.
-    var cmdRes = testDB.adminCommand({split: 'test.user', bounds: [min, max]});
-    assert(cmdRes.ok,
-           'split on bounds failed on chunk[' + tojson(chunkDoc) + ']: ' + tojson(cmdRes));
+    // Assumption: There are documents in the MinKey chunk, otherwise, splitVector will fail.
+    //
+    // Note: This chunk will have 267 documents if collection was presplit to 4.
+    var cmdRes =
+        assert.commandWorked(st.s0.adminCommand({split: 'test.user', bounds: [min, max]}),
+                             'Split on bounds failed for chunk [' + tojson(chunkDoc) + ']');
 
-    chunkDoc = configDB.chunks.find().sort({min: 1}).skip(1).next();
-    var middle = NumberLong(chunkDoc.min.x + 1000000);
+    chunkDoc = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).skip(1).next();
 
-    cmdRes = testDB.adminCommand({split: 'test.user', middle: {x: middle}});
-    assert(cmdRes.ok, 'split failed with middle [' + middle + ']: ' + tojson(cmdRes));
+    var middle = NumberLong(chunkDoc.min.x + 1000000);
+    cmdRes = assert.commandWorked(st.s0.adminCommand({split: 'test.user', middle: {x: middle}}),
+                                  'Split failed with middle [' + middle + ']');
 
-    cmdRes = testDB.adminCommand({split: 'test.user', find: {x: 7}});
-    assert(cmdRes.ok, 'split failed with find: ' + tojson(cmdRes));
+    cmdRes = assert.commandWorked(st.s0.adminCommand({split: 'test.user', find: {x: 7}}),
+                                  'Split failed with find.');
 
-    var chunkList = configDB.chunks.find().sort({min: 1}).toArray();
+    var chunkList = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).toArray();
     assert.eq(chunkCountBefore + 3, chunkList.length);
 
     chunkList.forEach(function(chunkToMove) {
         var toShard = configDB.shards.findOne({_id: {$ne: chunkToMove.shard}})._id;
 
-        print(jsTestName() + " - moving chunk " + chunkToMove._id + " from shard " +
-              chunkToMove.shard + " to " + toShard + "...");
+        print('Moving chunk ' + chunkToMove._id + ' from shard ' + chunkToMove.shard + ' to ' +
+              toShard + ' ...');
 
-        var cmdRes = testDB.adminCommand({
+        assert.commandWorked(st.s0.adminCommand({
             moveChunk: 'test.user',
             bounds: [chunkToMove.min, chunkToMove.max],
             to: toShard,
             _waitForDelete: true
-        });
-        print(jsTestName() + " - result from moving chunk " + chunkToMove._id + ": " +
-              tojson(cmdRes));
+        }));
     });
 
     st.stop();
diff --git a/jstests/sharding/hash_shard_non_empty.js b/jstests/sharding/hash_shard_non_empty.js
index 35c7572bb75..8ebf53cf7f2 100644
--- a/jstests/sharding/hash_shard_non_empty.js
+++ b/jstests/sharding/hash_shard_non_empty.js
@@ -16,7 +16,7 @@
 db.getCollection(coll).ensureIndex({a: "hashed"});
 var res = db.adminCommand({shardcollection: dbname + "." + coll, key: {a: "hashed"}});
 assert.eq(res.ok, 1, "shardcollection didn't work");
 s.printShardingStatus();
-var numChunks = s.config.chunks.count();
+var numChunks = s.config.chunks.count({"ns": "test.foo"});
 assert.eq(numChunks, 1, "sharding non-empty collection should not pre-split");
 
 s.stop();
diff --git a/jstests/sharding/hash_shard_num_chunks.js b/jstests/sharding/hash_shard_num_chunks.js
index 69b164d7f36..19bf3066de2 100644
--- a/jstests/sharding/hash_shard_num_chunks.js
+++ b/jstests/sharding/hash_shard_num_chunks.js
@@ -16,7 +16,7 @@
     s.printShardingStatus();
 
-    var numChunks = s.config.chunks.count();
+    var numChunks = s.config.chunks.count({"ns": "test.foo"});
     assert.eq(numChunks, 500, "should be exactly 500 chunks");
 
     s.config.shards.find().forEach(
diff --git a/jstests/sharding/hash_single_shard.js b/jstests/sharding/hash_single_shard.js
index 8018a1ab640..f208731a71b 100644
--- a/jstests/sharding/hash_single_shard.js
+++ b/jstests/sharding/hash_single_shard.js
@@ -9,6 +9,6 @@
 testDB.adminCommand({shardCollection: "test.collection", key: {a: "hashed"}});
 
 // check the number of initial chunks.
 assert.eq(2,
-          st.getDB('config').chunks.count(),
+          st.getDB('config').chunks.count({"ns": "test.collection"}),
           'Using hashed shard key but failing to do correct presplitting');
 st.stop();
diff --git a/jstests/sharding/key_string.js b/jstests/sharding/key_string.js
index 43a270e0175..a71120e06c3 100644
--- a/jstests/sharding/key_string.js
+++ b/jstests/sharding/key_string.js
@@ -9,7 +9,7 @@
     primary = s.getPrimaryShard("test").getDB("test");
     seconday = s.getOther(primary).getDB("test");
 
-    assert.eq(1, s.config.chunks.count(), "sanity check A");
+    assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check A");
 
     var db = s.getDB("test");
diff --git a/jstests/sharding/large_chunk.js b/jstests/sharding/large_chunk.js
index 34b0cb1d1cf..6fe2041004d 100644
--- a/jstests/sharding/large_chunk.js
+++ b/jstests/sharding/large_chunk.js
@@ -31,7 +31,7 @@
     assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
 
-    assert.eq(1, s.config.chunks.count(), "step 1 - need one large chunk");
+    assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "step 1 - need one large chunk");
 
     var primary = s.getPrimaryShard("test").getDB("test");
     var secondary = s.getOther(primary).getDB("test");
@@ -51,15 +51,14 @@
     // Move the chunk
     print("checkpoint 1b");
-    var before = s.config.chunks.find().toArray();
+    var before = s.config.chunks.find({ns: 'test.foo'}).toArray();
     assert.commandWorked(
         s.s0.adminCommand({movechunk: "test.foo", find: {_id: 1}, to: secondary.getMongo().name}));
-    var after = s.config.chunks.find().toArray();
+    var after = s.config.chunks.find({ns: 'test.foo'}).toArray();
     assert.neq(before[0].shard, after[0].shard, "move chunk did not work");
 
     s.config.changelog.find().forEach(printjson);
 
     s.stop();
-
 })();
diff --git a/jstests/sharding/limit_push.js b/jstests/sharding/limit_push.js
index b681f328298..787576ba6ff 100644
--- a/jstests/sharding/limit_push.js
+++ b/jstests/sharding/limit_push.js
@@ -27,7 +27,7 @@
     });
 
     // Check that the chunck have split correctly
-    assert.eq(2, s.config.chunks.count(), "wrong number of chunks");
+    assert.eq(2, s.config.chunks.count({"ns": "test.limit_push"}), "wrong number of chunks");
 
     // The query is asking for the maximum value below a given value
     // db.limit_push.find( { x : { $lt : 60} } ).sort( { x:-1} ).limit(1)
diff --git a/jstests/sharding/max_time_ms_sharded_new_commands.js b/jstests/sharding/max_time_ms_sharded_new_commands.js
index 48425438266..3b4bec81551 100644
--- a/jstests/sharding/max_time_ms_sharded_new_commands.js
+++ b/jstests/sharding/max_time_ms_sharded_new_commands.js
@@ -35,6 +35,9 @@
         admin.runCommand({setFeatureCompatibilityVersion: '3.4', maxTimeMS: 1000 * 60 * 60 * 24}),
         "expected setFeatureCompatibilityVersion to not hit time limit in mongod");
 
-    st.stop();
+    assert.commandWorked(
+        admin.runCommand({setFeatureCompatibilityVersion: '3.6', maxTimeMS: 1000 * 60 * 60 * 24}),
+        "expected setFeatureCompatibilityVersion to not hit time limit in mongod");
 
+    st.stop();
 })();
diff --git a/jstests/sharding/merge_chunks_test.js b/jstests/sharding/merge_chunks_test.js
index 1725657b0ad..5d3bfbbb97b 100644
--- a/jstests/sharding/merge_chunks_test.js
+++ b/jstests/sharding/merge_chunks_test.js
@@ -128,15 +128,19 @@
     st.printShardingStatus(true);
 
-    assert.eq(2, st.s0.getDB('config').chunks.find({}).itcount());
-    assert.eq(1,
-              st.s0.getDB('config')
-                  .chunks.find({'min._id': MinKey, 'max._id': 90, shard: st.shard0.shardName})
-                  .itcount());
-    assert.eq(1,
-              st.s0.getDB('config')
-                  .chunks.find({'min._id': 90, 'max._id': MaxKey, shard: st.shard1.shardName})
-                  .itcount());
+    assert.eq(2, st.s0.getDB('config').chunks.find({'ns': 'foo.bar'}).itcount());
+    assert.eq(
+        1,
+        st.s0.getDB('config')
+            .chunks
+            .find({'ns': 'foo.bar', 'min._id': MinKey, 'max._id': 90, shard: st.shard0.shardName})
+            .itcount());
+    assert.eq(
+        1,
+        st.s0.getDB('config')
+            .chunks
+            .find({'ns': 'foo.bar', 'min._id': 90, 'max._id': MaxKey, shard: st.shard1.shardName})
+            .itcount());
 
     st.stop();
 })();
diff --git a/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js b/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
index 99c42cb74fc..de7a4f9e4bc 100644
--- a/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
+++ b/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
@@ -45,8 +45,14 @@ load('./jstests/libs/chunk_manipulation_util.js');
     // Ensure a new primary is found promptly
     st.configRS.getPrimary(30000);
 
-    assert.eq(1, mongos.getDB('config').chunks.find({shard: st.shard0.shardName}).itcount());
-    assert.eq(0, mongos.getDB('config').chunks.find({shard: st.shard1.shardName}).itcount());
+    assert.eq(1,
+              mongos.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+                  .itcount());
+    assert.eq(0,
+              mongos.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+                  .itcount());
 
     // At this point, the balancer is in recovery mode. Ensure that stepdown can be done again and
     // the recovery mode interrupted.
@@ -63,8 +69,14 @@ load('./jstests/libs/chunk_manipulation_util.js');
     // Ensure that migration succeeded
     joinMoveChunk();
 
-    assert.eq(0, mongos.getDB('config').chunks.find({shard: st.shard0.shardName}).itcount());
-    assert.eq(1, mongos.getDB('config').chunks.find({shard: st.shard1.shardName}).itcount());
+    assert.eq(0,
+              mongos.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+                  .itcount());
+    assert.eq(1,
+              mongos.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+                  .itcount());
 
     st.stop();
 })();
diff --git a/jstests/sharding/movechunk_parallel.js b/jstests/sharding/movechunk_parallel.js
index 763ee11cec8..9de8da268f1 100644
--- a/jstests/sharding/movechunk_parallel.js
+++ b/jstests/sharding/movechunk_parallel.js
@@ -31,8 +31,14 @@ load('./jstests/libs/chunk_manipulation_util.js');
     assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 20}, st.shard1.shardName));
     assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 30}, st.shard1.shardName));
 
-    assert.eq(2, st.s0.getDB('config').chunks.find({shard: st.shard0.shardName}).itcount());
-    assert.eq(2, st.s0.getDB('config').chunks.find({shard: st.shard1.shardName}).itcount());
+    assert.eq(2,
+              st.s0.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+                  .itcount());
+    assert.eq(2,
+              st.s0.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+                  .itcount());
 
     // Pause migrations at shards 2 and 3
     pauseMigrateAtStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
@@ -53,10 +59,22 @@ load('./jstests/libs/chunk_manipulation_util.js');
     joinMoveChunk1();
     joinMoveChunk2();
 
-    assert.eq(1, st.s0.getDB('config').chunks.find({shard: st.shard0.shardName}).itcount());
-    assert.eq(1, st.s0.getDB('config').chunks.find({shard: st.shard1.shardName}).itcount());
-    assert.eq(1, st.s0.getDB('config').chunks.find({shard: st.shard2.shardName}).itcount());
-    assert.eq(1, st.s0.getDB('config').chunks.find({shard: st.shard3.shardName}).itcount());
+    assert.eq(1,
+              st.s0.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+                  .itcount());
+    assert.eq(1,
+              st.s0.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+                  .itcount());
+    assert.eq(1,
+              st.s0.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard2.shardName})
+                  .itcount());
+    assert.eq(1,
+              st.s0.getDB('config')
+                  .chunks.find({ns: 'TestDB.TestColl', shard: st.shard3.shardName})
+                  .itcount());
 
     st.stop();
 })();
diff --git a/jstests/sharding/presplit.js b/jstests/sharding/presplit.js
index b59dc4aa901..de9efe410a0 100644
--- a/jstests/sharding/presplit.js
+++ b/jstests/sharding/presplit.js
@@ -24,14 +24,14 @@
     // Make sure that there's only one chunk holding all the data.
     s.printChunks();
     primary = s.getPrimaryShard("test").getDB("test");
-    assert.eq(0, s.config.chunks.count(), "single chunk assertion");
+    assert.eq(0, s.config.chunks.count({"ns": "test.foo"}), "single chunk assertion");
     assert.eq(num, primary.foo.count());
 
     s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
 
     // Make sure the collection's original chunk got split
     s.printChunks();
-    assert.lt(20, s.config.chunks.count(), "many chunks assertion");
+    assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "many chunks assertion");
     assert.eq(num, primary.foo.count());
 
     s.printChangeLog();
diff --git a/jstests/sharding/query_config.js b/jstests/sharding/query_config.js
index 486e81637ce..64743e84e11 100644
--- a/jstests/sharding/query_config.js
+++ b/jstests/sharding/query_config.js
@@ -178,9 +178,6 @@
         assert.eq(cursor.next(), {_id: testNamespaces[4], keyb: 1});
         assert.eq(cursor.next(), {_id: testNamespaces[5], keyb: 1, keyc: 1});
         assert(!cursor.hasNext());
-
-        // Count query without filter.
-        assert.eq(configDB.collections.count(), testNamespaces.length);
     };
 
     /**
diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js
index baacbd96ea4..de962489afe 100644
--- a/jstests/sharding/shard1.js
+++ b/jstests/sharding/shard1.js
@@ -1,55 +1,47 @@
 /**
  * this tests some of the ground work
  */
+(function() {
+    'use strict';
 
-s = new ShardingTest({name: "shard1", shards: 2});
+    var s = new ShardingTest({shards: 2});
+    var db = s.getDB("test");
 
-db = s.getDB("test");
-db.foo.insert({num: 1, name: "eliot"});
-db.foo.insert({num: 2, name: "sara"});
-db.foo.insert({num: -1, name: "joe"});
-db.foo.ensureIndex({num: 1});
-assert.eq(3, db.foo.find().length(), "A");
+    assert.writeOK(db.foo.insert({num: 1, name: "eliot"}));
+    assert.writeOK(db.foo.insert({num: 2, name: "sara"}));
+    assert.writeOK(db.foo.insert({num: -1, name: "joe"}));
 
-shardCommand = {
-    shardcollection: "test.foo",
-    key: {num: 1}
-};
+    assert.commandWorked(db.foo.ensureIndex({num: 1}));
 
-assert.throws(function() {
-    s.adminCommand(shardCommand);
-});
+    assert.eq(3, db.foo.find().length(), "A");
 
-s.adminCommand({enablesharding: "test"});
-s.ensurePrimaryShard('test', 'shard0001');
-assert.eq(3, db.foo.find().length(), "after partitioning count failed");
+    const shardCommand = {shardcollection: "test.foo", key: {num: 1}};
 
-s.adminCommand(shardCommand);
+    assert.commandFailed(s.s0.adminCommand(shardCommand));
 
-assert.throws(function() {
-    s.adminCommand({shardCollection: 'test', key: {x: 1}});
-});
-assert.throws(function() {
-    s.adminCommand({shardCollection: '.foo', key: {x: 1}});
-});
+    assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+    s.ensurePrimaryShard('test', 'shard0001');
 
-var cconfig = s.config.collections.findOne({_id: "test.foo"});
-assert(cconfig, "why no collection entry for test.foo");
+    assert.eq(3, db.foo.find().length(), "after partitioning count failed");
 
-delete cconfig.lastmod;
-delete cconfig.dropped;
-delete cconfig.lastmodEpoch;
-delete cconfig.uuid;
+    assert.commandWorked(s.s0.adminCommand(shardCommand));
+    assert.commandFailed(s.s0.adminCommand({shardCollection: 'test', key: {x: 1}}));
+    assert.commandFailed(s.s0.adminCommand({shardCollection: '.foo', key: {x: 1}}));
 
-assert.eq(cconfig, {_id: "test.foo", key: {num: 1}, unique: false}, "Sharded content mismatch");
+    var cconfig = s.config.collections.findOne({_id: "test.foo"});
+    assert(cconfig, "No collection entry found for test.foo");
 
-s.config.collections.find().forEach(printjson);
+    delete cconfig.lastmod;
+    delete cconfig.dropped;
+    delete cconfig.lastmodEpoch;
+    delete cconfig.uuid;
 
-assert.eq(1, s.config.chunks.count(), "num chunks A");
-si = s.config.chunks.findOne();
-assert(si);
-assert.eq(si.ns, "test.foo");
+    assert.eq(cconfig, {_id: "test.foo", key: {num: 1}, unique: false}, "Sharded content mismatch");
 
-assert.eq(3, db.foo.find().length(), "after sharding, no split count failed");
+    s.config.collections.find().forEach(printjson);
 
-s.stop();
+    assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "num chunks A");
+    assert.eq(3, db.foo.find().length(), "after sharding, no split count failed");
+
+    s.stop();
+})();
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index cfe39916053..527fad07be5 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -21,11 +21,11 @@
     assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
     s.ensurePrimaryShard('test', s.shard1.shardName);
     assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
-    assert.eq(1, s.config.chunks.count(), "sanity check 1");
+    assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check 1");
 
     assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 0}}));
-    assert.eq(2, s.config.chunks.count(), "should be 2 shards");
-    var chunks = s.config.chunks.find().toArray();
+    assert.eq(2, s.config.chunks.count({"ns": "test.foo"}), "should be 2 shards");
+    var chunks = s.config.chunks.find({"ns": "test.foo"}).toArray();
     assert.eq(chunks[0].shard, chunks[1].shard, "server should be the same after a split");
 
     assert.writeOK(db.foo.save({num: 1, name: "eliot"}));
@@ -62,9 +62,9 @@
     assert.eq(1, primary.foo.find().length(), "primary should only have 1 after move shard");
     assert.eq(2,
-              s.config.chunks.count(),
+              s.config.chunks.count({"ns": "test.foo"}),
               "still should have 2 shards after move not:" + s.getChunksString());
-    var chunks = s.config.chunks.find().toArray();
+    var chunks = s.config.chunks.find({"ns": "test.foo"}).toArray();
     assert.neq(chunks[0].shard, chunks[1].shard, "servers should NOT be the same after the move");
 
     placeCheck(3);
@@ -232,7 +232,7 @@
         _waitForDelete: true
     }));
     assert.eq(2, s.onNumShards("foo"), "on 2 shards again");
-    assert.eq(3, s.config.chunks.count(), "only 3 chunks");
+    assert.eq(3, s.config.chunks.count({"ns": "test.foo"}), "only 3 chunks");
 
     print("YO : " + tojson(db.runCommand("serverStatus")));
diff --git a/jstests/sharding/shard_existing.js b/jstests/sharding/shard_existing.js
index a1b328cfe1f..754d2a8bea8 100644
--- a/jstests/sharding/shard_existing.js
+++ b/jstests/sharding/shard_existing.js
@@ -1,8 +1,8 @@
 (function() {
+    'use strict';
 
     var s = new ShardingTest({name: "shard_existing", shards: 2, mongos: 1, other: {chunkSize: 1}});
-
-    db = s.getDB("test");
+    var db = s.getDB("test");
 
     var stringSize = 10000;
     var numDocs = 2000;
@@ -14,7 +14,7 @@
     print("NumDocs: " + numDocs + " DocSize: " + docSize + " TotalSize: " + totalSize);
 
     var bulk = db.data.initializeUnorderedBulkOp();
-    for (i = 0; i < numDocs; i++) {
+    for (var i = 0; i < numDocs; i++) {
         bulk.insert({_id: i, s: bigString});
     }
     assert.writeOK(bulk.execute());
@@ -25,14 +25,13 @@
     s.adminCommand({enablesharding: "test"});
     s.ensurePrimaryShard('test', 'shard0001');
-    res = s.adminCommand({shardcollection: "test.data", key: {_id: 1}});
+    var res = s.adminCommand({shardcollection: "test.data", key: {_id: 1}});
     printjson(res);
 
     // number of chunks should be approx equal to the total data size / half the chunk size
-    var numChunks = s.config.chunks.find().itcount();
+    var numChunks = s.config.chunks.find({ns: 'test.data'}).itcount();
     var guess = Math.ceil(dataSize / (512 * 1024 + avgObjSize));
     assert(Math.abs(numChunks - guess) < 2, "not right number of chunks");
 
     s.stop();
-
 })();
diff --git a/jstests/sharding/sharding_balance1.js b/jstests/sharding/sharding_balance1.js
index 22a8aaa6210..4213b47d6d7 100644
--- a/jstests/sharding/sharding_balance1.js
+++ b/jstests/sharding/sharding_balance1.js
@@ -23,7 +23,7 @@
     assert.writeOK(bulk.execute());
 
     assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
-    assert.lt(20, s.config.chunks.count(), "setup2");
+    assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
 
     function diff1() {
         var x = s.chunkCounts("foo");
diff --git a/jstests/sharding/sharding_balance2.js b/jstests/sharding/sharding_balance2.js
index 7de9a97aa3e..e72bcdba955 100644
--- a/jstests/sharding/sharding_balance2.js
+++ b/jstests/sharding/sharding_balance2.js
@@ -31,7 +31,7 @@
     assert.writeOK(bulk.execute());
 
     assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
-    assert.gt(s.config.chunks.count(), 10);
+    assert.gt(s.config.chunks.count({"ns": "test.foo"}), 10);
 
     var getShardSize = function(conn) {
         var listDatabases = conn.getDB('admin').runCommand({listDatabases: 1});
diff --git a/jstests/sharding/sharding_balance3.js b/jstests/sharding/sharding_balance3.js
index 384ba55461c..fa4ad9e96d1 100644
--- a/jstests/sharding/sharding_balance3.js
+++ b/jstests/sharding/sharding_balance3.js
@@ -30,7 +30,7 @@
     assert.writeOK(bulk.execute());
 
     s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-    assert.lt(20, s.config.chunks.count(), "setup2");
+    assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
 
     function diff1() {
         var x = s.chunkCounts("foo");
diff --git a/jstests/sharding/sharding_balance4.js b/jstests/sharding/sharding_balance4.js
index d8b08b46a60..23905ffcb2d 100644
--- a/jstests/sharding/sharding_balance4.js
+++ b/jstests/sharding/sharding_balance4.js
@@ -66,7 +66,7 @@
     }
 
     check("initial at end");
-    assert.lt(20, s.config.chunks.count(), "setup2");
+    assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
 
     function check(msg, dontAssert) {
         for (var x in counts) {
diff --git a/jstests/sharding/sort1.js b/jstests/sharding/sort1.js
index c9979895108..26491d46ded 100644
--- a/jstests/sharding/sort1.js
+++ b/jstests/sharding/sort1.js
@@ -1,4 +1,5 @@
 (function() {
+    'use strict';
 
     var s = new ShardingTest({name: "sort1", shards: 2, mongos: 2});
 
@@ -6,13 +7,13 @@
     s.ensurePrimaryShard('test', 'shard0001');
     s.adminCommand({shardcollection: "test.data", key: {'sub.num': 1}});
 
-    db = s.getDB("test");
+    var db = s.getDB("test");
 
-    N = 100;
+    const N = 100;
 
-    forward = [];
-    backward = [];
-    for (i = 0; i < N; i++) {
+    var forward = [];
+    var backward = [];
+    for (var i = 0; i < N; i++) {
         db.data.insert({_id: i, sub: {num: i, x: N - i}});
         forward.push(i);
         backward.push((N - 1) - i);
@@ -28,12 +29,12 @@
         _waitForDelete: true
     });
 
-    assert.lte(3, s.config.chunks.find().itcount(), "A1");
+    assert.lte(3, s.config.chunks.find({ns: 'test.data'}).itcount(), "A1");
 
-    temp = s.config.chunks.find().sort({min: 1}).toArray();
+    var temp = s.config.chunks.find({ns: 'test.data'}).sort({min: 1}).toArray();
     temp.forEach(printjsononeline);
 
-    z = 0;
+    var z = 0;
     for (; z < temp.length; z++)
         if (temp[z].min["sub.num"] <= 50 && temp[z].max["sub.num"] > 50)
             break;
@@ -50,11 +51,12 @@
     db.data.find().sort({'sub.num': 1}).toArray();
     s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
 
-    a = Date.timeFunc(function() {
+    var a = Date.timeFunc(function() {
         z = db.data.find().sort({'sub.num': 1}).toArray();
     }, 200);
     assert.eq(100, z.length, "C1");
-    b = 1.5 * Date.timeFunc(function() {
+
+    var b = 1.5 * Date.timeFunc(function() {
        z = s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
     }, 200);
     assert.eq(67, z.length, "C2");
@@ -105,5 +107,4 @@
     assert.eq(forward, getSorted("sub.x", -1, {'_id': 0, 'sub.num': 1}), "D12");
 
     s.stop();
-
 })();
diff --git a/jstests/sharding/split_large_key.js b/jstests/sharding/split_large_key.js
index 0468fce757b..5a5504594d2 100644
--- a/jstests/sharding/split_large_key.js
+++ b/jstests/sharding/split_large_key.js
@@ -3,16 +3,16 @@
 (function() {
     'use strict';
 
-    function verifyChunk(keys, expectFail) {
+    function verifyChunk(keys, expectFail, ns) {
         // If split failed then there's only 1 chunk
         // With a min & max for the shardKey
         if (expectFail) {
-            assert.eq(1, configDB.chunks.find().count(), "Chunks count no split");
-            var chunkDoc = configDB.chunks.findOne();
+            assert.eq(1, configDB.chunks.find({"ns": ns}).count(), "Chunks count no split");
+            var chunkDoc = configDB.chunks.findOne({"ns": ns});
             assert.eq(0, bsonWoCompare(chunkDoc.min, keys.min), "Chunks min");
             assert.eq(0, bsonWoCompare(chunkDoc.max, keys.max), "Chunks max");
         } else {
-            assert.eq(2, configDB.chunks.find().count(), "Chunks count split");
+            assert.eq(2, configDB.chunks.find({"ns": ns}).count(), "Chunks count split");
         }
     }
 
@@ -58,7 +58,7 @@
             assert(res.ok, "Split: " + collName + " " + res.errmsg);
         }
 
-        verifyChunk(chunkKeys, test.expectFail);
+        verifyChunk(chunkKeys, test.expectFail, "test." + collName);
 
         st.s0.getCollection("test." + collName).drop();
     });
diff --git a/jstests/sharding/tag_auto_split.js b/jstests/sharding/tag_auto_split.js
index 9462400864f..ddee6706544 100644
--- a/jstests/sharding/tag_auto_split.js
+++ b/jstests/sharding/tag_auto_split.js
@@ -8,7 +8,7 @@
     s.ensurePrimaryShard('test', 'shard0001');
     assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
 
-    assert.eq(1, s.config.chunks.find().itcount());
+    assert.eq(1, s.config.chunks.find({"ns": "test.foo"}).itcount());
 
     s.addShardTag("shard0000", "a");
     s.addShardTag("shard0000", "b");
@@ -19,17 +19,17 @@
     s.startBalancer();
 
     assert.soon(function() {
-        return s.config.chunks.find().itcount() == 4;
+        return s.config.chunks.find({"ns": "test.foo"}).itcount() == 4;
     }, 'Split did not occur', 3 * 60 * 1000);
 
     s.awaitBalancerRound();
     s.printShardingStatus(true);
-    assert.eq(4, s.config.chunks.find().itcount(), 'Split points changed');
+    assert.eq(4, s.config.chunks.find({"ns": "test.foo"}).itcount(), 'Split points changed');
 
-    assert.eq(1, s.config.chunks.find({min: {_id: MinKey}}).itcount());
-    assert.eq(1, s.config.chunks.find({min: {_id: 5}}).itcount());
-    assert.eq(1, s.config.chunks.find({min: {_id: 10}}).itcount());
-    assert.eq(1, s.config.chunks.find({min: {_id: 15}}).itcount());
+    assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: MinKey}}).itcount());
+    assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 5}}).itcount());
+    assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 10}}).itcount());
+    assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 15}}).itcount());
 
     s.stop();
 })();
diff --git a/jstests/sharding/tag_auto_split_partial_key.js b/jstests/sharding/tag_auto_split_partial_key.js
index 5ca9b237845..51ddd69cc78 100644
--- a/jstests/sharding/tag_auto_split_partial_key.js
+++ b/jstests/sharding/tag_auto_split_partial_key.js
@@ -8,7 +8,7 @@
     s.ensurePrimaryShard('test', 'shard0001');
     assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1, a: 1}}));
 
-    assert.eq(1, s.config.chunks.find().itcount());
+    assert.eq(1, s.config.chunks.find({"ns": "test.foo"}).itcount());
 
     s.addShardTag("shard0000", "a");
     s.addShardTag("shard0000", "b");
@@ -19,14 +19,14 @@
     s.startBalancer();
 
     assert.soon(function() {
-        return s.config.chunks.find().itcount() == 4;
+        return s.config.chunks.find({"ns": "test.foo"}).itcount() == 4;
     }, 'Split did not occur', 3 * 60 * 1000);
 
     s.awaitBalancerRound();
     s.printShardingStatus(true);
-    assert.eq(4, s.config.chunks.find().itcount(), 'Split points changed');
+    assert.eq(4, s.config.chunks.find({"ns": "test.foo"}).itcount(), 'Split points changed');
 
-    s.config.chunks.find().forEach(function(chunk) {
+    s.config.chunks.find({"ns": "test.foo"}).forEach(function(chunk) {
         var numFields = 0;
         for (var x in chunk.min) {
             numFields++;
@@ -36,10 +36,10 @@
     });
 
     // Check chunk mins correspond exactly to tag range boundaries, extended to match shard key
-    assert.eq(1, s.config.chunks.find({min: {_id: MinKey, a: MinKey}}).itcount());
-    assert.eq(1, s.config.chunks.find({min: {_id: 5, a: MinKey}}).itcount());
-    assert.eq(1, s.config.chunks.find({min: {_id: 10, a: MinKey}}).itcount());
-    assert.eq(1, s.config.chunks.find({min: {_id: 15, a: MinKey}}).itcount());
+    assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: MinKey, a: MinKey}}).itcount());
+    assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 5, a: MinKey}}).itcount());
+    assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 10, a: MinKey}}).itcount());
+    assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 15, a: MinKey}}).itcount());
 
     s.stop();
 })();
diff --git a/jstests/sharding/tag_range.js b/jstests/sharding/tag_range.js
index d4fdbb1e183..0dec96f52d8 100644
--- a/jstests/sharding/tag_range.js
+++ b/jstests/sharding/tag_range.js
@@ -12,7 +12,7 @@
         assert.eq(st.config.tags.count(), num, message);
     }
 
-    assert.eq(1, st.config.chunks.count());
+    assert.eq(1, st.config.chunks.count({"ns": "test.tag_range"}));
 
     st.addShardTag('shard0000', 'a');
     st.addShardTag('shard0000', 'b');
diff --git a/jstests/sharding/top_chunk_autosplit.js b/jstests/sharding/top_chunk_autosplit.js
index 5daf46c2f67..834eb8c55b7 100644
--- a/jstests/sharding/top_chunk_autosplit.js
+++ b/jstests/sharding/top_chunk_autosplit.js
@@ -10,9 +10,14 @@ function shardSetup(shardConfig, dbName, collName) {
     return st;
 }
 
-function getShardWithTopChunk(configDB, lowOrHigh) {
+function getShardWithTopChunk(configDB, lowOrHigh, ns) {
     // lowOrHigh: 1 low "top chunk", -1 high "top chunk"
-    return configDB.chunks.find({}).sort({min: lowOrHigh}).limit(1).next().shard;
+    print(ns);
+    print(configDB.chunks.count({"ns": ns}));
+    print(configDB.chunks.count());
+    print(JSON.stringify(configDB.chunks.findOne()));
+    print(JSON.stringify(configDB.chunks.findOne({"ns": {$ne: "config.system.sessions"}})));
+    return configDB.chunks.find({"ns": ns}).sort({min: lowOrHigh}).limit(1).next().shard;
 }
 
 function getNumberOfChunks(configDB) {
@@ -79,8 +84,9 @@ function runTest(test) {
         xval += test.inserts.inc;
     } while (getNumberOfChunks(configDB) <= numChunks);
 
+    printShardingStatus(configDB);
     // Test for where new top chunk should reside
-    assert.eq(getShardWithTopChunk(configDB, test.lowOrHigh),
+    assert.eq(getShardWithTopChunk(configDB, test.lowOrHigh, db + "." + collName),
              test.movedToShard,
              test.name + " chunk in the wrong shard");
diff --git a/jstests/sharding/version1.js b/jstests/sharding/version1.js
index 0e15e6180b1..0df7ae3cb86 100644
--- a/jstests/sharding/version1.js
+++ b/jstests/sharding/version1.js
@@ -44,7 +44,7 @@
                  }),
                  "should have failed because version is config is 1|0");
 
-    var epoch = s.getDB('config').chunks.findOne().lastmodEpoch;
+    var epoch = s.getDB('config').chunks.findOne({"ns": "alleyinsider.foo"}).lastmodEpoch;
     assert.commandWorked(a.runCommand({
         setShardVersion: "alleyinsider.foo",
         configdb: s._configDB,
diff --git a/jstests/sharding/write_cmd_auto_split.js b/jstests/sharding/write_cmd_auto_split.js
index 1cf9b5ab39a..ab5ca26c84f 100644
--- a/jstests/sharding/write_cmd_auto_split.js
+++ b/jstests/sharding/write_cmd_auto_split.js
@@ -15,7 +15,7 @@
     jsTest.log('Test single batch insert should auto-split');
 
-    assert.eq(1, configDB.chunks.find().itcount());
+    assert.eq(1, configDB.chunks.find({"ns": "test.insert"}).itcount());
 
     // Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
     // we are going to be conservative.
@@ -30,7 +30,7 @@
     // Inserted batch is a multiple of the chunkSize, expect the chunks to split into
     // more than 2.
-    assert.gt(configDB.chunks.find().itcount(), 2);
+    assert.gt(configDB.chunks.find({"ns": "test.insert"}).itcount(), 2);
     testDB.dropDatabase();
 
     jsTest.log('Test single batch update should auto-split');
@@ -38,7 +38,7 @@
     assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
     assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));
 
-    assert.eq(1, configDB.chunks.find().itcount());
+    assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());
 
     for (var x = 0; x < 1100; x++) {
         assert.writeOK(testDB.runCommand({
@@ -49,7 +49,7 @@
         }));
     }
 
-    assert.gt(configDB.chunks.find().itcount(), 1);
+    assert.gt(configDB.chunks.find({"ns": "test.update"}).itcount(), 1);
     testDB.dropDatabase();
 
     jsTest.log('Test single delete should not auto-split');
@@ -57,7 +57,7 @@
     assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
     assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));
 
-    assert.eq(1, configDB.chunks.find().itcount());
+    assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
 
     for (var x = 0; x < 1100; x++) {
         assert.writeOK(testDB.runCommand({
@@ -68,7 +68,7 @@
         }));
     }
 
-    assert.eq(1, configDB.chunks.find().itcount());
+    assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
     testDB.dropDatabase();
 
     jsTest.log('Test batched insert should auto-split');
@@ -76,7 +76,7 @@
     assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
     assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));
 
-    assert.eq(1, configDB.chunks.find().itcount());
+    assert.eq(1, configDB.chunks.find({"ns": "test.insert"}).itcount());
 
     // Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
     // we are going to be conservative.
@@ -91,7 +91,7 @@
             {insert: 'insert', documents: docs, ordered: false, writeConcern: {w: 1}}));
     }
 
-    assert.gt(configDB.chunks.find().itcount(), 1);
+    assert.gt(configDB.chunks.find({"ns": "test.insert"}).itcount(), 1);
     testDB.dropDatabase();
 
    jsTest.log('Test batched update should auto-split');
@@ -99,7 +99,7 @@
     assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
     assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));
 
-    assert.eq(1, configDB.chunks.find().itcount());
+    assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());
 
     for (var x = 0; x < 1100; x += 400) {
         var docs = [];
@@ -113,7 +113,7 @@
             {update: 'update', updates: docs, ordered: false, writeConcern: {w: 1}}));
     }
 
-    assert.gt(configDB.chunks.find().itcount(), 1);
+    assert.gt(configDB.chunks.find({"ns": "test.update"}).itcount(), 1);
     testDB.dropDatabase();
 
     jsTest.log('Test batched delete should not auto-split');
@@ -121,7 +121,7 @@
     assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
     assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));
 
-    assert.eq(1, configDB.chunks.find().itcount());
+    assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
 
     for (var x = 0; x < 1100; x += 400) {
         var docs = [];
@@ -139,7 +139,7 @@
         }));
     }
 
-    assert.eq(1, configDB.chunks.find().itcount());
+    assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
 
     st.stop();
 })();