30 files changed, 161 insertions, 196 deletions
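The commit below replaces per-test queries of the config.shards collection (and hand-built arrays of shard connections) with the accessors the ShardingTest fixture already provides: st.shardN is a live connection to shard N, and st.shardN.shardName is that shard's id in the cluster. A minimal before/after sketch of the pattern being removed (the namespace foo.bar is illustrative, taken from the tests below):

    // Before: fetch shard documents from the config server and index into them.
    var shards = mongos.getDB("config").shards.find().toArray();
    assert.commandWorked(
        admin.runCommand({moveChunk: "foo.bar", find: {_id: 0}, to: shards[1]._id}));

    // After: use the fixture's accessors directly; no config query needed.
    assert.commandWorked(
        admin.runCommand({moveChunk: "foo.bar", find: {_id: 0}, to: st.shard1.shardName}));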
diff --git a/jstests/sharding/addshard5.js b/jstests/sharding/addshard5.js
index cedbe721177..128c52252a1 100644
--- a/jstests/sharding/addshard5.js
+++ b/jstests/sharding/addshard5.js
@@ -7,20 +7,13 @@
 var mongos = st.s;
 var admin = mongos.getDB('admin');
-var config = mongos.getDB('config');
 var coll = mongos.getCollection('foo.bar');

-// Get all the shard info and connections
-var shards = [];
-config.shards.find().sort({_id: 1}).forEach(function(doc) {
-    shards.push(Object.merge(doc, {conn: new Mongo(doc.host)}));
-});
-
 // Shard collection
 assert.commandWorked(mongos.adminCommand({enableSharding: coll.getDB() + ''}));

 // Just to be sure what primary we start from
-st.ensurePrimaryShard(coll.getDB().getName(), shards[0]._id);
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
 assert.commandWorked(mongos.adminCommand({shardCollection: coll + '', key: {_id: 1}}));

 // Insert one document
@@ -28,23 +21,23 @@
 // Migrate the collection to and from shard1 so shard0 loads the shard1 host
 assert.commandWorked(mongos.adminCommand(
-    {moveChunk: coll + '', find: {_id: 0}, to: shards[1]._id, _waitForDelete: true}));
+    {moveChunk: coll + '', find: {_id: 0}, to: st.shard1.shardName, _waitForDelete: true}));
 assert.commandWorked(mongos.adminCommand(
-    {moveChunk: coll + '', find: {_id: 0}, to: shards[0]._id, _waitForDelete: true}));
+    {moveChunk: coll + '', find: {_id: 0}, to: st.shard0.shardName, _waitForDelete: true}));

 // Drop and re-add shard with the same name but a new host.
-assert.commandWorked(mongos.adminCommand({removeShard: shards[1]._id}));
-assert.commandWorked(mongos.adminCommand({removeShard: shards[1]._id}));
+assert.commandWorked(mongos.adminCommand({removeShard: st.shard1.shardName}));
+assert.commandWorked(mongos.adminCommand({removeShard: st.shard1.shardName}));

 var shard2 = MongoRunner.runMongod({'shardsvr': ''});
-assert.commandWorked(mongos.adminCommand({addShard: shard2.host, name: shards[1]._id}));
+assert.commandWorked(mongos.adminCommand({addShard: shard2.host, name: st.shard1.shardName}));

 jsTest.log('Shard was dropped and re-added with same name...');
 st.printShardingStatus();

 // Try a migration
 assert.commandWorked(
-    mongos.adminCommand({moveChunk: coll + '', find: {_id: 0}, to: shards[1]._id}));
+    mongos.adminCommand({moveChunk: coll + '', find: {_id: 0}, to: st.shard1.shardName}));

 assert.eq('world', shard2.getCollection(coll + '').findOne().hello);

diff --git a/jstests/sharding/batch_write_command_sharded.js b/jstests/sharding/batch_write_command_sharded.js
index 884d5bb85bb..7ede1900455 100644
--- a/jstests/sharding/batch_write_command_sharded.js
+++ b/jstests/sharding/batch_write_command_sharded.js
@@ -11,7 +11,6 @@
 var mongos = st.s0;
 var admin = mongos.getDB("admin");
 var config = mongos.getDB("config");
-var shards = config.shards.find().toArray();
 var configConnStr = st._configDB;

 jsTest.log("Starting sharding batch write tests...");
@@ -106,7 +105,7 @@
 // START SETUP
 var brokenColl = mongos.getCollection("broken.coll");
 assert.commandWorked(admin.runCommand({enableSharding: brokenColl.getDB().toString()}));
-st.ensurePrimaryShard(brokenColl.getDB().toString(), shards[0]._id);
+st.ensurePrimaryShard(brokenColl.getDB().toString(), st.shard0.shardName);
 assert.commandWorked(admin.runCommand({shardCollection: brokenColl.toString(), key: {_id: 1}}));
 assert.commandWorked(admin.runCommand({split: brokenColl.toString(), middle: {_id: 0}}));
@@ -120,8 +119,8 @@
 // Modify the chunks to make shards at a higher version
-assert.commandWorked(
-    admin.runCommand({moveChunk: brokenColl.toString(), find: {_id: 0}, to: shards[1]._id}));
+assert.commandWorked(admin.runCommand(
+    {moveChunk: brokenColl.toString(), find: {_id: 0}, to: st.shard1.shardName}));

 // Rewrite the old chunks back to the config server

diff --git a/jstests/sharding/bouncing_count.js b/jstests/sharding/bouncing_count.js
index f00218f5dfb..d73190f4744 100644
--- a/jstests/sharding/bouncing_count.js
+++ b/jstests/sharding/bouncing_count.js
@@ -15,10 +15,21 @@
 var collB = mongosB.getCollection("" + collA);
 var collC = mongosB.getCollection("" + collA);

-var shards = config.shards.find().sort({_id: 1}).toArray();
+var shards = [
+    st.shard0,
+    st.shard1,
+    st.shard2,
+    st.shard3,
+    st.shard4,
+    st.shard5,
+    st.shard6,
+    st.shard7,
+    st.shard8,
+    st.shard9
+];

 assert.commandWorked(admin.runCommand({enableSharding: "" + collA.getDB()}));
-st.ensurePrimaryShard(collA.getDB().getName(), shards[1]._id);
+st.ensurePrimaryShard(collA.getDB().getName(), st.shard1.shardName);
 assert.commandWorked(admin.runCommand({shardCollection: "" + collA, key: {_id: 1}}));

 jsTestLog("Splitting up the collection...");
@@ -27,7 +38,7 @@
 for (var i = 0; i < shards.length; i++) {
     assert.commandWorked(admin.runCommand({split: "" + collA, middle: {_id: i}}));
     assert.commandWorked(
-        admin.runCommand({moveChunk: "" + collA, find: {_id: i}, to: shards[i]._id}));
+        admin.runCommand({moveChunk: "" + collA, find: {_id: i}, to: shards[i].shardName}));
 }

 mongosB.getDB("admin").runCommand({flushRouterConfig: 1});
@@ -38,8 +49,11 @@
 // Change up all the versions...
 for (var i = 0; i < shards.length; i++) {
-    assert.commandWorked(admin.runCommand(
-        {moveChunk: "" + collA, find: {_id: i}, to: shards[(i + 1) % shards.length]._id}));
+    assert.commandWorked(admin.runCommand({
+        moveChunk: "" + collA,
+        find: {_id: i},
+        to: shards[(i + 1) % shards.length].shardName
+    }));
 }

 // Make sure mongos A is up-to-date

diff --git a/jstests/sharding/bulk_insert.js b/jstests/sharding/bulk_insert.js
index 715660fa67f..9284b9e9d97 100644
--- a/jstests/sharding/bulk_insert.js
+++ b/jstests/sharding/bulk_insert.js
@@ -6,17 +6,11 @@
 var mongos = st.s;
 var staleMongos = st.s1;
-var config = mongos.getDB("config");
 var admin = mongos.getDB("admin");
-var shards = config.shards.find().toArray();
-
-for (var i = 0; i < shards.length; i++) {
-    shards[i].conn = new Mongo(shards[i].host);
-}

 var collSh = mongos.getCollection(jsTestName() + ".collSharded");
 var collUn = mongos.getCollection(jsTestName() + ".collUnsharded");
-var collDi = shards[0].conn.getCollection(jsTestName() + ".collDirect");
+var collDi = st.shard0.getCollection(jsTestName() + ".collDirect");

 jsTest.log('Checking write to config collections...');
 assert.writeOK(admin.TestColl.insert({SingleDoc: 1}));
@@ -25,9 +19,10 @@
 jsTest.log("Setting up collections...");
 assert.commandWorked(admin.runCommand({enableSharding: collSh.getDB() + ""}));
-st.ensurePrimaryShard(collSh.getDB() + "", shards[0]._id);
+st.ensurePrimaryShard(collSh.getDB() + "", st.shard0.shardName);

-assert.commandWorked(admin.runCommand({movePrimary: collUn.getDB() + "", to: shards[1]._id}));
+assert.commandWorked(
+    admin.runCommand({movePrimary: collUn.getDB() + "", to: st.shard1.shardName}));

 printjson(collSh.ensureIndex({ukey: 1}, {unique: true}));
 printjson(collUn.ensureIndex({ukey: 1}, {unique: true}));
@@ -36,7 +31,7 @@
 assert.commandWorked(admin.runCommand({shardCollection: collSh + "", key: {ukey: 1}}));
collSh + "", middle: {ukey: 0}})); assert.commandWorked(admin.runCommand( - {moveChunk: collSh + "", find: {ukey: 0}, to: shards[0]._id, _waitForDelete: true})); + {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true})); var resetColls = function() { assert.writeOK(collSh.remove({})); @@ -248,9 +243,9 @@ assert.eq(null, staleCollSh.findOne(), 'Collections should be empty'); assert.commandWorked(admin.runCommand( - {moveChunk: collSh + "", find: {ukey: 0}, to: shards[1]._id, _waitForDelete: true})); + {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard1.shardName, _waitForDelete: true})); assert.commandWorked(admin.runCommand( - {moveChunk: collSh + "", find: {ukey: 0}, to: shards[0]._id, _waitForDelete: true})); + {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true})); assert.writeOK(staleCollSh.insert(inserts)); @@ -274,9 +269,9 @@ assert.eq(null, staleCollSh.findOne(), 'Collections should be empty'); assert.commandWorked(admin.runCommand( - {moveChunk: collSh + "", find: {ukey: 0}, to: shards[1]._id, _waitForDelete: true})); + {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard1.shardName, _waitForDelete: true})); assert.commandWorked(admin.runCommand( - {moveChunk: collSh + "", find: {ukey: 0}, to: shards[0]._id, _waitForDelete: true})); + {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true})); assert.writeOK(staleCollSh.insert(inserts)); diff --git a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js index e928eaebcf2..499df7c74dc 100644 --- a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js +++ b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js @@ -14,12 +14,11 @@ load('./jstests/libs/cleanup_orphaned_util.js'); var staticMongod = MongoRunner.runMongod({}); // For startParallelOps. var st = new ShardingTest({shards: 2, other: {separateConfig: true}}); - var mongos = st.s0, admin = mongos.getDB('admin'), - shards = mongos.getCollection('config.shards').find().toArray(), dbName = 'foo', - ns = dbName + '.bar', coll = mongos.getCollection(ns); + var mongos = st.s0, admin = mongos.getDB('admin'), dbName = 'foo', ns = dbName + '.bar', + coll = mongos.getCollection(ns); assert.commandWorked(admin.runCommand({enableSharding: dbName})); - printjson(admin.runCommand({movePrimary: dbName, to: shards[0]._id})); + printjson(admin.runCommand({movePrimary: dbName, to: st.shard0.shardName})); assert.commandWorked(admin.runCommand({shardCollection: ns, key: {key: 'hashed'}})); // Makes four chunks by default, two on each shard. 
diff --git a/jstests/sharding/cleanup_orphaned_cmd_prereload.js b/jstests/sharding/cleanup_orphaned_cmd_prereload.js
index 05fbd8b741a..a5077faa7eb 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_prereload.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_prereload.js
@@ -6,11 +6,10 @@
 var st = new ShardingTest({shards: 2});

 var mongos = st.s0;
 var admin = mongos.getDB("admin");
-var shards = mongos.getCollection("config.shards").find().toArray();
 var coll = mongos.getCollection("foo.bar");

 assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
-printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
 assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);

 jsTest.log("Moving some chunks to shard1...");
@@ -20,11 +19,13 @@
 assert(admin.runCommand({split: coll + "", middle: {_id: 1}}).ok);
 assert(
     admin
-        .runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id, _waitForDelete: true})
+        .runCommand(
+            {moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName, _waitForDelete: true})
         .ok);
 assert(
     admin
-        .runCommand({moveChunk: coll + "", find: {_id: 1}, to: shards[1]._id, _waitForDelete: true})
+        .runCommand(
+            {moveChunk: coll + "", find: {_id: 1}, to: st.shard1.shardName, _waitForDelete: true})
         .ok);

 var metadata =
@@ -43,7 +44,8 @@ assert(!st.shard1.getDB("admin")
 jsTest.log("Moving some chunks back to shard0 after empty...");

-admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: shards[1]._id, _waitForDelete: true});
+admin.runCommand(
+    {moveChunk: coll + "", find: {_id: -1}, to: st.shard1.shardName, _waitForDelete: true});

 var metadata =
     st.shard0.getDB("admin").runCommand({getShardVersion: coll + "", fullMetadata: true}).metadata;
@@ -56,7 +58,8 @@
 assert.eq(metadata.pending.length, 0);

 assert(
     admin
-        .runCommand({moveChunk: coll + "", find: {_id: 1}, to: shards[0]._id, _waitForDelete: true})
+        .runCommand(
+            {moveChunk: coll + "", find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true})
         .ok);

 var metadata =

diff --git a/jstests/sharding/coll_epoch_test1.js b/jstests/sharding/coll_epoch_test1.js
index 2cc7c26c60b..2203bed6641 100644
--- a/jstests/sharding/coll_epoch_test1.js
+++ b/jstests/sharding/coll_epoch_test1.js
@@ -14,10 +14,7 @@
 var staleMongos = st.s1;
 var insertMongos = st.s2;

-var shards = [];
-config.shards.find().forEach(function(doc) {
-    shards.push(doc._id);
-});
+var shards = [st.shard0, st.shard1, st.shard2];

 //
 // Test that inserts and queries go to the correct shard even when the collection has been
@@ -29,7 +26,7 @@
 assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
 // TODO(PM-85): Make sure we *always* move the primary after collection lifecycle project is
 // complete
-st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
 assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
 st.configRS.awaitLastOpCommitted();  // TODO: Remove after collection lifecycle project (PM-85)
@@ -49,7 +46,7 @@
 jsTest.log("Re-enabling sharding with a different key...");

-st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
 assert.commandWorked(coll.ensureIndex({notId: 1}));
 assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {notId: 1}}));
@@ -88,10 +85,10 @@
 jsTest.log("Re-creating sharded collection with different primary...");
-var getOtherShard = function(shard) {
-    for (var id in shards) {
-        if (shards[id] != shard)
-            return shards[id];
+var getOtherShard = function(shardId) {
+    for (var i = 0; i < shards.length; ++i) {
+        if (shards[i].shardName != shardId)
+            return shards[i].shardName;
     }
 };

diff --git a/jstests/sharding/coll_epoch_test2.js b/jstests/sharding/coll_epoch_test2.js
index dbed610cad6..c2106ebefbe 100644
--- a/jstests/sharding/coll_epoch_test2.js
+++ b/jstests/sharding/coll_epoch_test2.js
@@ -19,10 +19,7 @@
 var coll = st.s.getCollection("foo.bar");

 insertMongos.getDB("admin").runCommand({setParameter: 1, traceExceptions: true});

-var shards = {};
-config.shards.find().forEach(function(doc) {
-    shards[doc._id] = new Mongo(doc.host);
-});
+var shards = [st.shard0, st.shard1];

 //
 // Set up a sharded collection
@@ -38,10 +35,10 @@
 assert.writeOK(coll.insert({hello: "world"}));

 jsTest.log("Sharding collection across multiple shards...");

-var getOtherShard = function(shard) {
-    for (id in shards) {
-        if (id != shard)
-            return id;
+var getOtherShard = function(shardId) {
+    for (var i = 0; i < shards.length; ++i) {
+        if (shards[i].shardName != shardId)
+            return shards[i].shardName;
     }
 };

diff --git a/jstests/sharding/covered_shard_key_indexes.js b/jstests/sharding/covered_shard_key_indexes.js
index 8435cc8ab41..b00daa01c38 100644
--- a/jstests/sharding/covered_shard_key_indexes.js
+++ b/jstests/sharding/covered_shard_key_indexes.js
@@ -10,7 +10,6 @@
 var st = new ShardingTest({shards: 1});
 var mongos = st.s0;
 var admin = mongos.getDB("admin");
-var shards = mongos.getCollection("config.shards").find().toArray();
 var coll = mongos.getCollection("foo.bar");

 //
@@ -18,7 +17,7 @@ var coll = mongos.getCollection("foo.bar");
 // Tests with _id : 1 shard key
 assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
-printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
 assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
 st.printShardingStatus();

diff --git a/jstests/sharding/cursor_cleanup.js b/jstests/sharding/cursor_cleanup.js
index 5d0ce46f532..741c7f48b3c 100644
--- a/jstests/sharding/cursor_cleanup.js
+++ b/jstests/sharding/cursor_cleanup.js
@@ -6,17 +6,15 @@
 var st = new ShardingTest({shards: 2, mongos: 1});
 var mongos = st.s0;
 var admin = mongos.getDB("admin");
-var config = mongos.getDB("config");
-var shards = config.shards.find().toArray();
 var coll = mongos.getCollection("foo.bar");
 var collUnsharded = mongos.getCollection("foo.baz");

 // Shard collection
 printjson(admin.runCommand({enableSharding: coll.getDB() + ""}));
-printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
 printjson(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
 printjson(admin.runCommand({split: coll + "", middle: {_id: 0}}));
-printjson(admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id}));
+printjson(admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));

 jsTest.log("Collection set up...");
 st.printShardingStatus(true);

diff --git a/jstests/sharding/dump_coll_metadata.js b/jstests/sharding/dump_coll_metadata.js
index eb60af37cb4..dbce60e1290 100644
--- a/jstests/sharding/dump_coll_metadata.js
+++ b/jstests/sharding/dump_coll_metadata.js
@@ -9,11 +9,10 @@
 var mongos = st.s0;
mongos.getCollection("foo.bar"); var admin = mongos.getDB("admin"); - var shards = mongos.getCollection("config.shards").find().toArray(); var shardAdmin = st.shard0.getDB("admin"); assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""})); - st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id); + st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName); assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}})); assert.commandWorked(shardAdmin.runCommand({getShardVersion: coll + ""})); diff --git a/jstests/sharding/empty_doc_results.js b/jstests/sharding/empty_doc_results.js index 8f75d65eb7d..0ee44a76988 100644 --- a/jstests/sharding/empty_doc_results.js +++ b/jstests/sharding/empty_doc_results.js @@ -7,15 +7,14 @@ var mongos = st.s0; var coll = mongos.getCollection("foo.bar"); var admin = mongos.getDB("admin"); - var shards = mongos.getDB("config").shards.find().toArray(); assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()})); - printjson(admin.runCommand({movePrimary: coll.getDB().getName(), to: shards[0]._id})); + printjson(admin.runCommand({movePrimary: coll.getDB().getName(), to: st.shard0.shardName})); assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {_id: 1}})); assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}})); assert.commandWorked( - admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: shards[1]._id})); + admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: st.shard1.shardName})); st.printShardingStatus(); diff --git a/jstests/sharding/exact_shard_key_target.js b/jstests/sharding/exact_shard_key_target.js index 885647ec96e..aef428fc8fc 100644 --- a/jstests/sharding/exact_shard_key_target.js +++ b/jstests/sharding/exact_shard_key_target.js @@ -9,14 +9,13 @@ var st = new ShardingTest({shards: 2, verbose: 4}); var mongos = st.s0; var coll = mongos.getCollection("foo.bar"); var admin = mongos.getDB("admin"); -var shards = mongos.getDB("config").shards.find().toArray(); assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()})); -printjson(admin.runCommand({movePrimary: coll.getDB().getName(), to: shards[0]._id})); +printjson(admin.runCommand({movePrimary: coll.getDB().getName(), to: st.shard0.shardName})); assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {"a.b": 1}})); assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {"a.b": 0}})); assert.commandWorked( - admin.runCommand({moveChunk: coll.getFullName(), find: {"a.b": 0}, to: shards[1]._id})); + admin.runCommand({moveChunk: coll.getFullName(), find: {"a.b": 0}, to: st.shard1.shardName})); st.printShardingStatus(); diff --git a/jstests/sharding/geo_shardedgeonear.js b/jstests/sharding/geo_shardedgeonear.js index 123b4b174cc..9a6a5480f61 100644 --- a/jstests/sharding/geo_shardedgeonear.js +++ b/jstests/sharding/geo_shardedgeonear.js @@ -2,16 +2,13 @@ var coll = 'points'; -function test(db, sharded, indexType) { +function test(st, db, sharded, indexType) { printjson(db); db[coll].drop(); if (sharded) { - var shards = []; + var shards = [st.shard0, st.shard1, st.shard2]; var config = shardedDB.getSiblingDB("config"); - config.shards.find().forEach(function(shard) { - shards.push(shard._id); - }); shardedDB.adminCommand({shardCollection: shardedDB[coll].getFullName(), key: {rand: 1}}); for (var i = 1; i < 10; i++) { @@ -20,7 +17,7 @@ function test(db, sharded, indexType) { 
             shardedDB.adminCommand({
                 moveChunk: shardedDB[coll].getFullName(),
                 find: {rand: i / 10},
-                to: shards[i % shards.length]
+                to: shards[i % shards.length].shardName
             });
         }

@@ -50,5 +47,5 @@
 var shardedDB = sharded.getDB('test');
 sharded.ensurePrimaryShard('test', 'shard0001');
 printjson(shardedDB);
-test(shardedDB, true, '2dsphere');
+test(sharded, shardedDB, true, '2dsphere');
 sharded.stop();

diff --git a/jstests/sharding/large_skip_one_shard.js b/jstests/sharding/large_skip_one_shard.js
index 99c73eb99b3..e1f717a5f5a 100644
--- a/jstests/sharding/large_skip_one_shard.js
+++ b/jstests/sharding/large_skip_one_shard.js
@@ -4,7 +4,6 @@
 var st = new ShardingTest({shards: 2, mongos: 1});
 var mongos = st.s0;
-var shards = mongos.getDB("config").shards.find().toArray();
 var admin = mongos.getDB("admin");

 var collSharded = mongos.getCollection("testdb.collSharded");
@@ -12,10 +11,10 @@ var collUnSharded = mongos.getCollection("testdb.collUnSharded");

 // Set up a sharded and unsharded collection
 assert(admin.runCommand({enableSharding: collSharded.getDB() + ""}).ok);
-printjson(admin.runCommand({movePrimary: collSharded.getDB() + "", to: shards[0]._id}));
+printjson(admin.runCommand({movePrimary: collSharded.getDB() + "", to: st.shard0.shardName}));
 assert(admin.runCommand({shardCollection: collSharded + "", key: {_id: 1}}).ok);
 assert(admin.runCommand({split: collSharded + "", middle: {_id: 0}}).ok);
-assert(admin.runCommand({moveChunk: collSharded + "", find: {_id: 0}, to: shards[1]._id}).ok);
+assert(admin.runCommand({moveChunk: collSharded + "", find: {_id: 0}, to: st.shard1.shardName}).ok);

 function testSelectWithSkip(coll) {
     for (var i = -100; i < 100; i++) {

diff --git a/jstests/sharding/merge_chunks_test_with_md_ops.js b/jstests/sharding/merge_chunks_test_with_md_ops.js
index 591413a109c..63b2504521f 100644
--- a/jstests/sharding/merge_chunks_test_with_md_ops.js
+++ b/jstests/sharding/merge_chunks_test_with_md_ops.js
@@ -7,11 +7,10 @@
 var mongos = st.s0;
 var admin = mongos.getDB("admin");
-var shards = mongos.getCollection("config.shards").find().toArray();
 var coll = mongos.getCollection("foo.bar");

 assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
-st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
 assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));

 st.printShardingStatus();
@@ -30,7 +29,7 @@
 jsTest.log("Moving to another shard...");

 assert.commandWorked(
-    admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id}));
+    admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));

 // Split and merge the chunk repeatedly
 jsTest.log("Splitting and merging repeatedly (again)...");
@@ -46,7 +45,7 @@
 jsTest.log("Moving to original shard...");

 assert.commandWorked(
-    admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[0]._id}));
+    admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard0.shardName}));

 st.printShardingStatus();

diff --git a/jstests/sharding/migrate_overwrite_id.js b/jstests/sharding/migrate_overwrite_id.js
index 1d5bc2f3236..8060a2de8b4 100644
--- a/jstests/sharding/migrate_overwrite_id.js
+++ b/jstests/sharding/migrate_overwrite_id.js
@@ -6,17 +6,14 @@
 var st = new ShardingTest({shards: 2, mongos: 1});
 st.stopBalancer();

 var mongos = st.s0;
-var shards = mongos.getDB("config").shards.find().toArray();
-shards[0].conn = st.shard0;
-shards[1].conn = st.shard1;
 var admin = mongos.getDB("admin");
mongos.getDB("admin"); var coll = mongos.getCollection("foo.bar"); assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok); -printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id})); +printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName})); assert(admin.runCommand({shardCollection: coll + "", key: {skey: 1}}).ok); assert(admin.runCommand({split: coll + "", middle: {skey: 0}}).ok); -assert(admin.runCommand({moveChunk: coll + "", find: {skey: 0}, to: shards[1]._id}).ok); +assert(admin.runCommand({moveChunk: coll + "", find: {skey: 0}, to: st.shard1.shardName}).ok); var id = 12345; @@ -25,17 +22,17 @@ jsTest.log("Inserting a document with id : 12345 into both shards with diff shar assert.writeOK(coll.insert({_id: id, skey: -1})); assert.writeOK(coll.insert({_id: id, skey: 1})); -printjson(shards[0].conn.getCollection(coll + "").find({_id: id}).toArray()); -printjson(shards[1].conn.getCollection(coll + "").find({_id: id}).toArray()); +printjson(st.shard0.getCollection(coll + "").find({_id: id}).toArray()); +printjson(st.shard1.getCollection(coll + "").find({_id: id}).toArray()); assert.eq(2, coll.find({_id: id}).itcount()); jsTest.log("Moving both chunks to same shard..."); -var result = admin.runCommand({moveChunk: coll + "", find: {skey: -1}, to: shards[1]._id}); +var result = admin.runCommand({moveChunk: coll + "", find: {skey: -1}, to: st.shard1.shardName}); printjson(result); -printjson(shards[0].conn.getCollection(coll + "").find({_id: id}).toArray()); -printjson(shards[1].conn.getCollection(coll + "").find({_id: id}).toArray()); +printjson(st.shard0.getCollection(coll + "").find({_id: id}).toArray()); +printjson(st.shard1.getCollection(coll + "").find({_id: id}).toArray()); assert.eq(2, coll.find({_id: id}).itcount()); st.stop(); diff --git a/jstests/sharding/migration_sets_fromMigrate_flag.js b/jstests/sharding/migration_sets_fromMigrate_flag.js index 55dbca8b5fa..a61e2efd7e6 100644 --- a/jstests/sharding/migration_sets_fromMigrate_flag.js +++ b/jstests/sharding/migration_sets_fromMigrate_flag.js @@ -28,12 +28,10 @@ load('./jstests/libs/chunk_manipulation_util.js'); var st = new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 3}}); st.stopBalancer(); - var mongos = st.s0, admin = mongos.getDB('admin'), - shards = mongos.getCollection('config.shards').find().toArray(), dbName = "testDB", - ns = dbName + ".foo", coll = mongos.getCollection(ns), donor = st.shard0, - recipient = st.shard1, donorColl = donor.getCollection(ns), - recipientColl = recipient.getCollection(ns), donorLocal = donor.getDB('local'), - recipientLocal = recipient.getDB('local'); + var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns = dbName + ".foo", + coll = mongos.getCollection(ns), donor = st.shard0, recipient = st.shard1, + donorColl = donor.getCollection(ns), recipientColl = recipient.getCollection(ns), + donorLocal = donor.getDB('local'), recipientLocal = recipient.getDB('local'); // Two chunks // Donor: [0, 2) [2, 5) @@ -41,7 +39,7 @@ load('./jstests/libs/chunk_manipulation_util.js'); jsTest.log('Enable sharding of the collection and pre-split into two chunks....'); assert.commandWorked(admin.runCommand({enableSharding: dbName})); - st.ensurePrimaryShard(dbName, shards[0]._id); + st.ensurePrimaryShard(dbName, st.shard0.shardName); assert.commandWorked(donorColl.createIndex({_id: 1})); assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}})); assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 
@@ -76,7 +74,7 @@ load('./jstests/libs/chunk_manipulation_util.js');
 jsTest.log('Starting chunk migration, pause after cloning...');
 var joinMoveChunk = moveChunkParallel(
-    staticMongod, st.s0.host, {_id: 2}, null, coll.getFullName(), shards[1]._id);
+    staticMongod, st.s0.host, {_id: 2}, null, coll.getFullName(), st.shard1.shardName);

 /**
  * Wait for recipient to finish cloning.

diff --git a/jstests/sharding/migration_with_source_ops.js b/jstests/sharding/migration_with_source_ops.js
index 31b6fff75e9..b837191c4ee 100644
--- a/jstests/sharding/migration_with_source_ops.js
+++ b/jstests/sharding/migration_with_source_ops.js
@@ -29,11 +29,9 @@ load('./jstests/libs/chunk_manipulation_util.js');
 var st = new ShardingTest({shards: 2, mongos: 1});
 st.stopBalancer();

-var mongos = st.s0, admin = mongos.getDB('admin'),
-    shards = mongos.getCollection('config.shards').find().toArray(), dbName = "testDB",
-    ns = dbName + ".foo", coll = mongos.getCollection(ns), donor = st.shard0,
-    recipient = st.shard1, donorColl = donor.getCollection(ns),
-    recipientColl = recipient.getCollection(ns);
+var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns = dbName + ".foo",
+    coll = mongos.getCollection(ns), donor = st.shard0, recipient = st.shard1,
+    donorColl = donor.getCollection(ns), recipientColl = recipient.getCollection(ns);

 /**
  * Enable sharding, and split collection into two chunks.
@@ -44,7 +42,7 @@
 // Recipient:
 jsTest.log('Enabling sharding of the collection and pre-splitting into two chunks....');
 assert.commandWorked(admin.runCommand({enableSharding: dbName}));
-st.ensurePrimaryShard(dbName, shards[0]._id);
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
 assert.commandWorked(admin.runCommand({shardCollection: ns, key: {a: 1}}));
 assert.commandWorked(admin.runCommand({split: ns, middle: {a: 20}}));
@@ -84,7 +82,7 @@
 // Recipient: [20, 40)
 jsTest.log('Starting migration, pause after cloning...');
 var joinMoveChunk = moveChunkParallel(
-    staticMongod, st.s0.host, {a: 20}, null, coll.getFullName(), shards[1]._id);
+    staticMongod, st.s0.host, {a: 20}, null, coll.getFullName(), st.shard1.shardName);

 /**
  * Wait for recipient to finish cloning step.
diff --git a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
index 0acb2cc5609..8bd7fca65c8 100644
--- a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
@@ -29,7 +29,6 @@
 admin.createUser({user: adminUser, pwd: password, roles: ["root"]});
 admin.auth(adminUser, password);

 st.stopBalancer();
-var shards = mongos.getDB("config").shards.find().toArray();

 assert.commandWorked(admin.runCommand({setParameter: 1, traceExceptions: true}));
@@ -39,15 +38,16 @@
 var collUnsharded = mongos.getCollection("fooUnsharded.barUnsharded");

 // Create the unsharded database with shard0 primary
 assert.writeOK(collUnsharded.insert({some: "doc"}));
 assert.writeOK(collUnsharded.remove({}));
-printjson(admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: shards[0]._id}));
+printjson(
+    admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName}));

 // Create the sharded database with shard1 primary
 assert.commandWorked(admin.runCommand({enableSharding: collSharded.getDB().toString()}));
-printjson(admin.runCommand({movePrimary: collSharded.getDB().toString(), to: shards[1]._id}));
+printjson(admin.runCommand({movePrimary: collSharded.getDB().toString(), to: st.shard1.shardName}));
 assert.commandWorked(admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
 assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {_id: 0}}));
-assert.commandWorked(
-    admin.runCommand({moveChunk: collSharded.toString(), find: {_id: -1}, to: shards[0]._id}));
+assert.commandWorked(admin.runCommand(
+    {moveChunk: collSharded.toString(), find: {_id: -1}, to: st.shard0.shardName}));
 st.printShardingStatus();

 var shardedDBUser = "shardedDBUser";

diff --git a/jstests/sharding/mongos_rs_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
index f811c9ad443..4c9ac91294d 100644
--- a/jstests/sharding/mongos_rs_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
@@ -17,7 +17,6 @@
 var mongos = st.s0;
 var admin = mongos.getDB("admin");
-var shards = mongos.getDB("config").shards.find().toArray();

 assert.commandWorked(admin.runCommand({setParameter: 1, traceExceptions: true}));
@@ -27,16 +26,18 @@
 // Create the unsharded database
 assert.writeOK(collUnsharded.insert({some: "doc"}));
 assert.writeOK(collUnsharded.remove({}));
-printjson(admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: shards[0]._id}));
+printjson(
+    admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName}));

 // Create the sharded database
 assert.commandWorked(admin.runCommand({enableSharding: collSharded.getDB().toString()}));
-printjson(admin.runCommand({movePrimary: collSharded.getDB().toString(), to: shards[0]._id}));
+printjson(
+    admin.runCommand({movePrimary: collSharded.getDB().toString(), to: st.shard0.shardName}));
 assert.commandWorked(
     admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
 assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {_id: 0}}));
-assert.commandWorked(
-    admin.runCommand({moveChunk: collSharded.toString(), find: {_id: 0}, to: shards[1]._id}));
+assert.commandWorked(admin.runCommand(
+    {moveChunk: collSharded.toString(), find: {_id: 0}, to: st.shard1.shardName}));

 st.printShardingStatus();

diff --git a/jstests/sharding/move_primary_basic.js b/jstests/sharding/move_primary_basic.js
index 288d4fb03e5..1fd75364f15 100644
--- a/jstests/sharding/move_primary_basic.js
+++ b/jstests/sharding/move_primary_basic.js
@@ -11,10 +11,8 @@
 var kDbName = 'db';

-var shards = mongos.getCollection('config.shards').find().toArray();
-
-var shard0 = shards[0]._id;
-var shard1 = shards[1]._id;
+var shard0 = st.shard0.shardName;
+var shard1 = st.shard1.shardName;

 assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
 st.ensurePrimaryShard(kDbName, shard0);

diff --git a/jstests/sharding/moveprimary_ignore_sharded.js b/jstests/sharding/moveprimary_ignore_sharded.js
index f73f50939cc..8bad709bd1d 100644
--- a/jstests/sharding/moveprimary_ignore_sharded.js
+++ b/jstests/sharding/moveprimary_ignore_sharded.js
@@ -37,21 +37,20 @@
 printjson(adminA.runCommand({shardCollection: "bar.coll1", key: {_id: 1}}));
 printjson(adminA.runCommand({shardCollection: "bar.coll2", key: {_id: 1}}));

 // All collections are now on primary shard
-var fooPrimaryShard = configA.databases.findOne({_id: "foo"}).primary;
-var barPrimaryShard = configA.databases.findOne({_id: "bar"}).primary;
+var fooPrimaryShardId = configA.databases.findOne({_id: "foo"}).primary;
+var barPrimaryShardId = configA.databases.findOne({_id: "bar"}).primary;

-var shards = configA.shards.find().toArray();
-var fooPrimaryShard = fooPrimaryShard == shards[0]._id ? shards[0] : shards[1];
-var fooOtherShard = fooPrimaryShard._id == shards[0]._id ? shards[1] : shards[0];
-var barPrimaryShard = barPrimaryShard == shards[0]._id ? shards[0] : shards[1];
-var barOtherShard = barPrimaryShard._id == shards[0]._id ? shards[1] : shards[0];
+var fooPrimaryShard = (fooPrimaryShardId == st.shard0.shardName) ? st.shard0 : st.shard1;
+var fooOtherShard = (fooPrimaryShard.shardName == st.shard0.shardName) ? st.shard1 : st.shard0;
+var barPrimaryShard = (barPrimaryShardId == st.shard0.shardName) ? st.shard0 : st.shard1;
+var barOtherShard = (barPrimaryShard.shardName == st.shard0.shardName) ? st.shard1 : st.shard0;

 st.printShardingStatus();

 jsTest.log("Running movePrimary for foo through mongosA ...");

 // MongosA should already know about all the collection states
-printjson(adminA.runCommand({movePrimary: "foo", to: fooOtherShard._id}));
+printjson(adminA.runCommand({movePrimary: "foo", to: fooOtherShard.shardName}));

 if (st.configRS) {
     // If we are in CSRS mode need to make sure that mongosB will actually get the most recent
@@ -78,11 +77,11 @@ function realCollectionCount(mydb) {
 }

 // All collections sane
-assert.eq(2, realCollectionCount(new Mongo(fooPrimaryShard.host).getDB("foo")));
-assert.eq(1, realCollectionCount(new Mongo(fooOtherShard.host).getDB("foo")));
+assert.eq(2, realCollectionCount(fooPrimaryShard.getDB("foo")));
+assert.eq(1, realCollectionCount(fooOtherShard.getDB("foo")));

 jsTest.log("Running movePrimary for bar through mongosB ...");
-printjson(adminB.runCommand({movePrimary: "bar", to: barOtherShard._id}));
+printjson(adminB.runCommand({movePrimary: "bar", to: barOtherShard.shardName}));

 // We need to flush the cluster config on mongosA, so it can discover that database 'bar' got
 // moved. Otherwise since the collections are not sharded, we have no way of discovering this.
@@ -104,7 +103,7 @@
 assert.neq(null, mongosB.getCollection("bar.coll1").findOne());
 assert.neq(null, mongosB.getCollection("bar.coll2").findOne());

 // All collections sane
-assert.eq(2, realCollectionCount(new Mongo(barPrimaryShard.host).getDB("bar")));
-assert.eq(1, realCollectionCount(new Mongo(barOtherShard.host).getDB("bar")));
+assert.eq(2, realCollectionCount(barPrimaryShard.getDB("bar")));
+assert.eq(1, realCollectionCount(barOtherShard.getDB("bar")));

 st.stop();

diff --git a/jstests/sharding/pending_chunk.js b/jstests/sharding/pending_chunk.js
index 96089b6d491..14e7c3ebf61 100644
--- a/jstests/sharding/pending_chunk.js
+++ b/jstests/sharding/pending_chunk.js
@@ -9,24 +9,22 @@
 var mongos = st.s0;
 var admin = mongos.getDB('admin');
-var shards = mongos.getCollection('config.shards').find().toArray();
 var coll = mongos.getCollection('foo.bar');
 var ns = coll.getFullName();
 var dbName = coll.getDB().getName();
-var shard0 = st.shard0, shard1 = st.shard1;

 assert.commandWorked(admin.runCommand({enableSharding: dbName}));
-printjson(admin.runCommand({movePrimary: dbName, to: shards[0]._id}));
+printjson(admin.runCommand({movePrimary: dbName, to: st.shard0.shardName}));
 assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));

 jsTest.log('Moving some chunks to shard1...');

 assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
 assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 1}}));
-assert.commandWorked(
-    admin.runCommand({moveChunk: ns, find: {_id: 0}, to: shards[1]._id, _waitForDelete: true}));
-assert.commandWorked(
-    admin.runCommand({moveChunk: ns, find: {_id: 1}, to: shards[1]._id, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand(
+    {moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand(
+    {moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName, _waitForDelete: true}));

 function getMetadata(shard) {
     var admin = shard.getDB('admin'),
@@ -36,24 +34,24 @@
     return metadata;
 }

-var metadata = getMetadata(shard1);
+var metadata = getMetadata(st.shard1);
 assert.eq(metadata.pending[0][0]._id, 1);
 assert.eq(metadata.pending[0][1]._id, MaxKey);

 jsTest.log('Moving some chunks back to shard0 after empty...');

 assert.commandWorked(admin.runCommand(
-    {moveChunk: ns, find: {_id: -1}, to: shards[1]._id, _waitForDelete: true}));
+    {moveChunk: ns, find: {_id: -1}, to: st.shard1.shardName, _waitForDelete: true}));

-metadata = getMetadata(shard0);
+metadata = getMetadata(st.shard0);
 assert.eq(metadata.shardVersion.t, 0);
 assert.neq(metadata.collVersion.t, 0);
 assert.eq(metadata.pending.length, 0);

-assert.commandWorked(
-    admin.runCommand({moveChunk: ns, find: {_id: 1}, to: shards[0]._id, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand(
+    {moveChunk: ns, find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));

-metadata = getMetadata(shard0);
+metadata = getMetadata(st.shard0);
 assert.eq(metadata.shardVersion.t, 0);
 assert.neq(metadata.collVersion.t, 0);
 assert.eq(metadata.pending[0][0]._id, 1);
@@ -65,7 +63,7 @@
 assert.eq(null, coll.findOne({_id: 1}));

-metadata = getMetadata(shard0);
+metadata = getMetadata(st.shard0);
 assert.neq(metadata.shardVersion.t, 0);
 assert.neq(metadata.collVersion.t, 0);
 assert.eq(metadata.chunks[0][0]._id, 1);

diff --git a/jstests/sharding/prefix_shard_key.js b/jstests/sharding/prefix_shard_key.js
index a13b133e3ef..71a7ef03090 100644
--- a/jstests/sharding/prefix_shard_key.js
+++ b/jstests/sharding/prefix_shard_key.js
@@ -14,9 +14,6 @@
 var db = s.getDB("test");
 var admin = s.getDB("admin");
 var config = s.getDB("config");
-var shards = config.shards.find().toArray();
-var shard0 = new Mongo(shards[0].host);
-var shard1 = new Mongo(shards[1].host);

 assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
 s.ensurePrimaryShard('test', 'shard0001');
@@ -127,13 +124,13 @@
     }
 });

-assert.eq(expectedShardCount['shard0000'], shard0.getDB('test').user.find().count());
-assert.eq(expectedShardCount['shard0001'], shard1.getDB('test').user.find().count());
+assert.eq(expectedShardCount['shard0000'], s.shard0.getDB('test').user.find().count());
+assert.eq(expectedShardCount['shard0001'], s.shard1.getDB('test').user.find().count());

 assert.commandWorked(admin.runCommand({split: 'test.user', middle: {num: 70}}));

-assert.eq(expectedShardCount['shard0000'], shard0.getDB('test').user.find().count());
-assert.eq(expectedShardCount['shard0001'], shard1.getDB('test').user.find().count());
+assert.eq(expectedShardCount['shard0000'], s.shard0.getDB('test').user.find().count());
+assert.eq(expectedShardCount['shard0001'], s.shard1.getDB('test').user.find().count());

 //******************Part 3********************
@@ -144,8 +141,9 @@
 // setup new collection on shard0
 var coll2 = db.foo2;
 coll2.drop();
-if (s.getPrimaryShardIdForDatabase(coll2.getDB()) != shards[0]._id) {
-    var moveRes = admin.runCommand({movePrimary: coll2.getDB() + "", to: shards[0]._id});
+if (s.getPrimaryShardIdForDatabase(coll2.getDB()) != s.shard0.shardName) {
+    var moveRes =
+        admin.runCommand({movePrimary: coll2.getDB() + "", to: s.shard0.shardName});
     assert.eq(moveRes.ok, 1, "primary not moved correctly");
 }
@@ -178,7 +176,7 @@
 // movechunk should move ALL docs since they have same value for skey
 moveRes = admin.runCommand(
-    {moveChunk: coll2 + "", find: {skey: 0}, to: shards[1]._id, _waitForDelete: true});
+    {moveChunk: coll2 + "", find: {skey: 0}, to: s.shard1.shardName, _waitForDelete: true});
 assert.eq(moveRes.ok, 1, "movechunk didn't work");

 // Make sure our migration eventually goes through before testing individual shards
@@ -188,8 +186,8 @@
 });

 // check no orphaned docs on the shards
-assert.eq(0, shard0.getCollection(coll2 + "").find().itcount());
-assert.eq(25, shard1.getCollection(coll2 + "").find().itcount());
+assert.eq(0, s.shard0.getCollection(coll2 + "").find().itcount());
+assert.eq(25, s.shard1.getCollection(coll2 + "").find().itcount());

 // and check total
 assert.eq(25, coll2.find().itcount(), "bad total number of docs after move");

diff --git a/jstests/sharding/regex_targeting.js b/jstests/sharding/regex_targeting.js
index 2a8ca1ad7d5..e55e0f6cab9 100644
--- a/jstests/sharding/regex_targeting.js
+++ b/jstests/sharding/regex_targeting.js
@@ -6,7 +6,6 @@
 var mongos = st.s0;
 var admin = mongos.getDB("admin");
-var shards = mongos.getDB("config").shards.find().toArray();

 //
 // Set up multiple collections to target with regex shard keys on two shards
@@ -19,7 +18,7 @@
 var collHashed = mongos.getCollection("foo.barHashed");

 assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
-st.ensurePrimaryShard(coll.getDB().toString(), shards[0]._id);
+st.ensurePrimaryShard(coll.getDB().toString(), st.shard0.shardName);

 //
 // Split the collection so that "abcde-0" and "abcde-1" go on different shards when possible
@@ -30,7 +29,7 @@
 assert.commandWorked(admin.runCommand({
     moveChunk: collSharded.toString(),
     find: {a: 0},
-    to: shards[1]._id,
+    to: st.shard1.shardName,
     _waitForDelete: true
 }));
@@ -41,7 +40,7 @@
 assert.commandWorked(admin.runCommand({
     moveChunk: collCompound.toString(),
     find: {a: 0, b: 0},
-    to: shards[1]._id,
+    to: st.shard1.shardName,
     _waitForDelete: true
 }));
@@ -52,7 +51,7 @@
 assert.commandWorked(admin.runCommand({
     moveChunk: collNested.toString(),
     find: {a: {b: 0}},
-    to: shards[1]._id,
+    to: st.shard1.shardName,
     _waitForDelete: true
 }));

diff --git a/jstests/sharding/return_partial_shards_down.js b/jstests/sharding/return_partial_shards_down.js
index a8eca975283..6d1c215127f 100644
--- a/jstests/sharding/return_partial_shards_down.js
+++ b/jstests/sharding/return_partial_shards_down.js
@@ -8,19 +8,13 @@ var st = new ShardingTest({shards: 3, mongos: 1, other: {mongosOptions: {verbose
 st.stopBalancer();

 var mongos = st.s;
-var config = mongos.getDB("config");
 var admin = mongos.getDB("admin");
-var shards = config.shards.find().toArray();
-
-for (var i = 0; i < shards.length; i++) {
-    shards[i].conn = new Mongo(shards[i].host);
-}

 var collOneShard = mongos.getCollection("foo.collOneShard");
 var collAllShards = mongos.getCollection("foo.collAllShards");

 printjson(admin.runCommand({enableSharding: collOneShard.getDB() + ""}));
-printjson(admin.runCommand({movePrimary: collOneShard.getDB() + "", to: shards[0]._id}));
+printjson(admin.runCommand({movePrimary: collOneShard.getDB() + "", to: st.shard0.shardName}));

 printjson(admin.runCommand({shardCollection: collOneShard + "", key: {_id: 1}}));
 printjson(admin.runCommand({shardCollection: collAllShards + "", key: {_id: 1}}));
@@ -29,8 +23,10 @@ printjson(admin.runCommand({shardCollection: collAllShards + "", key: {_id: 1}})
 printjson(admin.runCommand({split: collAllShards + "", middle: {_id: 0}}));
 printjson(admin.runCommand({split: collAllShards + "", middle: {_id: 1000}}));

-printjson(admin.runCommand({moveChunk: collAllShards + "", find: {_id: 0}, to: shards[1]._id}));
-printjson(admin.runCommand({moveChunk: collAllShards + "", find: {_id: 1000}, to: shards[2]._id}));
+printjson(
+    admin.runCommand({moveChunk: collAllShards + "", find: {_id: 0}, to: st.shard1.shardName}));
+printjson(
+    admin.runCommand({moveChunk: collAllShards + "", find: {_id: 1000}, to: st.shard2.shardName}));

 // Collections are now distributed correctly
 jsTest.log("Collections now distributed correctly.");

diff --git a/jstests/sharding/test_stacked_migration_cleanup.js b/jstests/sharding/test_stacked_migration_cleanup.js
index 523f5de1a0c..b8baba5f5b2 100644
--- a/jstests/sharding/test_stacked_migration_cleanup.js
+++ b/jstests/sharding/test_stacked_migration_cleanup.js
@@ -8,12 +8,11 @@
 var mongos = st.s;
 var admin = mongos.getDB("admin");
-var shards = mongos.getDB("config").shards.find().toArray();
 var coll = mongos.getCollection("foo.bar");

 // Enable sharding of the collection
 assert.commandWorked(mongos.adminCommand({enablesharding: coll.getDB() + ""}));
-st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
 assert.commandWorked(mongos.adminCommand({shardcollection: coll + "", key: {_id: 1}}));

 var numChunks = 30;
@@ -43,7 +42,7 @@
 // Move a bunch of chunks, but don't close the cursor so they stack.
 for (var i = 0; i < numChunks; i++) {
     assert.commandWorked(
-        mongos.adminCommand({moveChunk: coll + "", find: {_id: i}, to: shards[1]._id}));
+        mongos.adminCommand({moveChunk: coll + "", find: {_id: i}, to: st.shard1.shardName}));
 }

 jsTest.log("Dropping and re-creating collection...");

diff --git a/jstests/sharding/trace_missing_docs_test.js b/jstests/sharding/trace_missing_docs_test.js
index 7e5eaf83cc6..b09003617d4 100644
--- a/jstests/sharding/trace_missing_docs_test.js
+++ b/jstests/sharding/trace_missing_docs_test.js
@@ -16,10 +16,9 @@ load('jstests/libs/trace_missing_docs.js');
 var mongos = st.s0;
 var coll = mongos.getCollection("foo.bar");
 var admin = mongos.getDB("admin");
-var shards = mongos.getCollection("config.shards").find().toArray();

 assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
-st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);

 coll.ensureIndex({sk: 1});
 assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {sk: 1}}));
@@ -29,7 +28,7 @@ load('jstests/libs/trace_missing_docs.js');
 assert.writeOK(coll.update({sk: 67890}, {$set: {baz: 'boz'}}));

 assert.commandWorked(admin.runCommand(
-    {moveChunk: coll + "", find: {sk: 0}, to: shards[1]._id, _waitForDelete: true}));
+    {moveChunk: coll + "", find: {sk: 0}, to: st.shard1.shardName, _waitForDelete: true}));

 st.printShardingStatus();

diff --git a/jstests/sharding/upsert_sharded.js b/jstests/sharding/upsert_sharded.js
index c8398142768..9ee8f72d1bc 100644
--- a/jstests/sharding/upsert_sharded.js
+++ b/jstests/sharding/upsert_sharded.js
@@ -9,7 +9,6 @@
 var mongos = st.s0;
 var admin = mongos.getDB("admin");
-var shards = mongos.getCollection("config.shards").find().toArray();
 var coll = mongos.getCollection("foo.bar");

 assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
@@ -33,11 +32,11 @@
     return upsertedField(query, expr, "x");
 };

-st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
 assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {x: 1}}));
 assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 0}}));
 assert.commandWorked(admin.runCommand(
-    {moveChunk: coll + "", find: {x: 0}, to: shards[1]._id, _waitForDelete: true}));
+    {moveChunk: coll + "", find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));

 st.printShardingStatus();
@@ -70,11 +69,11 @@
 coll.drop();

-st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
 assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {'x.x': 1}}));
 assert.commandWorked(admin.runCommand({split: coll + "", middle: {'x.x': 0}}));
 assert.commandWorked(admin.runCommand(
-    {moveChunk: coll + "", find: {'x.x': 0}, to: shards[1]._id, _waitForDelete: true}));
+    {moveChunk: coll + "", find: {'x.x': 0}, to: st.shard1.shardName, _waitForDelete: true}));

 st.printShardingStatus();
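Since st.shardN is itself a live connection, tests that previously opened direct connections with new Mongo(shards[i].host) can also go through the fixture. A short usage sketch under the same assumptions as the example above (foo.bar is illustrative):

    // Direct read against shard0's mongod, bypassing mongos.
    var docsOnShard0 = st.shard0.getCollection("foo.bar").find().itcount();
    print("documents physically on shard0: " + docsOnShard0);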