-rw-r--r--  jstests/multiVersion/upgrade_cluster_v5_to_v6.js                                                      |   11
-rw-r--r--  jstests/sharding/balance_repl.js (renamed from jstests/noPassthroughWithMongod/balance_repl.js)        |    8
-rw-r--r--  jstests/sharding/bulk_shard_insert.js (renamed from jstests/noPassthroughWithMongod/bulk_shard_insert.js) |   18
-rw-r--r--  jstests/sharding/count_slaveok.js                                                                     |   25
-rw-r--r--  jstests/sharding/features2.js                                                                         |    4
-rw-r--r--  jstests/sharding/features3.js                                                                         |    8
-rw-r--r--  jstests/sharding/geo_near_random1.js                                                                  |   14
-rw-r--r--  jstests/sharding/geo_near_random2.js                                                                  |   14
-rw-r--r--  jstests/sharding/group_slaveok.js                                                                     |   18
-rw-r--r--  jstests/sharding/jumbo1.js                                                                            |    2
-rw-r--r--  jstests/sharding/listshards.js                                                                        |  109
-rw-r--r--  jstests/sharding/migrateBig.js                                                                        |    2
-rw-r--r--  jstests/sharding/migrateBig_balancer.js (renamed from jstests/noPassthroughWithMongod/sharding_migrateBigObject.js) |   11
-rw-r--r--  jstests/sharding/mongos_no_detect_sharding.js                                                         |    9
-rw-r--r--  jstests/sharding/mongos_no_replica_set_refresh.js                                                     |   30
-rw-r--r--  jstests/sharding/mrShardedOutputAuth.js                                                               |   14
-rw-r--r--  jstests/sharding/multi_coll_drop.js                                                                   |   17
-rw-r--r--  jstests/sharding/multi_mongos2.js                                                                     |    6
-rw-r--r--  jstests/sharding/names.js                                                                             |   10
-rw-r--r--  jstests/sharding/noUpdateButN1inAnotherCollection.js                                                  |    2
-rw-r--r--  jstests/sharding/parallel.js                                                                          |    3
-rw-r--r--  jstests/sharding/read_does_not_create_namespaces.js                                                   |    8
-rw-r--r--  jstests/sharding/recovering_slaveok.js                                                                |    2
-rw-r--r--  jstests/sharding/shard3.js                                                                            |   12
-rw-r--r--  jstests/sharding/shard_targeting.js                                                                   |    4
-rw-r--r--  jstests/sharding/tag_auto_split.js                                                                    |   10
-rw-r--r--  src/mongo/shell/shardingtest.js                                                                       | 1276
27 files changed, 826 insertions, 821 deletions
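
The bulk of this change converts callers from the old positional signature, ShardingTest(testName, numShards, verboseLevel, numMongos, otherParams), to the single options-object form accepted by the rewritten constructor in src/mongo/shell/shardingtest.js. A minimal sketch of the new calling convention (the option values below are illustrative, not part of this commit):

    var st = new ShardingTest({ name: "example",           // defaults to "test" when omitted
                                shards: 2,                  // a number, or an array/object of per-shard options
                                mongos: 1,                  // number of mongos routers
                                other: { rs: true,          // use replica set shards
                                         rs0: { nodes: 2 },
                                         chunkSize: 1 } });

    var testDB = st.s0.getDB("test");
    assert.writeOK(testDB.foo.insert({ x: 1 }));

    st.stop();
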
diff --git a/jstests/multiVersion/upgrade_cluster_v5_to_v6.js b/jstests/multiVersion/upgrade_cluster_v5_to_v6.js
index 239c58bfbfb..aea6bda3e6b 100644
--- a/jstests/multiVersion/upgrade_cluster_v5_to_v6.js
+++ b/jstests/multiVersion/upgrade_cluster_v5_to_v6.js
@@ -2,8 +2,10 @@
* Tests upgrading a cluster which has 3.0 mongos.
*/
-load( './jstests/multiVersion/libs/multi_rs.js' )
-load( './jstests/multiVersion/libs/multi_cluster.js' )
+load('./jstests/multiVersion/libs/multi_rs.js');
+load('./jstests/multiVersion/libs/multi_cluster.js');
+
+(function() {
/**
* @param isRSCluster {bool} use replica set shards.
@@ -29,9 +31,6 @@ var options = {
var st = new ShardingTest({ shards : 2, mongos : 2, other : options });
-// Just stop balancer, to simulate race conds
-st.setBalancer(false);
-
var shards = st.s0.getDB("config").shards.find().toArray();
var configConnStr = st._configDB;
@@ -124,3 +123,5 @@ st.stop();
runTest(false);
runTest(true);
+
+})();
diff --git a/jstests/noPassthroughWithMongod/balance_repl.js b/jstests/sharding/balance_repl.js
index f54b391a7e3..16ae418eb0e 100644
--- a/jstests/noPassthroughWithMongod/balance_repl.js
+++ b/jstests/sharding/balance_repl.js
@@ -1,9 +1,8 @@
(function() {
"use strict";
-var otherOptions = { rs: true , numReplicas: 2 , chunkSize: 1 , nopreallocj: true };
-var s = new ShardingTest({ shards: 2, verbose: 1, other: otherOptions });
-assert.writeOK(s.config.settings.update({ _id: "balancer" },
- { $set: { stopped: true }}, true ));
+
+var s = new ShardingTest({ shards: 2,
+ other: { rs: true , numReplicas: 2 , chunkSize: 1 } });
var db = s.getDB( "test" );
var bulk = db.foo.initializeUnorderedBulkOp();
@@ -42,4 +41,3 @@ for ( i=0; i<20; i++ ) {
s.stop();
}());
-
diff --git a/jstests/noPassthroughWithMongod/bulk_shard_insert.js b/jstests/sharding/bulk_shard_insert.js
index 4ce7f555f36..a349d770a2f 100644
--- a/jstests/noPassthroughWithMongod/bulk_shard_insert.js
+++ b/jstests/sharding/bulk_shard_insert.js
@@ -1,4 +1,5 @@
// Test bulk inserts with sharding
+(function() {
// Setup randomized test
var seed = new Date().getTime()
@@ -7,12 +8,8 @@ var seed = new Date().getTime()
Random.srand( seed )
print( "Seeded with " + seed )
-
var st = new ShardingTest({ name : jsTestName(), shards : 4, chunkSize: 1 })
-// Turn off balancer initially
-st.setBalancer( false )
-
// Setup sharded collection
var mongos = st.s0
var db = mongos.getDB( jsTestName() )
@@ -37,8 +34,7 @@ print( "\n\n\nDocument size is " + Object.bsonsize({ x : data }) )
var docsInserted = 0;
var balancerOn = false;
-while( docsInserted < numDocs ){
-
+while (docsInserted < numDocs) {
var currBulkSize = ( numDocs - docsInserted > bulkSize ) ? bulkSize : ( numDocs - docsInserted )
var bulk = []
@@ -57,7 +53,7 @@ while( docsInserted < numDocs ){
if( docsInserted > numDocs / 2 && ! balancerOn ){
print( "Turning on balancer after half documents inserted." )
- st.setBalancer( true )
+ st.startBalancer();
balancerOn = true;
}
}
@@ -68,10 +64,9 @@ st.printShardingStatus()
var count = coll.find().count()
var itcount = count //coll.find().itcount()
-print( "Inserted " + docsInserted + " count : " + count + " itcount : " + itcount )
+print("Inserted " + docsInserted + " count : " + count + " itcount : " + itcount);
-st.setBalancer( true )
-sleep( 10000 )
+st.startBalancer();
var count = coll.find().count()
var itcount = coll.find().itcount()
@@ -81,5 +76,6 @@ print( "Inserted " + docsInserted + " count : " + count + " itcount : " + itcoun
// SERVER-3645
// assert.eq( docsInserted, count )
-assert.eq( docsInserted, itcount )
+assert.eq(docsInserted, itcount);
+})();
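
Tests that previously toggled the balancer with setBalancer(bool) now use the startBalancer()/stopBalancer() helpers defined further down in shardingtest.js, which flip the balancer state via sh.setBalancerState() and then wait for it to actually start or stop. The rewritten constructor still disables the balancer unless other.enableBalancer is set. A sketch of the pattern (collection name and options are illustrative):

    var st = new ShardingTest({ shards: 2, other: { chunkSize: 1 } });

    st.stopBalancer();                // balancer is already off by default; shown for symmetry
    // ... perform manual splits and moveChunks that must not race with the balancer ...

    st.startBalancer();               // enables the balancer and waits for it to begin running
    st.awaitBalance("foo", "test");   // waits (60s by default) until the chunk diff across shards is < 2

    st.stop();
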
diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js
index c39162d4da6..470174890c4 100644
--- a/jstests/sharding/count_slaveok.js
+++ b/jstests/sharding/count_slaveok.js
@@ -1,16 +1,14 @@
-/* Tests count and distinct using slaveOk. Also tests a scenario querying a set
- * where only one secondary is up.
- */
+// Tests count and distinct using slaveOk. Also tests a scenario querying a set where only one
+// secondary is up.
+(function() {
-var st = new ShardingTest( testName = "countSlaveOk",
- numShards = 1,
- verboseLevel = 0,
- numMongos = 1,
- { rs : true,
- rs0 : { nodes : 2 }
- })
+var st = new ShardingTest({ name: "countSlaveOk",
+ shards: 1,
+ mongos: 1,
+ other: { rs : true,
+ rs0 : { nodes : 2 } } });
-var rst = st._rs[0].test
+var rst = st._rs[0].test;
// Insert data into replica set
var conn = new Mongo( st.s.host )
@@ -70,5 +68,6 @@ catch( e ){
print( "Non-slaveOk'd connection failed." )
}
-// Finish
-st.stop()
+st.stop();
+
+})();
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index 80a06ae6ba9..bf31e4448b2 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -23,7 +23,7 @@ assert( a.foo.distinct("x").length == 0 || b.foo.distinct("x").length == 0 , "di
assert.eq( 1 , s.onNumShards( "foo" ) , "A1" );
-s.shardGo( "foo" , { x : 1 } , { x : 2 } , { x : 3 }, null, true /* waitForDelete */ );
+s.shardColl( "foo" , { x : 1 } , { x : 2 } , { x : 3 }, null, true /* waitForDelete */ );
assert.eq( 2 , s.onNumShards( "foo" ) , "A2" );
@@ -131,7 +131,7 @@ doMR = function( n ){
doMR( "before" );
assert.eq( 1 , s.onNumShards( "mr" ) , "E1" );
-s.shardGo( "mr" , { x : 1 } , { x : 2 } , { x : 3 }, null, true /* waitForDelete */ );
+s.shardColl( "mr" , { x : 1 } , { x : 2 } , { x : 3 }, null, true /* waitForDelete */ );
assert.eq( 2 , s.onNumShards( "mr" ) , "E1" );
doMR( "after" );
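
shardGo() is renamed to shardColl(); the argument order is unchanged. Based on the implementation added later in this diff, the parameters are (collName, shardKey, splitPoint, moveChunkFind, dbName, waitForDelete), where splitPoint and moveChunkFind may be false to skip those steps. An annotated sketch (values are illustrative):

    var s = new ShardingTest({ shards: 2 });

    s.shardColl("foo",        // collection name (or a collection object)
                { x: 1 },     // shard key
                { x: 2 },     // split point; false skips the split
                { x: 3 },     // find document of the chunk to move to the other shard; false skips the move
                null,         // dbName; defaults to "test" when a bare collection name is given
                true);        // passed as _waitForDelete to the moveChunk command
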
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index c1fec85d988..04f54655afd 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -9,13 +9,11 @@
var s = new ShardingTest({shards: 2,
mongos: 1,
verbose:1});
+
var db = s.getDB("test"); // db variable name is required due to startParallelShell()
var numDocs = 10000;
db.foo.drop();
-// stop the balancer
-s.stopBalancer()
-
// shard test.foo and add a split point
s.adminCommand({enablesharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
@@ -27,7 +25,7 @@ s.adminCommand({moveChunk: "test.foo", find: {_id: 3},
to: s.getNonPrimaries("test")[0], _waitForDelete: true});
// restart balancer
-s.setBalancer(true)
+s.startBalancer();
// insert 10k small documents into the sharded collection
var bulk = db.foo.initializeUnorderedBulkOp();
@@ -150,4 +148,4 @@ if ( x.all.shard0000 > 0 ) {
x = db._adminCommand({"fsync" :1, lock:true});
assert(!x.ok, "lock should fail: " + tojson(x));
-s.stop()
+s.stop();
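
features3.js also relies on the ensurePrimaryShard() helper added to shardingtest.js, which runs movePrimary and accepts the "it is already the primary" response, so that subsequent chunk moves have a deterministic source shard. A short sketch (shard name is illustrative):

    var s = new ShardingTest({ shards: 2, mongos: 1 });

    s.adminCommand({ enablesharding: "test" });
    s.ensurePrimaryShard("test", "shard0001");          // movePrimary under the hood
    s.adminCommand({ shardcollection: "test.foo", key: { _id: 1 } });
    s.adminCommand({ split: "test.foo", middle: { _id: 3 } });
    s.adminCommand({ moveChunk: "test.foo", find: { _id: 3 },
                     to: s.getNonPrimaries("test")[0], _waitForDelete: true });
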
diff --git a/jstests/sharding/geo_near_random1.js b/jstests/sharding/geo_near_random1.js
index c899ff8b776..ea34d428a4d 100644
--- a/jstests/sharding/geo_near_random1.js
+++ b/jstests/sharding/geo_near_random1.js
@@ -1,10 +1,10 @@
-// this tests all points using $near
+// This tests all points using $near
+(function() {
+
load("jstests/libs/geo_near_random.js");
var testName = "geo_near_random1";
-var s = new ShardingTest( testName , 3 );
-
-s.stopBalancer()
+var s = new ShardingTest({ name: testName, shards: 3 });
db = s.getDB("test"); // global db
@@ -30,7 +30,7 @@ for (var i = (test.nPts/10); i < test.nPts; i+= (test.nPts/10)){
}
// Turn balancer back on, for actual tests
-// s.setBalancer( true ) // SERVER-13365
+// s.startBalancer() // SERVER-13365
printShardingSizes()
@@ -41,4 +41,6 @@ test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
-s.stop()
+s.stop();
+
+})();
diff --git a/jstests/sharding/geo_near_random2.js b/jstests/sharding/geo_near_random2.js
index 1fd139017a6..92652292272 100644
--- a/jstests/sharding/geo_near_random2.js
+++ b/jstests/sharding/geo_near_random2.js
@@ -1,10 +1,10 @@
-// this tests 1% of all points using $near and $nearSphere
+// This tests 1% of all points using $near and $nearSphere
+(function() {
+
load("jstests/libs/geo_near_random.js");
var testName = "geo_near_random2";
-var s = new ShardingTest( testName , 3 );
-
-s.stopBalancer();
+var s = new ShardingTest({ name: testName, shards: 3 });
db = s.getDB("test"); // global db
@@ -29,7 +29,7 @@ for (var i = (test.nPts/10); i < test.nPts; i+= (test.nPts/10)){
}
//Turn balancer back on, for actual tests
-// s.setBalancer( true ); // SERVER-13365
+// s.startBalancer(); // SERVER-13365
printShardingSizes()
@@ -47,4 +47,6 @@ test.testPt(test.mkPt(0.8), opts);
test.testPt(test.mkPt(0.8), opts);
test.testPt(test.mkPt(0.8), opts);
-s.stop()
+s.stop();
+
+})();
diff --git a/jstests/sharding/group_slaveok.js b/jstests/sharding/group_slaveok.js
index 461daeb2973..a5c20f51ea5 100644
--- a/jstests/sharding/group_slaveok.js
+++ b/jstests/sharding/group_slaveok.js
@@ -1,12 +1,11 @@
// Tests group using slaveOk
+(function() {
-var st = new ShardingTest( testName = "groupSlaveOk",
- numShards = 1,
- verboseLevel = 0,
- numMongos = 1,
- { rs : true,
- rs0 : { nodes : 2 }
- })
+var st = new ShardingTest({ name: "groupSlaveOk",
+ shards: 1,
+ mongos: 1,
+ other :{ rs : true,
+ rs0 : { nodes : 2 } } });
var rst = st._rs[0].test
@@ -61,5 +60,6 @@ catch( e ){
print( "Non-slaveOk'd connection failed." + tojson(e) )
}
-// Finish
-st.stop()
+st.stop();
+
+})();
diff --git a/jstests/sharding/jumbo1.js b/jstests/sharding/jumbo1.js
index b55c41bbeb9..02f18530533 100644
--- a/jstests/sharding/jumbo1.js
+++ b/jstests/sharding/jumbo1.js
@@ -11,8 +11,6 @@ s.adminCommand( { shardcollection : "test.foo" , key : { x : 1 } } );
db = s.getDB( "test" );
-sh.setBalancerState( false )
-
big = ""
while ( big.length < 10000 )
big += "."
diff --git a/jstests/sharding/listshards.js b/jstests/sharding/listshards.js
index 035837a752b..b4c87eda7ab 100644
--- a/jstests/sharding/listshards.js
+++ b/jstests/sharding/listshards.js
@@ -2,66 +2,69 @@
// Test the listShards command by adding stand-alone and replica-set shards to a cluster
//
(function() {
- 'use strict';
+'use strict';
- function checkShardName(shardName, shardsArray) {
- var found = false;
- shardsArray.forEach(function(shardObj) {
- if (shardObj._id === shardName) {
- found = true;
- return;
- }
- });
- return found;
- }
+function checkShardName(shardName, shardsArray) {
+ var found = false;
+ shardsArray.forEach(function(shardObj) {
+ if (shardObj._id === shardName) {
+ found = true;
+ return;
+ }
+ });
+ return found;
+}
- var shardTest = new ShardingTest('listShardsTest', 1, 0, 1, { useHostname: true });
+var shardTest = new ShardingTest({ name: 'listShardsTest',
+ shards: 1,
+ mongos: 1,
+ other: { useHostname: true } });
- var mongos = shardTest.s0;
- var res = mongos.adminCommand('listShards');
- assert.commandWorked(res, 'listShards command failed');
- var shardsArray = res.shards;
- assert.eq(shardsArray.length, 1);
+var mongos = shardTest.s0;
+var res = mongos.adminCommand('listShards');
+assert.commandWorked(res, 'listShards command failed');
+var shardsArray = res.shards;
+assert.eq(shardsArray.length, 1);
- // add standalone mongod
- var standaloneShard = MongoRunner.runMongod({useHostName: true});
- res = shardTest.admin.runCommand({ addShard: standaloneShard.host, name: 'standalone' });
- assert.commandWorked(res, 'addShard command failed');
- res = mongos.adminCommand('listShards');
- assert.commandWorked(res, 'listShards command failed');
- shardsArray = res.shards;
- assert.eq(shardsArray.length, 2);
- assert(checkShardName('standalone', shardsArray),
- 'listShards command didn\'t return standalone shard: ' + tojson(shardsArray));
+// add standalone mongod
+var standaloneShard = MongoRunner.runMongod({useHostName: true});
+res = shardTest.admin.runCommand({ addShard: standaloneShard.host, name: 'standalone' });
+assert.commandWorked(res, 'addShard command failed');
+res = mongos.adminCommand('listShards');
+assert.commandWorked(res, 'listShards command failed');
+shardsArray = res.shards;
+assert.eq(shardsArray.length, 2);
+assert(checkShardName('standalone', shardsArray),
+ 'listShards command didn\'t return standalone shard: ' + tojson(shardsArray));
- // add replica set named 'repl'
- var rs1 = new ReplSetTest({ name: 'repl', nodes: 1, useHostName: true});
- rs1.startSet();
- rs1.initiate();
- res = shardTest.admin.runCommand({ addShard: rs1.getURL()});
- assert.commandWorked(res, 'addShard command failed');
- res = mongos.adminCommand('listShards');
- assert.commandWorked(res, 'listShards command failed');
- shardsArray = res.shards;
- assert.eq(shardsArray.length, 3);
- assert(checkShardName('repl', shardsArray),
- 'listShards command didn\'t return replica set shard: ' + tojson(shardsArray));
+// add replica set named 'repl'
+var rs1 = new ReplSetTest({ name: 'repl', nodes: 1, useHostName: true});
+rs1.startSet();
+rs1.initiate();
+res = shardTest.admin.runCommand({ addShard: rs1.getURL()});
+assert.commandWorked(res, 'addShard command failed');
+res = mongos.adminCommand('listShards');
+assert.commandWorked(res, 'listShards command failed');
+shardsArray = res.shards;
+assert.eq(shardsArray.length, 3);
+assert(checkShardName('repl', shardsArray),
+ 'listShards command didn\'t return replica set shard: ' + tojson(shardsArray));
- // remove 'repl' shard
- assert.soon(function() {
- var res = shardTest.admin.runCommand({ removeShard: 'repl' });
- assert.commandWorked(res, 'removeShard command failed');
- return res.state === 'completed';
- }, 'failed to remove the replica set shard');
+// remove 'repl' shard
+assert.soon(function() {
+ var res = shardTest.admin.runCommand({ removeShard: 'repl' });
+ assert.commandWorked(res, 'removeShard command failed');
+ return res.state === 'completed';
+}, 'failed to remove the replica set shard');
- res = mongos.adminCommand('listShards');
- assert.commandWorked(res, 'listShards command failed');
- shardsArray = res.shards;
- assert.eq(shardsArray.length, 2);
- assert(!checkShardName('repl', shardsArray),
- 'listShards command returned removed replica set shard: ' + tojson(shardsArray));
+res = mongos.adminCommand('listShards');
+assert.commandWorked(res, 'listShards command failed');
+shardsArray = res.shards;
+assert.eq(shardsArray.length, 2);
+assert(!checkShardName('repl', shardsArray),
+ 'listShards command returned removed replica set shard: ' + tojson(shardsArray));
- rs1.stopSet();
- shardTest.stop();
+rs1.stopSet();
+shardTest.stop();
})();
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index bd3f4e9de0b..534c8476565 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -2,7 +2,6 @@
var s = new ShardingTest({ name: "migrateBig",
shards: 2,
- mongos: 1,
other: { chunkSize: 1 } });
s.config.settings.update( { _id: "balancer" }, { $set : { _waitForDelete : true } } , true);
@@ -58,7 +57,6 @@ for ( i=0; i<20; i+= 2 ) {
db.printShardingStatus()
-
s.config.settings.update( { _id: "balancer" }, { $set : { stopped: false } } , true );
assert.soon( function(){ var x = s.chunkDiff( "foo" , "test" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 8 * 60 * 1000 , 2000 )
diff --git a/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js b/jstests/sharding/migrateBig_balancer.js
index cfa91a6ec77..a46614a3699 100644
--- a/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js
+++ b/jstests/sharding/migrateBig_balancer.js
@@ -1,4 +1,8 @@
-var st = new ShardingTest({numShards: 2, nopreallocj: "", enableBalancer: true});
+(function() {
+
+var st = new ShardingTest({ name: 'migrateBig_balancer',
+ shards: 2,
+ other: { enableBalancer: true } });
var mongos = st.s;
var admin = mongos.getDB("admin");
@@ -34,8 +38,7 @@ admin.runCommand({ shardcollection : "" + coll, key : { _id : 1 } })
assert.lt( 5 , mongos.getDB( "config" ).chunks.find( { ns : "test.stuff" } ).count() , "not enough chunks" );
assert.soon(
- function(){
-
+ function() {
// On *extremely* slow or variable systems, we've seen migrations fail in the critical section and
// kill the server. Do an explicit check for this. SERVER-8781
// TODO: Remove once we can better specify what systems to run what tests on.
@@ -61,3 +64,5 @@ assert.soon(
"never migrated" , 10 * 60 * 1000 , 1000 );
st.stop();
+
+})();
diff --git a/jstests/sharding/mongos_no_detect_sharding.js b/jstests/sharding/mongos_no_detect_sharding.js
index 1c001a6a76b..a0423381fc9 100644
--- a/jstests/sharding/mongos_no_detect_sharding.js
+++ b/jstests/sharding/mongos_no_detect_sharding.js
@@ -1,6 +1,9 @@
// Tests whether new sharding is detected on insert by mongos
+(function() {
-var st = new ShardingTest( name = "test", shards = 1, verbose = 2, mongos = 2 );
+var st = new ShardingTest({ name: "mongos_no_detect_sharding",
+ shards: 1,
+ mongos: 2 });
var mongos = st.s
var config = mongos.getDB("config")
@@ -40,4 +43,6 @@ config.printShardingStatus( true )
assert.eq( coll.getShardVersion().ok, 1 )
assert.eq( 101, coll.find().itcount() )
-st.stop()
+st.stop();
+
+})();
diff --git a/jstests/sharding/mongos_no_replica_set_refresh.js b/jstests/sharding/mongos_no_replica_set_refresh.js
index 6dbb54b44b2..fb853189cf0 100644
--- a/jstests/sharding/mongos_no_replica_set_refresh.js
+++ b/jstests/sharding/mongos_no_replica_set_refresh.js
@@ -1,21 +1,20 @@
// Tests whether new sharding is detected on insert by mongos
load("jstests/replsets/rslib.js");
+
(function () {
-var st = new ShardingTest(
- name = "test",
- shards = 1,
- verbose = 2,
- mongos = 2,
- other = {
- rs0: {
- nodes: [
- {rsConfig: {priority: 10}},
- {},
- {},
- ],
- },
- }
-);
+
+var st = new ShardingTest({ name: 'mongos_no_replica_set_refresh',
+ shards: 1,
+ mongos: 2,
+ other: {
+ rs0: {
+ nodes: [
+ {rsConfig: {priority: 10}},
+ {},
+ {},
+ ],
+ }
+ } });
var rsObj = st._rs[0].test;
assert.commandWorked(
@@ -94,4 +93,5 @@ assert.soon( function(){ return configServerURL().indexOf( removedNode.host ) >=
jsTestLog( "Done..." );
st.stop();
+
}());
diff --git a/jstests/sharding/mrShardedOutputAuth.js b/jstests/sharding/mrShardedOutputAuth.js
index cf1bf612085..c8ea6d490ad 100644
--- a/jstests/sharding/mrShardedOutputAuth.js
+++ b/jstests/sharding/mrShardedOutputAuth.js
@@ -4,6 +4,8 @@
* from a separate input database while authenticated to both.
*/
+(function() {
+
function doMapReduce(connection, outputDb) {
// clean output db and run m/r
outputDb.numbers_out.drop();
@@ -43,12 +45,10 @@ function assertFailure(configDb, outputDb) {
}
-var st = new ShardingTest( testName = "mrShardedOutputAuth",
- numShards = 1,
- verboseLevel = 0,
- numMongos = 1,
- { extraOptions : {"keyFile" : "jstests/libs/key1"} }
- );
+var st = new ShardingTest({ name: "mrShardedOutputAuth",
+ shards: 1,
+ mongos: 1,
+ other: { extraOptions : {"keyFile" : "jstests/libs/key1"} } });
// Setup the users to the input, output and admin databases
var mongos = st.s;
@@ -94,3 +94,5 @@ doMapReduce(outputAuthConn, outputDb);
assertFailure(configDb, outputDb);
st.stop();
+
+})();
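
mrShardedOutputAuth.js supplies the key file through other.extraOptions; the rewritten constructor reads otherParams.keyFile || otherParams.extraOptions.keyFile, so the key file can be given in either place. A sketch of the two forms (the key path comes from the test above):

    var st = new ShardingTest({ shards: 1, mongos: 1,
                                other: { extraOptions: { keyFile: "jstests/libs/key1" } } });

    // ... or, equivalently ...
    var st2 = new ShardingTest({ shards: 1, mongos: 1,
                                 other: { keyFile: "jstests/libs/key1" } });
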
diff --git a/jstests/sharding/multi_coll_drop.js b/jstests/sharding/multi_coll_drop.js
index fc285f6a969..feb3cd41e60 100644
--- a/jstests/sharding/multi_coll_drop.js
+++ b/jstests/sharding/multi_coll_drop.js
@@ -1,14 +1,15 @@
// Tests the dropping and re-adding of a collection
+(function() {
-var st = new ShardingTest( name = "multidrop", shards = 1, verbose = 0, mongos = 2 )
+var st = new ShardingTest({ name: "multidrop", shards: 1, mongos: 2 });
-var mA = st.s0
-var mB = st.s1
+var mA = st.s0;
+var mB = st.s1;
-var coll = mA.getCollection( name + ".coll" )
-var collB = mB.getCollection( coll + "" )
+var coll = mA.getCollection('multidrop.coll');
+var collB = mB.getCollection('multidrop.coll');
-jsTestLog( "Shard and split collection..." )
+jsTestLog( "Shard and split collection..." );
var admin = mA.getDB( "admin" )
admin.runCommand({ enableSharding : coll.getDB() + "" })
@@ -40,6 +41,6 @@ collB.find().itcount()
jsTestLog( "Done." )
-st.stop()
-
+st.stop();
+})();
diff --git a/jstests/sharding/multi_mongos2.js b/jstests/sharding/multi_mongos2.js
index bad6afd3a54..92c9993ecdd 100644
--- a/jstests/sharding/multi_mongos2.js
+++ b/jstests/sharding/multi_mongos2.js
@@ -26,7 +26,7 @@ res = s2.getDB( "admin" ).runCommand( { moveChunk: "test.existing" , find : { _i
assert.eq(1 , res.ok, tojson(res));
-s1.setBalancer( true )
+s1.startBalancer();
printjson( s2.adminCommand( {"getShardVersion" : "test.existing" } ) )
printjson( new Mongo(s1.getServer( "test" ).name).getDB( "admin" ).adminCommand( {"getShardVersion" : "test.existing" } ) )
@@ -54,7 +54,7 @@ s1.getDB('test').existing3.insert({_id:1})
assert.eq(1, s1.getDB('test').existing3.count({_id:1}));
assert.eq(1, s2.getDB('test').existing3.count({_id:1}));
-s1.stopBalancer()
+s1.stopBalancer();
s2.adminCommand( { shardcollection : "test.existing3" , key : { _id : 1 } } );
assert.commandWorked(s2.adminCommand({ split: "test.existing3", middle: { _id: 5 }}));
@@ -62,7 +62,7 @@ assert.commandWorked(s2.adminCommand({ split: "test.existing3", middle: { _id: 5
res = s1.getDB( "admin" ).runCommand( { moveChunk: "test.existing3" , find : { _id : 1 } , to : s1.getOther( s1.getServer( "test" ) ).name } );
assert.eq(1 , res.ok, tojson(res));
-s1.setBalancer( true )
+s1.startBalancer();
s1.stop();
diff --git a/jstests/sharding/names.js b/jstests/sharding/names.js
index 17e98f82b30..5b30dc436c4 100644
--- a/jstests/sharding/names.js
+++ b/jstests/sharding/names.js
@@ -1,6 +1,10 @@
// Test that having replica set names the same as the names of other shards works fine
+(function() {
-var st = new ShardingTest( name = "test", shards = 0, verbose = 2, mongos = 2, other = { rs : true } )
+var st = new ShardingTest({ name: "HostNames",
+ shards: 0,
+ mongos: 2,
+ other: { rs : true } });
var rsA = new ReplSetTest({ nodes : 2, name : "rsA" })
var rsB = new ReplSetTest({ nodes : 2, name : "rsB" })
@@ -41,4 +45,6 @@ assert.eq(2, config.shards.count(), "Error re-adding a shard");
assert.eq(rsB.getURL(), config.shards.findOne({_id:rsA.name})["host"], "Wrong host for shard rsA 3");
assert.eq(rsA.getURL(), config.shards.findOne({_id:rsB.name})["host"], "Wrong host for shard rsB 3");
-st.stop() \ No newline at end of file
+st.stop();
+
+})();
diff --git a/jstests/sharding/noUpdateButN1inAnotherCollection.js b/jstests/sharding/noUpdateButN1inAnotherCollection.js
index 1bfdb33a0d7..73b6dd8a9ec 100644
--- a/jstests/sharding/noUpdateButN1inAnotherCollection.js
+++ b/jstests/sharding/noUpdateButN1inAnotherCollection.js
@@ -6,7 +6,7 @@ function debug( str ) {
var name = "badNonUpdate";
debug("Starting sharded cluster test stuff");
-s = new ShardingTest( {name: name, shards : 2, mongos : 2, verbose:5, nopreallocj : true });
+var s = new ShardingTest({name: name, shards : 2, mongos : 2, verbose: 5 });
var mongosA=s.s0;
var mongosB=s.s1;
diff --git a/jstests/sharding/parallel.js b/jstests/sharding/parallel.js
index e4c2b462851..af0bdcc8d5b 100644
--- a/jstests/sharding/parallel.js
+++ b/jstests/sharding/parallel.js
@@ -20,7 +20,8 @@ for (var i=0; i<N; i+=(N/12)) {
to: "shard000" + Math.floor(Math.random() * numShards)});
}
-s.setBalancer( true )
+s.startBalancer();
+
var bulk = db.foo.initializeUnorderedBulkOp();
for ( i=0; i<N; i++ )
bulk.insert({ _id: i });
diff --git a/jstests/sharding/read_does_not_create_namespaces.js b/jstests/sharding/read_does_not_create_namespaces.js
index d07280fc0d3..db3c098c0fc 100644
--- a/jstests/sharding/read_does_not_create_namespaces.js
+++ b/jstests/sharding/read_does_not_create_namespaces.js
@@ -1,6 +1,8 @@
// This test ensures that just attempting to read from a non-existent database or collection won't
// cause entries to be created in the catalog.
-var shardingTest = new ShardingTest('read_does_not_create_namespaces', 1);
+(function() {
+
+var shardingTest = new ShardingTest({ name: 'read_does_not_create_namespaces', shards: 1 });
var db = shardingTest.getDB('NonExistentDB');
assert.isnull(db.nonExistentColl.findOne({}));
@@ -9,4 +11,6 @@ assert.isnull(db.nonExistentColl.findOne({}));
assert.isnull(shardingTest.getDB('config').databases.findOne({ _id: 'NonExistentDB' }));
assert.eq(-1, shardingTest.shard0.getDBNames().indexOf('NonExistentDB'));
-shardingTest.stop(); \ No newline at end of file
+shardingTest.stop();
+
+})();
diff --git a/jstests/sharding/recovering_slaveok.js b/jstests/sharding/recovering_slaveok.js
index b446c80918c..c4efc5bd666 100644
--- a/jstests/sharding/recovering_slaveok.js
+++ b/jstests/sharding/recovering_slaveok.js
@@ -38,7 +38,7 @@ coll.save({ _id : 1, b : "b", date : new Date() });
print("2: shard collection");
-shardTest.shardGo(coll, /* shardBy */ { _id : 1 }, /* splitAt */ { _id : 0 });
+shardTest.shardColl(coll, /* shardBy */ { _id : 1 }, /* splitAt */ { _id : 0 });
print("3: test normal and slaveOk queries");
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index fedf2ed72b8..abae4f19eb7 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -1,9 +1,9 @@
-// shard3.js
+(function() {
// Include helpers for analyzing explain output.
load("jstests/libs/analyze_plan.js");
-s = new ShardingTest({name: "shard3", shards: 2, mongos: 2, other: {enableBalancer: true}});
+var s = new ShardingTest({name: "shard3", shards: 2, mongos: 2, other: { enableBalancer: true }});
s2 = s._mongos[1];
@@ -17,11 +17,11 @@ if (s.configRS) {
}
assert( sh.getBalancerState() , "A1" )
-sh.setBalancerState( false )
+sh.setBalancerState(false);
assert( ! sh.getBalancerState() , "A2" )
-sh.setBalancerState( true )
+sh.setBalancerState(true);
assert( sh.getBalancerState() , "A3" )
-sh.setBalancerState( false )
+sh.setBalancerState(false);
assert( ! sh.getBalancerState() , "A4" )
s.config.databases.find().forEach( printjson )
@@ -173,3 +173,5 @@ y = dbb.foo.stats()
printjson( y )
s.stop();
+
+})();
diff --git a/jstests/sharding/shard_targeting.js b/jstests/sharding/shard_targeting.js
index 98840c0c3ac..1189e4e6cf3 100644
--- a/jstests/sharding/shard_targeting.js
+++ b/jstests/sharding/shard_targeting.js
@@ -17,7 +17,7 @@ var res;
//
// Shard key is the same with command name.
-s.shardGo("foo", {count: 1}, { count: "" })
+s.shardColl("foo", {count: 1}, { count: "" })
for (var i=0; i<50; i++) {
db.foo.insert({count: i}); // chunk [MinKey, ""), including numbers
@@ -40,7 +40,7 @@ assert.eq(res.n, 100);
//
db.foo.drop();
// Shard key is the same with command name.
-s.shardGo("foo", {mapReduce: 1}, { mapReduce: "" })
+s.shardColl("foo", {mapReduce: 1}, { mapReduce: "" })
for (var i=0; i<50; i++) {
db.foo.insert({mapReduce: i}); // to the chunk including number
diff --git a/jstests/sharding/tag_auto_split.js b/jstests/sharding/tag_auto_split.js
index a239ad88c01..8d8e4a35ef7 100644
--- a/jstests/sharding/tag_auto_split.js
+++ b/jstests/sharding/tag_auto_split.js
@@ -1,4 +1,5 @@
-// test to make sure that tag ranges get split
+// Test to make sure that tag ranges get split
+(function() {
var s = new ShardingTest({ name: "tag_auto_split",
shards: 2,
@@ -28,7 +29,10 @@ printjson( sh.status() );
s.stop();
//test without full shard key on tags
-s = new ShardingTest( "tag_auto_split2", 2, 0, 1, { nopreallocj : true, enableBalancer : true } );
+s = new ShardingTest({ name: "tag_auto_split2",
+ shards: 2,
+ mongos: 1,
+ other: { enableBalancer : true } });
db = s.getDB( "test" );
@@ -66,3 +70,5 @@ assert.eq( 1, s.config.chunks.find( {min : {_id : 10 , a : MinKey} } ).count(),
"bad chunk range boundary" );
s.stop();
+
+})();
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index eee3bdbc791..ad93c6977b0 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -80,85 +80,606 @@
* c0, c1, ... {Mongo} - same as config0, config1, ...
* configRS - If the config servers are a replset, this will contain the config ReplSetTest object
*/
-ShardingTest = function( testName , numShards , verboseLevel , numMongos , otherParams ) {
- this._startTime = new Date();
+var ShardingTest = function(params) {
+
+ /**
+ * Attempts to open a connection to the specified connection string or throws if unable to
+ * connect.
+ */
+ function _connectWithRetry(url) {
+ var conn;
+ assert.soon(function() {
+ try {
+ conn = new Mongo(url);
+ return true;
+ } catch (e) {
+ print("Error connecting to " + url + ": " + e);
+ return false;
+ }
+ });
- // Check if testName is an object, if so, pull params from there
- var keyFile = undefined
- var numConfigs = 3;
- otherParams = Object.merge( otherParams || {}, {} )
+ return conn;
+ }
+
+ /**
+ * Constructs a human-readable string representing a chunk's range.
+ */
+ function _rangeToString(r) {
+ return tojsononeline(r.min) + " -> " + tojsononeline(r.max);
+ }
+
+ // Used for counting the test duration
+ var _startTime = new Date();
+
+ // ShardingTest API
+
+ this.getRSEntry = function(setName) {
+ for (var i=0; i<this._rs.length; i++)
+ if (this._rs[i].setName == setName)
+ return this._rs[i];
+ throw Error("can't find rs: " + setName);
+ };
+
+ this.getDB = function(name) {
+ return this.s.getDB(name);
+ };
+
+ this.getServerName = function(dbname) {
+ var x = this.config.databases.findOne({ _id : "" + dbname });
+ if (x)
+ return x.primary;
+ this.config.databases.find().forEach(printjson);
+ throw Error("couldn't find dbname: " + dbname + " total: " + this.config.databases.count());
+ };
+
+ this.getNonPrimaries = function(dbname) {
+ var x = this.config.databases.findOne({ _id : dbname });
+ if (! x) {
+ this.config.databases.find().forEach(printjson);
+ throw Error("couldn't find dbname: " + dbname + " total: " + this.config.databases.count());
+ }
- if( isObject( testName ) ) {
- var params = Object.merge( testName, {} )
+ return this.config.shards.find({ _id : { $ne : x.primary } }).map(function(z) { return z._id; })
+ };
- testName = params.name || "test"
- otherParams = Object.merge(otherParams, params);
- otherParams = Object.merge(params.other || {}, otherParams);
+ this.getConnNames = function() {
+ var names = [];
+ for (var i=0; i<this._connections.length; i++) {
+ names.push(this._connections[i].name);
+ }
+ return names;
+ };
- numShards = otherParams.hasOwnProperty('shards') ? otherParams.shards : 2;
- verboseLevel = otherParams.hasOwnProperty('verbose') ? otherParams.verbose : 0;
- numMongos = otherParams.hasOwnProperty('mongos') ? otherParams.mongos : 1;
- numConfigs = otherParams.hasOwnProperty('config') ? otherParams.config : numConfigs;
+ this.getServer = function(dbname) {
+ var name = this.getServerName(dbname);
- var tempCount = 0;
+ var x = this.config.shards.findOne({ _id : name });
+ if (x)
+ name = x.host;
+
+ var rsName = null;
+ if (name.indexOf("/") > 0)
+ rsName = name.substring(0 , name.indexOf("/"));
+
+ for (var i=0; i<this._connections.length; i++) {
+ var c = this._connections[i];
+ if (connectionURLTheSame(name , c.name) ||
+ connectionURLTheSame(rsName , c.name))
+ return c;
+ }
+
+ throw Error("can't find server for: " + dbname + " name:" + name);
+ };
+
+ this.normalize = function(x) {
+ var z = this.config.shards.findOne({ host : x });
+ if (z)
+ return z._id;
+ return x;
+ };
+
+ this.getOther = function(one) {
+ if (this._connections.length < 2)
+ throw Error("getOther only works with 2 servers");
+
+ if (one._mongo)
+ one = one._mongo
+
+ for(var i = 0; i < this._connections.length; i++) {
+ if (this._connections[i] != one) return this._connections[i]
+ }
- // Allow specifying options like :
- // { mongos : [ { noprealloc : "" } ], config : [ { smallfiles : "" } ], shards : { rs : true, d : true } }
- if( Array.isArray( numShards ) ){
- for( var i = 0; i < numShards.length; i++ ){
- otherParams[ "d" + i ] = numShards[i];
+ return null;
+ };
+
+ this.getAnother = function(one) {
+ if (this._connections.length < 2)
+ throw Error("getAnother() only works with multiple servers");
+
+ if (one._mongo)
+ one = one._mongo
+
+ for(var i = 0; i < this._connections.length; i++) {
+ if (this._connections[i] == one)
+ return this._connections[(i + 1) % this._connections.length];
+ }
+ };
+
+ this.getFirstOther = function(one) {
+ for (var i=0; i<this._connections.length; i++) {
+ if (this._connections[i] != one)
+ return this._connections[i];
+ }
+ throw Error("impossible");
+ };
+
+ this.stop = function() {
+ for (var i = 0; i < this._mongos.length; i++) {
+ MongoRunner.stopMongos(this._mongos[i].port);
+ }
+
+ for (var i = 0; i < this._connections.length; i++) {
+ if (this._rs[i]) {
+ this._rs[i].test.stopSet(15);
+ } else {
+ MongoRunner.stopMongod(this._connections[i].port);
}
+ }
+
+ if (this.configRS) {
+ this.configRS.stopSet();
+ } else {
+ // Old style config triplet
+ for (var i = 0; i < this._configServers.length; i++) {
+ MongoRunner.stopMongod(this._configServers[i]);
+ }
+ }
- numShards = numShards.length;
+ for (var i = 0; i < this._alldbpaths.length; i++) {
+ resetDbpath(MongoRunner.dataPath + this._alldbpaths[i]);
}
- else if( isObject( numShards ) ){
- tempCount = 0;
- for( var i in numShards ) {
- otherParams[ i ] = numShards[i];
- tempCount++;
+
+ var timeMillis = new Date().getTime() - _startTime.getTime();
+
+ print('*** ShardingTest ' + this._testName + " completed successfully in " + (timeMillis / 1000) + " seconds ***");
+ };
+
+ this.adminCommand = function(cmd) {
+ var res = this.admin.runCommand(cmd);
+ if (res && res.ok == 1)
+ return true;
+
+ throw _getErrorWithCode(res, "command " + tojson(cmd) + " failed: " + tojson(res));
+ };
+
+ this.printChangeLog = function() {
+ this.config.changelog.find().forEach(function(z) {
+ var msg = z.server + "\t" + z.time + "\t" + z.what;
+ for (var i = z.what.length; i < 15; i++)
+ msg += " ";
+
+ msg += " " + z.ns + "\t";
+ if (z.what == "split") {
+ msg += _rangeToString(z.details.before) + " -->> (" + _rangeToString(z.details.left) + "), (" + _rangeToString(z.details.right) + ")";
}
-
- numShards = tempCount;
+ else if (z.what == "multi-split") {
+ msg += _rangeToString(z.details.before) + " -->> (" + z.details.number + "/" + z.details.of + " " + _rangeToString(z.details.chunk) + ")";
+ }
+ else {
+ msg += tojsononeline(z.details);
+ }
+
+ print("ShardingTest " + msg);
+ });
+ };
+
+ this.getChunksString = function(ns) {
+ var q = {}
+ if (ns)
+ q.ns = ns;
+
+ var s = "";
+ this.config.chunks.find(q).sort({ ns : 1 , min : 1 }).forEach(
+ function(z) {
+ s += " " + z._id + "\t" + z.lastmod.t + "|" + z.lastmod.i + "\t" + tojson(z.min) + " -> " + tojson(z.max) + " " + z.shard + " " + z.ns + "\n";
+ }
+ );
+
+ return s;
+ };
+
+ this.printChunks = function(ns) {
+ print("ShardingTest " + this.getChunksString(ns));
+ };
+
+ this.printShardingStatus = function() {
+ printShardingStatus(this.config);
+ };
+
+ this.printCollectionInfo = function(ns , msg) {
+ var out = "";
+ if (msg)
+ out += msg + "\n";
+ out += "sharding collection info: " + ns + "\n";
+ for (var i=0; i<this._connections.length; i++) {
+ var c = this._connections[i];
+ out += " mongod " + c + " " + tojson(c.getCollection(ns).getShardVersion() , " " , true) + "\n";
+ }
+ for (var i=0; i<this._mongos.length; i++) {
+ var c = this._mongos[i];
+ out += " mongos " + c + " " + tojson(c.getCollection(ns).getShardVersion() , " " , true) + "\n";
}
- if( Array.isArray( numMongos ) ){
- for( var i = 0; i < numMongos.length; i++ ) {
- otherParams[ "s" + i ] = numMongos[i];
+ out += this.getChunksString(ns);
+
+ print("ShardingTest " + out);
+ };
+
+ this.sync = function() {
+ this.adminCommand("connpoolsync");
+ };
+
+ this.onNumShards = function(collName , dbName) {
+ this.sync(); // we should sync since we're going directly to mongod here
+ dbName = dbName || "test";
+ var num=0;
+ for (var i=0; i<this._connections.length; i++)
+ if (this._connections[i].getDB(dbName).getCollection(collName).count() > 0)
+ num++;
+ return num;
+ };
+
+ this.shardCounts = function(collName , dbName) {
+ this.sync(); // we should sync since we're going directly to mongod here
+ dbName = dbName || "test";
+ var counts = {}
+ for (var i=0; i<this._connections.length; i++)
+ counts[i] = this._connections[i].getDB(dbName).getCollection(collName).count();
+ return counts;
+ };
+
+ this.chunkCounts = function(collName , dbName) {
+ dbName = dbName || "test";
+ var x = {}
+
+ this.config.shards.find().forEach(
+ function(z) {
+ x[z._id] = 0;
+ }
+ );
+
+ this.config.chunks.find({ ns : dbName + "." + collName }).forEach(
+ function(z) {
+ if (x[z.shard])
+ x[z.shard]++
+ else
+ x[z.shard] = 1;
}
-
- numMongos = numMongos.length;
+ );
+
+ return x;
+ };
+
+ this.chunkDiff = function(collName , dbName) {
+ var c = this.chunkCounts(collName , dbName);
+ var min = 100000000;
+ var max = 0;
+ for (var s in c) {
+ if (c[s] < min)
+ min = c[s];
+ if (c[s] > max)
+ max = c[s];
}
- else if( isObject( numMongos ) ){
- tempCount = 0;
- for( var i in numMongos ) {
- otherParams[ i ] = numMongos[i];
- tempCount++;
+ print("ShardingTest input: " + tojson(c) + " min: " + min + " max: " + max );
+ return max - min;
+ };
+
+ // Waits up to one minute for the difference in chunks between the most loaded shard and least
+ // loaded shard to be 0 or 1, indicating that the collection is well balanced.
+ // This should only be called after creating a big enough chunk difference to trigger balancing.
+ this.awaitBalance = function(collName , dbName , timeToWait) {
+ timeToWait = timeToWait || 60000;
+ var shardingTest = this;
+ assert.soon(function() {
+ var x = shardingTest.chunkDiff(collName , dbName);
+ print("chunk diff: " + x);
+ return x < 2;
+ } , "no balance happened", 60000);
+ };
+
+ this.getShardNames = function() {
+ var shards = [];
+ this.s.getCollection("config.shards").find().forEach(function(shardDoc) {
+ shards.push(shardDoc._id);
+ });
+ return shards;
+ };
+
+ this.getShard = function(coll, query, includeEmpty) {
+ var shards = this.getShardsForQuery(coll, query, includeEmpty)
+ assert.eq(shards.length, 1)
+ return shards[0]
+ };
+
+ /**
+ * Returns the shards on which documents matching a particular query reside.
+ */
+ this.getShardsForQuery = function(coll, query, includeEmpty) {
+ if (! coll.getDB)
+ coll = this.s.getCollection(coll)
+
+ var explain = coll.find(query).explain("executionStats")
+ var shards = []
+
+ var execStages = explain.executionStats.executionStages;
+ var plannerShards = explain.queryPlanner.winningPlan.shards;
+
+ if (execStages.shards) {
+ for(var i = 0; i < execStages.shards.length; i++) {
+ var hasResults = execStages.shards[i].executionStages.nReturned &&
+ execStages.shards[i].executionStages.nReturned > 0;
+ if (includeEmpty || hasResults) {
+ shards.push(plannerShards[i].connectionString);
+ }
}
-
- numMongos = tempCount;
}
-
- if( Array.isArray( numConfigs ) ){
- for( var i = 0; i < numConfigs.length; i++ ){
- otherParams[ "c" + i ] = numConfigs[i];
+
+ for(var i = 0; i < shards.length; i++) {
+ for(var j = 0; j < this._connections.length; j++) {
+ if (connectionURLTheSame( this._connections[j] , shards[i])) {
+ shards[i] = this._connections[j]
+ break;
+ }
}
+ }
+
+ return shards;
+ };
- numConfigs = numConfigs.length
+ this.isSharded = function(collName) {
+ var collName = "" + collName
+ var dbName = undefined
+
+ if (typeof collName.getCollectionNames == 'function') {
+ dbName = "" + collName
+ collName = undefined
}
- else if( isObject( numConfigs ) ){
- tempCount = 0;
- for( var i in numConfigs ) {
- otherParams[ i ] = numConfigs[i];
- tempCount++;
- }
- numConfigs = tempCount;
+
+ if (dbName) {
+ var x = this.config.databases.findOne({ _id : dbName })
+ if (x) return x.partitioned
+ else return false
+ }
+
+ if (collName) {
+ var x = this.config.collections.findOne({ _id : collName })
+ if (x) return true
+ else return false
+ }
+ };
+
+ this.shardColl = function(collName , key , split , move , dbName, waitForDelete) {
+ split = (split != false ? (split || key) : split)
+ move = (split != false && move != false ? (move || split) : false)
+
+ if (collName.getDB)
+ dbName = "" + collName.getDB()
+ else
+ dbName = dbName || "test";
+
+ var c = dbName + "." + collName;
+ if (collName.getDB)
+ c = "" + collName
+
+ var isEmpty = (this.s.getCollection(c).count() == 0);
+
+ if (! this.isSharded(dbName))
+ this.s.adminCommand({ enableSharding : dbName })
+
+ var result = this.s.adminCommand({ shardcollection : c , key : key })
+ if (! result.ok) {
+ printjson(result)
+ assert(false)
+ }
+
+ if (split == false) return;
+
+ result = this.s.adminCommand({ split : c , middle : split });
+ if (! result.ok) {
+ printjson(result)
+ assert(false)
+ }
+
+ if (move == false) return;
+
+ var result = null
+ for(var i = 0; i < 5; i++) {
+ result = this.s.adminCommand({ movechunk : c , find : move , to : this.getOther(this.getServer(dbName)).name, _waitForDelete: waitForDelete });
+ if (result.ok) break;
+ sleep(5 * 1000);
+ }
+
+ printjson(result);
+ assert(result.ok);
+ };
+
+ this.stopBalancer = function(timeout, interval) {
+ if (typeof db == "undefined") db = undefined;
+ var oldDB = db;
+ db = this.config;
+
+ try {
+ sh.setBalancerState(false);
+ sh.waitForBalancer(false, timeout, interval);
+ }
+ finally {
+ db = oldDB;
+ }
+ };
+
+ this.startBalancer = function(timeout, interval) {
+ if (typeof db == "undefined") db = undefined;
+ var oldDB = db;
+ db = this.config;
+
+ try {
+ sh.setBalancerState(true);
+ sh.waitForBalancer(true, timeout, interval);
+ }
+ finally {
+ db = oldDB;
+ }
+ };
+
+ this.isAnyBalanceInFlight = function() {
+ if (this.config.locks.find({ _id : { $ne : "balancer" }, state : 2 }).count() > 0)
+ return true;
+
+ var allCurrent = this.s.getDB("admin").currentOp().inprog;
+ for (var i = 0; i < allCurrent.length; i++) {
+ if (allCurrent[i].desc &&
+ allCurrent[i].desc.indexOf("cleanupOldData") == 0)
+ return true;
+ }
+ return false;
+ };
+
+ /**
+ * Kills the mongos with index n.
+ */
+ this.stopMongos = function(n) {
+ MongoRunner.stopMongos(this['s' + n].port);
+ };
+
+ /**
+ * Kills the mongod with index n.
+ */
+ this.stopMongod = function(n) {
+ MongoRunner.stopMongod(this['d' + n].port);
+ };
+
+ /**
+ * Restarts a previously stopped mongos.
+ *
+ * If opts is specified, the new mongos is started using those options. Otherwise, it is started
+ * with its previous parameters.
+ *
+ * Warning: Overwrites the old s (if n = 0) admin, config, and sn member variables.
+ */
+ this.restartMongos = function(n, opts) {
+ var mongos = this['s' + n];
+
+ if (opts === undefined) {
+ opts = this['s' + n];
+ opts.restart = true;
+ }
+
+ MongoRunner.stopMongos(mongos);
+
+ var newConn = MongoRunner.runMongos(opts);
+
+ this['s' + n] = newConn;
+ if (n == 0) {
+ this.s = newConn;
+ this.admin = newConn.getDB('admin');
+ this.config = newConn.getDB('config');
+ }
+ };
+
+ /**
+ * Restarts a previously stopped mongod using the same parameters as before.
+ *
+ * Warning: Overwrites the old dn member variables.
+ */
+ this.restartMongod = function(n) {
+ var mongod = this['d' + n];
+ MongoRunner.stopMongod(mongod);
+ mongod.restart = true;
+
+ var newConn = MongoRunner.runMongod(mongod);
+
+ this['d' + n] = newConn;
+ };
+
+ /**
+ * Helper method for setting primary shard of a database and making sure that it was successful.
+ * Note: first mongos needs to be up.
+ */
+ this.ensurePrimaryShard = function(dbName, shardName) {
+ var db = this.s0.getDB('admin');
+ var res = db.adminCommand({ movePrimary: dbName, to: shardName });
+ assert(res.ok || res.errmsg == "it is already the primary", tojson(res));
+ };
+
+ // ShardingTest initialization
+
+ assert(isObject(params), 'ShardingTest configuration must be a JSON object');
+
+ var testName = params.name || "test";
+ var otherParams = Object.merge(params, params.other || {});
+
+ var numShards = otherParams.hasOwnProperty('shards') ? otherParams.shards : 2;
+ var verboseLevel = otherParams.hasOwnProperty('verbose') ? otherParams.verbose : 0;
+ var numMongos = otherParams.hasOwnProperty('mongos') ? otherParams.mongos : 1;
+ var numConfigs = otherParams.hasOwnProperty('config') ? otherParams.config : 3;
+
+ // Allow specifying options like :
+ // { mongos : [ { noprealloc : "" } ], config : [ { smallfiles : "" } ], shards : { rs : true, d : true } }
+ if (Array.isArray(numShards)) {
+ for(var i = 0; i < numShards.length; i++) {
+ otherParams[ "d" + i ] = numShards[i];
+ }
+
+ numShards = numShards.length;
+ }
+ else if (isObject(numShards)) {
+ var tempCount = 0;
+ for(var i in numShards) {
+ otherParams[ i ] = numShards[i];
+ tempCount++;
}
+
+ numShards = tempCount;
+ }
+
+ if (Array.isArray(numMongos)) {
+ for(var i = 0; i < numMongos.length; i++) {
+ otherParams[ "s" + i ] = numMongos[i];
+ }
+
+ numMongos = numMongos.length;
+ }
+ else if (isObject(numMongos)) {
+ var tempCount = 0;
+ for(var i in numMongos) {
+ otherParams[ i ] = numMongos[i];
+ tempCount++;
+ }
+
+ numMongos = tempCount;
+ }
+
+ if (Array.isArray(numConfigs)) {
+ for(var i = 0; i < numConfigs.length; i++) {
+ otherParams[ "c" + i ] = numConfigs[i];
+ }
+
+ numConfigs = numConfigs.length
+ }
+ else if (isObject(numConfigs)) {
+ var tempCount = 0;
+ for(var i in numConfigs) {
+ otherParams[ i ] = numConfigs[i];
+ tempCount++;
+ }
+
+ numConfigs = tempCount;
}
otherParams.extraOptions = otherParams.extraOptions || {};
otherParams.useHostname = otherParams.useHostname == undefined ?
true : otherParams.useHostname;
- keyFile = otherParams.keyFile || otherParams.extraOptions.keyFile
+ var keyFile = otherParams.keyFile || otherParams.extraOptions.keyFile
this._testName = testName
this._otherParams = otherParams
@@ -166,8 +687,8 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
var pathOpts = this.pathOpts = { testName : testName }
var hasRS = false
- for( var k in otherParams ){
- if( k.startsWith( "rs" ) && otherParams[k] != undefined ){
+ for(var k in otherParams) {
+ if (k.startsWith("rs") && otherParams[k] != undefined) {
hasRS = true
break
}
@@ -180,19 +701,19 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
this._rsObjects = []
// Start the MongoD servers (shards)
- for ( var i = 0; i < numShards; i++ ) {
- if( otherParams.rs || otherParams["rs" + i] ){
+ for (var i = 0; i < numShards; i++) {
+ if (otherParams.rs || otherParams["rs" + i]) {
var setName = testName + "-rs" + i;
rsDefaults = { useHostname : otherParams.useHostname,
noJournalPrealloc : otherParams.nopreallocj,
oplogSize : 16,
- pathOpts : Object.merge( pathOpts, { shard : i } )}
+ pathOpts : Object.merge(pathOpts, { shard : i })}
- rsDefaults = Object.merge( rsDefaults, ShardingTest.rsOptions || {} )
- rsDefaults = Object.merge( rsDefaults, otherParams.rs )
- rsDefaults = Object.merge( rsDefaults, otherParams.rsOptions )
- rsDefaults = Object.merge( rsDefaults, otherParams["rs" + i] )
+ rsDefaults = Object.merge(rsDefaults, ShardingTest.rsOptions || {})
+ rsDefaults = Object.merge(rsDefaults, otherParams.rs)
+ rsDefaults = Object.merge(rsDefaults, otherParams.rsOptions)
+ rsDefaults = Object.merge(rsDefaults, otherParams["rs" + i])
rsDefaults.nodes = rsDefaults.nodes || otherParams.numReplicas
var numReplicas = rsDefaults.nodes || 3;
@@ -200,7 +721,7 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
var protocolVersion = rsDefaults.protocolVersion;
delete rsDefaults.protocolVersion;
- print( "Replica set test!" )
+ print("Replica set test!")
var rs = new ReplSetTest({ name : setName,
nodes : numReplicas,
@@ -219,8 +740,8 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
this._rsObjects[i] = rs
- this._alldbpaths.push( null )
- this._connections.push( null )
+ this._alldbpaths.push(null)
+ this._connections.push(null)
}
else {
var options = {
@@ -231,20 +752,20 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
keyFile: keyFile
};
- options = Object.merge( options, ShardingTest.shardOptions || {} )
+ options = Object.merge(options, ShardingTest.shardOptions || {})
- if( otherParams.shardOptions && otherParams.shardOptions.binVersion ){
+ if (otherParams.shardOptions && otherParams.shardOptions.binVersion) {
otherParams.shardOptions.binVersion =
- MongoRunner.versionIterator( otherParams.shardOptions.binVersion )
+ MongoRunner.versionIterator(otherParams.shardOptions.binVersion)
}
- options = Object.merge( options, otherParams.shardOptions )
- options = Object.merge( options, otherParams["d" + i] )
+ options = Object.merge(options, otherParams.shardOptions)
+ options = Object.merge(options, otherParams["d" + i])
- var conn = MongoRunner.runMongod( options );
+ var conn = MongoRunner.runMongod(options);
- this._alldbpaths.push( testName +i )
- this._connections.push( conn );
+ this._alldbpaths.push(testName +i)
+ this._connections.push(conn);
this["shard" + i] = conn
this["d" + i] = conn
@@ -255,19 +776,19 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
// Do replication on replica sets if required
for (var i = 0; i < numShards; i++) {
- if(!otherParams.rs && !otherParams["rs" + i]) {
+ if (!otherParams.rs && !otherParams["rs" + i]) {
continue;
}
var rs = this._rs[i].test;
- rs.getMaster().getDB( "admin" ).foo.save( { x : 1 } )
+ rs.getMaster().getDB("admin").foo.save({ x : 1 })
if (keyFile) {
authutil.asCluster(rs.nodes, keyFile, function() { rs.awaitReplication(); });
}
rs.awaitSecondaryNodes();
- var rsConn = new Mongo( rs.getURL() );
+ var rsConn = new Mongo(rs.getURL());
rsConn.name = rs.getURL();
this._connections[i] = rsConn
this["shard" + i] = rsConn
@@ -282,8 +803,7 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
otherParams.sync = true;
}
- this._configServers = []
- this._configServersAreRS = !otherParams.sync;
+ this._configServers = [];
// Start the config servers
if (otherParams.sync) {
@@ -295,26 +815,26 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
for (var i = 0; i < numConfigs; i++) {
var options = { useHostname : otherParams.useHostname,
noJournalPrealloc : otherParams.nopreallocj,
- pathOpts : Object.merge( pathOpts, { config : i } ),
+ pathOpts : Object.merge(pathOpts, { config : i }),
dbpath : "$testName-config$config",
keyFile : keyFile,
// Ensure that journaling is always enabled for config servers.
journal : "",
configsvr : "" };
- options = Object.merge( options, ShardingTest.configOptions || {} )
+ options = Object.merge(options, ShardingTest.configOptions || {})
if (otherParams.configOptions && otherParams.configOptions.binVersion) {
otherParams.configOptions.binVersion =
- MongoRunner.versionIterator( otherParams.configOptions.binVersion )
+ MongoRunner.versionIterator(otherParams.configOptions.binVersion)
}
- options = Object.merge( options, otherParams.configOptions )
- options = Object.merge( options, otherParams["c" + i] )
+ options = Object.merge(options, otherParams.configOptions)
+ options = Object.merge(options, otherParams["c" + i])
- var conn = MongoRunner.runMongod( options )
+ var conn = MongoRunner.runMongod(options)
- this._alldbpaths.push( testName + "-config" + i )
+ this._alldbpaths.push(testName + "-config" + i)
this._configServers.push(conn);
configNames.push(conn.name);
@@ -327,7 +847,6 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
}
else {
// Using replica set for config servers
-
var rstOptions = { useHostName : otherParams.useHostname,
keyFile : keyFile,
name: testName + "-configRS",
@@ -342,14 +861,14 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
storageEngine : "wiredTiger",
};
- startOptions = Object.merge( startOptions, ShardingTest.configOptions || {} )
+ startOptions = Object.merge(startOptions, ShardingTest.configOptions || {})
- if ( otherParams.configOptions && otherParams.configOptions.binVersion ) {
+ if (otherParams.configOptions && otherParams.configOptions.binVersion) {
otherParams.configOptions.binVersion =
- MongoRunner.versionIterator( otherParams.configOptions.binVersion )
+ MongoRunner.versionIterator(otherParams.configOptions.binVersion)
}
- startOptions = Object.merge( startOptions, otherParams.configOptions )
+ startOptions = Object.merge(startOptions, otherParams.configOptions)
var nodeOptions = [];
for (var i = 0; i < numConfigs; ++i) {
nodeOptions.push(otherParams["c" + i] || {});
@@ -377,36 +896,23 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
printjson("config servers: " + this._configDB);
- var connectWithRetry = function(url) {
- var conn = null;
- assert.soon( function() {
- try {
- conn = new Mongo(url);
- return true;
- } catch (e) {
- print("Error connecting to " + url + ": " + e);
- return false;
- }
- });
- return conn;
- }
- this._configConnection = connectWithRetry(this._configDB);
+ this._configConnection = _connectWithRetry(this._configDB);
- print( "ShardingTest " + this._testName + " :\n" + tojson( { config : this._configDB, shards : this._connections } ) );
+ print("ShardingTest " + this._testName + " :\n" + tojson({ config : this._configDB, shards : this._connections }));
- if ( numMongos == 0 && !otherParams.noChunkSize ) {
- if ( keyFile ) {
+ if (numMongos == 0 && !otherParams.noChunkSize) {
+ if (keyFile) {
throw Error("Cannot set chunk size without any mongos when using auth");
} else {
- this._configConnection.getDB( "config" ).settings.insert(
- { _id : "chunksize" , value : otherParams.chunksize || otherParams.chunkSize || 50 } );
+ this._configConnection.getDB("config").settings.insert(
+ { _id : "chunksize" , value : otherParams.chunksize || otherParams.chunkSize || 50 });
}
}
this._mongos = []
// Start the MongoS servers
- for (var i = 0; i < ( ( numMongos == 0 ? -1 : numMongos ) || 1 ); i++ ){
+ for (var i = 0; i < ((numMongos == 0 ? -1 : numMongos) || 1); i++) {
options = {
useHostname: otherParams.useHostname,
pathOpts: Object.merge(pathOpts, {mongos: i}),
@@ -419,16 +925,16 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
options.chunkSize = otherParams.chunksize || otherParams.chunkSize || 50;
}
- options = Object.merge( options, ShardingTest.mongosOptions || {} )
+ options = Object.merge(options, ShardingTest.mongosOptions || {})
if (otherParams.mongosOptions && otherParams.mongosOptions.binVersion) {
otherParams.mongosOptions.binVersion =
MongoRunner.versionIterator(otherParams.mongosOptions.binVersion);
}
- options = Object.merge( options, otherParams.mongosOptions )
- options = Object.merge( options, otherParams.extraOptions )
- options = Object.merge( options, otherParams["s" + i] )
+ options = Object.merge(options, otherParams.mongosOptions)
+ options = Object.merge(options, otherParams.extraOptions)
+ options = Object.merge(options, otherParams["s" + i])
conn = MongoRunner.runMongos(options);
@@ -444,7 +950,7 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
}
// Disable the balancer unless it is explicitly turned on
- if ( !otherParams.enableBalancer ) {
+ if (!otherParams.enableBalancer) {
if (keyFile) {
authutil.assertAuthenticate(this._mongos, 'admin', {
user: '__system',
@@ -474,7 +980,7 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
this._connections.forEach(
function(z) {
var n = z.name;
- if (!n){
+ if (!n) {
n = z.host;
if (!n) {
n = z;
@@ -489,540 +995,12 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
shardNames.push(result.shardAdded);
z.shardName = result.shardAdded;
}
- );
+ );
}
if (jsTestOptions().keyFile) {
- jsTest.authenticate( this._configConnection );
- jsTest.authenticateNodes( this._configServers );
- jsTest.authenticateNodes( this._mongos );
- }
-}
-
-ShardingTest.prototype.getRSEntry = function( setName ){
- for ( var i=0; i<this._rs.length; i++ )
- if ( this._rs[i].setName == setName )
- return this._rs[i];
- throw Error( "can't find rs: " + setName );
-}
-
-ShardingTest.prototype.getConfigIndex = function( config ){
-
- // Assume config is a # if not a conn object
- if( ! isObject( config ) ) config = getHostName() + ":" + config
-
- for( var i = 0; i < this._configServers.length; i++ ){
- if( connectionURLTheSame( this._configServers[i], config ) ) return i
- }
-
- return -1
-}
-
-ShardingTest.prototype.getDB = function( name ){
- return this.s.getDB( name );
-}
-
-ShardingTest.prototype.getServerName = function( dbname ){
- var x = this.config.databases.findOne( { _id : "" + dbname } );
- if ( x )
- return x.primary;
- this.config.databases.find().forEach( printjson );
- throw Error( "couldn't find dbname: " + dbname + " total: " + this.config.databases.count() );
-}
-
-
-ShardingTest.prototype.getNonPrimaries = function( dbname ){
- var x = this.config.databases.findOne( { _id : dbname } );
- if ( ! x ){
- this.config.databases.find().forEach( printjson );
- throw Error( "couldn't find dbname: " + dbname + " total: " + this.config.databases.count() );
+ jsTest.authenticate(this._configConnection);
+ jsTest.authenticateNodes(this._configServers);
+ jsTest.authenticateNodes(this._mongos);
}
-
- return this.config.shards.find( { _id : { $ne : x.primary } } ).map( function(z){ return z._id; } )
-}
-
-
-ShardingTest.prototype.getConnNames = function(){
- var names = [];
- for ( var i=0; i<this._connections.length; i++ ){
- names.push( this._connections[i].name );
- }
- return names;
-}
-
-ShardingTest.prototype.getServer = function( dbname ){
- var name = this.getServerName( dbname );
-
- var x = this.config.shards.findOne( { _id : name } );
- if ( x )
- name = x.host;
-
- var rsName = null;
- if ( name.indexOf( "/" ) > 0 )
- rsName = name.substring( 0 , name.indexOf( "/" ) );
-
- for ( var i=0; i<this._connections.length; i++ ){
- var c = this._connections[i];
- if ( connectionURLTheSame( name , c.name ) ||
- connectionURLTheSame( rsName , c.name ) )
- return c;
- }
-
- throw Error( "can't find server for: " + dbname + " name:" + name );
-
-}
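getServerName, getServer and getNonPrimaries together resolve a database to its primary shard. A usage sketch, assuming st is the ShardingTest instance and the "test" database already exists:

// Sketch: talk to the primary shard of "test" directly, bypassing mongos.
var primaryName = st.getServerName("test");   // shard _id, e.g. "shard0000"
var primaryConn = st.getServer("test");       // direct connection to that shard (or its replica set)
var otherIds = st.getNonPrimaries("test");    // _ids of every other shard
print("primary: " + primaryName + ", others: " + tojson(otherIds));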
-
-ShardingTest.prototype.normalize = function( x ){
- var z = this.config.shards.findOne( { host : x } );
- if ( z )
- return z._id;
- return x;
-}
-
-ShardingTest.prototype.getOther = function( one ){
- if ( this._connections.length < 2 )
- throw Error("getOther only works with 2 servers");
-
- if ( one._mongo )
- one = one._mongo
-
- for( var i = 0; i < this._connections.length; i++ ){
- if( this._connections[i] != one ) return this._connections[i]
- }
-
- return null
-}
-
-ShardingTest.prototype.getAnother = function( one ){
- if(this._connections.length < 2)
- throw Error("getAnother() only works with multiple servers");
-
- if ( one._mongo )
- one = one._mongo
-
- for(var i = 0; i < this._connections.length; i++){
- if(this._connections[i] == one)
- return this._connections[(i + 1) % this._connections.length];
- }
-}
-
-ShardingTest.prototype.getFirstOther = function( one ){
- for ( var i=0; i<this._connections.length; i++ ){
- if ( this._connections[i] != one )
- return this._connections[i];
- }
- throw Error("impossible");
-}
-
-ShardingTest.prototype.stop = function(){
- for (var i = 0; i < this._mongos.length; i++) {
- MongoRunner.stopMongos(this._mongos[i].port);
- }
-
- for (var i = 0; i < this._connections.length; i++) {
- if (this._rs[i]) {
- this._rs[i].test.stopSet(15);
- } else {
- MongoRunner.stopMongod(this._connections[i].port);
- }
- }
-
- if (this._configServersAreRS) {
- this.configRS.stopSet();
- } else {
- // Old style config triplet
- for (var i = 0; i < this._configServers.length; i++) {
- MongoRunner.stopMongod(this._configServers[i]);
- }
- }
-
- for (var i = 0; i < this._alldbpaths.length; i++) {
- resetDbpath(MongoRunner.dataPath + this._alldbpaths[i]);
- }
-
- var timeMillis = new Date().getTime() - this._startTime.getTime();
-
- print('*** ShardingTest ' + this._testName + " completed successfully in " + ( timeMillis / 1000 ) + " seconds ***");
-}
-
-ShardingTest.prototype.adminCommand = function(cmd){
- var res = this.admin.runCommand( cmd );
- if ( res && res.ok == 1 )
- return true;
-
- throw _getErrorWithCode(res, "command " + tojson(cmd) + " failed: " + tojson(res));
-}
-
-ShardingTest.prototype._rangeToString = function(r){
- return tojsononeline( r.min ) + " -> " + tojsononeline( r.max );
-}
-
-ShardingTest.prototype.printChangeLog = function(){
- var s = this;
- this.config.changelog.find().forEach(
- function(z){
- var msg = z.server + "\t" + z.time + "\t" + z.what;
- for ( i=z.what.length; i<15; i++ )
- msg += " ";
- msg += " " + z.ns + "\t";
- if ( z.what == "split" ){
- msg += s._rangeToString( z.details.before ) + " -->> (" + s._rangeToString( z.details.left ) + "),(" + s._rangeToString( z.details.right ) + ")";
- }
- else if (z.what == "multi-split" ){
- msg += s._rangeToString( z.details.before ) + " -->> (" + z.details.number + "/" + z.details.of + " " + s._rangeToString( z.details.chunk ) + ")";
- }
- else {
- msg += tojsononeline( z.details );
- }
-
- print( "ShardingTest " + msg )
- }
- );
-
-}
-
-ShardingTest.prototype.getChunksString = function( ns ){
- var q = {}
- if ( ns )
- q.ns = ns;
-
- var s = "";
- this.config.chunks.find( q ).sort( { ns : 1 , min : 1 } ).forEach(
- function(z){
- s += " " + z._id + "\t" + z.lastmod.t + "|" + z.lastmod.i + "\t" + tojson(z.min) + " -> " + tojson(z.max) + " " + z.shard + " " + z.ns + "\n";
- }
- );
-
- return s;
-}
-
-ShardingTest.prototype.printChunks = function( ns ){
- print( "ShardingTest " + this.getChunksString( ns ) );
-}
-
-ShardingTest.prototype.printShardingStatus = function(){
- printShardingStatus( this.config );
-}
-
-ShardingTest.prototype.printCollectionInfo = function( ns , msg ){
- var out = "";
- if ( msg )
- out += msg + "\n";
- out += "sharding collection info: " + ns + "\n";
- for ( var i=0; i<this._connections.length; i++ ){
- var c = this._connections[i];
- out += " mongod " + c + " " + tojson( c.getCollection( ns ).getShardVersion() , " " , true ) + "\n";
- }
- for ( var i=0; i<this._mongos.length; i++ ){
- var c = this._mongos[i];
- out += " mongos " + c + " " + tojson( c.getCollection( ns ).getShardVersion() , " " , true ) + "\n";
- }
-
- out += this.getChunksString( ns );
-
- print( "ShardingTest " + out );
-}
-
-ShardingTest.prototype.sync = function(){
- this.adminCommand( "connpoolsync" );
-}
-
-ShardingTest.prototype.onNumShards = function( collName , dbName ){
- this.sync(); // we should sync since we're going directly to mongod here
- dbName = dbName || "test";
- var num=0;
- for ( var i=0; i<this._connections.length; i++ )
- if ( this._connections[i].getDB( dbName ).getCollection( collName ).count() > 0 )
- num++;
- return num;
-}
-
-
-ShardingTest.prototype.shardCounts = function( collName , dbName ){
- this.sync(); // we should sync since we're going directly to mongod here
- dbName = dbName || "test";
- var counts = {}
- for ( var i=0; i<this._connections.length; i++ )
- counts[i] = this._connections[i].getDB( dbName ).getCollection( collName ).count();
- return counts;
-}
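onNumShards and shardCounts are the usual way a test asserts that a migration actually moved documents; both call sync() first because they read the shards directly. A hedged example with an assumed collection test.foo:

// Sketch: after moving a chunk of test.foo away, both shards should hold documents.
assert.eq(2, st.onNumShards("foo"));   // shards with at least one document
printjson(st.shardCounts("foo"));      // per-shard counts keyed by connection index, e.g. { "0": 512, "1": 488 }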
-
-ShardingTest.prototype.chunkCounts = function( collName , dbName ){
- dbName = dbName || "test";
- var x = {}
-
- this.config.shards.find().forEach(
- function(z){
- x[z._id] = 0;
- }
- );
-
- this.config.chunks.find( { ns : dbName + "." + collName } ).forEach(
- function(z){
- if ( x[z.shard] )
- x[z.shard]++
- else
- x[z.shard] = 1;
- }
- );
- return x;
-
-}
-
-ShardingTest.prototype.chunkDiff = function( collName , dbName ){
- var c = this.chunkCounts( collName , dbName );
- var min = 100000000;
- var max = 0;
- for ( var s in c ){
- if ( c[s] < min )
- min = c[s];
- if ( c[s] > max )
- max = c[s];
- }
- print( "ShardingTest input: " + tojson( c ) + " min: " + min + " max: " + max );
- return max - min;
-}
-
-// Waits up to one minute for the difference in chunks between the most loaded shard and least
-// loaded shard to be 0 or 1, indicating that the collection is well balanced.
-// This should only be called after creating a big enough chunk difference to trigger balancing.
-ShardingTest.prototype.awaitBalance = function( collName , dbName , timeToWait ) {
- timeToWait = timeToWait || 60000;
- var shardingTest = this;
- assert.soon( function() {
- var x = shardingTest.chunkDiff( collName , dbName );
- print( "chunk diff: " + x );
- return x < 2;
- } , "no balance happened", 60000 );
-
-}
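A typical awaitBalance call site, assuming the balancer is running and enough chunks were created on one shard to trigger balancing (collection name assumed):

// Sketch: enable balancing and wait for test.foo to even out.
st.startBalancer();
st.awaitBalance("foo", "test");                // passes once chunkDiff drops below 2
printjson(st.chunkCounts("foo", "test"));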
-
-ShardingTest.prototype.getShardNames = function() {
- var shards = [];
- this.s.getCollection("config.shards").find().forEach(function(shardDoc) {
- shards.push(shardDoc._id);
- });
- return shards;
-}
-
-
-ShardingTest.prototype.getShard = function( coll, query, includeEmpty ){
- var shards = this.getShardsForQuery( coll, query, includeEmpty )
- assert.eq( shards.length, 1 )
- return shards[0]
-}
-
-// Returns the shards on which documents matching a particular query reside
-ShardingTest.prototype.getShardsForQuery = function( coll, query, includeEmpty ){
- if( ! coll.getDB )
- coll = this.s.getCollection( coll )
-
- var explain = coll.find( query ).explain("executionStats")
- var shards = []
-
- var execStages = explain.executionStats.executionStages;
- var plannerShards = explain.queryPlanner.winningPlan.shards;
-
- if( execStages.shards ){
- for( var i = 0; i < execStages.shards.length; i++ ){
- var hasResults = execStages.shards[i].executionStages.nReturned &&
- execStages.shards[i].executionStages.nReturned > 0;
- if( includeEmpty || hasResults ){
- shards.push(plannerShards[i].connectionString);
- }
- }
- }
-
- for( var i = 0; i < shards.length; i++ ){
- for( var j = 0; j < this._connections.length; j++ ){
- if ( connectionURLTheSame( this._connections[j] , shards[i] ) ){
- shards[i] = this._connections[j]
- break;
- }
- }
- }
-
- return shards
-}
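getShard wraps getShardsForQuery with an assertion that exactly one shard owns the matching documents. A usage sketch, assuming db.foo is a sharded collection reached through mongos and _id is the shard key:

// Sketch: a point query on the shard key should be targeted to a single shard.
var owner = st.getShard(db.foo, { _id: 42 });          // asserts exactly one owning shard
var targeted = st.getShardsForQuery(db.foo, {}, true); // includeEmpty: also count shards returning nothing
print("owner: " + owner + ", shards targeted by {}: " + targeted.length);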
-
-ShardingTest.prototype.isSharded = function( collName ){
-
- var collName = "" + collName
- var dbName = undefined
-
- if( typeof collName.getCollectionNames == 'function' ){
- dbName = "" + collName
- collName = undefined
- }
-
- if( dbName ){
- var x = this.config.databases.findOne( { _id : dbname } )
- if( x ) return x.partitioned
- else return false
- }
-
- if( collName ){
- var x = this.config.collections.findOne( { _id : collName } )
- if( x ) return true
- else return false
- }
-
-}
-
-ShardingTest.prototype.shardGo = function( collName , key , split , move , dbName, waitForDelete ){
-
- split = ( split != false ? ( split || key ) : split )
- move = ( split != false && move != false ? ( move || split ) : false )
-
- if( collName.getDB )
- dbName = "" + collName.getDB()
- else dbName = dbName || "test";
-
- var c = dbName + "." + collName;
- if( collName.getDB )
- c = "" + collName
-
- var isEmpty = this.s.getCollection( c ).count() == 0
-
- if( ! this.isSharded( dbName ) )
- this.s.adminCommand( { enableSharding : dbName } )
-
- var result = this.s.adminCommand( { shardcollection : c , key : key } )
- if( ! result.ok ){
- printjson( result )
- assert( false )
- }
-
- if( split == false ) return
-
- result = this.s.adminCommand( { split : c , middle : split } );
- if( ! result.ok ){
- printjson( result )
- assert( false )
- }
-
- if( move == false ) return
-
- var result = null
- for( var i = 0; i < 5; i++ ){
- result = this.s.adminCommand( { movechunk : c , find : move , to : this.getOther( this.getServer( dbName ) ).name, _waitForDelete: waitForDelete } );
- if( result.ok ) break;
- sleep( 5 * 1000 );
- }
- printjson( result )
- assert( result.ok )
-
-};
-
-ShardingTest.prototype.shardColl = ShardingTest.prototype.shardGo
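shardGo (aliased as shardColl) bundles enableSharding, shardcollection, an optional split and an optional movechunk retry loop. A hedged usage sketch with an assumed numeric shard key on test.foo:

// Sketch: shard on _id, split at 500, move the upper chunk to the other shard,
// and wait for the migration's range deletion to complete.
st.shardColl(db.foo, { _id: 1 }, { _id: 500 }, { _id: 500 }, "test", true);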
-
-ShardingTest.prototype.setBalancer = function( balancer ){
- if( balancer || balancer == undefined ){
- this.config.settings.update( { _id: "balancer" }, { $set : { stopped: false } } , true )
- }
- else if( balancer == false ){
- this.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true )
- }
-}
-
-ShardingTest.prototype.stopBalancer = function( timeout, interval ) {
- this.setBalancer( false )
-
- if( typeof db == "undefined" ) db = undefined
- var oldDB = db
-
- db = this.config
- sh.waitForBalancer( false, timeout, interval )
- db = oldDB
-}
-
-ShardingTest.prototype.startBalancer = function( timeout, interval ) {
- this.setBalancer( true )
-
- if( typeof db == "undefined" ) db = undefined
- var oldDB = db
-
- db = this.config
- sh.waitForBalancer( true, timeout, interval )
- db = oldDB
-}
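stopBalancer and startBalancer flip the balancer document via setBalancer and then block on sh.waitForBalancer, temporarily pointing the global db at the config database. A usage sketch for performing a manual migration without interference (namespace assumed):

// Sketch: park the balancer, move a chunk by hand, then resume balancing.
st.stopBalancer();
st.adminCommand({ moveChunk: "test.foo",
                  find: { _id: 0 },
                  to: st.getOther(st.getServer("test")).name });
st.startBalancer();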
-
-ShardingTest.prototype.isAnyBalanceInFlight = function() {
- if ( this.config.locks.find({ _id : { $ne : "balancer" }, state : 2 }).count() > 0 )
- return true;
-
- var allCurrent = this.s.getDB( "admin" ).currentOp().inprog;
- for ( var i = 0; i < allCurrent.length; i++ ) {
- if ( allCurrent[i].desc &&
- allCurrent[i].desc.indexOf( "cleanupOldData" ) == 0 )
- return true;
- }
- return false;
-}
-
-/**
- * Kills the mongos with index n.
- */
-ShardingTest.prototype.stopMongos = function(n) {
- MongoRunner.stopMongos(this['s' + n].port);
-};
-
-/**
- * Kills the mongod with index n.
- */
-ShardingTest.prototype.stopMongod = function(n) {
- MongoRunner.stopMongod(this['d' + n].port);
-};
-
-/**
- * Restarts a previously stopped mongos.
- *
- * If opts is specified, the new mongos is started using those options. Otherwise, it is started
- * with its previous parameters.
- *
- * Warning: Overwrites the old s (if n = 0) admin, config, and sn member variables.
- */
-ShardingTest.prototype.restartMongos = function(n, opts) {
- var mongos = this['s' + n];
-
- if (opts === undefined) {
- opts = this['s' + n];
- opts.restart = true;
- }
-
- MongoRunner.stopMongos(mongos);
-
- var newConn = MongoRunner.runMongos(opts);
-
- this['s' + n] = newConn;
- if (n == 0) {
- this.s = newConn;
- this.admin = newConn.getDB('admin');
- this.config = newConn.getDB('config');
- }
-};
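restartMongos either reuses the stored connection's parameters (restart: true) or takes caller-supplied options. A hedged sketch that bounces the second mongos on its old port with higher verbosity; the option names here are assumed from MongoRunner conventions, not taken from this change:

// Sketch: restart s1; only n == 0 would also rewrite st.s, st.admin and st.config.
st.restartMongos(1, { port: st.s1.port, configdb: st._configDB, v: 2 });
assert.commandWorked(st.s1.getDB("admin").runCommand({ ping: 1 }));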
-
-/**
- * Restarts a previously stopped mongod using the same parameters as before.
- *
- * Warning: Overwrites the old dn member variables.
- */
-ShardingTest.prototype.restartMongod = function(n) {
- var mongod = this['d' + n];
- MongoRunner.stopMongod(mongod);
- mongod.restart = true;
-
- var newConn = MongoRunner.runMongod(mongod);
-
- this['d' + n] = newConn;
-};
-
-/**
- * Helper method for setting primary shard of a database and making sure that it was successful.
- * Note: first mongos needs to be up.
- */
-ShardingTest.prototype.ensurePrimaryShard = function(dbName, shardName) {
- var db = this.s0.getDB('admin');
- var res = db.adminCommand({ movePrimary: dbName, to: shardName });
- assert(res.ok || res.errmsg == "it is already the primary", tojson(res));
};
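ensurePrimaryShard is typically called right after cluster setup so a test does not depend on which shard a database happened to land on. A sketch assuming default standalone shard naming:

// Sketch: pin the "test" database to shard0000 before relying on where unsharded data lives.
st.ensurePrimaryShard("test", "shard0000");
assert.eq("shard0000", st.getServerName("test"));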