summaryrefslogtreecommitdiff
path: root/jstests
diff options
context:
space:
mode:
authorKaloian Manassiev <kaloian.manassiev@mongodb.com>2015-10-26 10:09:34 -0400
committerKaloian Manassiev <kaloian.manassiev@mongodb.com>2015-10-27 08:24:24 -0400
commit38e5c4febb788441f19044a4d3d2b89e1ab70bf0 (patch)
tree19da129e382ed59e845838f67a4711d558c3f1c3 /jstests
parentba8757e0afd1577a090785f014fc07b55e219d9a (diff)
downloadmongo-38e5c4febb788441f19044a4d3d2b89e1ab70bf0.tar.gz
SERVER-21009 Get rid of some unused/unnecessary methods in ShardingTest
Removes shardGo in favor of shardColl, and removes setBalancer because it duplicates functionality which is already in sh.
Diffstat (limited to 'jstests')
-rw-r--r--jstests/multiVersion/upgrade_cluster_v5_to_v6.js11
-rw-r--r--jstests/sharding/balance_repl.js (renamed from jstests/noPassthroughWithMongod/balance_repl.js)8
-rw-r--r--jstests/sharding/bulk_shard_insert.js (renamed from jstests/noPassthroughWithMongod/bulk_shard_insert.js)18
-rw-r--r--jstests/sharding/count_slaveok.js25
-rw-r--r--jstests/sharding/features2.js4
-rw-r--r--jstests/sharding/features3.js8
-rw-r--r--jstests/sharding/geo_near_random1.js14
-rw-r--r--jstests/sharding/geo_near_random2.js14
-rw-r--r--jstests/sharding/group_slaveok.js18
-rw-r--r--jstests/sharding/jumbo1.js2
-rw-r--r--jstests/sharding/listshards.js109
-rw-r--r--jstests/sharding/migrateBig.js2
-rw-r--r--jstests/sharding/migrateBig_balancer.js (renamed from jstests/noPassthroughWithMongod/sharding_migrateBigObject.js)11
-rw-r--r--jstests/sharding/mongos_no_detect_sharding.js9
-rw-r--r--jstests/sharding/mongos_no_replica_set_refresh.js30
-rw-r--r--jstests/sharding/mrShardedOutputAuth.js14
-rw-r--r--jstests/sharding/multi_coll_drop.js17
-rw-r--r--jstests/sharding/multi_mongos2.js6
-rw-r--r--jstests/sharding/names.js10
-rw-r--r--jstests/sharding/noUpdateButN1inAnotherCollection.js2
-rw-r--r--jstests/sharding/parallel.js3
-rw-r--r--jstests/sharding/read_does_not_create_namespaces.js8
-rw-r--r--jstests/sharding/recovering_slaveok.js2
-rw-r--r--jstests/sharding/shard3.js12
-rw-r--r--jstests/sharding/shard_targeting.js4
-rw-r--r--jstests/sharding/tag_auto_split.js10
26 files changed, 199 insertions, 172 deletions
diff --git a/jstests/multiVersion/upgrade_cluster_v5_to_v6.js b/jstests/multiVersion/upgrade_cluster_v5_to_v6.js
index 239c58bfbfb..aea6bda3e6b 100644
--- a/jstests/multiVersion/upgrade_cluster_v5_to_v6.js
+++ b/jstests/multiVersion/upgrade_cluster_v5_to_v6.js
@@ -2,8 +2,10 @@
* Tests upgrading a cluster which has 3.0 mongos.
*/
-load( './jstests/multiVersion/libs/multi_rs.js' )
-load( './jstests/multiVersion/libs/multi_cluster.js' )
+load('./jstests/multiVersion/libs/multi_rs.js');
+load('./jstests/multiVersion/libs/multi_cluster.js');
+
+(function() {
/**
* @param isRSCluster {bool} use replica set shards.
@@ -29,9 +31,6 @@ var options = {
var st = new ShardingTest({ shards : 2, mongos : 2, other : options });
-// Just stop balancer, to simulate race conds
-st.setBalancer(false);
-
var shards = st.s0.getDB("config").shards.find().toArray();
var configConnStr = st._configDB;
@@ -124,3 +123,5 @@ st.stop();
runTest(false);
runTest(true);
+
+})();
diff --git a/jstests/noPassthroughWithMongod/balance_repl.js b/jstests/sharding/balance_repl.js
index f54b391a7e3..16ae418eb0e 100644
--- a/jstests/noPassthroughWithMongod/balance_repl.js
+++ b/jstests/sharding/balance_repl.js
@@ -1,9 +1,8 @@
(function() {
"use strict";
-var otherOptions = { rs: true , numReplicas: 2 , chunkSize: 1 , nopreallocj: true };
-var s = new ShardingTest({ shards: 2, verbose: 1, other: otherOptions });
-assert.writeOK(s.config.settings.update({ _id: "balancer" },
- { $set: { stopped: true }}, true ));
+
+var s = new ShardingTest({ shards: 2,
+ other: { rs: true , numReplicas: 2 , chunkSize: 1 } });
var db = s.getDB( "test" );
var bulk = db.foo.initializeUnorderedBulkOp();
@@ -42,4 +41,3 @@ for ( i=0; i<20; i++ ) {
s.stop();
}());
-
diff --git a/jstests/noPassthroughWithMongod/bulk_shard_insert.js b/jstests/sharding/bulk_shard_insert.js
index 4ce7f555f36..a349d770a2f 100644
--- a/jstests/noPassthroughWithMongod/bulk_shard_insert.js
+++ b/jstests/sharding/bulk_shard_insert.js
@@ -1,4 +1,5 @@
// Test bulk inserts with sharding
+(function() {
// Setup randomized test
var seed = new Date().getTime()
@@ -7,12 +8,8 @@ var seed = new Date().getTime()
Random.srand( seed )
print( "Seeded with " + seed )
-
var st = new ShardingTest({ name : jsTestName(), shards : 4, chunkSize: 1 })
-// Turn off balancer initially
-st.setBalancer( false )
-
// Setup sharded collection
var mongos = st.s0
var db = mongos.getDB( jsTestName() )
@@ -37,8 +34,7 @@ print( "\n\n\nDocument size is " + Object.bsonsize({ x : data }) )
var docsInserted = 0;
var balancerOn = false;
-while( docsInserted < numDocs ){
-
+while (docsInserted < numDocs) {
var currBulkSize = ( numDocs - docsInserted > bulkSize ) ? bulkSize : ( numDocs - docsInserted )
var bulk = []
@@ -57,7 +53,7 @@ while( docsInserted < numDocs ){
if( docsInserted > numDocs / 2 && ! balancerOn ){
print( "Turning on balancer after half documents inserted." )
- st.setBalancer( true )
+ st.startBalancer();
balancerOn = true;
}
}
@@ -68,10 +64,9 @@ st.printShardingStatus()
var count = coll.find().count()
var itcount = count //coll.find().itcount()
-print( "Inserted " + docsInserted + " count : " + count + " itcount : " + itcount )
+print("Inserted " + docsInserted + " count : " + count + " itcount : " + itcount);
-st.setBalancer( true )
-sleep( 10000 )
+st.startBalancer();
var count = coll.find().count()
var itcount = coll.find().itcount()
@@ -81,5 +76,6 @@ print( "Inserted " + docsInserted + " count : " + count + " itcount : " + itcoun
// SERVER-3645
// assert.eq( docsInserted, count )
-assert.eq( docsInserted, itcount )
+assert.eq(docsInserted, itcount);
+})();
diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js
index c39162d4da6..470174890c4 100644
--- a/jstests/sharding/count_slaveok.js
+++ b/jstests/sharding/count_slaveok.js
@@ -1,16 +1,14 @@
-/* Tests count and distinct using slaveOk. Also tests a scenario querying a set
- * where only one secondary is up.
- */
+// Tests count and distinct using slaveOk. Also tests a scenario querying a set where only one
+// secondary is up.
+(function() {
-var st = new ShardingTest( testName = "countSlaveOk",
- numShards = 1,
- verboseLevel = 0,
- numMongos = 1,
- { rs : true,
- rs0 : { nodes : 2 }
- })
+var st = new ShardingTest({ name: "countSlaveOk",
+ shards: 1,
+ mongos: 1,
+ other: { rs : true,
+ rs0 : { nodes : 2 } } });
-var rst = st._rs[0].test
+var rst = st._rs[0].test;
// Insert data into replica set
var conn = new Mongo( st.s.host )
@@ -70,5 +68,6 @@ catch( e ){
print( "Non-slaveOk'd connection failed." )
}
-// Finish
-st.stop()
+st.stop();
+
+})();
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index 80a06ae6ba9..bf31e4448b2 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -23,7 +23,7 @@ assert( a.foo.distinct("x").length == 0 || b.foo.distinct("x").length == 0 , "di
assert.eq( 1 , s.onNumShards( "foo" ) , "A1" );
-s.shardGo( "foo" , { x : 1 } , { x : 2 } , { x : 3 }, null, true /* waitForDelete */ );
+s.shardColl( "foo" , { x : 1 } , { x : 2 } , { x : 3 }, null, true /* waitForDelete */ );
assert.eq( 2 , s.onNumShards( "foo" ) , "A2" );
@@ -131,7 +131,7 @@ doMR = function( n ){
doMR( "before" );
assert.eq( 1 , s.onNumShards( "mr" ) , "E1" );
-s.shardGo( "mr" , { x : 1 } , { x : 2 } , { x : 3 }, null, true /* waitForDelete */ );
+s.shardColl( "mr" , { x : 1 } , { x : 2 } , { x : 3 }, null, true /* waitForDelete */ );
assert.eq( 2 , s.onNumShards( "mr" ) , "E1" );
doMR( "after" );
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index c1fec85d988..04f54655afd 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -9,13 +9,11 @@
var s = new ShardingTest({shards: 2,
mongos: 1,
verbose:1});
+
var db = s.getDB("test"); // db variable name is required due to startParallelShell()
var numDocs = 10000;
db.foo.drop();
-// stop the balancer
-s.stopBalancer()
-
// shard test.foo and add a split point
s.adminCommand({enablesharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
@@ -27,7 +25,7 @@ s.adminCommand({moveChunk: "test.foo", find: {_id: 3},
to: s.getNonPrimaries("test")[0], _waitForDelete: true});
// restart balancer
-s.setBalancer(true)
+s.startBalancer();
// insert 10k small documents into the sharded collection
var bulk = db.foo.initializeUnorderedBulkOp();
@@ -150,4 +148,4 @@ if ( x.all.shard0000 > 0 ) {
x = db._adminCommand({"fsync" :1, lock:true});
assert(!x.ok, "lock should fail: " + tojson(x));
-s.stop()
+s.stop();
diff --git a/jstests/sharding/geo_near_random1.js b/jstests/sharding/geo_near_random1.js
index c899ff8b776..ea34d428a4d 100644
--- a/jstests/sharding/geo_near_random1.js
+++ b/jstests/sharding/geo_near_random1.js
@@ -1,10 +1,10 @@
-// this tests all points using $near
+// This tests all points using $near
+(function() {
+
load("jstests/libs/geo_near_random.js");
var testName = "geo_near_random1";
-var s = new ShardingTest( testName , 3 );
-
-s.stopBalancer()
+var s = new ShardingTest({ name: testName, shards: 3 });
db = s.getDB("test"); // global db
@@ -30,7 +30,7 @@ for (var i = (test.nPts/10); i < test.nPts; i+= (test.nPts/10)){
}
// Turn balancer back on, for actual tests
-// s.setBalancer( true ) // SERVER-13365
+// s.startBalancer() // SERVER-13365
printShardingSizes()
@@ -41,4 +41,6 @@ test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
-s.stop()
+s.stop();
+
+})();
diff --git a/jstests/sharding/geo_near_random2.js b/jstests/sharding/geo_near_random2.js
index 1fd139017a6..92652292272 100644
--- a/jstests/sharding/geo_near_random2.js
+++ b/jstests/sharding/geo_near_random2.js
@@ -1,10 +1,10 @@
-// this tests 1% of all points using $near and $nearSphere
+// This tests 1% of all points using $near and $nearSphere
+(function() {
+
load("jstests/libs/geo_near_random.js");
var testName = "geo_near_random2";
-var s = new ShardingTest( testName , 3 );
-
-s.stopBalancer();
+var s = new ShardingTest({ name: testName, shards: 3 });
db = s.getDB("test"); // global db
@@ -29,7 +29,7 @@ for (var i = (test.nPts/10); i < test.nPts; i+= (test.nPts/10)){
}
//Turn balancer back on, for actual tests
-// s.setBalancer( true ); // SERVER-13365
+// s.startBalancer(); // SERVER-13365
printShardingSizes()
@@ -47,4 +47,6 @@ test.testPt(test.mkPt(0.8), opts);
test.testPt(test.mkPt(0.8), opts);
test.testPt(test.mkPt(0.8), opts);
-s.stop()
+s.stop();
+
+})();
diff --git a/jstests/sharding/group_slaveok.js b/jstests/sharding/group_slaveok.js
index 461daeb2973..a5c20f51ea5 100644
--- a/jstests/sharding/group_slaveok.js
+++ b/jstests/sharding/group_slaveok.js
@@ -1,12 +1,11 @@
// Tests group using slaveOk
+(function() {
-var st = new ShardingTest( testName = "groupSlaveOk",
- numShards = 1,
- verboseLevel = 0,
- numMongos = 1,
- { rs : true,
- rs0 : { nodes : 2 }
- })
+var st = new ShardingTest({ name: "groupSlaveOk",
+ shards: 1,
+ mongos: 1,
+ other :{ rs : true,
+ rs0 : { nodes : 2 } } });
var rst = st._rs[0].test
@@ -61,5 +60,6 @@ catch( e ){
print( "Non-slaveOk'd connection failed." + tojson(e) )
}
-// Finish
-st.stop()
+st.stop();
+
+})();
diff --git a/jstests/sharding/jumbo1.js b/jstests/sharding/jumbo1.js
index b55c41bbeb9..02f18530533 100644
--- a/jstests/sharding/jumbo1.js
+++ b/jstests/sharding/jumbo1.js
@@ -11,8 +11,6 @@ s.adminCommand( { shardcollection : "test.foo" , key : { x : 1 } } );
db = s.getDB( "test" );
-sh.setBalancerState( false )
-
big = ""
while ( big.length < 10000 )
big += "."
diff --git a/jstests/sharding/listshards.js b/jstests/sharding/listshards.js
index 035837a752b..b4c87eda7ab 100644
--- a/jstests/sharding/listshards.js
+++ b/jstests/sharding/listshards.js
@@ -2,66 +2,69 @@
// Test the listShards command by adding stand-alone and replica-set shards to a cluster
//
(function() {
- 'use strict';
+'use strict';
- function checkShardName(shardName, shardsArray) {
- var found = false;
- shardsArray.forEach(function(shardObj) {
- if (shardObj._id === shardName) {
- found = true;
- return;
- }
- });
- return found;
- }
+function checkShardName(shardName, shardsArray) {
+ var found = false;
+ shardsArray.forEach(function(shardObj) {
+ if (shardObj._id === shardName) {
+ found = true;
+ return;
+ }
+ });
+ return found;
+}
- var shardTest = new ShardingTest('listShardsTest', 1, 0, 1, { useHostname: true });
+var shardTest = new ShardingTest({ name: 'listShardsTest',
+ shards: 1,
+ mongos: 1,
+ other: { useHostname: true } });
- var mongos = shardTest.s0;
- var res = mongos.adminCommand('listShards');
- assert.commandWorked(res, 'listShards command failed');
- var shardsArray = res.shards;
- assert.eq(shardsArray.length, 1);
+var mongos = shardTest.s0;
+var res = mongos.adminCommand('listShards');
+assert.commandWorked(res, 'listShards command failed');
+var shardsArray = res.shards;
+assert.eq(shardsArray.length, 1);
- // add standalone mongod
- var standaloneShard = MongoRunner.runMongod({useHostName: true});
- res = shardTest.admin.runCommand({ addShard: standaloneShard.host, name: 'standalone' });
- assert.commandWorked(res, 'addShard command failed');
- res = mongos.adminCommand('listShards');
- assert.commandWorked(res, 'listShards command failed');
- shardsArray = res.shards;
- assert.eq(shardsArray.length, 2);
- assert(checkShardName('standalone', shardsArray),
- 'listShards command didn\'t return standalone shard: ' + tojson(shardsArray));
+// add standalone mongod
+var standaloneShard = MongoRunner.runMongod({useHostName: true});
+res = shardTest.admin.runCommand({ addShard: standaloneShard.host, name: 'standalone' });
+assert.commandWorked(res, 'addShard command failed');
+res = mongos.adminCommand('listShards');
+assert.commandWorked(res, 'listShards command failed');
+shardsArray = res.shards;
+assert.eq(shardsArray.length, 2);
+assert(checkShardName('standalone', shardsArray),
+ 'listShards command didn\'t return standalone shard: ' + tojson(shardsArray));
- // add replica set named 'repl'
- var rs1 = new ReplSetTest({ name: 'repl', nodes: 1, useHostName: true});
- rs1.startSet();
- rs1.initiate();
- res = shardTest.admin.runCommand({ addShard: rs1.getURL()});
- assert.commandWorked(res, 'addShard command failed');
- res = mongos.adminCommand('listShards');
- assert.commandWorked(res, 'listShards command failed');
- shardsArray = res.shards;
- assert.eq(shardsArray.length, 3);
- assert(checkShardName('repl', shardsArray),
- 'listShards command didn\'t return replica set shard: ' + tojson(shardsArray));
+// add replica set named 'repl'
+var rs1 = new ReplSetTest({ name: 'repl', nodes: 1, useHostName: true});
+rs1.startSet();
+rs1.initiate();
+res = shardTest.admin.runCommand({ addShard: rs1.getURL()});
+assert.commandWorked(res, 'addShard command failed');
+res = mongos.adminCommand('listShards');
+assert.commandWorked(res, 'listShards command failed');
+shardsArray = res.shards;
+assert.eq(shardsArray.length, 3);
+assert(checkShardName('repl', shardsArray),
+ 'listShards command didn\'t return replica set shard: ' + tojson(shardsArray));
- // remove 'repl' shard
- assert.soon(function() {
- var res = shardTest.admin.runCommand({ removeShard: 'repl' });
- assert.commandWorked(res, 'removeShard command failed');
- return res.state === 'completed';
- }, 'failed to remove the replica set shard');
+// remove 'repl' shard
+assert.soon(function() {
+ var res = shardTest.admin.runCommand({ removeShard: 'repl' });
+ assert.commandWorked(res, 'removeShard command failed');
+ return res.state === 'completed';
+}, 'failed to remove the replica set shard');
- res = mongos.adminCommand('listShards');
- assert.commandWorked(res, 'listShards command failed');
- shardsArray = res.shards;
- assert.eq(shardsArray.length, 2);
- assert(!checkShardName('repl', shardsArray),
- 'listShards command returned removed replica set shard: ' + tojson(shardsArray));
+res = mongos.adminCommand('listShards');
+assert.commandWorked(res, 'listShards command failed');
+shardsArray = res.shards;
+assert.eq(shardsArray.length, 2);
+assert(!checkShardName('repl', shardsArray),
+ 'listShards command returned removed replica set shard: ' + tojson(shardsArray));
- rs1.stopSet();
- shardTest.stop();
+rs1.stopSet();
+shardTest.stop();
})();
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index bd3f4e9de0b..534c8476565 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -2,7 +2,6 @@
var s = new ShardingTest({ name: "migrateBig",
shards: 2,
- mongos: 1,
other: { chunkSize: 1 } });
s.config.settings.update( { _id: "balancer" }, { $set : { _waitForDelete : true } } , true);
@@ -58,7 +57,6 @@ for ( i=0; i<20; i+= 2 ) {
db.printShardingStatus()
-
s.config.settings.update( { _id: "balancer" }, { $set : { stopped: false } } , true );
assert.soon( function(){ var x = s.chunkDiff( "foo" , "test" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 8 * 60 * 1000 , 2000 )
diff --git a/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js b/jstests/sharding/migrateBig_balancer.js
index cfa91a6ec77..a46614a3699 100644
--- a/jstests/noPassthroughWithMongod/sharding_migrateBigObject.js
+++ b/jstests/sharding/migrateBig_balancer.js
@@ -1,4 +1,8 @@
-var st = new ShardingTest({numShards: 2, nopreallocj: "", enableBalancer: true});
+(function() {
+
+var st = new ShardingTest({ name: 'migrateBig_balancer',
+ shards: 2,
+ other: { enableBalancer: true } });
var mongos = st.s;
var admin = mongos.getDB("admin");
@@ -34,8 +38,7 @@ admin.runCommand({ shardcollection : "" + coll, key : { _id : 1 } })
assert.lt( 5 , mongos.getDB( "config" ).chunks.find( { ns : "test.stuff" } ).count() , "not enough chunks" );
assert.soon(
- function(){
-
+ function() {
// On *extremely* slow or variable systems, we've seen migrations fail in the critical section and
// kill the server. Do an explicit check for this. SERVER-8781
// TODO: Remove once we can better specify what systems to run what tests on.
@@ -61,3 +64,5 @@ assert.soon(
"never migrated" , 10 * 60 * 1000 , 1000 );
st.stop();
+
+})();
diff --git a/jstests/sharding/mongos_no_detect_sharding.js b/jstests/sharding/mongos_no_detect_sharding.js
index 1c001a6a76b..a0423381fc9 100644
--- a/jstests/sharding/mongos_no_detect_sharding.js
+++ b/jstests/sharding/mongos_no_detect_sharding.js
@@ -1,6 +1,9 @@
// Tests whether new sharding is detected on insert by mongos
+(function() {
-var st = new ShardingTest( name = "test", shards = 1, verbose = 2, mongos = 2 );
+var st = new ShardingTest({ name: "mongos_no_detect_sharding",
+ shards: 1,
+ mongos: 2 });
var mongos = st.s
var config = mongos.getDB("config")
@@ -40,4 +43,6 @@ config.printShardingStatus( true )
assert.eq( coll.getShardVersion().ok, 1 )
assert.eq( 101, coll.find().itcount() )
-st.stop()
+st.stop();
+
+})();
diff --git a/jstests/sharding/mongos_no_replica_set_refresh.js b/jstests/sharding/mongos_no_replica_set_refresh.js
index 6dbb54b44b2..fb853189cf0 100644
--- a/jstests/sharding/mongos_no_replica_set_refresh.js
+++ b/jstests/sharding/mongos_no_replica_set_refresh.js
@@ -1,21 +1,20 @@
// Tests whether new sharding is detected on insert by mongos
load("jstests/replsets/rslib.js");
+
(function () {
-var st = new ShardingTest(
- name = "test",
- shards = 1,
- verbose = 2,
- mongos = 2,
- other = {
- rs0: {
- nodes: [
- {rsConfig: {priority: 10}},
- {},
- {},
- ],
- },
- }
-);
+
+var st = new ShardingTest({ name: 'mongos_no_replica_set_refresh',
+ shards: 1,
+ mongos: 2,
+ other: {
+ rs0: {
+ nodes: [
+ {rsConfig: {priority: 10}},
+ {},
+ {},
+ ],
+ }
+ } });
var rsObj = st._rs[0].test;
assert.commandWorked(
@@ -94,4 +93,5 @@ assert.soon( function(){ return configServerURL().indexOf( removedNode.host ) >=
jsTestLog( "Done..." );
st.stop();
+
}());
diff --git a/jstests/sharding/mrShardedOutputAuth.js b/jstests/sharding/mrShardedOutputAuth.js
index cf1bf612085..c8ea6d490ad 100644
--- a/jstests/sharding/mrShardedOutputAuth.js
+++ b/jstests/sharding/mrShardedOutputAuth.js
@@ -4,6 +4,8 @@
* from a separate input database while authenticated to both.
*/
+(function() {
+
function doMapReduce(connection, outputDb) {
// clean output db and run m/r
outputDb.numbers_out.drop();
@@ -43,12 +45,10 @@ function assertFailure(configDb, outputDb) {
}
-var st = new ShardingTest( testName = "mrShardedOutputAuth",
- numShards = 1,
- verboseLevel = 0,
- numMongos = 1,
- { extraOptions : {"keyFile" : "jstests/libs/key1"} }
- );
+var st = new ShardingTest({ name: "mrShardedOutputAuth",
+ shards: 1,
+ mongos: 1,
+ other: { extraOptions : {"keyFile" : "jstests/libs/key1"} } });
// Setup the users to the input, output and admin databases
var mongos = st.s;
@@ -94,3 +94,5 @@ doMapReduce(outputAuthConn, outputDb);
assertFailure(configDb, outputDb);
st.stop();
+
+})();
diff --git a/jstests/sharding/multi_coll_drop.js b/jstests/sharding/multi_coll_drop.js
index fc285f6a969..feb3cd41e60 100644
--- a/jstests/sharding/multi_coll_drop.js
+++ b/jstests/sharding/multi_coll_drop.js
@@ -1,14 +1,15 @@
// Tests the dropping and re-adding of a collection
+(function() {
-var st = new ShardingTest( name = "multidrop", shards = 1, verbose = 0, mongos = 2 )
+var st = new ShardingTest({ name: "multidrop", shards: 1, mongos: 2 });
-var mA = st.s0
-var mB = st.s1
+var mA = st.s0;
+var mB = st.s1;
-var coll = mA.getCollection( name + ".coll" )
-var collB = mB.getCollection( coll + "" )
+var coll = mA.getCollection('multidrop.coll');
+var collB = mB.getCollection('multidrop.coll');
-jsTestLog( "Shard and split collection..." )
+jsTestLog( "Shard and split collection..." );
var admin = mA.getDB( "admin" )
admin.runCommand({ enableSharding : coll.getDB() + "" })
@@ -40,6 +41,6 @@ collB.find().itcount()
jsTestLog( "Done." )
-st.stop()
-
+st.stop();
+})();
diff --git a/jstests/sharding/multi_mongos2.js b/jstests/sharding/multi_mongos2.js
index bad6afd3a54..92c9993ecdd 100644
--- a/jstests/sharding/multi_mongos2.js
+++ b/jstests/sharding/multi_mongos2.js
@@ -26,7 +26,7 @@ res = s2.getDB( "admin" ).runCommand( { moveChunk: "test.existing" , find : { _i
assert.eq(1 , res.ok, tojson(res));
-s1.setBalancer( true )
+s1.startBalancer();
printjson( s2.adminCommand( {"getShardVersion" : "test.existing" } ) )
printjson( new Mongo(s1.getServer( "test" ).name).getDB( "admin" ).adminCommand( {"getShardVersion" : "test.existing" } ) )
@@ -54,7 +54,7 @@ s1.getDB('test').existing3.insert({_id:1})
assert.eq(1, s1.getDB('test').existing3.count({_id:1}));
assert.eq(1, s2.getDB('test').existing3.count({_id:1}));
-s1.stopBalancer()
+s1.stopBalancer();
s2.adminCommand( { shardcollection : "test.existing3" , key : { _id : 1 } } );
assert.commandWorked(s2.adminCommand({ split: "test.existing3", middle: { _id: 5 }}));
@@ -62,7 +62,7 @@ assert.commandWorked(s2.adminCommand({ split: "test.existing3", middle: { _id: 5
res = s1.getDB( "admin" ).runCommand( { moveChunk: "test.existing3" , find : { _id : 1 } , to : s1.getOther( s1.getServer( "test" ) ).name } );
assert.eq(1 , res.ok, tojson(res));
-s1.setBalancer( true )
+s1.startBalancer();
s1.stop();
diff --git a/jstests/sharding/names.js b/jstests/sharding/names.js
index 17e98f82b30..5b30dc436c4 100644
--- a/jstests/sharding/names.js
+++ b/jstests/sharding/names.js
@@ -1,6 +1,10 @@
// Test that having replica set names the same as the names of other shards works fine
+(function() {
-var st = new ShardingTest( name = "test", shards = 0, verbose = 2, mongos = 2, other = { rs : true } )
+var st = new ShardingTest({ name: "HostNames",
+ shards: 0,
+ mongos: 2,
+ other: { rs : true } });
var rsA = new ReplSetTest({ nodes : 2, name : "rsA" })
var rsB = new ReplSetTest({ nodes : 2, name : "rsB" })
@@ -41,4 +45,6 @@ assert.eq(2, config.shards.count(), "Error re-adding a shard");
assert.eq(rsB.getURL(), config.shards.findOne({_id:rsA.name})["host"], "Wrong host for shard rsA 3");
assert.eq(rsA.getURL(), config.shards.findOne({_id:rsB.name})["host"], "Wrong host for shard rsB 3");
-st.stop() \ No newline at end of file
+st.stop();
+
+})();
diff --git a/jstests/sharding/noUpdateButN1inAnotherCollection.js b/jstests/sharding/noUpdateButN1inAnotherCollection.js
index 1bfdb33a0d7..73b6dd8a9ec 100644
--- a/jstests/sharding/noUpdateButN1inAnotherCollection.js
+++ b/jstests/sharding/noUpdateButN1inAnotherCollection.js
@@ -6,7 +6,7 @@ function debug( str ) {
var name = "badNonUpdate";
debug("Starting sharded cluster test stuff");
-s = new ShardingTest( {name: name, shards : 2, mongos : 2, verbose:5, nopreallocj : true });
+var s = new ShardingTest({name: name, shards : 2, mongos : 2, verbose: 5 });
var mongosA=s.s0;
var mongosB=s.s1;
diff --git a/jstests/sharding/parallel.js b/jstests/sharding/parallel.js
index e4c2b462851..af0bdcc8d5b 100644
--- a/jstests/sharding/parallel.js
+++ b/jstests/sharding/parallel.js
@@ -20,7 +20,8 @@ for (var i=0; i<N; i+=(N/12)) {
to: "shard000" + Math.floor(Math.random() * numShards)});
}
-s.setBalancer( true )
+s.startBalancer();
+
var bulk = db.foo.initializeUnorderedBulkOp();
for ( i=0; i<N; i++ )
bulk.insert({ _id: i });
diff --git a/jstests/sharding/read_does_not_create_namespaces.js b/jstests/sharding/read_does_not_create_namespaces.js
index d07280fc0d3..db3c098c0fc 100644
--- a/jstests/sharding/read_does_not_create_namespaces.js
+++ b/jstests/sharding/read_does_not_create_namespaces.js
@@ -1,6 +1,8 @@
// This test ensures that just attempting to read from a non-existent database or collection won't
// cause entries to be created in the catalog.
-var shardingTest = new ShardingTest('read_does_not_create_namespaces', 1);
+(function() {
+
+var shardingTest = new ShardingTest({ name: 'read_does_not_create_namespaces', shards: 1 });
var db = shardingTest.getDB('NonExistentDB');
assert.isnull(db.nonExistentColl.findOne({}));
@@ -9,4 +11,6 @@ assert.isnull(db.nonExistentColl.findOne({}));
assert.isnull(shardingTest.getDB('config').databases.findOne({ _id: 'NonExistentDB' }));
assert.eq(-1, shardingTest.shard0.getDBNames().indexOf('NonExistentDB'));
-shardingTest.stop(); \ No newline at end of file
+shardingTest.stop();
+
+})();
diff --git a/jstests/sharding/recovering_slaveok.js b/jstests/sharding/recovering_slaveok.js
index b446c80918c..c4efc5bd666 100644
--- a/jstests/sharding/recovering_slaveok.js
+++ b/jstests/sharding/recovering_slaveok.js
@@ -38,7 +38,7 @@ coll.save({ _id : 1, b : "b", date : new Date() });
print("2: shard collection");
-shardTest.shardGo(coll, /* shardBy */ { _id : 1 }, /* splitAt */ { _id : 0 });
+shardTest.shardColl(coll, /* shardBy */ { _id : 1 }, /* splitAt */ { _id : 0 });
print("3: test normal and slaveOk queries");
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index fedf2ed72b8..abae4f19eb7 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -1,9 +1,9 @@
-// shard3.js
+(function() {
// Include helpers for analyzing explain output.
load("jstests/libs/analyze_plan.js");
-s = new ShardingTest({name: "shard3", shards: 2, mongos: 2, other: {enableBalancer: true}});
+var s = new ShardingTest({name: "shard3", shards: 2, mongos: 2, other: { enableBalancer: true }});
s2 = s._mongos[1];
@@ -17,11 +17,11 @@ if (s.configRS) {
}
assert( sh.getBalancerState() , "A1" )
-sh.setBalancerState( false )
+sh.setBalancerState(false);
assert( ! sh.getBalancerState() , "A2" )
-sh.setBalancerState( true )
+sh.setBalancerState(true);
assert( sh.getBalancerState() , "A3" )
-sh.setBalancerState( false )
+sh.setBalancerState(false);
assert( ! sh.getBalancerState() , "A4" )
s.config.databases.find().forEach( printjson )
@@ -173,3 +173,5 @@ y = dbb.foo.stats()
printjson( y )
s.stop();
+
+})();
diff --git a/jstests/sharding/shard_targeting.js b/jstests/sharding/shard_targeting.js
index 98840c0c3ac..1189e4e6cf3 100644
--- a/jstests/sharding/shard_targeting.js
+++ b/jstests/sharding/shard_targeting.js
@@ -17,7 +17,7 @@ var res;
//
// Shard key is the same with command name.
-s.shardGo("foo", {count: 1}, { count: "" })
+s.shardColl("foo", {count: 1}, { count: "" })
for (var i=0; i<50; i++) {
db.foo.insert({count: i}); // chunk [MinKey, ""), including numbers
@@ -40,7 +40,7 @@ assert.eq(res.n, 100);
//
db.foo.drop();
// Shard key is the same with command name.
-s.shardGo("foo", {mapReduce: 1}, { mapReduce: "" })
+s.shardColl("foo", {mapReduce: 1}, { mapReduce: "" })
for (var i=0; i<50; i++) {
db.foo.insert({mapReduce: i}); // to the chunk including number
diff --git a/jstests/sharding/tag_auto_split.js b/jstests/sharding/tag_auto_split.js
index a239ad88c01..8d8e4a35ef7 100644
--- a/jstests/sharding/tag_auto_split.js
+++ b/jstests/sharding/tag_auto_split.js
@@ -1,4 +1,5 @@
-// test to make sure that tag ranges get split
+// Test to make sure that tag ranges get split
+(function() {
var s = new ShardingTest({ name: "tag_auto_split",
shards: 2,
@@ -28,7 +29,10 @@ printjson( sh.status() );
s.stop();
//test without full shard key on tags
-s = new ShardingTest( "tag_auto_split2", 2, 0, 1, { nopreallocj : true, enableBalancer : true } );
+s = new ShardingTest({ name: "tag_auto_split2",
+ shards: 2,
+ mongos: 1,
+ other: { enableBalancer : true } });
db = s.getDB( "test" );
@@ -66,3 +70,5 @@ assert.eq( 1, s.config.chunks.find( {min : {_id : 10 , a : MinKey} } ).count(),
"bad chunk range boundary" );
s.stop();
+
+})();