author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2015-10-22 11:18:50 -0400
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2015-10-23 09:50:35 -0400
commit     3db9d1338c4d15b9b937516676645fd26d5f0a23 (patch)
tree       639f8de38537f721aeeeb4c2eb804e9212428d65
parent     c48ff0ba613fdafd51d26f664371522837809a9d (diff)
download   mongo-3db9d1338c4d15b9b937516676645fd26d5f0a23.tar.gz
SERVER-21009 Remove usages of the multi-argument ShardingTest constructor
No functional changes; this just converts everything to use the JSON-based constructor. Also moves some sharding-specific tests out of noPassthroughWithMongod and into the sharding suite.
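In shorthand, the conversion looks like this (a sketch adapted from the auto1.js hunk below; the old positional parameter order, name/shards/verbose/mongos/other, is the one annotated inline in the shard_existing.js hunk). Most call sites are also wrapped in an IIFE so test variables stop leaking into the shell's global scope, and the chunksize option's casing is normalized to chunkSize:

    // Old: positional arguments (name, numShards, verboseLevel, numMongos, otherParams)
    s = new ShardingTest( "auto1" , 2 , 1 , 1, { enableBalancer : 1 } );

    // New: a single JSON options document, with the test body inside an IIFE
    (function() {
    var s = new ShardingTest({ name: "auto1",
                               shards: 2,
                               mongos: 1,
                               verbose: 1,
                               other: { enableBalancer: 1 } });

    // ... test body ...

    s.stop();
    })();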
-rw-r--r--  jstests/aggregation/mongos_slaveok.js | 39
-rw-r--r--  jstests/aggregation/testshard1.js | 11
-rw-r--r--  jstests/gle/updated_existing.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/balance_repl.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/bulk_shard_insert.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/replReads.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_rs1.js | 2
-rw-r--r--  jstests/sharding/addshard2.js | 10
-rw-r--r--  jstests/sharding/addshard4.js | 12
-rw-r--r--  jstests/sharding/auth2.js | 2
-rw-r--r--  jstests/sharding/authConnectionHook.js | 2
-rw-r--r--  jstests/sharding/authmr.js | 13
-rw-r--r--  jstests/sharding/authwhere.js | 13
-rw-r--r--  jstests/sharding/auto1.js | 10
-rw-r--r--  jstests/sharding/auto2.js | 11
-rw-r--r--  jstests/sharding/autosplit_heuristics.js (renamed from jstests/noPassthroughWithMongod/autosplit_heuristics.js) | 0
-rw-r--r--  jstests/sharding/balance_tags1.js | 40
-rw-r--r--  jstests/sharding/balance_tags2.js | 8
-rw-r--r--  jstests/sharding/bouncing_count.js | 9
-rw-r--r--  jstests/sharding/count1.js | 14
-rw-r--r--  jstests/sharding/count2.js | 15
-rw-r--r--  jstests/sharding/cursor1.js | 10
-rw-r--r--  jstests/sharding/diffservers1.js | 6
-rw-r--r--  jstests/sharding/disallow_mongos_add_as_shard.js (renamed from jstests/sharding/addshard3.js) | 2
-rw-r--r--  jstests/sharding/features1.js | 9
-rw-r--r--  jstests/sharding/features2.js | 10
-rw-r--r--  jstests/sharding/findandmodify1.js | 6
-rw-r--r--  jstests/sharding/findandmodify2.js | 2
-rw-r--r--  jstests/sharding/index1.js | 11
-rw-r--r--  jstests/sharding/jumbo1.js | 11
-rw-r--r--  jstests/sharding/key_many.js | 15
-rw-r--r--  jstests/sharding/key_string.js | 10
-rw-r--r--  jstests/sharding/large_chunk.js (renamed from jstests/noPassthroughWithMongod/large_chunk.js) | 20
-rw-r--r--  jstests/sharding/limit_push.js | 11
-rw-r--r--  jstests/sharding/mapReduce_inSharded.js | 2
-rw-r--r--  jstests/sharding/mapReduce_inSharded_outSharded.js | 2
-rw-r--r--  jstests/sharding/mapReduce_nonSharded.js | 2
-rw-r--r--  jstests/sharding/mapReduce_outSharded.js | 2
-rw-r--r--  jstests/sharding/migrateBig.js | 13
-rw-r--r--  jstests/sharding/movePrimary1.js | 5
-rw-r--r--  jstests/sharding/movechunk_with_def_paranoia.js | 2
-rw-r--r--  jstests/sharding/movechunk_with_moveParanoia.js | 2
-rw-r--r--  jstests/sharding/movechunk_with_noMoveParanoia.js | 2
-rw-r--r--  jstests/sharding/mrShardedOutput.js | 2
-rw-r--r--  jstests/sharding/multi_mongos2.js | 8
-rw-r--r--  jstests/sharding/multi_mongos2a.js | 10
-rw-r--r--  jstests/sharding/presplit.js | 10
-rw-r--r--  jstests/sharding/remove1.js | 6
-rw-r--r--  jstests/sharding/shard_existing.js | 10
-rw-r--r--  jstests/sharding/shard_keycount.js | 11
-rw-r--r--  jstests/sharding/shard_targeting.js | 10
-rw-r--r--  jstests/sharding/shard_with_special_db_names.js | 5
-rw-r--r--  jstests/sharding/sharding_balance1.js (renamed from jstests/noPassthroughWithMongod/sharding_balance1.js) | 11
-rw-r--r--  jstests/sharding/sharding_balance2.js (renamed from jstests/noPassthroughWithMongod/sharding_balance2.js) | 2
-rw-r--r--  jstests/sharding/sharding_balance3.js (renamed from jstests/noPassthroughWithMongod/sharding_balance3.js) | 14
-rw-r--r--  jstests/sharding/sharding_balance4.js (renamed from jstests/noPassthroughWithMongod/sharding_balance4.js) | 14
-rw-r--r--  jstests/sharding/sharding_migrate_cursor1.js (renamed from jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js) | 17
-rw-r--r--  jstests/sharding/sharding_multiple_ns_rs.js (renamed from jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js) | 11
-rw-r--r--  jstests/sharding/sharding_rs2.js (renamed from jstests/noPassthroughWithMongod/sharding_rs2.js) | 9
-rw-r--r--  jstests/sharding/sort1.js | 8
-rw-r--r--  jstests/sharding/split_with_force.js | 2
-rw-r--r--  jstests/sharding/split_with_force_small.js | 2
-rw-r--r--  jstests/sharding/stats.js | 13
-rw-r--r--  jstests/sharding/sync_cluster_config/parallel.js | 10
-rw-r--r--  jstests/sharding/sync_cluster_config/sync2.js | 10
-rw-r--r--  jstests/sharding/sync_cluster_config/sync7.js | 7
-rw-r--r--  jstests/sharding/sync_cluster_config/sync_conn_cmd.js | 7
-rw-r--r--  jstests/sharding/tag_auto_split.js | 5
-rw-r--r--  jstests/sharding/tag_range.js | 4
-rw-r--r--  jstests/sharding/top_chunk_autosplit.js | 6
-rw-r--r--  jstests/sharding/update_sharded.js | 8
-rw-r--r--  jstests/sharding/user_flags_sharded.js | 7
-rw-r--r--  jstests/sharding/version1.js | 6
-rw-r--r--  jstests/sharding/version2.js | 7
-rw-r--r--  jstests/sharding/zbigMapReduce.js | 2
-rw-r--r--  jstests/slow1/sharding_multiple_collections.js | 11
-rw-r--r--  jstests/tool/dumprestore9.js | 19
-rw-r--r--  jstests/tool/gridfs.js | 2
78 files changed, 424 insertions, 236 deletions
diff --git a/jstests/aggregation/mongos_slaveok.js b/jstests/aggregation/mongos_slaveok.js
index c356a0e7dc5..57ce168456e 100644
--- a/jstests/aggregation/mongos_slaveok.js
+++ b/jstests/aggregation/mongos_slaveok.js
@@ -2,39 +2,42 @@
* Tests aggregate command against mongos with slaveOk. For more tests on read preference,
* please refer to jstests/sharding/read_pref_cmd.js.
*/
+(function() {
var NODES = 2;
var doTest = function(st, doSharded) {
-var testDB = st.s.getDB('test');
+ var testDB = st.s.getDB('test');
-if (doSharded) {
- testDB.adminCommand({ enableSharding: 'test' });
- testDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
-}
+ if (doSharded) {
+ testDB.adminCommand({ enableSharding: 'test' });
+ testDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
+ }
-testDB.user.insert({ x: 10 }, { writeConcern: { w: NODES }});
-testDB.setSlaveOk(true);
+ testDB.user.insert({ x: 10 }, { writeConcern: { w: NODES }});
+ testDB.setSlaveOk(true);
-var secNode = st.rs0.getSecondary();
-secNode.getDB('test').setProfilingLevel(2);
+ var secNode = st.rs0.getSecondary();
+ secNode.getDB('test').setProfilingLevel(2);
-// wait for mongos to recognize that the slave is up
-ReplSetTest.awaitRSClientHosts(st.s, secNode, {ok: true });
+ // wait for mongos to recognize that the slave is up
+ ReplSetTest.awaitRSClientHosts(st.s, secNode, {ok: true });
-var res = testDB.runCommand({ aggregate: 'user', pipeline: [{ $project: { x: 1 }}]});
-assert(res.ok, 'aggregate command failed: ' + tojson(res));
+ var res = testDB.runCommand({ aggregate: 'user', pipeline: [{ $project: { x: 1 }}]});
+ assert(res.ok, 'aggregate command failed: ' + tojson(res));
-var profileQuery = { op: 'command', ns: 'test.user', 'command.aggregate': 'user' };
-var profileDoc = secNode.getDB('test').system.profile.findOne(profileQuery);
+ var profileQuery = { op: 'command', ns: 'test.user', 'command.aggregate': 'user' };
+ var profileDoc = secNode.getDB('test').system.profile.findOne(profileQuery);
-assert(profileDoc != null);
-testDB.dropDatabase();
+ assert(profileDoc != null);
+ testDB.dropDatabase();
};
-var st = new ShardingTest({ shards: { rs0: { oplogSize: 10, verbose: 1, nodes: NODES }}});
+var st = new ShardingTest({ shards: { rs0: { oplogSize: 10, verbose: 1, nodes: NODES } } });
doTest(st, false);
doTest(st, true);
st.stop();
+
+})();
diff --git a/jstests/aggregation/testshard1.js b/jstests/aggregation/testshard1.js
index ab49ad5653a..512099f1509 100644
--- a/jstests/aggregation/testshard1.js
+++ b/jstests/aggregation/testshard1.js
@@ -17,12 +17,11 @@ function aggregateNoOrder(coll, pipeline) {
jsTestLog("Creating sharded cluster");
var shardedAggTest = new ShardingTest({
- shards: 2,
- verbose: 2,
- mongos: 1,
- other: { chunksize : 1, enableBalancer: true }
- }
-);
+ shards: 2,
+ mongos: 1,
+ verbose: 2,
+ other: { chunkSize: 1, enableBalancer: true }
+ });
jsTestLog("Setting up sharded cluster");
shardedAggTest.adminCommand( { enablesharding : "aggShard" } );
diff --git a/jstests/gle/updated_existing.js b/jstests/gle/updated_existing.js
index bd03c535099..5e9891ccf85 100644
--- a/jstests/gle/updated_existing.js
+++ b/jstests/gle/updated_existing.js
@@ -3,7 +3,7 @@
* an upsert is not missing when autosplit takes place.
*/
-var st = new ShardingTest({ shards : 1, mongos : 1, verbose : 1, chunksize : 1 });
+var st = new ShardingTest({ shards : 1, mongos : 1, verbose : 1, chunkSize: 1 });
var testDB = st.getDB("test");
var coll = "foo";
diff --git a/jstests/noPassthroughWithMongod/balance_repl.js b/jstests/noPassthroughWithMongod/balance_repl.js
index a2ab6cab8b7..f54b391a7e3 100644
--- a/jstests/noPassthroughWithMongod/balance_repl.js
+++ b/jstests/noPassthroughWithMongod/balance_repl.js
@@ -1,6 +1,6 @@
(function() {
"use strict";
-var otherOptions = { rs: true , numReplicas: 2 , chunksize: 1 , nopreallocj: true };
+var otherOptions = { rs: true , numReplicas: 2 , chunkSize: 1 , nopreallocj: true };
var s = new ShardingTest({ shards: 2, verbose: 1, other: otherOptions });
assert.writeOK(s.config.settings.update({ _id: "balancer" },
{ $set: { stopped: true }}, true ));
diff --git a/jstests/noPassthroughWithMongod/bulk_shard_insert.js b/jstests/noPassthroughWithMongod/bulk_shard_insert.js
index 068a75b7820..4ce7f555f36 100644
--- a/jstests/noPassthroughWithMongod/bulk_shard_insert.js
+++ b/jstests/noPassthroughWithMongod/bulk_shard_insert.js
@@ -8,7 +8,7 @@ Random.srand( seed )
print( "Seeded with " + seed )
-var st = new ShardingTest({ name : jsTestName(), shards : 4, chunksize : 1 })
+var st = new ShardingTest({ name : jsTestName(), shards : 4, chunkSize: 1 })
// Turn off balancer initially
st.setBalancer( false )
diff --git a/jstests/noPassthroughWithMongod/replReads.js b/jstests/noPassthroughWithMongod/replReads.js
index d665e9967fe..09b09277011 100644
--- a/jstests/noPassthroughWithMongod/replReads.js
+++ b/jstests/noPassthroughWithMongod/replReads.js
@@ -3,7 +3,7 @@
function testReadLoadBalancing(numReplicas) {
var s = new ShardingTest({ shards: { rs0: { nodes: numReplicas }},
- verbose: 2, other: { chunksize: 1 }});
+ verbose: 2, other: { chunkSize: 1 }});
s.adminCommand({enablesharding : "test"})
s.config.settings.find().forEach(printjson)
diff --git a/jstests/noPassthroughWithMongod/sharding_rs1.js b/jstests/noPassthroughWithMongod/sharding_rs1.js
index 4c93b43dbfa..c85fa2a8e54 100644
--- a/jstests/noPassthroughWithMongod/sharding_rs1.js
+++ b/jstests/noPassthroughWithMongod/sharding_rs1.js
@@ -1,7 +1,7 @@
// tests sharding with replica sets
var s = new ShardingTest({ shards: 3,
- other: { rs: true , chunksize: 1, enableBalancer: true }});
+ other: { rs: true , chunkSize: 1, enableBalancer: true }});
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'test-rs0');
diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js
index 3799a4fd46a..e06c1bb990f 100644
--- a/jstests/sharding/addshard2.js
+++ b/jstests/sharding/addshard2.js
@@ -1,5 +1,10 @@
+(function() {
+
// Don't start any shards, yet
-var s = new ShardingTest("add_shard2", 1, 0, 1, {useHostname : true});
+var s = new ShardingTest({name: "add_shard2",
+ shards: 1,
+ mongos: 1,
+ other: {useHostname : true} });
// Start two new instances, which will be used for shards
var conn1 = MongoRunner.runMongod({useHostname: true});
@@ -110,8 +115,11 @@ assert(!wRes.hasWriteError() && wRes.nInserted === 1,
assert.commandFailed(s.admin.runCommand({addshard: rs5.getURL()}));
s.stop();
+
rs1.stopSet();
rs2.stopSet();
rs3.stopSet();
rs4.stopSet();
rs5.stopSet();
+
+})();
diff --git a/jstests/sharding/addshard4.js b/jstests/sharding/addshard4.js
index 885227a1d7c..aa4ccbfed19 100644
--- a/jstests/sharding/addshard4.js
+++ b/jstests/sharding/addshard4.js
@@ -1,6 +1,10 @@
-// a replica set's passive nodes should be okay to add as part of a shard config
+// A replica set's passive nodes should be okay to add as part of a shard config
+(function() {
-s = new ShardingTest( "addshard4", 2 , 0 , 1 , {useHostname : true});
+var s = new ShardingTest({ name: "addshard4",
+ shards: 2,
+ mongos: 1,
+ other: {useHostname : true} });
var r = new ReplSetTest({name: "addshard4", nodes: 3});
r.startSet();
@@ -50,3 +54,7 @@ result = s.adminCommand({"addshard" : "addshard42/"+config.members[2].host});
printjson(result);
assert.eq(result, true);
+
+s.stop();
+
+})();
diff --git a/jstests/sharding/auth2.js b/jstests/sharding/auth2.js
index 8aaca73379f..25e7a0144c2 100644
--- a/jstests/sharding/auth2.js
+++ b/jstests/sharding/auth2.js
@@ -1,4 +1,4 @@
-var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunksize : 1, verbose : 2,
+var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunkSize: 1, verbose : 2,
other : { nopreallocj : 1, verbose : 2, useHostname : true,
configOptions : { verbose : 2 }}});
diff --git a/jstests/sharding/authConnectionHook.js b/jstests/sharding/authConnectionHook.js
index e13c04dc73c..aa8d4d9d9d9 100644
--- a/jstests/sharding/authConnectionHook.js
+++ b/jstests/sharding/authConnectionHook.js
@@ -1,5 +1,5 @@
// Test for SERVER-8786 - if the first operation on an authenticated shard is moveChunk, it breaks the cluster.
-var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunksize : 1, verbose : 2,
+var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunkSize: 1, verbose : 2,
other : { nopreallocj : 1, verbose : 2, useHostname : true,
configOptions : { verbose : 2 }}});
diff --git a/jstests/sharding/authmr.js b/jstests/sharding/authmr.js
index 31a1bcf18f9..a94a013e4dc 100644
--- a/jstests/sharding/authmr.js
+++ b/jstests/sharding/authmr.js
@@ -1,6 +1,8 @@
// Verify that a user with read and write access to database "test" cannot access database "test2"
// via a mapper, reducer or finalizer.
+(function() {
+
//
// User document declarations. All users in this test are added to the admin database.
//
@@ -28,8 +30,13 @@ function assertInsert(collection, obj) {
assert.writeOK(collection.insert(obj));
}
-var cluster = new ShardingTest("authwhere", 1, 0, 1,
- { extraOptions: { keyFile: "jstests/libs/key1" } });
+var cluster = new ShardingTest({ name: "authmr",
+ shards: 1,
+ mongos: 1,
+ other: {
+ extraOptions: { keyFile: "jstests/libs/key1" }
+ }
+ });
// Set up the test data.
(function() {
@@ -109,3 +116,5 @@ assert.throws(function() {
adminDB.logout();
}
}());
+
+})();
diff --git a/jstests/sharding/authwhere.js b/jstests/sharding/authwhere.js
index 9516499580b..37dbbeca5bb 100644
--- a/jstests/sharding/authwhere.js
+++ b/jstests/sharding/authwhere.js
@@ -1,6 +1,8 @@
// Verify that a user with read access to database "test" cannot access database "test2" via a where
// clause.
+(function() {
+
//
// User document declarations. All users in this test are added to the admin database.
//
@@ -28,8 +30,13 @@ function assertInsert(collection, obj) {
assert.writeOK(collection.insert(obj));
}
-var cluster = new ShardingTest("authwhere", 1, 0, 1,
- { extraOptions: { keyFile: "jstests/libs/key1" } });
+var cluster = new ShardingTest({ name: "authwhere",
+ shards: 1,
+ mongos: 1,
+ other: {
+ extraOptions: { keyFile: "jstests/libs/key1" }
+ }
+ });
// Set up the test data.
(function() {
@@ -76,3 +83,5 @@ var cluster = new ShardingTest("authwhere", 1, 0, 1,
adminDB.logout();
}
}());
+
+})();
diff --git a/jstests/sharding/auto1.js b/jstests/sharding/auto1.js
index 70249c85c8d..433352a288b 100644
--- a/jstests/sharding/auto1.js
+++ b/jstests/sharding/auto1.js
@@ -1,6 +1,10 @@
-// auto1.js
+(function() {
-s = new ShardingTest( "auto1" , 2 , 1 , 1, { enableBalancer : 1 } );
+var s = new ShardingTest({ name: "auto1",
+ shards: 2,
+ mongos: 1,
+ verbose: 1,
+ other: { enableBalancer : 1 } });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
@@ -72,3 +76,5 @@ print( counts )
printjson( db.stats() )
s.stop();
+
+})();
diff --git a/jstests/sharding/auto2.js b/jstests/sharding/auto2.js
index b0ce66292d5..0fc17a55124 100644
--- a/jstests/sharding/auto2.js
+++ b/jstests/sharding/auto2.js
@@ -1,6 +1,9 @@
-// auto2.js
+(function() {
-s = new ShardingTest( "auto2" , 2 , 1 , 2 );
+var s = new ShardingTest({ name: "auto2",
+ shards: 2,
+ mongos: 2,
+ verbose: 1 });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
@@ -83,8 +86,6 @@ for ( i=0; i<j*100; i++ ){
}
}
-
-
s.printChangeLog();
print( "missing: " + tojson( missing ) )
@@ -147,3 +148,5 @@ assert.throws( function(){ s.getDB( "test" ).foo.find().sort( { s : 1 } ).forEac
print( "checkpoint G")
s.stop();
+
+})();
diff --git a/jstests/noPassthroughWithMongod/autosplit_heuristics.js b/jstests/sharding/autosplit_heuristics.js
index 6618dd329a5..6618dd329a5 100644
--- a/jstests/noPassthroughWithMongod/autosplit_heuristics.js
+++ b/jstests/sharding/autosplit_heuristics.js
diff --git a/jstests/sharding/balance_tags1.js b/jstests/sharding/balance_tags1.js
index b7bff5ad213..052260b47e9 100644
--- a/jstests/sharding/balance_tags1.js
+++ b/jstests/sharding/balance_tags1.js
@@ -1,11 +1,15 @@
// Test balancing all chunks off of one shard
-var s = new ShardingTest("balance_tags1", 3, 1, 1, { sync:true, chunksize : 1, nopreallocj : true });
-s.config.settings.update({ _id: "balancer" }, { $set: { stopped: false }}, true);
+var st = new ShardingTest({ name: "balance_tags1",
+ shards: 3,
+ mongos: 1,
+ verbose: 1,
+ other: { chunkSize: 1,
+ enableBalancer : true } });
-s.adminCommand({ enablesharding: "test" });
-s.ensurePrimaryShard('test', 'shard0001');
+st.adminCommand({ enablesharding: "test" });
+st.ensurePrimaryShard('test', 'shard0001');
-var db = s.getDB("test");
+var db = st.getDB("test");
var bulk = db.foo.initializeUnorderedBulkOp();
for (i = 0; i < 21; i++) {
@@ -13,21 +17,21 @@ for (i = 0; i < 21; i++) {
}
assert.writeOK(bulk.execute());
-sh.shardCollection("test.foo", { _id : 1 });
+assert.commandWorked(st.s.adminCommand({ shardCollection: 'test.foo', key: { _id : 1 } }));
-sh.stopBalancer();
+st.stopBalancer();
for (i = 0; i < 20; i++) {
- s.adminCommand({ split : "test.foo", middle : { _id : i } });
+ st.adminCommand({ split : "test.foo", middle : { _id : i } });
}
-sh.startBalancer();
+st.startBalancer();
-sh.status(true);
+st.printShardingStatus();
// Wait for the initial balance to happen
assert.soon(function() {
- var counts = s.chunkCounts("foo");
+ var counts = st.chunkCounts("foo");
printjson(counts);
return counts["shard0000"] == 7 &&
counts["shard0001"] == 7 &&
@@ -39,28 +43,28 @@ assert.soon(function() {
// Quick test of some shell helpers and setting up state
sh.addShardTag("shard0000", "a");
-assert.eq([ "a" ] , s.config.shards.findOne({ _id : "shard0000" }).tags);
+assert.eq([ "a" ] , st.config.shards.findOne({ _id : "shard0000" }).tags);
sh.addShardTag("shard0000", "b");
-assert.eq([ "a" , "b" ], s.config.shards.findOne({ _id : "shard0000" }).tags);
+assert.eq([ "a" , "b" ], st.config.shards.findOne({ _id : "shard0000" }).tags);
sh.removeShardTag("shard0000", "b");
-assert.eq([ "a" ], s.config.shards.findOne( { _id : "shard0000" } ).tags);
+assert.eq([ "a" ], st.config.shards.findOne( { _id : "shard0000" } ).tags);
sh.addShardTag("shard0001" , "a");
sh.addTagRange("test.foo" , { _id : -1 } , { _id : 1000 } , "a");
-sh.status( true );
+st.printShardingStatus();
// At this point, everything should drain off shard 2, which does not have the tag
assert.soon(function() {
- var counts = s.chunkCounts("foo");
+ var counts = st.chunkCounts("foo");
printjson(counts);
return counts["shard0002"] == 0;
},
"balance 2 didn't happen",
1000 * 60 * 10 , 1000);
-printjson(sh.status());
+st.printShardingStatus();
-s.stop();
+st.stop();
diff --git a/jstests/sharding/balance_tags2.js b/jstests/sharding/balance_tags2.js
index d6b817c3820..6b584a907d7 100644
--- a/jstests/sharding/balance_tags2.js
+++ b/jstests/sharding/balance_tags2.js
@@ -1,6 +1,10 @@
// Test balancing all chunks to one shard by tagging the full shard-key range on that collection
-var s = new ShardingTest("balance_tags2", 3, 1, 1, { sync:true, chunksize : 1, nopreallocj : true });
-s.config.settings.update({ _id: "balancer" }, { $set: { stopped: false }}, true);
+var s = new ShardingTest({ name: "balance_tags2",
+ shards: 3,
+ mongos: 1,
+ verbose: 1,
+ other: { chunkSize: 1,
+ enableBalancer : true } });
s.adminCommand({ enablesharding: "test" });
s.ensurePrimaryShard('test', 'shard0001');
diff --git a/jstests/sharding/bouncing_count.js b/jstests/sharding/bouncing_count.js
index c5f22f0b170..f6fc6bfc063 100644
--- a/jstests/sharding/bouncing_count.js
+++ b/jstests/sharding/bouncing_count.js
@@ -1,6 +1,9 @@
// Tests whether new sharding is detected on insert by mongos
+(function() {
-var st = new ShardingTest( name = "test", shards = 10, verbose = 0, mongos = 3 )
+var st = new ShardingTest({ name: "test",
+ shards: 10,
+ mongos: 3 });
var mongosA = st.s0
var mongosB = st.s1
@@ -47,4 +50,6 @@ jsTestLog( "Running count!" )
printjson( collB.count() )
printjson( collC.find().toArray() )
-st.stop()
+st.stop();
+
+})();
diff --git a/jstests/sharding/count1.js b/jstests/sharding/count1.js
index 19d70456d1f..486bf40080b 100644
--- a/jstests/sharding/count1.js
+++ b/jstests/sharding/count1.js
@@ -1,13 +1,7 @@
-// count1.js
+(function() {
-s = new ShardingTest( "count1" , 2 , 1 );
-db = s.getDB( "test" );
-
-// Stop balancer since doing manual stuff
-// Make sure we totally stop here, otherwise balancing round can intermittently slip by
-// Counts during balancing are only approximate (as of 7/28/12).
-// If we fix that, we should write a test for it elsewhere
-s.stopBalancer();
+var s = new ShardingTest({ name: "count1", shards: 2 });
+var db = s.getDB( "test" );
// ************** Test Set #1 *************
// Basic counts on "bar" collections, not yet sharded
@@ -177,3 +171,5 @@ assert( ! negSkipLimitResult.ok , "negative skip value with limit shouldn't work
assert( negSkipLimitResult.errmsg.length > 0 , "no error msg for negative skip" );
s.stop();
+
+})();
diff --git a/jstests/sharding/count2.js b/jstests/sharding/count2.js
index 7c84c415646..7361359791d 100644
--- a/jstests/sharding/count2.js
+++ b/jstests/sharding/count2.js
@@ -1,15 +1,16 @@
-// count2.js
+(function() {
-s1 = new ShardingTest( "count2" , 2 , 1 , 2 );
-s2 = s1._mongos[1];
-s1.stopBalancer();
+var s1 = new ShardingTest({ name: "count2",
+ shards: 2,
+ mongos: 2 });
+var s2 = s1._mongos[1];
s1.adminCommand( { enablesharding: "test" } );
s1.ensurePrimaryShard('test', 'shard0001');
s1.adminCommand( { shardcollection: "test.foo" , key : { name : 1 } } );
-db1 = s1.getDB( "test" ).foo;
-db2 = s2.getDB( "test" ).foo;
+var db1 = s1.getDB( "test" ).foo;
+var db2 = s2.getDB( "test" ).foo;
assert.eq( 1, s1.config.chunks.count(), "sanity check A");
@@ -48,3 +49,5 @@ assert.eq( 6, db2.find().limit( 0 ).count( true ));
assert.eq( 6, db2.getDB().runCommand({ count: db2.getName(), limit: 0 }).n );
s1.stop();
+
+})();
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index e7583ba9b99..7c83b79d742 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -1,10 +1,8 @@
// cursor1.js
// checks that cursors survive a chunk's move
+(function() {
-s = new ShardingTest( "sharding_cursor1" , 2 , 2 )
-
-// take the balancer out of the equation
-s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
+var s = new ShardingTest({ name: "sharding_cursor1", shards: 2 });
s.config.settings.find().forEach( printjson )
// create a sharded 'test.foo', for the moment with just one chunk
@@ -63,4 +61,6 @@ assert.throws( function(){ cur.next(); } , null , "T5" )
after = db.serverStatus().metrics.cursor;
gc(); gc()
-s.stop()
+s.stop();
+
+})();
diff --git a/jstests/sharding/diffservers1.js b/jstests/sharding/diffservers1.js
index f2c16664398..b485d508b43 100644
--- a/jstests/sharding/diffservers1.js
+++ b/jstests/sharding/diffservers1.js
@@ -1,4 +1,6 @@
-var s = new ShardingTest( "diffservers1" , 2 );
+(function() {
+
+var s = new ShardingTest({ name: "diffservers1", shards: 2 });
assert.eq( 2 , s.config.shards.count() , "server count wrong" );
assert.eq( 0 , s._shardServers[0].getDB( "config" ).shards.count() , "shouldn't be here" );
@@ -11,6 +13,7 @@ test1.save( { a : 3 } );
assert( 3 , test1.count() );
assert( ! s.admin.runCommand( { addshard: "sdd$%" } ).ok , "bad hostname" );
+
var portWithoutHostRunning = allocatePort();
assert(!s.admin.runCommand({addshard: "127.0.0.1:" + portWithoutHostRunning}).ok, "host not up");
assert(!s.admin.runCommand({ addshard: "10.0.0.1:" + portWithoutHostRunning}).ok,
@@ -18,3 +21,4 @@ assert(!s.admin.runCommand({ addshard: "10.0.0.1:" + portWithoutHostRunning}).ok
s.stop();
+})();
diff --git a/jstests/sharding/addshard3.js b/jstests/sharding/disallow_mongos_add_as_shard.js
index f8d43587fc0..524715bde25 100644
--- a/jstests/sharding/addshard3.js
+++ b/jstests/sharding/disallow_mongos_add_as_shard.js
@@ -1,6 +1,6 @@
(function() {
-var st = new ShardingTest("add_shard3", 1);
+var st = new ShardingTest({ name: "add_shard3", shards: 1 });
var result = st.admin.runCommand({addshard: st.s.host});
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
index 22fed89fef8..e5f88b907be 100644
--- a/jstests/sharding/features1.js
+++ b/jstests/sharding/features1.js
@@ -1,8 +1,6 @@
-// features1.js
+(function() {
-s = new ShardingTest( "features1" , 2 , 1 , 1 );
-
-s.stopBalancer();
+var s = new ShardingTest({ name: "features1", shards: 2, mongos: 1 });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
@@ -197,5 +195,6 @@ r = db.getMongo().getDBs()
assert.eq( 3 , r.databases.length , "listDatabases 1 : " + tojson( r ) )
assert.eq( "number", typeof(r.totalSize) , "listDatabases 2 : " + tojson( r ) );
-s.stop()
+s.stop();
+})();
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index a3e0ae777f3..80a06ae6ba9 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -1,10 +1,6 @@
-// features2.js
+(function() {
-s = new ShardingTest( "features2" , 2 , 1 , 1 );
-
-// The counts and the tests for "on-num-shards" only works for previous assumptions in balancer
-// behavior and assumes migrations do not occur during count() commands.
-s.stopBalancer()
+var s = new ShardingTest({ name: "features2", shards: 2, mongos: 1 });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
@@ -202,3 +198,5 @@ delete im2.localTime;
assert.eq( isMaster, im2 );
s.stop();
+
+})();
diff --git a/jstests/sharding/findandmodify1.js b/jstests/sharding/findandmodify1.js
index bc15a40f48d..a144eceed72 100644
--- a/jstests/sharding/findandmodify1.js
+++ b/jstests/sharding/findandmodify1.js
@@ -1,4 +1,6 @@
-s = new ShardingTest( "find_and_modify_sharded" , 2 , 2);
+(function() {
+
+var s = new ShardingTest({ name: "find_and_modify_sharded", shards: 2 });
s.adminCommand( { enablesharding : "test" } );
db = s.getDB( "test" );
@@ -58,3 +60,5 @@ for (var i=0; i < numObjs; i++){
}
s.stop();
+
+})();
diff --git a/jstests/sharding/findandmodify2.js b/jstests/sharding/findandmodify2.js
index 189838d76d3..2ce2988c470 100644
--- a/jstests/sharding/findandmodify2.js
+++ b/jstests/sharding/findandmodify2.js
@@ -1,4 +1,4 @@
-var s = new ShardingTest({ name: "find_and_modify_sharded_2", shards: 2, verbose: 2, mongos: 1, other: { chunksize: 1 }});
+var s = new ShardingTest({ name: "find_and_modify_sharded_2", shards: 2, verbose: 2, mongos: 1, other: { chunkSize: 1 }});
s.adminCommand( { enablesharding : "test" } );
var db = s.getDB( "test" );
diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js
index 77ed3ba7444..57092b3a566 100644
--- a/jstests/sharding/index1.js
+++ b/jstests/sharding/index1.js
@@ -1,10 +1,7 @@
-/**
- * @tags : [ hashed ]
- */
+// SERVER-2326 - make sure that sharding only works with unique indices
+(function() {
-// from server 2326 - make sure that sharding only works with unique indices
-
-s = new ShardingTest( "shard_index", 2, 0, 1 )
+var s = new ShardingTest({ name: "shard_index", shards: 2, mongos: 1 });
// Regenerate fully because of SERVER-2782
for ( var i = 0; i < 22; i++ ) {
@@ -390,3 +387,5 @@ for ( var i = 0; i < 22; i++ ) {
}
s.stop();
+
+})();
diff --git a/jstests/sharding/jumbo1.js b/jstests/sharding/jumbo1.js
index b8882f632ea..b55c41bbeb9 100644
--- a/jstests/sharding/jumbo1.js
+++ b/jstests/sharding/jumbo1.js
@@ -1,6 +1,9 @@
-// jump1.js
+(function() {
-s = new ShardingTest( "jump1" , 2 /* numShards */, 2 /* verboseLevel */, 1 /* numMongos */, { chunksize : 1 } )
+var s = new ShardingTest({ name: "jumbo1",
+ shards: 2,
+ mongos: 1,
+ other: { chunkSize: 1 } });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
@@ -51,4 +54,6 @@ assert.soon( function(){
} , "balance didn't happen" , 1000 * 60 * 5 , 5000 );
-s.stop()
+s.stop();
+
+})();
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
index a458d5a9284..1b512d646a4 100644
--- a/jstests/sharding/key_many.js
+++ b/jstests/sharding/key_many.js
@@ -1,8 +1,7 @@
-// key_many.js
+(function() {
-// values have to be sorted
-// you must have exactly 6 values in each array
-types = [
+// Values have to be sorted - you must have exactly 6 values in each array
+var types = [
{ name : "string" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield: "k" } ,
{ name : "double" , values : [ 1.2 , 3.5 , 4.5 , 4.6 , 6.7 , 9.9 ] , keyfield : "a" } ,
{ name : "date" , values : [ new Date( 1000000 ) , new Date( 2000000 ) , new Date( 3000000 ) , new Date( 4000000 ) , new Date( 5000000 ) , new Date( 6000000 ) ] , keyfield : "a" } ,
@@ -15,8 +14,7 @@ types = [
{ name : "oid_other" , values : [ ObjectId() , ObjectId() , ObjectId() , ObjectId() , ObjectId() , ObjectId() ] , keyfield : "o" } ,
]
-s = new ShardingTest( "key_many" , 2 );
-s.setBalancer( false )
+var s = new ShardingTest({ name: "key_many", shards: 2 });
s.adminCommand( { enablesharding : "test" } )
db = s.getDB( "test" );
@@ -73,8 +71,6 @@ function getKey( o ){
return o;
}
-
-
for ( var i=0; i<types.length; i++ ){
curT = types[i]; //global
@@ -156,7 +152,6 @@ for ( var i=0; i<types.length; i++ ){
// TODO remove
}
-
s.stop();
-
+})();
diff --git a/jstests/sharding/key_string.js b/jstests/sharding/key_string.js
index 7434b75e52f..4c365fdf0d8 100644
--- a/jstests/sharding/key_string.js
+++ b/jstests/sharding/key_string.js
@@ -1,9 +1,7 @@
-// key_string.js
+(function() {
-s = new ShardingTest( "keystring" , 2 );
-s.stopBalancer();
+var s = new ShardingTest({ name: "keystring", shards: 2 });
-db = s.getDB( "test" );
s.adminCommand( { enablesharding : "test" } )
s.ensurePrimaryShard('test', 'shard0001');
s.adminCommand( { shardcollection : "test.foo" , key : { name : 1 } } );
@@ -13,6 +11,8 @@ seconday = s.getOther( primary ).getDB( "test" );
assert.eq( 1 , s.config.chunks.count() , "sanity check A" );
+var db = s.getDB( "test" );
+
db.foo.save( { name : "eliot" } )
db.foo.save( { name : "sara" } )
db.foo.save( { name : "bob" } )
@@ -48,4 +48,4 @@ assert.throws( function(){ s.adminCommand( { split : "test.foo" , middle : { nam
s.stop();
-
+})();
diff --git a/jstests/noPassthroughWithMongod/large_chunk.js b/jstests/sharding/large_chunk.js
index 13183896ae8..7506f03e14a 100644
--- a/jstests/noPassthroughWithMongod/large_chunk.js
+++ b/jstests/sharding/large_chunk.js
@@ -1,13 +1,17 @@
// Where we test operations dealing with large chunks
+(function() {
// Starts a new sharding environment limiting the chunksize to 1GB (highest value allowed).
// Note that early splitting will start with a 1/4 of max size currently.
-var s = new ShardingTest({ name: 'large_chunk', shards: 2, verbose: 2,
- other: { chunkSize: 1024 }});
+var s = new ShardingTest({ name: 'large_chunk',
+ shards: 2,
+ verbose: 2,
+ other: { chunkSize: 1024 } });
// take the balancer out of the equation
s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
-s.config.settings.find().forEach( printjson )
+s.config.settings.find().forEach(printjson);
+
db = s.getDB( "test" );
//
@@ -41,8 +45,12 @@ secondary = s.getOther( primary ).getDB( "test" );
// Make sure that we don't move that chunk if it goes past what we consider the maximum chunk size
print("Checkpoint 1a")
max = 200 * 1024 * 1024;
-moveChunkCmd = { movechunk : "test.foo" , find : { _id : 1 } , to : secondary.getMongo().name , maxChunkSizeBytes : max };
-assert.throws( function() { s.adminCommand( moveChunkCmd ); } );
+assert.throws(function() {
+ s.adminCommand({ movechunk: "test.foo",
+ find: { _id: 1 },
+ to: secondary.getMongo().name,
+ maxChunkSizeBytes: max });
+ });
// Move the chunk
print("checkpoint 1b");
@@ -54,3 +62,5 @@ assert.neq( before[0].shard , after[0].shard , "move chunk did not work" );
s.config.changelog.find().forEach( printjson )
s.stop();
+
+})();
diff --git a/jstests/sharding/limit_push.js b/jstests/sharding/limit_push.js
index 5aa9bd5bee0..53acb6583eb 100644
--- a/jstests/sharding/limit_push.js
+++ b/jstests/sharding/limit_push.js
@@ -1,12 +1,9 @@
// This test is to ensure that limit() clauses are pushed down to the shards and evaluated
// See: http://jira.mongodb.org/browse/SERVER-1896
+(function() {
-s = new ShardingTest( "limit_push", 2, 1, 1 );
-
-// Stop balancer since we do manual moves.
-s.stopBalancer();
-
-db = s.getDB( "test" );
+var s = new ShardingTest({ name: "limit_push", shards: 2, mongos: 1 });
+var db = s.getDB( "test" );
// Create some data
for (i=0; i < 100; i++) { db.limit_push.insert({ _id : i, x: i}); }
@@ -50,3 +47,5 @@ for (var j in execStages.shards) {
}
s.stop();
+
+})();
diff --git a/jstests/sharding/mapReduce_inSharded.js b/jstests/sharding/mapReduce_inSharded.js
index ae35861fb5a..4fffeed9e1e 100644
--- a/jstests/sharding/mapReduce_inSharded.js
+++ b/jstests/sharding/mapReduce_inSharded.js
@@ -6,7 +6,7 @@ var verifyOutput = function(out) {
assert.eq(out.counts.output, 512, "output count is wrong");
}
-var st = new ShardingTest({ shards : 2, verbose : 1, mongos : 1, other : { chunksize : 1 } });
+var st = new ShardingTest({ shards : 2, verbose : 1, mongos : 1, other : { chunkSize: 1 } });
st.startBalancer();
st.adminCommand( { enablesharding : "mrShard" } )
diff --git a/jstests/sharding/mapReduce_inSharded_outSharded.js b/jstests/sharding/mapReduce_inSharded_outSharded.js
index 69174f2589d..f93acae31a3 100644
--- a/jstests/sharding/mapReduce_inSharded_outSharded.js
+++ b/jstests/sharding/mapReduce_inSharded_outSharded.js
@@ -6,7 +6,7 @@ var verifyOutput = function(out) {
assert.eq(out.counts.output, 512, "output count is wrong");
}
-var st = new ShardingTest({ shards : 2, verbose : 1, mongos : 1, other : { chunksize : 1 } });
+var st = new ShardingTest({ shards : 2, verbose : 1, mongos : 1, other : { chunkSize: 1 } });
st.startBalancer();
st.adminCommand( { enablesharding : "mrShard" } )
diff --git a/jstests/sharding/mapReduce_nonSharded.js b/jstests/sharding/mapReduce_nonSharded.js
index cd1437b83a5..acf9e20319a 100644
--- a/jstests/sharding/mapReduce_nonSharded.js
+++ b/jstests/sharding/mapReduce_nonSharded.js
@@ -6,7 +6,7 @@ var verifyOutput = function(out) {
assert.eq(out.counts.output, 512, "output count is wrong");
}
-var st = new ShardingTest({ shards : 2, verbose : 1, mongos : 1, other : { chunksize : 1 } });
+var st = new ShardingTest({ shards : 2, verbose : 1, mongos : 1, other : { chunkSize: 1 } });
st.startBalancer();
st.adminCommand( { enablesharding : "mrShard" } )
diff --git a/jstests/sharding/mapReduce_outSharded.js b/jstests/sharding/mapReduce_outSharded.js
index a42eb166e65..331e8a52a4d 100644
--- a/jstests/sharding/mapReduce_outSharded.js
+++ b/jstests/sharding/mapReduce_outSharded.js
@@ -6,7 +6,7 @@ var verifyOutput = function(out) {
assert.eq(out.counts.output, 512, "output count is wrong");
}
-var st = new ShardingTest({ shards : 2, verbose : 1, mongos : 1, other : { chunksize : 1 } });
+var st = new ShardingTest({ shards : 2, verbose : 1, mongos : 1, other : { chunkSize: 1 } });
st.startBalancer();
st.adminCommand( { enablesharding : "mrShard" } )
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index 424eddb9042..bd3f4e9de0b 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -1,6 +1,11 @@
+(function() {
-s = new ShardingTest( "migrateBig" , 2 , 0 , 1 , { chunksize : 1 } );
-s.config.settings.update( { _id: "balancer" }, { $set : { stopped : true, _waitForDelete : true } } , true );
+var s = new ShardingTest({ name: "migrateBig",
+ shards: 2,
+ mongos: 1,
+ other: { chunkSize: 1 } });
+
+s.config.settings.update( { _id: "balancer" }, { $set : { _waitForDelete : true } } , true);
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
s.adminCommand( { shardcollection : "test.foo" , key : { x : 1 } } );
@@ -62,4 +67,6 @@ assert.soon( function(){ return !s.isAnyBalanceInFlight(); } );
assert.eq( coll.count() , coll.find().itcount() );
-s.stop()
+s.stop();
+
+})();
diff --git a/jstests/sharding/movePrimary1.js b/jstests/sharding/movePrimary1.js
index 7e764efc507..50cc1ccacca 100644
--- a/jstests/sharding/movePrimary1.js
+++ b/jstests/sharding/movePrimary1.js
@@ -1,6 +1,6 @@
+(function() {
-
-s = new ShardingTest( "movePrimary1" , 2 );
+var s = new ShardingTest({ name: "movePrimary1", shards: 2 });
initDB = function( name ){
var db = s.getDB( name );
@@ -49,3 +49,4 @@ assert.eq(res.code, 70, 'ShardNotFound code not used');
s.stop();
+})();
diff --git a/jstests/sharding/movechunk_with_def_paranoia.js b/jstests/sharding/movechunk_with_def_paranoia.js
index c2cd70df970..f689eb072d5 100644
--- a/jstests/sharding/movechunk_with_def_paranoia.js
+++ b/jstests/sharding/movechunk_with_def_paranoia.js
@@ -3,7 +3,7 @@
/**
* This test checks that the moveChunk directory is not created
*/
-var st = new ShardingTest( { shards:2, mongos:1 , other : { chunksize : 1 }});
+var st = new ShardingTest( { shards:2, mongos:1 , other : { chunkSize: 1 }});
load("jstests/sharding/movechunk_include.js")
setupMoveChunkTest(st);
diff --git a/jstests/sharding/movechunk_with_moveParanoia.js b/jstests/sharding/movechunk_with_moveParanoia.js
index 4091792d27f..a87969a8737 100644
--- a/jstests/sharding/movechunk_with_moveParanoia.js
+++ b/jstests/sharding/movechunk_with_moveParanoia.js
@@ -6,7 +6,7 @@
var st = new ShardingTest( { shards: 2,
mongos:1,
other : {
- chunksize : 1,
+ chunkSize: 1,
shardOptions: { moveParanoia:"" }}});
load("jstests/sharding/movechunk_include.js")
diff --git a/jstests/sharding/movechunk_with_noMoveParanoia.js b/jstests/sharding/movechunk_with_noMoveParanoia.js
index 1844528b225..b08f4a61ff5 100644
--- a/jstests/sharding/movechunk_with_noMoveParanoia.js
+++ b/jstests/sharding/movechunk_with_noMoveParanoia.js
@@ -6,7 +6,7 @@
var st = new ShardingTest( { shards: 2,
mongos:1,
other : {
- chunksize : 1,
+ chunkSize: 1,
shardOptions: { noMoveParanoia:"" }}});
load("jstests/sharding/movechunk_include.js")
diff --git a/jstests/sharding/mrShardedOutput.js b/jstests/sharding/mrShardedOutput.js
index 79dce25ce18..b30c45b8aa2 100644
--- a/jstests/sharding/mrShardedOutput.js
+++ b/jstests/sharding/mrShardedOutput.js
@@ -4,7 +4,7 @@
// collection input twice the size of the first and outputs it to the new sharded
// collection created in the first pass.
-var st = new ShardingTest({ shards: 2, verbose: 1, other: { chunksize : 1 }});
+var st = new ShardingTest({ shards: 2, verbose: 1, other: { chunkSize: 1 }});
st.stopBalancer();
diff --git a/jstests/sharding/multi_mongos2.js b/jstests/sharding/multi_mongos2.js
index bcb66fc79e3..bad6afd3a54 100644
--- a/jstests/sharding/multi_mongos2.js
+++ b/jstests/sharding/multi_mongos2.js
@@ -1,9 +1,7 @@
-// multi_mongos2.js
// This tests sharding an existing collection that both shards are aware of (SERVER-2828)
+(function() {
-
-// setup sharding with two mongos, s1 and s2
-s1 = new ShardingTest( "multi_mongos1" , 2 , 1 , 2 );
+var s1 = new ShardingTest({ name: "multi_mongos1", shards: 2, mongos: 2 });
s2 = s1._mongos[1];
s1.adminCommand( { enablesharding : "test" } );
@@ -67,3 +65,5 @@ assert.eq(1 , res.ok, tojson(res));
s1.setBalancer( true )
s1.stop();
+
+})();
diff --git a/jstests/sharding/multi_mongos2a.js b/jstests/sharding/multi_mongos2a.js
index 75583f9cd91..712e3cc9dd1 100644
--- a/jstests/sharding/multi_mongos2a.js
+++ b/jstests/sharding/multi_mongos2a.js
@@ -1,9 +1,9 @@
-// multi_mongos2.js
// This tests sharding an existing collection that both shards are aware of (SERVER-2828)
+(function() {
-
-// setup sharding with two mongos, s1 and s2
-s1 = new ShardingTest( "multi_mongos1" , 2 , 1 , 2 );
+var s1 = new ShardingTest({ name: "multi_mongos2a",
+ shards: 2,
+ mongos: 2 });
s2 = s1._mongos[1];
s1.adminCommand( { enablesharding : "test" } );
@@ -30,3 +30,5 @@ assert.eq(1, s1.getDB('test').existing.count({_id:1})); // SERVER-2828
assert.eq(1, s2.getDB('test').existing.count({_id:1}));
s1.stop();
+
+})();
diff --git a/jstests/sharding/presplit.js b/jstests/sharding/presplit.js
index 894ea473a79..5a4a69cc5b3 100644
--- a/jstests/sharding/presplit.js
+++ b/jstests/sharding/presplit.js
@@ -1,7 +1,9 @@
-// presplit.js
+(function() {
-// Starts a new sharding environment limiting the chunksize to 1MB.
-s = new ShardingTest( "presplit" , 2 , 2 , 1 , { chunksize : 1 } );
+var s = new ShardingTest({ name: "presplit",
+ shards: 2,
+ mongos: 1,
+ other: { chunkSize : 1 } });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
@@ -37,3 +39,5 @@ assert.eq( num , primary.foo.count() );
s.printChangeLog();
s.stop();
+
+})();
diff --git a/jstests/sharding/remove1.js b/jstests/sharding/remove1.js
index 19d911d9fb4..0143e49dc1b 100644
--- a/jstests/sharding/remove1.js
+++ b/jstests/sharding/remove1.js
@@ -1,4 +1,6 @@
-s = new ShardingTest( "remove_shard1", 2 );
+(function() {
+
+var s = new ShardingTest({ name: "remove_shard1", shards: 2 });
assert.eq( 2, s.config.shards.count() , "initial server count wrong" );
@@ -23,3 +25,5 @@ assert.eq( 2, s.config.shards.count(), "new server does not appear in count" );
MongoRunner.stopMongod(conn);
s.stop();
+
+})();
diff --git a/jstests/sharding/shard_existing.js b/jstests/sharding/shard_existing.js
index f2dca5a7b7a..6283a0868bf 100644
--- a/jstests/sharding/shard_existing.js
+++ b/jstests/sharding/shard_existing.js
@@ -1,4 +1,10 @@
-s = new ShardingTest( "shard_existing" , 2 /* numShards */, 1 /* verboseLevel */, 1 /* numMongos */, { chunksize : 1 } )
+(function() {
+
+var s = new ShardingTest({ name: "shard_existing",
+ shards: 2,
+ mongos: 1,
+ verbose: 1,
+ other: { chunkSize: 1 } });
db = s.getDB( "test" )
@@ -33,3 +39,5 @@ var guess = Math.ceil(dataSize / (512*1024 + avgObjSize));
assert( Math.abs( numChunks - guess ) < 2, "not right number of chunks" );
s.stop();
+
+})();
diff --git a/jstests/sharding/shard_keycount.js b/jstests/sharding/shard_keycount.js
index 408774785c8..5702b59dc84 100644
--- a/jstests/sharding/shard_keycount.js
+++ b/jstests/sharding/shard_keycount.js
@@ -1,9 +1,10 @@
// Tests splitting a chunk twice
+(function() {
-s = new ShardingTest( "shard_keycount" , 2, 0, 1, /* chunkSize */1);
-
-// Kill balancer
-s.config.settings.update({ _id: "balancer" }, { $set : { stopped: true } }, true )
+var s = new ShardingTest({ name: "shard_keycount",
+ shards: 2,
+ mongos: 1,
+ other:{ chunkSize: 1 } });
dbName = "test"
collName = "foo"
@@ -45,3 +46,5 @@ coll.update({ _id : 3 }, { _id : 3 })
s.adminCommand({ split : ns, find : { _id : 3 } })
s.stop();
+
+})();
diff --git a/jstests/sharding/shard_targeting.js b/jstests/sharding/shard_targeting.js
index 6a2634f40a0..98840c0c3ac 100644
--- a/jstests/sharding/shard_targeting.js
+++ b/jstests/sharding/shard_targeting.js
@@ -3,11 +3,11 @@
// If the optional query is not given, mongos will wrongly use the command
// BSONObj itself as the query to target shards, which could return wrong
// shards if the shard key happens to be one of the fields in the command object.
+(function() {
-var s = new ShardingTest("shard_targeting", 2, 0, 1);
+var s = new ShardingTest({ name: "shard_targeting", shards: 2 });
s.adminCommand({ enablesharding : "test" });
s.ensurePrimaryShard('test', 'shard0001');
-s.stopBalancer();
var db = s.getDB("test");
var res;
@@ -25,7 +25,7 @@ for (var i=0; i<50; i++) {
}
var theOtherShard = s.getOther( s.getServer( "test" ) ).name;
-printShardingStatus();
+s.printShardingStatus();
// Count documents on both shards
@@ -47,7 +47,7 @@ for (var i=0; i<50; i++) {
db.foo.insert({mapReduce: "" + i}); // to the chunk including string
}
-printShardingStatus();
+s.printShardingStatus();
function m() { emit("total", 1); }
function r(k, v) { return Array.sum(v); }
@@ -63,3 +63,5 @@ res = db.foo.runCommand(
assert.eq(res.results[0].value, 100);
s.stop();
+
+})();
diff --git a/jstests/sharding/shard_with_special_db_names.js b/jstests/sharding/shard_with_special_db_names.js
index 2887f364743..cb1ae66a04c 100644
--- a/jstests/sharding/shard_with_special_db_names.js
+++ b/jstests/sharding/shard_with_special_db_names.js
@@ -1,6 +1,8 @@
(function(){
-var s = new ShardingTest( "shard_with_special_db_names", 2, 0, 2 );
+var s = new ShardingTest({ name: "shard_with_special_db_names",
+ shards: 2,
+ mongos: 2 });
var specialDB = "[a-z]+";
var specialNS = specialDB + ".special";
@@ -26,4 +28,3 @@ assert.eq( cursor.count(), 1 );
assert( cursor.next()["dropped"] );
})();
-
diff --git a/jstests/noPassthroughWithMongod/sharding_balance1.js b/jstests/sharding/sharding_balance1.js
index e7897d6e87f..e577511e571 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance1.js
+++ b/jstests/sharding/sharding_balance1.js
@@ -1,7 +1,10 @@
-// sharding_balance1.js
+(function() {
-
-s = new ShardingTest( "slow_sharding_balance1" , 2 , 1 , 1 , { chunksize : 1, enableBalancer : true } )
+var s = new ShardingTest({ name: "slow_sharding_balance1",
+ shards: 2,
+ mongos: 1,
+ verbose: 1,
+ other: { chunkSize: 1, enableBalancer : true } });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
@@ -56,3 +59,5 @@ assert.soon( function(){
} , "removeshard didn't happen" , 1000 * 60 * 3 , 5000 );
s.stop();
+
+})();
diff --git a/jstests/noPassthroughWithMongod/sharding_balance2.js b/jstests/sharding/sharding_balance2.js
index 9aee92bf3bf..74cf0be1fa9 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance2.js
+++ b/jstests/sharding/sharding_balance2.js
@@ -7,7 +7,7 @@
var MaxSizeMB = 1;
-var s = new ShardingTest({ shards: 2, other: { chunksize: 1, manualAddShard: true }});
+var s = new ShardingTest({ shards: 2, other: { chunkSize: 1, manualAddShard: true }});
var db = s.getDB( "test" );
s.stopBalancer();
diff --git a/jstests/noPassthroughWithMongod/sharding_balance3.js b/jstests/sharding/sharding_balance3.js
index fb34a09e24d..f42dec42a57 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance3.js
+++ b/jstests/sharding/sharding_balance3.js
@@ -1,8 +1,12 @@
-// sharding_balance3.js
+// Simple test to make sure things get balanced
-// simple test to make sure things get balanced
+(function() {
-s = new ShardingTest( "slow_sharding_balance3" , 2 , 3 , 1 , { chunksize : 1, enableBalancer : true } );
+var s = new ShardingTest({ name: "slow_sharding_balance3",
+ shards: 2,
+ mongos: 1,
+ verbose: 2,
+ other: { chunkSize: 1, enableBalancer : true } });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
@@ -56,4 +60,6 @@ assert.repeat( function(){
return d != currDiff;
} , "balance with stopped flag should not have happened" , 1000 * 60 , 5000 );
-s.stop()
+s.stop();
+
+})();
diff --git a/jstests/noPassthroughWithMongod/sharding_balance4.js b/jstests/sharding/sharding_balance4.js
index 9ce404d9f95..3560dac9a6b 100644
--- a/jstests/noPassthroughWithMongod/sharding_balance4.js
+++ b/jstests/sharding/sharding_balance4.js
@@ -1,9 +1,11 @@
-// sharding_balance4.js
+// Check that doing updates done during a migrate all go to the right place
+(function() {
-// check that doing updates done during a migrate all go to the right place
-
-s = new ShardingTest( "slow_sharding_balance4" , 2 , 1 , 1 , { chunksize : 1 } )
-s.stopBalancer();
+var s = new ShardingTest({ name: "slow_sharding_balance4",
+ shards: 2,
+ mongos: 1,
+ verbose: 1,
+ other: { chunkSize: 1 } });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
@@ -134,3 +136,5 @@ assert.soon( function(){
} , "balance didn't happen" , 1000 * 60 * 20 , 1 );
s.stop();
+
+})();
diff --git a/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js b/jstests/sharding/sharding_migrate_cursor1.js
index 93422301de2..fa8668ae2bb 100644
--- a/jstests/noPassthroughWithMongod/sharding_migrate_cursor1.js
+++ b/jstests/sharding/sharding_migrate_cursor1.js
@@ -1,10 +1,13 @@
// SERVER-2068
+(function() {
-chunksize = 25
+var chunkSize = 25
-s = new ShardingTest( "migrate_cursor1" , 2 , 1 , 1 , { chunksize : chunksize } );
-
-s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
+var s = new ShardingTest({ name: "migrate_cursor1",
+ shards: 2,
+ mongos: 1,
+ verbose: 1,
+ other: { chunkSize : chunkSize } });
s.adminCommand( { enablesharding : "test" } );
db = s.getDB( "test" )
@@ -18,7 +21,7 @@ while ( bigString.length < stringSize )
bigString += "asdasdas";
stringSize = bigString.length
-docsPerChunk = Math.ceil( ( chunksize * 1024 * 1024 ) / ( stringSize - 12 ) )
+docsPerChunk = Math.ceil( ( chunkSize * 1024 * 1024 ) / ( stringSize - 12 ) )
numChunks = 5
numDocs = 20 * docsPerChunk
@@ -74,4 +77,6 @@ join();
// Use itcount() to ignore orphan documents.
assert.eq( numDocs , t.find().itcount() , "at end 2" )
-s.stop()
+s.stop();
+
+})();
diff --git a/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js b/jstests/sharding/sharding_multiple_ns_rs.js
index 4f1e986097c..1bb121c544f 100644
--- a/jstests/noPassthroughWithMongod/sharding_multiple_ns_rs.js
+++ b/jstests/sharding/sharding_multiple_ns_rs.js
@@ -1,5 +1,10 @@
+(function() {
-s = new ShardingTest( "blah" , 1 /* numShards */, 1 /* verboseLevel */, 1 /* numMongos */, { rs : true , chunksize : 1 } )
+var s = new ShardingTest({ name: "Sharding multiple ns",
+ shards: 1,
+ mongos: 1,
+ verbose: 1,
+ other: { rs : true , chunkSize: 1 } });
s.adminCommand( { enablesharding : "test" } );
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
@@ -26,9 +31,7 @@ assert.eq( 5 , dbother.foo.findOne( { _id : 5 } ).x );
assert.eq( 5 , db.bar.findOne( { _id : 5 } ).x );
assert.eq( 5 , dbother.bar.findOne( { _id : 5 } ).x );
-
s._rs[0].test.awaitReplication();
-
s._rs[0].test.stopMaster(15);
// Wait for the primary to come back online...
@@ -50,6 +53,6 @@ assert.eq( 5 , yetagain.getDB( "test" ).foo.findOne( { _id : 5 } ).x )
assert.eq( 5 , dbother.bar.findOne( { _id : 5 } ).x );
assert.eq( 5 , dbother.foo.findOne( { _id : 5 } ).x );
-
s.stop();
+})();
diff --git a/jstests/noPassthroughWithMongod/sharding_rs2.js b/jstests/sharding/sharding_rs2.js
index 117b180a6b5..c439eeb6b17 100644
--- a/jstests/noPassthroughWithMongod/sharding_rs2.js
+++ b/jstests/sharding/sharding_rs2.js
@@ -1,7 +1,9 @@
// mostly for testing mongos w/replica sets
+(function() {
-var s = new ShardingTest({ shards: { rs0: { nodes: 2 }, rs1: { nodes: 2 }},
- verbose: 1, chunkSize: 1 });
+var s = new ShardingTest({ shards: { rs0: { nodes: 2 }, rs1: { nodes: 2 } },
+ verbose: 1,
+ chunkSize: 1 });
db = s.getDB( "test" )
t = db.foo
@@ -215,5 +217,6 @@ for ( i=0; i<10; i++ ) {
printjson( db.adminCommand( "getShardMap" ) );
+s.stop();
-s.stop()
+})();
diff --git a/jstests/sharding/sort1.js b/jstests/sharding/sort1.js
index f2a682a82d2..2d32a539c35 100644
--- a/jstests/sharding/sort1.js
+++ b/jstests/sharding/sort1.js
@@ -1,6 +1,8 @@
+(function() {
-s = new ShardingTest( "sort1" , 2 , 0 , 2 )
-s.stopBalancer();
+var s = new ShardingTest({ name: "sort1",
+ shards: 2,
+ mongos: 2 });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
@@ -98,3 +100,5 @@ assert.eq( backward , getSorted( "sub.x" , 1 , { '_id' : 0, 'sub.num':1 } ) , "D
assert.eq( forward , getSorted( "sub.x" , -1 , { '_id' : 0, 'sub.num':1 } ) , "D12" )
s.stop();
+
+})();
diff --git a/jstests/sharding/split_with_force.js b/jstests/sharding/split_with_force.js
index 4307fa9e64b..117d17361e0 100644
--- a/jstests/sharding/split_with_force.js
+++ b/jstests/sharding/split_with_force.js
@@ -2,7 +2,7 @@
// Tests autosplit locations with force : true
//
-var options = { chunksize : 1, // MB
+var options = { chunkSize: 1, // MB
mongosOptions : { noAutoSplit : "" }
};
diff --git a/jstests/sharding/split_with_force_small.js b/jstests/sharding/split_with_force_small.js
index 8b313590a52..86fb4667132 100644
--- a/jstests/sharding/split_with_force_small.js
+++ b/jstests/sharding/split_with_force_small.js
@@ -2,7 +2,7 @@
// Tests autosplit locations with force : true, for small collections
//
-var options = { chunksize : 1, // MB
+var options = { chunkSize: 1, // MB
mongosOptions : { noAutoSplit : "" }
};
diff --git a/jstests/sharding/stats.js b/jstests/sharding/stats.js
index 52ec40556d7..de08c73d83d 100644
--- a/jstests/sharding/stats.js
+++ b/jstests/sharding/stats.js
@@ -1,4 +1,11 @@
-s = new ShardingTest( "stats" , 2 , 1 , 1, { enableBalancer : 1 } );
+(function () {
+
+var s = new ShardingTest({ name: "stats",
+ shards: 2,
+ mongos: 1,
+ verbose: 1,
+ other: { enableBalancer: true } });
+
s.adminCommand( { enablesharding : "test" } );
a = s._connections[0].getDB( "test" );
@@ -188,4 +195,6 @@ collStatComp(coll_not_scaled, coll_scaled_1024, 1024, true);
checkIndexDetails({indexDetails: true, indexDetailsName: indexName}, indexName);
}());
-s.stop()
+s.stop();
+
+})();
diff --git a/jstests/sharding/sync_cluster_config/parallel.js b/jstests/sharding/sync_cluster_config/parallel.js
index be2aab23d17..facc29ea361 100644
--- a/jstests/sharding/sync_cluster_config/parallel.js
+++ b/jstests/sharding/sync_cluster_config/parallel.js
@@ -1,7 +1,11 @@
// This test fails when run with authentication because benchRun with auth is broken: SERVER-6388
-numShards = 3
-s = new ShardingTest( "parallel" , numShards , 2 , 2 , { sync : true } );
-s.setBalancer( false )
+var numShards = 3
+
+var s = new ShardingTest({ name: "parallel",
+ shards: numShards,
+ mongos: 2,
+ verbose: 1,
+ other: { sync : true } });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
diff --git a/jstests/sharding/sync_cluster_config/sync2.js b/jstests/sharding/sync_cluster_config/sync2.js
index de4ea6b2ddc..4b94eb03fd9 100644
--- a/jstests/sharding/sync_cluster_config/sync2.js
+++ b/jstests/sharding/sync_cluster_config/sync2.js
@@ -1,7 +1,9 @@
-// sync2.js
+(function () {
-var s = new ShardingTest( "sync2" , 3 , 50 , 2 , { sync : true } );
-s.stopBalancer()
+var s = new ShardingTest({ name: "sync2",
+ shards: 3,
+ mongos: 2,
+ other: { sync : true } });
var s2 = s._mongos[1];
@@ -112,3 +114,5 @@ for (i = 1; i < hashes.length; i++) {
}
s.stop();
+
+})();
diff --git a/jstests/sharding/sync_cluster_config/sync7.js b/jstests/sharding/sync_cluster_config/sync7.js
index 33cf31bc899..25e95fdafc3 100644
--- a/jstests/sharding/sync_cluster_config/sync7.js
+++ b/jstests/sharding/sync_cluster_config/sync7.js
@@ -1,6 +1,9 @@
// Test that the clock skew of the distributed lock disallows getting locks for moving and splitting.
+(function() {
-s = new ShardingTest( "moveDistLock", 3, 0, undefined, { sync : true } );
+var s = new ShardingTest({ name: "moveDistLock",
+ shards: 3,
+ other: { sync : true } });
// Enable sharding on DB and collection before skewing the clocks
result = s.getDB("admin").runCommand( { enablesharding : "test1" } );
@@ -66,3 +69,5 @@ printjson(result);
assert.eq( result.ok, 1, "Move command should have succeeded again!" )
s.stop();
+
+})();
diff --git a/jstests/sharding/sync_cluster_config/sync_conn_cmd.js b/jstests/sharding/sync_cluster_config/sync_conn_cmd.js
index bbb9adda16a..5bf44cb5969 100644
--- a/jstests/sharding/sync_cluster_config/sync_conn_cmd.js
+++ b/jstests/sharding/sync_cluster_config/sync_conn_cmd.js
@@ -3,9 +3,11 @@
* Test SyncClusterConnection commands using call instead of findOne
*/
-// Note: count command uses call
+(function() {
-var st = new ShardingTest({ shards: [], other: { sync: true }});
+var st = new ShardingTest({ name: 'sync_conn_cmd',
+ shards: 0,
+ other: { sync: true }});
var configDB = st.config;
var coll = configDB.test;
@@ -58,3 +60,4 @@ testInvalidCount();
st.stop();
+})();
diff --git a/jstests/sharding/tag_auto_split.js b/jstests/sharding/tag_auto_split.js
index 5e2fe256619..a239ad88c01 100644
--- a/jstests/sharding/tag_auto_split.js
+++ b/jstests/sharding/tag_auto_split.js
@@ -1,6 +1,9 @@
// test to make sure that tag ranges get split
-s = new ShardingTest( "tag_auto_split", 2, 0, 1, { nopreallocj : true, enableBalancer : true } );
+var s = new ShardingTest({ name: "tag_auto_split",
+ shards: 2,
+ mongos: 1,
+ other: { enableBalancer : true } });
db = s.getDB( "test" );
diff --git a/jstests/sharding/tag_range.js b/jstests/sharding/tag_range.js
index e934a0b01e9..897433001e2 100644
--- a/jstests/sharding/tag_range.js
+++ b/jstests/sharding/tag_range.js
@@ -4,7 +4,9 @@ function countTags( num, message ) {
assert.eq( s.config.tags.count() , num , message );
}
-s = new ShardingTest( "tag_range" , 2 , 0 , 1 , { nopreallocj : true } );
+var s = new ShardingTest({ name: "tag_range",
+ shards: 2,
+ mongos: 1 });
// this set up is not required but prevents warnings in the remove
db = s.getDB( "tag_range" );
diff --git a/jstests/sharding/top_chunk_autosplit.js b/jstests/sharding/top_chunk_autosplit.js
index 0ebd8bf1a60..30040c7979e 100644
--- a/jstests/sharding/top_chunk_autosplit.js
+++ b/jstests/sharding/top_chunk_autosplit.js
@@ -99,7 +99,7 @@ function runTest(test) {
// Main
var dbName = "test";
var collName = "topchunk";
-var st = shardSetup({name: "topchunk", shards: 4, chunksize: 1}, dbName, collName);
+var st = shardSetup({name: "topchunk", shards: 4, chunkSize: 1}, dbName, collName);
var db = st.getDB(dbName);
var coll = db[collName];
var configDB = st.s.getDB('config');
@@ -243,7 +243,7 @@ for (var i = 0; i < tests.length; i++) {
st.stop();
// Single node shard Tests
-st = shardSetup({name: "singleNode", shards: 1, chunksize: 1}, dbName, collName);
+st = shardSetup({name: "singleNode", shards: 1, chunkSize: 1}, dbName, collName);
db = st.getDB(dbName);
coll = db[collName];
configDB = st.s.getDB('config');
@@ -276,7 +276,7 @@ st.stop();
// maxSize test
// To set maxSize, must manually add the shards
-st = shardSetup({name: "maxSize", shards: 2, chunksize: 1, other: {manualAddShard: true}},
+st = shardSetup({name: "maxSize", shards: 2, chunkSize: 1, other: {manualAddShard: true}},
dbName,
collName);
db = st.getDB(dbName);
diff --git a/jstests/sharding/update_sharded.js b/jstests/sharding/update_sharded.js
index 948781e6d66..805cda0c487 100644
--- a/jstests/sharding/update_sharded.js
+++ b/jstests/sharding/update_sharded.js
@@ -1,15 +1,18 @@
// Test simple updates issued through mongos. Updates have different constraints through mongos,
// since shard key is immutable.
+(function() {
-s = new ShardingTest( "auto1" , 2 , 1 , 1 );
+var s = new ShardingTest({ name: "auto1", shards: 2, mongos: 1 });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
+
// repeat same tests with hashed shard key, to ensure identical behavior
s.adminCommand( { shardcollection : "test.update0" , key : { key : 1 } } );
s.adminCommand( { shardcollection : "test.update1" , key : { key : "hashed" } } );
db = s.getDB( "test" )
+
for(i=0; i < 2; i++){
coll = db.getCollection("update" + i);
@@ -96,5 +99,6 @@ for(i=0; i < 2; i++){
assert.writeOK(coll.update({_id : ObjectId(), 'key.x' : 1}, {$set : {x : 1}}, {multi : false}));
}
-s.stop()
+s.stop();
+})();
diff --git a/jstests/sharding/user_flags_sharded.js b/jstests/sharding/user_flags_sharded.js
index e5b5f8a41dd..f2a8d626492 100644
--- a/jstests/sharding/user_flags_sharded.js
+++ b/jstests/sharding/user_flags_sharded.js
@@ -1,8 +1,8 @@
// Test that when user flags are set on a collection,
// then collection is sharded, flags get carried over.
+(function() {
if (jsTest.options().storageEngine === "mmapv1") {
-
// the dbname and collection we'll be working with
var dbname = "testDB";
var coll = "userFlagsColl";
@@ -39,7 +39,7 @@ if (jsTest.options().storageEngine === "mmapv1") {
assert.eq( collstats.userFlags , 0 , "modified collection should have userFlags = 0 ");
// start up a new sharded cluster, and add previous mongod
- var s = new ShardingTest( "user_flags", 1 );
+ var s = new ShardingTest({ name: "user_flags", shards: 1 });
assert( s.admin.runCommand( { addshard: newShardConn.host , name: "myShard" } ).ok,
"did not accept new shard" );
@@ -60,5 +60,6 @@ if (jsTest.options().storageEngine === "mmapv1") {
MongoRunner.stopMongod(newShardConn);
s.stop();
-
}
+
+})();
diff --git a/jstests/sharding/version1.js b/jstests/sharding/version1.js
index afe3f709fad..d3d317122b2 100644
--- a/jstests/sharding/version1.js
+++ b/jstests/sharding/version1.js
@@ -1,6 +1,6 @@
-// version1.js
+(function() {
-s = new ShardingTest( "version1" , 1 , 2 )
+var s = new ShardingTest({ name: "version1", shards: 1, verbose: 2 });
s.adminCommand( { enablesharding : "alleyinsider" } );
s.adminCommand( { shardcollection : "alleyinsider.foo" , key : { num : 1 } } );
@@ -69,3 +69,5 @@ assert( a.runCommand({ setShardVersion: "alleyinsider.foo",
// assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).global.i , 3 , "my get version B" );
s.stop();
+
+})();
diff --git a/jstests/sharding/version2.js b/jstests/sharding/version2.js
index 5a375f89c21..ae6109ca141 100644
--- a/jstests/sharding/version2.js
+++ b/jstests/sharding/version2.js
@@ -1,6 +1,6 @@
-// version2.js
+(function() {
-s = new ShardingTest( "version2" , 1 , 2 )
+var s = new ShardingTest({ name: "version2", shards: 1, verbose: 2 });
s.adminCommand( { enablesharding : "alleyinsider" } );
s.adminCommand( { shardcollection : "alleyinsider.foo" , key : { num : 1 } } );
@@ -63,5 +63,6 @@ assert.throws( simpleFindOne , [] , "should complain about not in sharded mode 1
// simpleFindOne(); // newer version is ok
-
s.stop();
+
+})();
diff --git a/jstests/sharding/zbigMapReduce.js b/jstests/sharding/zbigMapReduce.js
index 79813ff06fd..9a63268fac2 100644
--- a/jstests/sharding/zbigMapReduce.js
+++ b/jstests/sharding/zbigMapReduce.js
@@ -7,7 +7,7 @@ function setupTest() {
mongos : 1,
other: { rs: true,
numReplicas: 2,
- chunksize : 1,
+ chunkSize: 1,
rsOptions: { oplogSize : 50 },
enableBalancer : 1
} } );
diff --git a/jstests/slow1/sharding_multiple_collections.js b/jstests/slow1/sharding_multiple_collections.js
index 922087ba2aa..ca13cbc3c40 100644
--- a/jstests/slow1/sharding_multiple_collections.js
+++ b/jstests/slow1/sharding_multiple_collections.js
@@ -1,6 +1,10 @@
-// multcollections.js
+(function() {
-s = new ShardingTest( "multcollections" , 2 , 1 , 1 , { chunksize : 1, enableBalancer : true } );
+var s = new ShardingTest({ name: "multcollections",
+ shards: 2,
+ mongos: 1,
+ verbose: 1,
+ other: { chunkSize: 1, enableBalancer : true } });
s.adminCommand( { enablesharding : "test" } );
db = s.getDB( "test" )
@@ -52,5 +56,6 @@ while ( 1 ){
break
}
-s.stop()
+s.stop();
+})();
diff --git a/jstests/tool/dumprestore9.js b/jstests/tool/dumprestore9.js
index 8661452c4dd..5a36c54efd5 100644
--- a/jstests/tool/dumprestore9.js
+++ b/jstests/tool/dumprestore9.js
@@ -1,4 +1,8 @@
-if (0) { // Test disabled until SERVER-3853 is finished.
+// Test disabled until SERVER-3853 is finished.
+if (0) {
+
+(function() {
+
var name = "dumprestore9";
function step(msg) {
msg = msg || "";
@@ -6,7 +10,10 @@ function step(msg) {
print('\n' + name + ".js step " + this.x + ' ' + msg);
}
-s = new ShardingTest( "dumprestore9a", 2, 0, 3, { chunksize : 1, enableBalancer : 1 } );
+var s = new ShardingTest({ name: "dumprestore9a",
+ shards: 2,
+ mongos: 3,
+ other: { chunkSize: 1, enableBalancer : 1 } });
step("Shard collection");
@@ -46,7 +53,10 @@ step("Shutting down cluster");
s.stop();
step("Starting up clean cluster");
-s = new ShardingTest( "dumprestore9b", 2, 0, 3, {chunksize:1} );
+s = new ShardingTest({ name: "dumprestore9b",
+ shards: 2,
+ mongos: 3,
+ other: { chunkSize: 1 } });
db = s.getDB( "aaa" );
coll = db.foo;
@@ -77,4 +87,7 @@ for (var i = 0; i < s._connections.length; i++) {
step("Stop cluster");
s.stop();
step("SUCCESS");
+
+})();
+
}
diff --git a/jstests/tool/gridfs.js b/jstests/tool/gridfs.js
index 42df1180c78..cea92c812f7 100644
--- a/jstests/tool/gridfs.js
+++ b/jstests/tool/gridfs.js
@@ -4,7 +4,7 @@ var test = new ShardingTest({shards: 3,
mongos: 1,
config: 1,
verbose: 2,
- other: {chunksize:1}})
+ other: {chunkSize:1}})
var mongos = test.s0