author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2015-10-22 11:18:50 -0400
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2015-10-23 09:50:35 -0400
commit     3db9d1338c4d15b9b937516676645fd26d5f0a23 (patch)
tree       639f8de38537f721aeeeb4c2eb804e9212428d65 /jstests/sharding
parent     c48ff0ba613fdafd51d26f664371522837809a9d (diff)
download   mongo-3db9d1338c4d15b9b937516676645fd26d5f0a23.tar.gz
SERVER-21009 Remove usages of the multi-argument ShardingTest constructor
No functional changes, just converting everything to use the JSON-based constructor. Also moves some sharding-specific tests out of noPassthroughWithMongod and under the sharding suite.
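
For reference, the conversion pattern applied throughout this patch looks like the following sketch, taken from the addshard2.js hunk below. The positional signature being removed is (name, numShards, verboseLevel, numMongos, otherParams), as the inline comments in the jumbo1.js and shard_existing.js hunks indicate; the same pass also normalizes the lowercase 'chunksize' option to 'chunkSize'.

// Old multi-argument form: (name, numShards, verboseLevel, numMongos, otherParams)
var s = new ShardingTest("add_shard2", 1, 0, 1, { useHostname: true });

// New JSON-based form used throughout this patch
var s = new ShardingTest({ name: "add_shard2",
                           shards: 1,
                           mongos: 1,
                           other: { useHostname: true } });
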
Diffstat (limited to 'jstests/sharding')
-rw-r--r--  jstests/sharding/addshard2.js | 10
-rw-r--r--  jstests/sharding/addshard4.js | 12
-rw-r--r--  jstests/sharding/auth2.js | 2
-rw-r--r--  jstests/sharding/authConnectionHook.js | 2
-rw-r--r--  jstests/sharding/authmr.js | 13
-rw-r--r--  jstests/sharding/authwhere.js | 13
-rw-r--r--  jstests/sharding/auto1.js | 10
-rw-r--r--  jstests/sharding/auto2.js | 11
-rw-r--r--  jstests/sharding/autosplit_heuristics.js | 85
-rw-r--r--  jstests/sharding/balance_tags1.js | 40
-rw-r--r--  jstests/sharding/balance_tags2.js | 8
-rw-r--r--  jstests/sharding/bouncing_count.js | 9
-rw-r--r--  jstests/sharding/count1.js | 14
-rw-r--r--  jstests/sharding/count2.js | 15
-rw-r--r--  jstests/sharding/cursor1.js | 10
-rw-r--r--  jstests/sharding/diffservers1.js | 6
-rw-r--r--  jstests/sharding/disallow_mongos_add_as_shard.js (renamed from jstests/sharding/addshard3.js) | 2
-rw-r--r--  jstests/sharding/features1.js | 9
-rw-r--r--  jstests/sharding/features2.js | 10
-rw-r--r--  jstests/sharding/findandmodify1.js | 6
-rw-r--r--  jstests/sharding/findandmodify2.js | 2
-rw-r--r--  jstests/sharding/index1.js | 11
-rw-r--r--  jstests/sharding/jumbo1.js | 11
-rw-r--r--  jstests/sharding/key_many.js | 15
-rw-r--r--  jstests/sharding/key_string.js | 10
-rw-r--r--  jstests/sharding/large_chunk.js | 66
-rw-r--r--  jstests/sharding/limit_push.js | 11
-rw-r--r--  jstests/sharding/mapReduce_inSharded.js | 2
-rw-r--r--  jstests/sharding/mapReduce_inSharded_outSharded.js | 2
-rw-r--r--  jstests/sharding/mapReduce_nonSharded.js | 2
-rw-r--r--  jstests/sharding/mapReduce_outSharded.js | 2
-rw-r--r--  jstests/sharding/migrateBig.js | 13
-rw-r--r--  jstests/sharding/movePrimary1.js | 5
-rw-r--r--  jstests/sharding/movechunk_with_def_paranoia.js | 2
-rw-r--r--  jstests/sharding/movechunk_with_moveParanoia.js | 2
-rw-r--r--  jstests/sharding/movechunk_with_noMoveParanoia.js | 2
-rw-r--r--  jstests/sharding/mrShardedOutput.js | 2
-rw-r--r--  jstests/sharding/multi_mongos2.js | 8
-rw-r--r--  jstests/sharding/multi_mongos2a.js | 10
-rw-r--r--  jstests/sharding/presplit.js | 10
-rw-r--r--  jstests/sharding/remove1.js | 6
-rw-r--r--  jstests/sharding/shard_existing.js | 10
-rw-r--r--  jstests/sharding/shard_keycount.js | 11
-rw-r--r--  jstests/sharding/shard_targeting.js | 10
-rw-r--r--  jstests/sharding/shard_with_special_db_names.js | 5
-rw-r--r--  jstests/sharding/sharding_balance1.js | 63
-rw-r--r--  jstests/sharding/sharding_balance2.js | 82
-rw-r--r--  jstests/sharding/sharding_balance3.js | 65
-rw-r--r--  jstests/sharding/sharding_balance4.js | 140
-rw-r--r--  jstests/sharding/sharding_migrate_cursor1.js | 82
-rw-r--r--  jstests/sharding/sharding_multiple_ns_rs.js | 58
-rw-r--r--  jstests/sharding/sharding_rs2.js | 222
-rw-r--r--  jstests/sharding/sort1.js | 8
-rw-r--r--  jstests/sharding/split_with_force.js | 2
-rw-r--r--  jstests/sharding/split_with_force_small.js | 2
-rw-r--r--  jstests/sharding/stats.js | 13
-rw-r--r--  jstests/sharding/sync_cluster_config/parallel.js | 10
-rw-r--r--  jstests/sharding/sync_cluster_config/sync2.js | 10
-rw-r--r--  jstests/sharding/sync_cluster_config/sync7.js | 7
-rw-r--r--  jstests/sharding/sync_cluster_config/sync_conn_cmd.js | 7
-rw-r--r--  jstests/sharding/tag_auto_split.js | 5
-rw-r--r--  jstests/sharding/tag_range.js | 4
-rw-r--r--  jstests/sharding/top_chunk_autosplit.js | 6
-rw-r--r--  jstests/sharding/update_sharded.js | 8
-rw-r--r--  jstests/sharding/user_flags_sharded.js | 7
-rw-r--r--  jstests/sharding/version1.js | 6
-rw-r--r--  jstests/sharding/version2.js | 7
-rw-r--r--  jstests/sharding/zbigMapReduce.js | 2
68 files changed, 1164 insertions, 169 deletions
diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js
index 3799a4fd46a..e06c1bb990f 100644
--- a/jstests/sharding/addshard2.js
+++ b/jstests/sharding/addshard2.js
@@ -1,5 +1,10 @@
+(function() {
+
// Don't start any shards, yet
-var s = new ShardingTest("add_shard2", 1, 0, 1, {useHostname : true});
+var s = new ShardingTest({name: "add_shard2",
+ shards: 1,
+ mongos: 1,
+ other: {useHostname : true} });
// Start two new instances, which will be used for shards
var conn1 = MongoRunner.runMongod({useHostname: true});
@@ -110,8 +115,11 @@ assert(!wRes.hasWriteError() && wRes.nInserted === 1,
assert.commandFailed(s.admin.runCommand({addshard: rs5.getURL()}));
s.stop();
+
rs1.stopSet();
rs2.stopSet();
rs3.stopSet();
rs4.stopSet();
rs5.stopSet();
+
+})();
diff --git a/jstests/sharding/addshard4.js b/jstests/sharding/addshard4.js
index 885227a1d7c..aa4ccbfed19 100644
--- a/jstests/sharding/addshard4.js
+++ b/jstests/sharding/addshard4.js
@@ -1,6 +1,10 @@
-// a replica set's passive nodes should be okay to add as part of a shard config
+// A replica set's passive nodes should be okay to add as part of a shard config
+(function() {
-s = new ShardingTest( "addshard4", 2 , 0 , 1 , {useHostname : true});
+var s = new ShardingTest({ name: "addshard4",
+ shards: 2,
+ mongos: 1,
+ other: {useHostname : true} });
var r = new ReplSetTest({name: "addshard4", nodes: 3});
r.startSet();
@@ -50,3 +54,7 @@ result = s.adminCommand({"addshard" : "addshard42/"+config.members[2].host});
printjson(result);
assert.eq(result, true);
+
+s.stop();
+
+})();
diff --git a/jstests/sharding/auth2.js b/jstests/sharding/auth2.js
index 8aaca73379f..25e7a0144c2 100644
--- a/jstests/sharding/auth2.js
+++ b/jstests/sharding/auth2.js
@@ -1,4 +1,4 @@
-var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunksize : 1, verbose : 2,
+var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunkSize: 1, verbose : 2,
other : { nopreallocj : 1, verbose : 2, useHostname : true,
configOptions : { verbose : 2 }}});
diff --git a/jstests/sharding/authConnectionHook.js b/jstests/sharding/authConnectionHook.js
index e13c04dc73c..aa8d4d9d9d9 100644
--- a/jstests/sharding/authConnectionHook.js
+++ b/jstests/sharding/authConnectionHook.js
@@ -1,5 +1,5 @@
// Test for SERVER-8786 - if the first operation on an authenticated shard is moveChunk, it breaks the cluster.
-var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunksize : 1, verbose : 2,
+var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunkSize: 1, verbose : 2,
other : { nopreallocj : 1, verbose : 2, useHostname : true,
configOptions : { verbose : 2 }}});
diff --git a/jstests/sharding/authmr.js b/jstests/sharding/authmr.js
index 31a1bcf18f9..a94a013e4dc 100644
--- a/jstests/sharding/authmr.js
+++ b/jstests/sharding/authmr.js
@@ -1,6 +1,8 @@
// Verify that a user with read and write access to database "test" cannot access database "test2"
// via a mapper, reducer or finalizer.
+(function() {
+
//
// User document declarations. All users in this test are added to the admin database.
//
@@ -28,8 +30,13 @@ function assertInsert(collection, obj) {
assert.writeOK(collection.insert(obj));
}
-var cluster = new ShardingTest("authwhere", 1, 0, 1,
- { extraOptions: { keyFile: "jstests/libs/key1" } });
+var cluster = new ShardingTest({ name: "authmr",
+ shards: 1,
+ mongos: 1,
+ other: {
+ extraOptions: { keyFile: "jstests/libs/key1" }
+ }
+ });
// Set up the test data.
(function() {
@@ -109,3 +116,5 @@ assert.throws(function() {
adminDB.logout();
}
}());
+
+})();
diff --git a/jstests/sharding/authwhere.js b/jstests/sharding/authwhere.js
index 9516499580b..37dbbeca5bb 100644
--- a/jstests/sharding/authwhere.js
+++ b/jstests/sharding/authwhere.js
@@ -1,6 +1,8 @@
// Verify that a user with read access to database "test" cannot access database "test2" via a where
// clause.
+(function() {
+
//
// User document declarations. All users in this test are added to the admin database.
//
@@ -28,8 +30,13 @@ function assertInsert(collection, obj) {
assert.writeOK(collection.insert(obj));
}
-var cluster = new ShardingTest("authwhere", 1, 0, 1,
- { extraOptions: { keyFile: "jstests/libs/key1" } });
+var cluster = new ShardingTest({ name: "authwhere",
+ shards: 1,
+ mongos: 1,
+ other: {
+ extraOptions: { keyFile: "jstests/libs/key1" }
+ }
+ });
// Set up the test data.
(function() {
@@ -76,3 +83,5 @@ var cluster = new ShardingTest("authwhere", 1, 0, 1,
adminDB.logout();
}
}());
+
+})();
diff --git a/jstests/sharding/auto1.js b/jstests/sharding/auto1.js
index 70249c85c8d..433352a288b 100644
--- a/jstests/sharding/auto1.js
+++ b/jstests/sharding/auto1.js
@@ -1,6 +1,10 @@
-// auto1.js
+(function() {
-s = new ShardingTest( "auto1" , 2 , 1 , 1, { enableBalancer : 1 } );
+var s = new ShardingTest({ name: "auto1",
+ shards: 2,
+ mongos: 1,
+ verbose: 1,
+ other: { enableBalancer : 1 } });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
@@ -72,3 +76,5 @@ print( counts )
printjson( db.stats() )
s.stop();
+
+})();
diff --git a/jstests/sharding/auto2.js b/jstests/sharding/auto2.js
index b0ce66292d5..0fc17a55124 100644
--- a/jstests/sharding/auto2.js
+++ b/jstests/sharding/auto2.js
@@ -1,6 +1,9 @@
-// auto2.js
+(function() {
-s = new ShardingTest( "auto2" , 2 , 1 , 2 );
+var s = new ShardingTest({ name: "auto2",
+ shards: 2,
+ mongos: 2,
+ verbose: 1 });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
@@ -83,8 +86,6 @@ for ( i=0; i<j*100; i++ ){
}
}
-
-
s.printChangeLog();
print( "missing: " + tojson( missing ) )
@@ -147,3 +148,5 @@ assert.throws( function(){ s.getDB( "test" ).foo.find().sort( { s : 1 } ).forEac
print( "checkpoint G")
s.stop();
+
+})();
diff --git a/jstests/sharding/autosplit_heuristics.js b/jstests/sharding/autosplit_heuristics.js
new file mode 100644
index 00000000000..6618dd329a5
--- /dev/null
+++ b/jstests/sharding/autosplit_heuristics.js
@@ -0,0 +1,85 @@
+//
+// Tests autosplitting heuristics, and that the heuristic counting of chunk sizes
+// works as expected even after splitting.
+//
+
+var st = new ShardingTest({ shards : 1,
+ mongos : 1,
+ other : { mongosOptions : { chunkSize : 1, verbose : 2 }}});
+
+// The balancer may interfere unpredictably with the chunk moves/splits depending on timing.
+st.stopBalancer();
+
+// Test is not valid for debug build, heuristics get all mangled by debug reload behavior
+var isDebugBuild = st.s0.getDB( "admin" ).serverBuildInfo().debug;
+
+if ( !isDebugBuild ) {
+
+var mongos = st.s0;
+var config = mongos.getDB("config");
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.hashBar");
+
+printjson(admin.runCommand({ enableSharding : coll.getDB() + "" }));
+printjson(admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }));
+
+var numChunks = 10;
+
+// Split off the low and high chunks, to get non-special-case behavior
+printjson( admin.runCommand({ split : coll + "", middle : { _id : 0 } }) );
+printjson( admin.runCommand({ split : coll + "", middle : { _id : numChunks + 1 } }) );
+
+// Split all the other chunks, and an extra chunk
+// We need the extra chunk to compensate for the fact that the chunk differ resets the highest
+// chunk's (i.e. the last-split-chunk's) data count on reload.
+for (var i = 1; i < numChunks + 1; i++) {
+ printjson( admin.runCommand({ split : coll + "", middle : { _id : i } }) );
+}
+
+jsTest.log("Setup collection...");
+st.printShardingStatus(true);
+
+var approxSize = Object.bsonsize({ _id : 0.0 });
+
+jsTest.log("Starting inserts of approx size: " + approxSize + "...");
+
+var chunkSizeBytes = 1024 * 1024;
+
+// We insert slightly more than the max number of docs per chunk, to test
+// if resetting the chunk size happens during reloads. If the size is
+// reset, we'd expect to split less, since the first split would then
+// disable further splits (statistically, since the decision is randomized).
+// We choose 1.4 since split attempts happen about once every 1/5 chunksize,
+// and we want to be sure we def get a split attempt at a full chunk.
+var insertsForSplit = Math.ceil((chunkSizeBytes * 1.4) / approxSize);
+var totalInserts = insertsForSplit * numChunks;
+
+printjson({ chunkSizeBytes : chunkSizeBytes,
+ insertsForSplit : insertsForSplit,
+ totalInserts : totalInserts });
+
+// Insert enough docs to trigger splits into all chunks
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < totalInserts; i++) {
+ bulk.insert({ _id : i % numChunks + (i / totalInserts) });
+}
+assert.writeOK(bulk.execute());
+
+jsTest.log("Inserts completed...");
+
+st.printShardingStatus(true);
+printjson(coll.stats());
+
+// Check that all chunks (except the two extreme chunks)
+// have been split at least once + 1 extra chunk as reload buffer
+assert.gte(config.chunks.count(), numChunks * 2 + 3);
+
+jsTest.log("DONE!");
+
+}
+else {
+ jsTest.log( "Disabled test in debug builds." );
+}
+
+st.stop();
+
diff --git a/jstests/sharding/balance_tags1.js b/jstests/sharding/balance_tags1.js
index b7bff5ad213..052260b47e9 100644
--- a/jstests/sharding/balance_tags1.js
+++ b/jstests/sharding/balance_tags1.js
@@ -1,11 +1,15 @@
// Test balancing all chunks off of one shard
-var s = new ShardingTest("balance_tags1", 3, 1, 1, { sync:true, chunksize : 1, nopreallocj : true });
-s.config.settings.update({ _id: "balancer" }, { $set: { stopped: false }}, true);
+var st = new ShardingTest({ name: "balance_tags1",
+ shards: 3,
+ mongos: 1,
+ verbose: 1,
+ other: { chunkSize: 1,
+ enableBalancer : true } });
-s.adminCommand({ enablesharding: "test" });
-s.ensurePrimaryShard('test', 'shard0001');
+st.adminCommand({ enablesharding: "test" });
+st.ensurePrimaryShard('test', 'shard0001');
-var db = s.getDB("test");
+var db = st.getDB("test");
var bulk = db.foo.initializeUnorderedBulkOp();
for (i = 0; i < 21; i++) {
@@ -13,21 +17,21 @@ for (i = 0; i < 21; i++) {
}
assert.writeOK(bulk.execute());
-sh.shardCollection("test.foo", { _id : 1 });
+assert.commandWorked(st.s.adminCommand({ shardCollection: 'test.foo', key: { _id : 1 } }));
-sh.stopBalancer();
+st.stopBalancer();
for (i = 0; i < 20; i++) {
- s.adminCommand({ split : "test.foo", middle : { _id : i } });
+ st.adminCommand({ split : "test.foo", middle : { _id : i } });
}
-sh.startBalancer();
+st.startBalancer();
-sh.status(true);
+st.printShardingStatus();
// Wait for the initial balance to happen
assert.soon(function() {
- var counts = s.chunkCounts("foo");
+ var counts = st.chunkCounts("foo");
printjson(counts);
return counts["shard0000"] == 7 &&
counts["shard0001"] == 7 &&
@@ -39,28 +43,28 @@ assert.soon(function() {
// Quick test of some shell helpers and setting up state
sh.addShardTag("shard0000", "a");
-assert.eq([ "a" ] , s.config.shards.findOne({ _id : "shard0000" }).tags);
+assert.eq([ "a" ] , st.config.shards.findOne({ _id : "shard0000" }).tags);
sh.addShardTag("shard0000", "b");
-assert.eq([ "a" , "b" ], s.config.shards.findOne({ _id : "shard0000" }).tags);
+assert.eq([ "a" , "b" ], st.config.shards.findOne({ _id : "shard0000" }).tags);
sh.removeShardTag("shard0000", "b");
-assert.eq([ "a" ], s.config.shards.findOne( { _id : "shard0000" } ).tags);
+assert.eq([ "a" ], st.config.shards.findOne( { _id : "shard0000" } ).tags);
sh.addShardTag("shard0001" , "a");
sh.addTagRange("test.foo" , { _id : -1 } , { _id : 1000 } , "a");
-sh.status( true );
+st.printShardingStatus();
// At this point, everything should drain off shard 2, which does not have the tag
assert.soon(function() {
- var counts = s.chunkCounts("foo");
+ var counts = st.chunkCounts("foo");
printjson(counts);
return counts["shard0002"] == 0;
},
"balance 2 didn't happen",
1000 * 60 * 10 , 1000);
-printjson(sh.status());
+st.printShardingStatus();
-s.stop();
+st.stop();
diff --git a/jstests/sharding/balance_tags2.js b/jstests/sharding/balance_tags2.js
index d6b817c3820..6b584a907d7 100644
--- a/jstests/sharding/balance_tags2.js
+++ b/jstests/sharding/balance_tags2.js
@@ -1,6 +1,10 @@
// Test balancing all chunks to one shard by tagging the full shard-key range on that collection
-var s = new ShardingTest("balance_tags2", 3, 1, 1, { sync:true, chunksize : 1, nopreallocj : true });
-s.config.settings.update({ _id: "balancer" }, { $set: { stopped: false }}, true);
+var s = new ShardingTest({ name: "balance_tags2",
+ shards: 3,
+ mongos: 1,
+ verbose: 1,
+ other: { chunkSize: 1,
+ enableBalancer : true } });
s.adminCommand({ enablesharding: "test" });
s.ensurePrimaryShard('test', 'shard0001');
diff --git a/jstests/sharding/bouncing_count.js b/jstests/sharding/bouncing_count.js
index c5f22f0b170..f6fc6bfc063 100644
--- a/jstests/sharding/bouncing_count.js
+++ b/jstests/sharding/bouncing_count.js
@@ -1,6 +1,9 @@
// Tests whether new sharding is detected on insert by mongos
+(function() {
-var st = new ShardingTest( name = "test", shards = 10, verbose = 0, mongos = 3 )
+var st = new ShardingTest({ name: "test",
+ shards: 10,
+ mongos: 3 });
var mongosA = st.s0
var mongosB = st.s1
@@ -47,4 +50,6 @@ jsTestLog( "Running count!" )
printjson( collB.count() )
printjson( collC.find().toArray() )
-st.stop()
+st.stop();
+
+})();
diff --git a/jstests/sharding/count1.js b/jstests/sharding/count1.js
index 19d70456d1f..486bf40080b 100644
--- a/jstests/sharding/count1.js
+++ b/jstests/sharding/count1.js
@@ -1,13 +1,7 @@
-// count1.js
+(function() {
-s = new ShardingTest( "count1" , 2 , 1 );
-db = s.getDB( "test" );
-
-// Stop balancer since doing manual stuff
-// Make sure we totally stop here, otherwise balancing round can intermittently slip by
-// Counts during balancing are only approximate (as of 7/28/12).
-// If we fix that, we should write a test for it elsewhere
-s.stopBalancer();
+var s = new ShardingTest({ name: "count1", shards: 2 });
+var db = s.getDB( "test" );
// ************** Test Set #1 *************
// Basic counts on "bar" collections, not yet sharded
@@ -177,3 +171,5 @@ assert( ! negSkipLimitResult.ok , "negative skip value with limit shouldn't work
assert( negSkipLimitResult.errmsg.length > 0 , "no error msg for negative skip" );
s.stop();
+
+})();
diff --git a/jstests/sharding/count2.js b/jstests/sharding/count2.js
index 7c84c415646..7361359791d 100644
--- a/jstests/sharding/count2.js
+++ b/jstests/sharding/count2.js
@@ -1,15 +1,16 @@
-// count2.js
+(function() {
-s1 = new ShardingTest( "count2" , 2 , 1 , 2 );
-s2 = s1._mongos[1];
-s1.stopBalancer();
+var s1 = new ShardingTest({ name: "count2",
+ shards: 2,
+ mongos: 2 });
+var s2 = s1._mongos[1];
s1.adminCommand( { enablesharding: "test" } );
s1.ensurePrimaryShard('test', 'shard0001');
s1.adminCommand( { shardcollection: "test.foo" , key : { name : 1 } } );
-db1 = s1.getDB( "test" ).foo;
-db2 = s2.getDB( "test" ).foo;
+var db1 = s1.getDB( "test" ).foo;
+var db2 = s2.getDB( "test" ).foo;
assert.eq( 1, s1.config.chunks.count(), "sanity check A");
@@ -48,3 +49,5 @@ assert.eq( 6, db2.find().limit( 0 ).count( true ));
assert.eq( 6, db2.getDB().runCommand({ count: db2.getName(), limit: 0 }).n );
s1.stop();
+
+})();
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index e7583ba9b99..7c83b79d742 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -1,10 +1,8 @@
// cursor1.js
// checks that cursors survive a chunk's move
+(function() {
-s = new ShardingTest( "sharding_cursor1" , 2 , 2 )
-
-// take the balancer out of the equation
-s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
+var s = new ShardingTest({ name: "sharding_cursor1", shards: 2 });
s.config.settings.find().forEach( printjson )
// create a sharded 'test.foo', for the moment with just one chunk
@@ -63,4 +61,6 @@ assert.throws( function(){ cur.next(); } , null , "T5" )
after = db.serverStatus().metrics.cursor;
gc(); gc()
-s.stop()
+s.stop();
+
+})();
diff --git a/jstests/sharding/diffservers1.js b/jstests/sharding/diffservers1.js
index f2c16664398..b485d508b43 100644
--- a/jstests/sharding/diffservers1.js
+++ b/jstests/sharding/diffservers1.js
@@ -1,4 +1,6 @@
-var s = new ShardingTest( "diffservers1" , 2 );
+(function() {
+
+var s = new ShardingTest({ name: "diffservers1", shards: 2 });
assert.eq( 2 , s.config.shards.count() , "server count wrong" );
assert.eq( 0 , s._shardServers[0].getDB( "config" ).shards.count() , "shouldn't be here" );
@@ -11,6 +13,7 @@ test1.save( { a : 3 } );
assert( 3 , test1.count() );
assert( ! s.admin.runCommand( { addshard: "sdd$%" } ).ok , "bad hostname" );
+
var portWithoutHostRunning = allocatePort();
assert(!s.admin.runCommand({addshard: "127.0.0.1:" + portWithoutHostRunning}).ok, "host not up");
assert(!s.admin.runCommand({ addshard: "10.0.0.1:" + portWithoutHostRunning}).ok,
@@ -18,3 +21,4 @@ assert(!s.admin.runCommand({ addshard: "10.0.0.1:" + portWithoutHostRunning}).ok
s.stop();
+})();
diff --git a/jstests/sharding/addshard3.js b/jstests/sharding/disallow_mongos_add_as_shard.js
index f8d43587fc0..524715bde25 100644
--- a/jstests/sharding/addshard3.js
+++ b/jstests/sharding/disallow_mongos_add_as_shard.js
@@ -1,6 +1,6 @@
(function() {
-var st = new ShardingTest("add_shard3", 1);
+var st = new ShardingTest({ name: "add_shard3", shards: 1 });
var result = st.admin.runCommand({addshard: st.s.host});
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
index 22fed89fef8..e5f88b907be 100644
--- a/jstests/sharding/features1.js
+++ b/jstests/sharding/features1.js
@@ -1,8 +1,6 @@
-// features1.js
+(function() {
-s = new ShardingTest( "features1" , 2 , 1 , 1 );
-
-s.stopBalancer();
+var s = new ShardingTest({ name: "features1", shards: 2, mongos: 1 });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
@@ -197,5 +195,6 @@ r = db.getMongo().getDBs()
assert.eq( 3 , r.databases.length , "listDatabases 1 : " + tojson( r ) )
assert.eq( "number", typeof(r.totalSize) , "listDatabases 2 : " + tojson( r ) );
-s.stop()
+s.stop();
+})();
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index a3e0ae777f3..80a06ae6ba9 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -1,10 +1,6 @@
-// features2.js
+(function() {
-s = new ShardingTest( "features2" , 2 , 1 , 1 );
-
-// The counts and the tests for "on-num-shards" only works for previous assumptions in balancer
-// behavior and assumes migrations do not occur during count() commands.
-s.stopBalancer()
+var s = new ShardingTest({ name: "features2", shards: 2, mongos: 1 });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
@@ -202,3 +198,5 @@ delete im2.localTime;
assert.eq( isMaster, im2 );
s.stop();
+
+})();
diff --git a/jstests/sharding/findandmodify1.js b/jstests/sharding/findandmodify1.js
index bc15a40f48d..a144eceed72 100644
--- a/jstests/sharding/findandmodify1.js
+++ b/jstests/sharding/findandmodify1.js
@@ -1,4 +1,6 @@
-s = new ShardingTest( "find_and_modify_sharded" , 2 , 2);
+(function() {
+
+var s = new ShardingTest({ name: "find_and_modify_sharded", shards: 2 });
s.adminCommand( { enablesharding : "test" } );
db = s.getDB( "test" );
@@ -58,3 +60,5 @@ for (var i=0; i < numObjs; i++){
}
s.stop();
+
+})();
diff --git a/jstests/sharding/findandmodify2.js b/jstests/sharding/findandmodify2.js
index 189838d76d3..2ce2988c470 100644
--- a/jstests/sharding/findandmodify2.js
+++ b/jstests/sharding/findandmodify2.js
@@ -1,4 +1,4 @@
-var s = new ShardingTest({ name: "find_and_modify_sharded_2", shards: 2, verbose: 2, mongos: 1, other: { chunksize: 1 }});
+var s = new ShardingTest({ name: "find_and_modify_sharded_2", shards: 2, verbose: 2, mongos: 1, other: { chunkSize: 1 }});
s.adminCommand( { enablesharding : "test" } );
var db = s.getDB( "test" );
diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js
index 77ed3ba7444..57092b3a566 100644
--- a/jstests/sharding/index1.js
+++ b/jstests/sharding/index1.js
@@ -1,10 +1,7 @@
-/**
- * @tags : [ hashed ]
- */
+// SERVER-2326 - make sure that sharding only works with unique indices
+(function() {
-// from server 2326 - make sure that sharding only works with unique indices
-
-s = new ShardingTest( "shard_index", 2, 0, 1 )
+var s = new ShardingTest({ name: "shard_index", shards: 2, mongos: 1 });
// Regenerate fully because of SERVER-2782
for ( var i = 0; i < 22; i++ ) {
@@ -390,3 +387,5 @@ for ( var i = 0; i < 22; i++ ) {
}
s.stop();
+
+})();
diff --git a/jstests/sharding/jumbo1.js b/jstests/sharding/jumbo1.js
index b8882f632ea..b55c41bbeb9 100644
--- a/jstests/sharding/jumbo1.js
+++ b/jstests/sharding/jumbo1.js
@@ -1,6 +1,9 @@
-// jump1.js
+(function() {
-s = new ShardingTest( "jump1" , 2 /* numShards */, 2 /* verboseLevel */, 1 /* numMongos */, { chunksize : 1 } )
+var s = new ShardingTest({ name: "jumbo1",
+ shards: 2,
+ mongos: 1,
+ other: { chunkSize: 1 } });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
@@ -51,4 +54,6 @@ assert.soon( function(){
} , "balance didn't happen" , 1000 * 60 * 5 , 5000 );
-s.stop()
+s.stop();
+
+})();
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
index a458d5a9284..1b512d646a4 100644
--- a/jstests/sharding/key_many.js
+++ b/jstests/sharding/key_many.js
@@ -1,8 +1,7 @@
-// key_many.js
+(function() {
-// values have to be sorted
-// you must have exactly 6 values in each array
-types = [
+// Values have to be sorted - you must have exactly 6 values in each array
+var types = [
{ name : "string" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield: "k" } ,
{ name : "double" , values : [ 1.2 , 3.5 , 4.5 , 4.6 , 6.7 , 9.9 ] , keyfield : "a" } ,
{ name : "date" , values : [ new Date( 1000000 ) , new Date( 2000000 ) , new Date( 3000000 ) , new Date( 4000000 ) , new Date( 5000000 ) , new Date( 6000000 ) ] , keyfield : "a" } ,
@@ -15,8 +14,7 @@ types = [
{ name : "oid_other" , values : [ ObjectId() , ObjectId() , ObjectId() , ObjectId() , ObjectId() , ObjectId() ] , keyfield : "o" } ,
]
-s = new ShardingTest( "key_many" , 2 );
-s.setBalancer( false )
+var s = new ShardingTest({ name: "key_many", shards: 2 });
s.adminCommand( { enablesharding : "test" } )
db = s.getDB( "test" );
@@ -73,8 +71,6 @@ function getKey( o ){
return o;
}
-
-
for ( var i=0; i<types.length; i++ ){
curT = types[i]; //global
@@ -156,7 +152,6 @@ for ( var i=0; i<types.length; i++ ){
// TODO remove
}
-
s.stop();
-
+})();
diff --git a/jstests/sharding/key_string.js b/jstests/sharding/key_string.js
index 7434b75e52f..4c365fdf0d8 100644
--- a/jstests/sharding/key_string.js
+++ b/jstests/sharding/key_string.js
@@ -1,9 +1,7 @@
-// key_string.js
+(function() {
-s = new ShardingTest( "keystring" , 2 );
-s.stopBalancer();
+var s = new ShardingTest({ name: "keystring", shards: 2 });
-db = s.getDB( "test" );
s.adminCommand( { enablesharding : "test" } )
s.ensurePrimaryShard('test', 'shard0001');
s.adminCommand( { shardcollection : "test.foo" , key : { name : 1 } } );
@@ -13,6 +11,8 @@ seconday = s.getOther( primary ).getDB( "test" );
assert.eq( 1 , s.config.chunks.count() , "sanity check A" );
+var db = s.getDB( "test" );
+
db.foo.save( { name : "eliot" } )
db.foo.save( { name : "sara" } )
db.foo.save( { name : "bob" } )
@@ -48,4 +48,4 @@ assert.throws( function(){ s.adminCommand( { split : "test.foo" , middle : { nam
s.stop();
-
+})();
diff --git a/jstests/sharding/large_chunk.js b/jstests/sharding/large_chunk.js
new file mode 100644
index 00000000000..7506f03e14a
--- /dev/null
+++ b/jstests/sharding/large_chunk.js
@@ -0,0 +1,66 @@
+// Where we test operations dealing with large chunks
+(function() {
+
+// Starts a new sharding environment limiting the chunksize to 1GB (highest value allowed).
+// Note that early splitting will start with a 1/4 of max size currently.
+var s = new ShardingTest({ name: 'large_chunk',
+ shards: 2,
+ verbose: 2,
+ other: { chunkSize: 1024 } });
+
+// take the balancer out of the equation
+s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
+s.config.settings.find().forEach(printjson);
+
+db = s.getDB( "test" );
+
+//
+// Step 1 - Test moving a large chunk
+//
+
+// Turn on sharding on the 'test.foo' collection and generate a large chunk
+s.adminCommand( { enablesharding : "test" } );
+s.ensurePrimaryShard('test', 'shard0001');
+
+bigString = ""
+while ( bigString.length < 10000 )
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+inserted = 0;
+num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
+while ( inserted < ( 400 * 1024 * 1024 ) ){
+ bulk.insert({ _id: num++, s: bigString });
+ inserted += bigString.length;
+}
+assert.writeOK(bulk.execute());
+
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+assert.eq( 1 , s.config.chunks.count() , "step 1 - need one large chunk" );
+
+primary = s.getServer( "test" ).getDB( "test" );
+secondary = s.getOther( primary ).getDB( "test" );
+
+// Make sure that we don't move that chunk if it goes past what we consider the maximum chunk size
+print("Checkpoint 1a")
+max = 200 * 1024 * 1024;
+assert.throws(function() {
+ s.adminCommand({ movechunk: "test.foo",
+ find: { _id: 1 },
+ to: secondary.getMongo().name,
+ maxChunkSizeBytes: max });
+ });
+
+// Move the chunk
+print("checkpoint 1b");
+before = s.config.chunks.find().toArray();
+s.adminCommand( { movechunk : "test.foo" , find : { _id : 1 } , to : secondary.getMongo().name } );
+after = s.config.chunks.find().toArray();
+assert.neq( before[0].shard , after[0].shard , "move chunk did not work" );
+
+s.config.changelog.find().forEach( printjson )
+
+s.stop();
+
+})();
diff --git a/jstests/sharding/limit_push.js b/jstests/sharding/limit_push.js
index 5aa9bd5bee0..53acb6583eb 100644
--- a/jstests/sharding/limit_push.js
+++ b/jstests/sharding/limit_push.js
@@ -1,12 +1,9 @@
// This test is to ensure that limit() clauses are pushed down to the shards and evaluated
// See: http://jira.mongodb.org/browse/SERVER-1896
+(function() {
-s = new ShardingTest( "limit_push", 2, 1, 1 );
-
-// Stop balancer since we do manual moves.
-s.stopBalancer();
-
-db = s.getDB( "test" );
+var s = new ShardingTest({ name: "limit_push", shards: 2, mongos: 1 });
+var db = s.getDB( "test" );
// Create some data
for (i=0; i < 100; i++) { db.limit_push.insert({ _id : i, x: i}); }
@@ -50,3 +47,5 @@ for (var j in execStages.shards) {
}
s.stop();
+
+})();
diff --git a/jstests/sharding/mapReduce_inSharded.js b/jstests/sharding/mapReduce_inSharded.js
index ae35861fb5a..4fffeed9e1e 100644
--- a/jstests/sharding/mapReduce_inSharded.js
+++ b/jstests/sharding/mapReduce_inSharded.js
@@ -6,7 +6,7 @@ var verifyOutput = function(out) {
assert.eq(out.counts.output, 512, "output count is wrong");
}
-var st = new ShardingTest({ shards : 2, verbose : 1, mongos : 1, other : { chunksize : 1 } });
+var st = new ShardingTest({ shards : 2, verbose : 1, mongos : 1, other : { chunkSize: 1 } });
st.startBalancer();
st.adminCommand( { enablesharding : "mrShard" } )
diff --git a/jstests/sharding/mapReduce_inSharded_outSharded.js b/jstests/sharding/mapReduce_inSharded_outSharded.js
index 69174f2589d..f93acae31a3 100644
--- a/jstests/sharding/mapReduce_inSharded_outSharded.js
+++ b/jstests/sharding/mapReduce_inSharded_outSharded.js
@@ -6,7 +6,7 @@ var verifyOutput = function(out) {
assert.eq(out.counts.output, 512, "output count is wrong");
}
-var st = new ShardingTest({ shards : 2, verbose : 1, mongos : 1, other : { chunksize : 1 } });
+var st = new ShardingTest({ shards : 2, verbose : 1, mongos : 1, other : { chunkSize: 1 } });
st.startBalancer();
st.adminCommand( { enablesharding : "mrShard" } )
diff --git a/jstests/sharding/mapReduce_nonSharded.js b/jstests/sharding/mapReduce_nonSharded.js
index cd1437b83a5..acf9e20319a 100644
--- a/jstests/sharding/mapReduce_nonSharded.js
+++ b/jstests/sharding/mapReduce_nonSharded.js
@@ -6,7 +6,7 @@ var verifyOutput = function(out) {
assert.eq(out.counts.output, 512, "output count is wrong");
}
-var st = new ShardingTest({ shards : 2, verbose : 1, mongos : 1, other : { chunksize : 1 } });
+var st = new ShardingTest({ shards : 2, verbose : 1, mongos : 1, other : { chunkSize: 1 } });
st.startBalancer();
st.adminCommand( { enablesharding : "mrShard" } )
diff --git a/jstests/sharding/mapReduce_outSharded.js b/jstests/sharding/mapReduce_outSharded.js
index a42eb166e65..331e8a52a4d 100644
--- a/jstests/sharding/mapReduce_outSharded.js
+++ b/jstests/sharding/mapReduce_outSharded.js
@@ -6,7 +6,7 @@ var verifyOutput = function(out) {
assert.eq(out.counts.output, 512, "output count is wrong");
}
-var st = new ShardingTest({ shards : 2, verbose : 1, mongos : 1, other : { chunksize : 1 } });
+var st = new ShardingTest({ shards : 2, verbose : 1, mongos : 1, other : { chunkSize: 1 } });
st.startBalancer();
st.adminCommand( { enablesharding : "mrShard" } )
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index 424eddb9042..bd3f4e9de0b 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -1,6 +1,11 @@
+(function() {
-s = new ShardingTest( "migrateBig" , 2 , 0 , 1 , { chunksize : 1 } );
-s.config.settings.update( { _id: "balancer" }, { $set : { stopped : true, _waitForDelete : true } } , true );
+var s = new ShardingTest({ name: "migrateBig",
+ shards: 2,
+ mongos: 1,
+ other: { chunkSize: 1 } });
+
+s.config.settings.update( { _id: "balancer" }, { $set : { _waitForDelete : true } } , true);
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
s.adminCommand( { shardcollection : "test.foo" , key : { x : 1 } } );
@@ -62,4 +67,6 @@ assert.soon( function(){ return !s.isAnyBalanceInFlight(); } );
assert.eq( coll.count() , coll.find().itcount() );
-s.stop()
+s.stop();
+
+})();
diff --git a/jstests/sharding/movePrimary1.js b/jstests/sharding/movePrimary1.js
index 7e764efc507..50cc1ccacca 100644
--- a/jstests/sharding/movePrimary1.js
+++ b/jstests/sharding/movePrimary1.js
@@ -1,6 +1,6 @@
+(function() {
-
-s = new ShardingTest( "movePrimary1" , 2 );
+var s = new ShardingTest({ name: "movePrimary1", shards: 2 });
initDB = function( name ){
var db = s.getDB( name );
@@ -49,3 +49,4 @@ assert.eq(res.code, 70, 'ShardNotFound code not used');
s.stop();
+})();
diff --git a/jstests/sharding/movechunk_with_def_paranoia.js b/jstests/sharding/movechunk_with_def_paranoia.js
index c2cd70df970..f689eb072d5 100644
--- a/jstests/sharding/movechunk_with_def_paranoia.js
+++ b/jstests/sharding/movechunk_with_def_paranoia.js
@@ -3,7 +3,7 @@
/**
* This test checks that the moveChunk directory is not created
*/
-var st = new ShardingTest( { shards:2, mongos:1 , other : { chunksize : 1 }});
+var st = new ShardingTest( { shards:2, mongos:1 , other : { chunkSize: 1 }});
load("jstests/sharding/movechunk_include.js")
setupMoveChunkTest(st);
diff --git a/jstests/sharding/movechunk_with_moveParanoia.js b/jstests/sharding/movechunk_with_moveParanoia.js
index 4091792d27f..a87969a8737 100644
--- a/jstests/sharding/movechunk_with_moveParanoia.js
+++ b/jstests/sharding/movechunk_with_moveParanoia.js
@@ -6,7 +6,7 @@
var st = new ShardingTest( { shards: 2,
mongos:1,
other : {
- chunksize : 1,
+ chunkSize: 1,
shardOptions: { moveParanoia:"" }}});
load("jstests/sharding/movechunk_include.js")
diff --git a/jstests/sharding/movechunk_with_noMoveParanoia.js b/jstests/sharding/movechunk_with_noMoveParanoia.js
index 1844528b225..b08f4a61ff5 100644
--- a/jstests/sharding/movechunk_with_noMoveParanoia.js
+++ b/jstests/sharding/movechunk_with_noMoveParanoia.js
@@ -6,7 +6,7 @@
var st = new ShardingTest( { shards: 2,
mongos:1,
other : {
- chunksize : 1,
+ chunkSize: 1,
shardOptions: { noMoveParanoia:"" }}});
load("jstests/sharding/movechunk_include.js")
diff --git a/jstests/sharding/mrShardedOutput.js b/jstests/sharding/mrShardedOutput.js
index 79dce25ce18..b30c45b8aa2 100644
--- a/jstests/sharding/mrShardedOutput.js
+++ b/jstests/sharding/mrShardedOutput.js
@@ -4,7 +4,7 @@
// collection input twice the size of the first and outputs it to the new sharded
// collection created in the first pass.
-var st = new ShardingTest({ shards: 2, verbose: 1, other: { chunksize : 1 }});
+var st = new ShardingTest({ shards: 2, verbose: 1, other: { chunkSize: 1 }});
st.stopBalancer();
diff --git a/jstests/sharding/multi_mongos2.js b/jstests/sharding/multi_mongos2.js
index bcb66fc79e3..bad6afd3a54 100644
--- a/jstests/sharding/multi_mongos2.js
+++ b/jstests/sharding/multi_mongos2.js
@@ -1,9 +1,7 @@
-// multi_mongos2.js
// This tests sharding an existing collection that both shards are aware of (SERVER-2828)
+(function() {
-
-// setup sharding with two mongos, s1 and s2
-s1 = new ShardingTest( "multi_mongos1" , 2 , 1 , 2 );
+var s1 = new ShardingTest({ name: "multi_mongos1", shards: 2, mongos: 2 });
s2 = s1._mongos[1];
s1.adminCommand( { enablesharding : "test" } );
@@ -67,3 +65,5 @@ assert.eq(1 , res.ok, tojson(res));
s1.setBalancer( true )
s1.stop();
+
+})();
diff --git a/jstests/sharding/multi_mongos2a.js b/jstests/sharding/multi_mongos2a.js
index 75583f9cd91..712e3cc9dd1 100644
--- a/jstests/sharding/multi_mongos2a.js
+++ b/jstests/sharding/multi_mongos2a.js
@@ -1,9 +1,9 @@
-// multi_mongos2.js
// This tests sharding an existing collection that both shards are aware of (SERVER-2828)
+(function() {
-
-// setup sharding with two mongos, s1 and s2
-s1 = new ShardingTest( "multi_mongos1" , 2 , 1 , 2 );
+var s1 = new ShardingTest({ name: "multi_mongos2a",
+ shards: 2,
+ mongos: 2 });
s2 = s1._mongos[1];
s1.adminCommand( { enablesharding : "test" } );
@@ -30,3 +30,5 @@ assert.eq(1, s1.getDB('test').existing.count({_id:1})); // SERVER-2828
assert.eq(1, s2.getDB('test').existing.count({_id:1}));
s1.stop();
+
+})();
diff --git a/jstests/sharding/presplit.js b/jstests/sharding/presplit.js
index 894ea473a79..5a4a69cc5b3 100644
--- a/jstests/sharding/presplit.js
+++ b/jstests/sharding/presplit.js
@@ -1,7 +1,9 @@
-// presplit.js
+(function() {
-// Starts a new sharding environment limiting the chunksize to 1MB.
-s = new ShardingTest( "presplit" , 2 , 2 , 1 , { chunksize : 1 } );
+var s = new ShardingTest({ name: "presplit",
+ shards: 2,
+ mongos: 1,
+ other: { chunkSize : 1 } });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
@@ -37,3 +39,5 @@ assert.eq( num , primary.foo.count() );
s.printChangeLog();
s.stop();
+
+})();
diff --git a/jstests/sharding/remove1.js b/jstests/sharding/remove1.js
index 19d911d9fb4..0143e49dc1b 100644
--- a/jstests/sharding/remove1.js
+++ b/jstests/sharding/remove1.js
@@ -1,4 +1,6 @@
-s = new ShardingTest( "remove_shard1", 2 );
+(function() {
+
+var s = new ShardingTest({ name: "remove_shard1", shards: 2 });
assert.eq( 2, s.config.shards.count() , "initial server count wrong" );
@@ -23,3 +25,5 @@ assert.eq( 2, s.config.shards.count(), "new server does not appear in count" );
MongoRunner.stopMongod(conn);
s.stop();
+
+})();
diff --git a/jstests/sharding/shard_existing.js b/jstests/sharding/shard_existing.js
index f2dca5a7b7a..6283a0868bf 100644
--- a/jstests/sharding/shard_existing.js
+++ b/jstests/sharding/shard_existing.js
@@ -1,4 +1,10 @@
-s = new ShardingTest( "shard_existing" , 2 /* numShards */, 1 /* verboseLevel */, 1 /* numMongos */, { chunksize : 1 } )
+(function() {
+
+var s = new ShardingTest({ name: "shard_existing",
+ shards: 2,
+ mongos: 1,
+ verbose: 1,
+ other: { chunkSize: 1 } });
db = s.getDB( "test" )
@@ -33,3 +39,5 @@ var guess = Math.ceil(dataSize / (512*1024 + avgObjSize));
assert( Math.abs( numChunks - guess ) < 2, "not right number of chunks" );
s.stop();
+
+})();
diff --git a/jstests/sharding/shard_keycount.js b/jstests/sharding/shard_keycount.js
index 408774785c8..5702b59dc84 100644
--- a/jstests/sharding/shard_keycount.js
+++ b/jstests/sharding/shard_keycount.js
@@ -1,9 +1,10 @@
// Tests splitting a chunk twice
+(function() {
-s = new ShardingTest( "shard_keycount" , 2, 0, 1, /* chunkSize */1);
-
-// Kill balancer
-s.config.settings.update({ _id: "balancer" }, { $set : { stopped: true } }, true )
+var s = new ShardingTest({ name: "shard_keycount",
+ shards: 2,
+ mongos: 1,
+ other:{ chunkSize: 1 } });
dbName = "test"
collName = "foo"
@@ -45,3 +46,5 @@ coll.update({ _id : 3 }, { _id : 3 })
s.adminCommand({ split : ns, find : { _id : 3 } })
s.stop();
+
+});
diff --git a/jstests/sharding/shard_targeting.js b/jstests/sharding/shard_targeting.js
index 6a2634f40a0..98840c0c3ac 100644
--- a/jstests/sharding/shard_targeting.js
+++ b/jstests/sharding/shard_targeting.js
@@ -3,11 +3,11 @@
// If the optional query is not given, mongos will wrongly use the command
// BSONObj itself as the query to target shards, which could return wrong
// shards if the shard key happens to be one of the fields in the command object.
+(function() {
-var s = new ShardingTest("shard_targeting", 2, 0, 1);
+var s = new ShardingTest({ name: "shard_targeting", shards: 2 });
s.adminCommand({ enablesharding : "test" });
s.ensurePrimaryShard('test', 'shard0001');
-s.stopBalancer();
var db = s.getDB("test");
var res;
@@ -25,7 +25,7 @@ for (var i=0; i<50; i++) {
}
var theOtherShard = s.getOther( s.getServer( "test" ) ).name;
-printShardingStatus();
+s.printShardingStatus();
// Count documents on both shards
@@ -47,7 +47,7 @@ for (var i=0; i<50; i++) {
db.foo.insert({mapReduce: "" + i}); // to the chunk including string
}
-printShardingStatus();
+s.printShardingStatus();
function m() { emit("total", 1); }
function r(k, v) { return Array.sum(v); }
@@ -63,3 +63,5 @@ res = db.foo.runCommand(
assert.eq(res.results[0].value, 100);
s.stop();
+
+})();
diff --git a/jstests/sharding/shard_with_special_db_names.js b/jstests/sharding/shard_with_special_db_names.js
index 2887f364743..cb1ae66a04c 100644
--- a/jstests/sharding/shard_with_special_db_names.js
+++ b/jstests/sharding/shard_with_special_db_names.js
@@ -1,6 +1,8 @@
(function(){
-var s = new ShardingTest( "shard_with_special_db_names", 2, 0, 2 );
+var s = new ShardingTest({ name: "shard_with_special_db_names",
+ shards: 2,
+ mongos: 2 });
var specialDB = "[a-z]+";
var specialNS = specialDB + ".special";
@@ -26,4 +28,3 @@ assert.eq( cursor.count(), 1 );
assert( cursor.next()["dropped"] );
})();
-
diff --git a/jstests/sharding/sharding_balance1.js b/jstests/sharding/sharding_balance1.js
new file mode 100644
index 00000000000..e577511e571
--- /dev/null
+++ b/jstests/sharding/sharding_balance1.js
@@ -0,0 +1,63 @@
+(function() {
+
+var s = new ShardingTest({ name: "slow_sharding_balance1",
+ shards: 2,
+ mongos: 1,
+ verbose: 1,
+ other: { chunkSize: 1, enableBalancer : true } });
+
+s.adminCommand( { enablesharding : "test" } );
+s.ensurePrimaryShard('test', 'shard0001');
+
+s.config.settings.find().forEach( printjson )
+
+db = s.getDB( "test" );
+
+bigString = ""
+while ( bigString.length < 10000 )
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+inserted = 0;
+num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
+while ( inserted < ( 20 * 1024 * 1024 ) ){
+ bulk.insert({ _id: num++, s: bigString });
+ inserted += bigString.length;
+}
+assert.writeOK(bulk.execute());
+
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+assert.lt( 20 , s.config.chunks.count() , "setup2" );
+
+function diff1(){
+ var x = s.chunkCounts( "foo" );
+ printjson( x )
+ return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
+}
+
+function sum(){
+ var x = s.chunkCounts( "foo" );
+ return x.shard0000 + x.shard0001;
+}
+
+assert.lt( 20 , diff1() , "big differential here" );
+print( diff1() )
+
+assert.soon( function(){
+ var d = diff1();
+ return d < 5;
+// Make sure there's enough time here, since balancing can sleep for 15s or so between balances.
+} , "balance didn't happen" , 1000 * 60 * 5 , 5000 );
+
+var chunkCount = sum();
+s.adminCommand( { removeshard: "shard0000" } );
+
+assert.soon( function(){
+ printjson(s.chunkCounts( "foo" ));
+ s.config.shards.find().forEach(function(z){printjson(z);});
+ return chunkCount == s.config.chunks.count({shard: "shard0001"});
+} , "removeshard didn't happen" , 1000 * 60 * 3 , 5000 );
+
+s.stop();
+
+})();
diff --git a/jstests/sharding/sharding_balance2.js b/jstests/sharding/sharding_balance2.js
new file mode 100644
index 00000000000..74cf0be1fa9
--- /dev/null
+++ b/jstests/sharding/sharding_balance2.js
@@ -0,0 +1,82 @@
+/**
+ * Test the maxSize setting for the addShard command.
+ */
+
+(function() {
+"use strict";
+
+var MaxSizeMB = 1;
+
+var s = new ShardingTest({ shards: 2, other: { chunkSize: 1, manualAddShard: true }});
+var db = s.getDB( "test" );
+s.stopBalancer();
+
+var names = s.getConnNames();
+assert.eq(2, names.length);
+s.adminCommand({ addshard: names[0] });
+s.adminCommand({ addshard: names[1], maxSize: MaxSizeMB });
+
+s.adminCommand({ enablesharding: "test" });
+var res = db.adminCommand({ movePrimary: 'test', to: names[0] });
+assert(res.ok || res.errmsg == "it is already the primary");
+
+
+var bigString = "";
+while ( bigString.length < 10000 )
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+var inserted = 0;
+var num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
+while ( inserted < ( 40 * 1024 * 1024 ) ){
+ bulk.insert({ _id: num++, s: bigString });
+ inserted += bigString.length;
+}
+assert.writeOK(bulk.execute());
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+assert.gt(s.config.chunks.count(), 10);
+
+var getShardSize = function(conn) {
+ var listDatabases = conn.getDB('admin').runCommand({ listDatabases: 1 });
+ return listDatabases.totalSize;
+};
+
+var shardConn = new Mongo(names[1]);
+
+// Make sure that shard doesn't have any documents.
+assert.eq(0, shardConn.getDB('test').foo.find().itcount());
+
+var maxSizeBytes = MaxSizeMB * 1024 * 1024;
+
+// Fill the shard with documents to exceed the max size so the balancer won't move
+// chunks to this shard.
+var localColl = shardConn.getDB('local').padding;
+while (getShardSize(shardConn) < maxSizeBytes) {
+ var localBulk = localColl.initializeUnorderedBulkOp();
+
+ for (var x = 0; x < 20; x++) {
+ localBulk.insert({ x: x, val: bigString });
+ }
+ assert.writeOK(localBulk.execute());
+
+ // Force the storage engine to flush files to disk so shardSize will get updated.
+ assert.commandWorked(shardConn.getDB('admin').runCommand({ fsync: 1 }));
+}
+
+var configDB = s.s.getDB('config');
+var balanceRoundsBefore = configDB.actionlog.find({ what: 'balancer.round' }).count();
+
+s.startBalancer();
+
+// Wait until a balancer finishes at least one round.
+assert.soon(function() {
+ var currentBalanceRound = configDB.actionlog.find({ what: 'balancer.round' }).count();
+ return balanceRoundsBefore < currentBalanceRound;
+});
+
+var chunkCounts = s.chunkCounts('foo', 'test');
+assert.eq(0, chunkCounts.shard0001);
+
+s.stop();
+
+})();
diff --git a/jstests/sharding/sharding_balance3.js b/jstests/sharding/sharding_balance3.js
new file mode 100644
index 00000000000..f42dec42a57
--- /dev/null
+++ b/jstests/sharding/sharding_balance3.js
@@ -0,0 +1,65 @@
+// Simple test to make sure things get balanced
+
+(function() {
+
+var s = new ShardingTest({ name: "slow_sharding_balance3",
+ shards: 2,
+ mongos: 1,
+ verbose: 2,
+ other: { chunkSize: 1, enableBalancer : true } });
+
+s.adminCommand( { enablesharding : "test" } );
+s.ensurePrimaryShard('test', 'shard0001');
+
+s.config.settings.find().forEach( printjson );
+
+db = s.getDB( "test" );
+
+bigString = ""
+while ( bigString.length < 10000 )
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+inserted = 0;
+num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
+while ( inserted < ( 40 * 1024 * 1024 ) ){
+ bulk.insert({ _id: num++, s: bigString });
+ inserted += bigString.length;
+}
+assert.writeOK(bulk.execute());
+
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+assert.lt( 20 , s.config.chunks.count() , "setup2" );
+
+function diff1(){
+ var x = s.chunkCounts( "foo" );
+ printjson( x )
+ return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
+}
+
+assert.lt( 10 , diff1() );
+
+// Wait for balancer to kick in.
+var initialDiff = diff1();
+assert.soon(function() {
+ return diff1() != initialDiff;
+ }, "Balancer did not kick in");
+
+print("* A");
+print( "disabling the balancer" );
+s.config.settings.update( { _id : "balancer" }, { $set : { stopped : true } } , true );
+s.config.settings.find().forEach( printjson );
+print("* B");
+
+
+print( diff1() )
+
+var currDiff = diff1();
+assert.repeat( function(){
+ var d = diff1();
+ return d != currDiff;
+} , "balance with stopped flag should not have happened" , 1000 * 60 , 5000 );
+
+s.stop();
+
+})();
diff --git a/jstests/sharding/sharding_balance4.js b/jstests/sharding/sharding_balance4.js
new file mode 100644
index 00000000000..3560dac9a6b
--- /dev/null
+++ b/jstests/sharding/sharding_balance4.js
@@ -0,0 +1,140 @@
+// Check that doing updates done during a migrate all go to the right place
+(function() {
+
+var s = new ShardingTest({ name: "slow_sharding_balance4",
+ shards: 2,
+ mongos: 1,
+ verbose: 1,
+ other: { chunkSize: 1 } });
+
+s.adminCommand( { enablesharding : "test" } );
+s.ensurePrimaryShard('test', 'shard0001');
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+assert.eq( 1 , s.config.chunks.count() , "setup1" );
+
+s.config.settings.find().forEach( printjson )
+
+db = s.getDB( "test" );
+
+bigString = ""
+while ( bigString.length < 10000 )
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+N = 3000
+
+num = 0;
+
+counts = {}
+
+//
+// TODO: Rewrite to make much clearer.
+//
+// The core behavior of this test is to add a bunch of documents to a sharded collection, then
+// incrementally update each document and make sure the counts in the document match our update
+// counts while balancing occurs (doUpdate()). Every once in a while we also check (check())
+// our counts via a query.
+//
+// If during a chunk migration an update is missed, we trigger an assertion and fail.
+//
+
+
+function doUpdate( bulk, includeString, optionalId ){
+ var up = { $inc : { x : 1 } }
+ if ( includeString )
+ up["$set"] = { s : bigString };
+ var myid = optionalId == undefined ? Random.randInt( N ) : optionalId
+ bulk.find({ _id : myid }).upsert().update( up );
+
+ counts[myid] = ( counts[myid] ? counts[myid] : 0 ) + 1;
+ return myid;
+}
+
+// Initially update all documents from 1 to N, otherwise later checks can fail because no document
+// previously existed
+var bulk = db.foo.initializeUnorderedBulkOp();
+for ( i = 0; i < N; i++ ){
+ doUpdate( bulk, true, i );
+}
+
+for ( i=0; i<N*9; i++ ){
+ doUpdate( bulk, false );
+}
+assert.writeOK(bulk.execute());
+
+for ( var i=0; i<50; i++ ){
+ s.printChunks( "test.foo" )
+ if ( check( "initial:" + i , true ) )
+ break;
+ sleep( 5000 )
+}
+check( "initial at end" )
+
+
+assert.lt( 20 , s.config.chunks.count() , "setup2" );
+
+function check( msg , dontAssert ){
+ for ( var x in counts ){
+ var e = counts[x];
+ var z = db.foo.findOne( { _id : parseInt( x ) } )
+
+ if ( z && z.x == e )
+ continue;
+
+ if ( dontAssert ){
+ if ( z )
+ delete z.s;
+ print( "not asserting for key failure: " + x + " want: " + e + " got: " + tojson(z) )
+ return false;
+ }
+
+ s.s.getDB("admin").runCommand({ setParameter : 1, logLevel : 2 })
+
+ printjson( db.foo.findOne( { _id : parseInt( x ) } ) )
+
+ var y = db.foo.findOne( { _id : parseInt( x ) } )
+
+ if ( y ){
+ delete y.s;
+ }
+
+ s.printChunks( "test.foo" )
+
+ assert( z , "couldn't find : " + x + " y:" + tojson(y) + " e: " + e + " " + msg )
+ assert.eq( e , z.x , "count for : " + x + " y:" + tojson(y) + " " + msg )
+ }
+
+ return true;
+}
+
+function diff1(){
+
+ jsTest.log("Running diff1...")
+
+ bulk = db.foo.initializeUnorderedBulkOp();
+ var myid = doUpdate( bulk, false );
+ var res = assert.writeOK(bulk.execute());
+
+ assert.eq( 1, res.nModified,
+ "diff myid: " + myid + " 2: " + res.toString() + "\n" +
+ " correct count is: " + counts[myid] +
+ " db says count is: " + tojson(db.foo.findOne({ _id: myid })) );
+
+ var x = s.chunkCounts( "foo" )
+ if ( Math.random() > .999 )
+ printjson( x )
+ return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
+}
+
+assert.lt( 20 , diff1() ,"initial load" );
+print( diff1() )
+
+s.startBalancer();
+
+assert.soon( function(){
+ var d = diff1();
+ return d < 5;
+} , "balance didn't happen" , 1000 * 60 * 20 , 1 );
+
+s.stop();
+
+})();
diff --git a/jstests/sharding/sharding_migrate_cursor1.js b/jstests/sharding/sharding_migrate_cursor1.js
new file mode 100644
index 00000000000..fa8668ae2bb
--- /dev/null
+++ b/jstests/sharding/sharding_migrate_cursor1.js
@@ -0,0 +1,82 @@
+// SERVER-2068
+(function() {
+
+var chunkSize = 25
+
+var s = new ShardingTest({ name: "migrate_cursor1",
+ shards: 2,
+ mongos: 1,
+ verbose: 1,
+ other: { chunkSize : chunkSize } });
+
+s.adminCommand( { enablesharding : "test" } );
+db = s.getDB( "test" )
+s.ensurePrimaryShard('test', 'shard0001');
+t = db.foo
+
+bigString = ""
+stringSize = 1024;
+
+while ( bigString.length < stringSize )
+ bigString += "asdasdas";
+
+stringSize = bigString.length
+docsPerChunk = Math.ceil( ( chunkSize * 1024 * 1024 ) / ( stringSize - 12 ) )
+numChunks = 5
+numDocs = 20 * docsPerChunk
+
+print( "stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs )
+
+var bulk = t.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++){
+ bulk.insert({ _id: i, s: bigString });
+}
+assert.writeOK(bulk.execute());
+
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+assert.lt( numChunks , s.config.chunks.find().count() , "initial 1" );
+
+primary = s.getServer( "test" ).getDB( "test" ).foo;
+secondaryName = s.getOther( primary.name )
+secondary = secondaryName.getDB( "test" ).foo;
+
+assert.eq( numDocs , primary.count() , "initial 2" );
+assert.eq( 0 , secondary.count() , "initial 3" );
+assert.eq( numDocs , t.count() , "initial 4" )
+
+x = primary.find( { _id : { $lt : 500 } } ).batchSize(2)
+x.next(); // 1. Create an open cursor
+
+print("start moving chunks...")
+
+// 2. Move chunk from s0 to s1 without waiting for deletion.
+// Command returns, but the deletion on s0 will block due to the open cursor.
+s.adminCommand( { moveChunk : "test.foo" , find : { _id : 0 } , to : secondaryName.name } )
+
+// 3. Start second moveChunk command from s0 to s1.
+// This moveChunk should not observe the above deletion as a 'mod', transfer it to s1 and cause deletion on s1.
+// This moveChunk will wait for deletion.
+join = startParallelShell( "db.x.insert( {x:1} ); db.adminCommand( { moveChunk : 'test.foo' , find : { _id : " + docsPerChunk * 3 + " } , to : '" + secondaryName.name + "', _waitForDelete: true } )" )
+assert.soon( function(){ return db.x.count() > 0; } , "XXX" , 30000 , 1 )
+
+// 4. Close the cursor to enable chunk deletion.
+print( "itcount: " + x.itcount() )
+
+x = null;
+for ( i=0; i<5; i++ ) gc()
+
+print( "cursor should be gone" )
+
+// 5. Waiting for the second moveChunk to finish its deletion.
+// Note the deletion for the first moveChunk may not be finished.
+join();
+
+//assert.soon( function(){ return numDocs == t.count(); } , "at end 1" )
+// 6. Check the total number of docs on both shards to make sure no doc is lost.
+// Use itcount() to ignore orphan docments.
+assert.eq( numDocs , t.find().itcount() , "at end 2" )
+
+s.stop();
+
+})();
diff --git a/jstests/sharding/sharding_multiple_ns_rs.js b/jstests/sharding/sharding_multiple_ns_rs.js
new file mode 100644
index 00000000000..1bb121c544f
--- /dev/null
+++ b/jstests/sharding/sharding_multiple_ns_rs.js
@@ -0,0 +1,58 @@
+(function() {
+
+var s = new ShardingTest({ name: "Sharding multiple ns",
+ shards: 1,
+ mongos: 1,
+ verbose: 1,
+ other: { rs : true , chunkSize: 1 } });
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+db = s.getDB( "test" );
+
+var bulk = db.foo.initializeUnorderedBulkOp();
+var bulk2 = db.bar.initializeUnorderedBulkOp();
+for ( i=0; i<100; i++ ) {
+ bulk.insert({ _id: i, x: i });
+ bulk2.insert({ _id: i, x: i });
+}
+assert.writeOK(bulk.execute());
+assert.writeOK(bulk2.execute());
+
+sh.splitAt( "test.foo" , { _id : 50 } )
+
+other = new Mongo( s.s.name );
+dbother = other.getDB( "test" );
+
+assert.eq( 5 , db.foo.findOne( { _id : 5 } ).x );
+assert.eq( 5 , dbother.foo.findOne( { _id : 5 } ).x );
+
+assert.eq( 5 , db.bar.findOne( { _id : 5 } ).x );
+assert.eq( 5 , dbother.bar.findOne( { _id : 5 } ).x );
+
+s._rs[0].test.awaitReplication();
+s._rs[0].test.stopMaster(15);
+
+// Wait for the primary to come back online...
+var primary = s._rs[0].test.getPrimary();
+
+// Wait for the mongos to recognize the new primary...
+ReplSetTest.awaitRSClientHosts( db.getMongo(), primary, { ismaster : true } );
+
+assert.eq( 5 , db.foo.findOne( { _id : 5 } ).x );
+assert.eq( 5 , db.bar.findOne( { _id : 5 } ).x );
+
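+// After the failover, shard test.bar as well and verify that both cached and fresh connections still route correctly.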
+s.adminCommand( { shardcollection : "test.bar" , key : { _id : 1 } } );
+sh.splitAt( "test.bar" , { _id : 50 } )
+
+yetagain = new Mongo( s.s.name )
+assert.eq( 5 , yetagain.getDB( "test" ).bar.findOne( { _id : 5 } ).x )
+assert.eq( 5 , yetagain.getDB( "test" ).foo.findOne( { _id : 5 } ).x )
+
+assert.eq( 5 , dbother.bar.findOne( { _id : 5 } ).x );
+assert.eq( 5 , dbother.foo.findOne( { _id : 5 } ).x );
+
+s.stop();
+
+})();
diff --git a/jstests/sharding/sharding_rs2.js b/jstests/sharding/sharding_rs2.js
new file mode 100644
index 00000000000..c439eeb6b17
--- /dev/null
+++ b/jstests/sharding/sharding_rs2.js
@@ -0,0 +1,222 @@
+// Mostly tests mongos behavior with replica set shards.
+(function() {
+
+var s = new ShardingTest({ shards: { rs0: { nodes: 2 }, rs1: { nodes: 2 } },
+ verbose: 1,
+ chunkSize: 1 });
+
+db = s.getDB( "test" )
+t = db.foo
+
+s.adminCommand( { enablesharding : "test" } );
+s.ensurePrimaryShard('test', 'test-rs0');
+
+// -------------------------------------------------------------------------------------------
+// ---------- test that config server updates when replica set config changes ----------------
+// -------------------------------------------------------------------------------------------
+
+
+db.foo.save( { _id : 5 ,x : 17 } )
+assert.eq( 1 , db.foo.count() );
+
+s.config.databases.find().forEach( printjson )
+s.config.shards.find().forEach( printjson )
+
+serverName = s.getServerName( "test" )
+
+function countNodes(){
+ var x = s.config.shards.findOne( { _id : serverName } );
+ return x.host.split( "," ).length
+}
+
+assert.eq( 2 , countNodes() , "A1" )
+
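+// Add a third node to the replica set; the config server should eventually pick up the new host list.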
+rs = s.getRSEntry( serverName );
+rs.test.add()
+try {
+ rs.test.reInitiate();
+}
+catch ( e ){
+ // This is OK, as replica sets may close connections on a change of master.
+ print( e );
+}
+
+assert.soon(
+ function(){
+ try {
+ printjson( rs.test.getMaster().getDB("admin").runCommand( "isMaster" ) )
+ s.config.shards.find().forEach( printjsononeline );
+ return countNodes() == 3;
+ }
+ catch ( e ){
+ print( e );
+ }
+ } , "waiting for config server to update" , 180 * 1000 , 1000 );
+
+// cleanup after adding node
+for ( i=0; i<5; i++ ){
+ try {
+ db.foo.findOne();
+ }
+ catch ( e ){}
+}
+
+jsTest.log( "Awaiting replication of all nodes, so spurious sync'ing queries don't upset our counts..." )
+rs.test.awaitReplication()
+// Make sure we wait for secondaries here - otherwise a secondary could come online later and be used for the
+// count command before being fully replicated
+jsTest.log( "Awaiting secondary status of all nodes" )
+rs.test.waitForState( rs.test.getSecondaries(), rs.test.SECONDARY, 180 * 1000 )
+
+// -------------------------------------------------------------------------------------------
+// ---------- test routing to slaves ----------------
+// -------------------------------------------------------------------------------------------
+
+// --- not sharded ----
+
+m = new Mongo( s.s.name );
+ts = m.getDB( "test" ).foo
+
+before = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+
+for ( i=0; i<10; i++ )
+ assert.eq( 17 , ts.findOne().x , "B1" )
+
+m.setSlaveOk()
+for ( i=0; i<10; i++ )
+ assert.eq( 17 , ts.findOne().x , "B2" )
+
+after = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+
+printjson( before )
+printjson( after )
+
+assert.lte( before.query + 10 , after.query , "B3" )
+
+// --- add more data ----
+
+db.foo.ensureIndex( { x : 1 } )
+
+var bulk = db.foo.initializeUnorderedBulkOp();
+for ( i=0; i<100; i++ ){
+ if ( i == 17 ) continue;
+ bulk.insert({ x: i });
+}
+assert.writeOK(bulk.execute({ w: 3 }));
+
+// Counts pass along the connection's options (which is slaveOk'd), so we need to wait for
+// replication for this and future tests to pass.
+rs.test.awaitReplication()
+
+assert.eq( 100 , ts.count() , "B4" )
+assert.eq( 100 , ts.find().itcount() , "B5" )
+assert.eq( 100 , ts.find().batchSize(5).itcount() , "B6" )
+
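+// Open a cursor with a small batch size, then GC so the client-side cursor object is released.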
+t.find().batchSize(3).next();
+gc(); gc(); gc();
+
+// --- sharded ----
+
+assert.eq( 100 , db.foo.count() , "C1" )
+
+s.adminCommand( { shardcollection : "test.foo" , key : { x : 1 } } );
+
+// We're doing manual chunk splits and migrations, so stop the balancer first.
+s.stopBalancer()
+
+assert.eq( 100 , t.count() , "C2" )
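+// Manually split the collection at the midpoint of the data.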
+s.adminCommand( { split : "test.foo" , middle : { x : 50 } } )
+
+db.printShardingStatus()
+
+other = s.config.shards.findOne( { _id : { $ne : serverName } } );
+assert.commandWorked(s.getDB('admin').runCommand({ moveChunk: "test.foo",
+ find: { x: 10 },
+ to: other._id,
+ _secondaryThrottle: true,
+ writeConcern: { w: 2 },
+ _waitForDelete: true }));
+assert.eq( 100 , t.count() , "C3" )
+
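+// After the migration, the original shard's primary should hold only the 50 documents in its remaining chunk.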
+assert.eq( 50 , rs.test.getMaster().getDB( "test" ).foo.count() , "C4" )
+
+// by non-shard key
+
+m = new Mongo( s.s.name );
+ts = m.getDB( "test" ).foo
+
+before = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+
+for ( i=0; i<10; i++ )
+ assert.eq( 17 , ts.findOne( { _id : 5 } ).x , "D1" )
+
+m.setSlaveOk()
+for ( i=0; i<10; i++ )
+ assert.eq( 17 , ts.findOne( { _id : 5 } ).x , "D2" )
+
+after = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+
+assert.lte( before.query + 10 , after.query , "D3" )
+
+// by shard key
+
+m = new Mongo( s.s.name );
+m.forceWriteMode("commands");
+
+db.printShardingStatus()
+
+ts = m.getDB( "test" ).foo
+
+before = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+
+for ( i=0; i<10; i++ )
+ assert.eq( 57 , ts.findOne( { x : 57 } ).x , "E1" )
+
+m.setSlaveOk()
+for ( i=0; i<10; i++ )
+ assert.eq( 57 , ts.findOne( { x : 57 } ).x , "E2" )
+
+after = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+
+assert.lte( before.query + 10 , after.query , "E3" )
+
+assert.eq( 100 , ts.count() , "E4" )
+assert.eq( 100 , ts.find().itcount() , "E5" )
+printjson( ts.find().batchSize(5).explain() )
+
+// fsyncLock the secondaries
+rs.test.getSecondaries().forEach(function(secondary) {
+ secondary.getDB( "test" ).fsyncLock();
+})
+// Modify data only on the primary member of the primary shard.
+// { x: 60 } routes to the chunk owned by "rs", which is the primary shard.
+assert.writeOK( ts.insert( { primaryOnly: true, x: 60 } ) );
+// Read from a secondary through mongos; the doc is not there yet due to replication delay or the fsync lock,
+// but this guarantees we are not reading from the primary.
+assert.eq( 0, ts.find({ primaryOnly: true, x: 60 }).itcount() );
+// Unlock the secondaries
+rs.test.getSecondaries().forEach(function(secondary) {
+ secondary.getDB( "test" ).fsyncUnlock();
+})
+// Clean up the data
+assert.writeOK( ts.remove( { primaryOnly: true, x: 60 }, { writeConcern: { w: 3 }} ) );
+
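+// Fresh slaveOk connections should see the complete result set when reading from secondaries.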
+for ( i=0; i<10; i++ ) {
+ m = new Mongo( s.s.name );
+ m.setSlaveOk();
+ ts = m.getDB( "test" ).foo
+ assert.eq( 100 , ts.find().batchSize(5).itcount() , "F2." + i )
+}
+
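+// Fresh connections without slaveOk (reading from the primaries) should also see all documents.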
+for ( i=0; i<10; i++ ) {
+ m = new Mongo( s.s.name );
+ ts = m.getDB( "test" ).foo
+ assert.eq( 100 , ts.find().batchSize(5).itcount() , "F3." + i )
+}
+
+
+printjson( db.adminCommand( "getShardMap" ) );
+
+s.stop();
+
+})();
diff --git a/jstests/sharding/sort1.js b/jstests/sharding/sort1.js
index f2a682a82d2..2d32a539c35 100644
--- a/jstests/sharding/sort1.js
+++ b/jstests/sharding/sort1.js
@@ -1,6 +1,8 @@
+(function() {
-s = new ShardingTest( "sort1" , 2 , 0 , 2 )
-s.stopBalancer();
+var s = new ShardingTest({ name: "sort1",
+ shards: 2,
+ mongos: 2 });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
@@ -98,3 +100,5 @@ assert.eq( backward , getSorted( "sub.x" , 1 , { '_id' : 0, 'sub.num':1 } ) , "D
assert.eq( forward , getSorted( "sub.x" , -1 , { '_id' : 0, 'sub.num':1 } ) , "D12" )
s.stop();
+
+})();
diff --git a/jstests/sharding/split_with_force.js b/jstests/sharding/split_with_force.js
index 4307fa9e64b..117d17361e0 100644
--- a/jstests/sharding/split_with_force.js
+++ b/jstests/sharding/split_with_force.js
@@ -2,7 +2,7 @@
// Tests autosplit locations with force : true
//
-var options = { chunksize : 1, // MB
+var options = { chunkSize: 1, // MB
mongosOptions : { noAutoSplit : "" }
};
diff --git a/jstests/sharding/split_with_force_small.js b/jstests/sharding/split_with_force_small.js
index 8b313590a52..86fb4667132 100644
--- a/jstests/sharding/split_with_force_small.js
+++ b/jstests/sharding/split_with_force_small.js
@@ -2,7 +2,7 @@
// Tests autosplit locations with force : true, for small collections
//
-var options = { chunksize : 1, // MB
+var options = { chunkSize: 1, // MB
mongosOptions : { noAutoSplit : "" }
};
diff --git a/jstests/sharding/stats.js b/jstests/sharding/stats.js
index 52ec40556d7..de08c73d83d 100644
--- a/jstests/sharding/stats.js
+++ b/jstests/sharding/stats.js
@@ -1,4 +1,11 @@
-s = new ShardingTest( "stats" , 2 , 1 , 1, { enableBalancer : 1 } );
+(function () {
+
+var s = new ShardingTest({ name: "stats",
+ shards: 2,
+ mongos: 1,
+ verbose: 1,
+ other: { enableBalancer: true } });
+
s.adminCommand( { enablesharding : "test" } );
a = s._connections[0].getDB( "test" );
@@ -188,4 +195,6 @@ collStatComp(coll_not_scaled, coll_scaled_1024, 1024, true);
checkIndexDetails({indexDetails: true, indexDetailsName: indexName}, indexName);
}());
-s.stop()
+s.stop();
+
+})();
diff --git a/jstests/sharding/sync_cluster_config/parallel.js b/jstests/sharding/sync_cluster_config/parallel.js
index be2aab23d17..facc29ea361 100644
--- a/jstests/sharding/sync_cluster_config/parallel.js
+++ b/jstests/sharding/sync_cluster_config/parallel.js
@@ -1,7 +1,11 @@
// This test fails when run with authentication because benchRun with auth is broken: SERVER-6388
-numShards = 3
-s = new ShardingTest( "parallel" , numShards , 2 , 2 , { sync : true } );
-s.setBalancer( false )
+var numShards = 3
+
+var s = new ShardingTest({ name: "parallel",
+ shards: numShards,
+ mongos: 2,
+ verbose: 1,
+ other: { sync : true } });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
diff --git a/jstests/sharding/sync_cluster_config/sync2.js b/jstests/sharding/sync_cluster_config/sync2.js
index de4ea6b2ddc..4b94eb03fd9 100644
--- a/jstests/sharding/sync_cluster_config/sync2.js
+++ b/jstests/sharding/sync_cluster_config/sync2.js
@@ -1,7 +1,9 @@
-// sync2.js
+(function () {
-var s = new ShardingTest( "sync2" , 3 , 50 , 2 , { sync : true } );
-s.stopBalancer()
+var s = new ShardingTest({ name: "sync2",
+ shards: 3,
+ mongos: 2,
+ other: { sync : true } });
var s2 = s._mongos[1];
@@ -112,3 +114,5 @@ for (i = 1; i < hashes.length; i++) {
}
s.stop();
+
+})();
diff --git a/jstests/sharding/sync_cluster_config/sync7.js b/jstests/sharding/sync_cluster_config/sync7.js
index 33cf31bc899..25e95fdafc3 100644
--- a/jstests/sharding/sync_cluster_config/sync7.js
+++ b/jstests/sharding/sync_cluster_config/sync7.js
@@ -1,6 +1,9 @@
// Test that the clock skew of the distributed lock disallows getting locks for moving and splitting.
+(function() {
-s = new ShardingTest( "moveDistLock", 3, 0, undefined, { sync : true } );
+var s = new ShardingTest({ name: "moveDistLock",
+ shards: 3,
+ other: { sync : true } });
// Enable sharding on DB and collection before skewing the clocks
result = s.getDB("admin").runCommand( { enablesharding : "test1" } );
@@ -66,3 +69,5 @@ printjson(result);
assert.eq( result.ok, 1, "Move command should have succeeded again!" )
s.stop();
+
+})();
diff --git a/jstests/sharding/sync_cluster_config/sync_conn_cmd.js b/jstests/sharding/sync_cluster_config/sync_conn_cmd.js
index bbb9adda16a..5bf44cb5969 100644
--- a/jstests/sharding/sync_cluster_config/sync_conn_cmd.js
+++ b/jstests/sharding/sync_cluster_config/sync_conn_cmd.js
@@ -3,9 +3,11 @@
* Test SyncClusterConnection commands using call instead of findOne
*/
-// Note: count command uses call
+(function() {
-var st = new ShardingTest({ shards: [], other: { sync: true }});
+var st = new ShardingTest({ name: 'sync_conn_cmd',
+ shards: 0,
+ other: { sync: true }});
var configDB = st.config;
var coll = configDB.test;
@@ -58,3 +60,4 @@ testInvalidCount();
st.stop();
+})();
diff --git a/jstests/sharding/tag_auto_split.js b/jstests/sharding/tag_auto_split.js
index 5e2fe256619..a239ad88c01 100644
--- a/jstests/sharding/tag_auto_split.js
+++ b/jstests/sharding/tag_auto_split.js
@@ -1,6 +1,9 @@
// test to make sure that tag ranges get split
-s = new ShardingTest( "tag_auto_split", 2, 0, 1, { nopreallocj : true, enableBalancer : true } );
+var s = new ShardingTest({ name: "tag_auto_split",
+ shards: 2,
+ mongos: 1,
+ other: { enableBalancer : true } });
db = s.getDB( "test" );
diff --git a/jstests/sharding/tag_range.js b/jstests/sharding/tag_range.js
index e934a0b01e9..897433001e2 100644
--- a/jstests/sharding/tag_range.js
+++ b/jstests/sharding/tag_range.js
@@ -4,7 +4,9 @@ function countTags( num, message ) {
assert.eq( s.config.tags.count() , num , message );
}
-s = new ShardingTest( "tag_range" , 2 , 0 , 1 , { nopreallocj : true } );
+var s = new ShardingTest({ name: "tag_range",
+ shards: 2,
+ mongos: 1 });
// this set up is not required but prevents warnings in the remove
db = s.getDB( "tag_range" );
diff --git a/jstests/sharding/top_chunk_autosplit.js b/jstests/sharding/top_chunk_autosplit.js
index 0ebd8bf1a60..30040c7979e 100644
--- a/jstests/sharding/top_chunk_autosplit.js
+++ b/jstests/sharding/top_chunk_autosplit.js
@@ -99,7 +99,7 @@ function runTest(test) {
// Main
var dbName = "test";
var collName = "topchunk";
-var st = shardSetup({name: "topchunk", shards: 4, chunksize: 1}, dbName, collName);
+var st = shardSetup({name: "topchunk", shards: 4, chunkSize: 1}, dbName, collName);
var db = st.getDB(dbName);
var coll = db[collName];
var configDB = st.s.getDB('config');
@@ -243,7 +243,7 @@ for (var i = 0; i < tests.length; i++) {
st.stop();
// Single node shard Tests
-st = shardSetup({name: "singleNode", shards: 1, chunksize: 1}, dbName, collName);
+st = shardSetup({name: "singleNode", shards: 1, chunkSize: 1}, dbName, collName);
db = st.getDB(dbName);
coll = db[collName];
configDB = st.s.getDB('config');
@@ -276,7 +276,7 @@ st.stop();
// maxSize test
// To set maxSize, must manually add the shards
-st = shardSetup({name: "maxSize", shards: 2, chunksize: 1, other: {manualAddShard: true}},
+st = shardSetup({name: "maxSize", shards: 2, chunkSize: 1, other: {manualAddShard: true}},
dbName,
collName);
db = st.getDB(dbName);
diff --git a/jstests/sharding/update_sharded.js b/jstests/sharding/update_sharded.js
index 948781e6d66..805cda0c487 100644
--- a/jstests/sharding/update_sharded.js
+++ b/jstests/sharding/update_sharded.js
@@ -1,15 +1,18 @@
// Test simple updates issued through mongos. Updates have different constraints through mongos,
// since shard key is immutable.
+(function() {
-s = new ShardingTest( "auto1" , 2 , 1 , 1 );
+var s = new ShardingTest({ name: "auto1", shards: 2, mongos: 1 });
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
+
// repeat same tests with hashed shard key, to ensure identical behavior
s.adminCommand( { shardcollection : "test.update0" , key : { key : 1 } } );
s.adminCommand( { shardcollection : "test.update1" , key : { key : "hashed" } } );
db = s.getDB( "test" )
+
for(i=0; i < 2; i++){
coll = db.getCollection("update" + i);
@@ -96,5 +99,6 @@ for(i=0; i < 2; i++){
assert.writeOK(coll.update({_id : ObjectId(), 'key.x' : 1}, {$set : {x : 1}}, {multi : false}));
}
-s.stop()
+s.stop();
+})();
diff --git a/jstests/sharding/user_flags_sharded.js b/jstests/sharding/user_flags_sharded.js
index e5b5f8a41dd..f2a8d626492 100644
--- a/jstests/sharding/user_flags_sharded.js
+++ b/jstests/sharding/user_flags_sharded.js
@@ -1,8 +1,8 @@
// Test that when user flags are set on a collection,
// then collection is sharded, flags get carried over.
+(function() {
if (jsTest.options().storageEngine === "mmapv1") {
-
// the dbname and collection we'll be working with
var dbname = "testDB";
var coll = "userFlagsColl";
@@ -39,7 +39,7 @@ if (jsTest.options().storageEngine === "mmapv1") {
assert.eq( collstats.userFlags , 0 , "modified collection should have userFlags = 0 ");
// start up a new sharded cluster, and add previous mongod
- var s = new ShardingTest( "user_flags", 1 );
+ var s = new ShardingTest({ name: "user_flags", shards: 1 });
assert( s.admin.runCommand( { addshard: newShardConn.host , name: "myShard" } ).ok,
"did not accept new shard" );
@@ -60,5 +60,6 @@ if (jsTest.options().storageEngine === "mmapv1") {
MongoRunner.stopMongod(newShardConn);
s.stop();
-
}
+
+})();
diff --git a/jstests/sharding/version1.js b/jstests/sharding/version1.js
index afe3f709fad..d3d317122b2 100644
--- a/jstests/sharding/version1.js
+++ b/jstests/sharding/version1.js
@@ -1,6 +1,6 @@
-// version1.js
+(function() {
-s = new ShardingTest( "version1" , 1 , 2 )
+var s = new ShardingTest({ name: "version1", shards: 1, verbose: 2 });
s.adminCommand( { enablesharding : "alleyinsider" } );
s.adminCommand( { shardcollection : "alleyinsider.foo" , key : { num : 1 } } );
@@ -69,3 +69,5 @@ assert( a.runCommand({ setShardVersion: "alleyinsider.foo",
// assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).global.i , 3 , "my get version B" );
s.stop();
+
+})();
diff --git a/jstests/sharding/version2.js b/jstests/sharding/version2.js
index 5a375f89c21..ae6109ca141 100644
--- a/jstests/sharding/version2.js
+++ b/jstests/sharding/version2.js
@@ -1,6 +1,6 @@
-// version2.js
+(function() {
-s = new ShardingTest( "version2" , 1 , 2 )
+var s = new ShardingTest({ name: "version2", shards: 1, verbose: 2 });
s.adminCommand( { enablesharding : "alleyinsider" } );
s.adminCommand( { shardcollection : "alleyinsider.foo" , key : { num : 1 } } );
@@ -63,5 +63,6 @@ assert.throws( simpleFindOne , [] , "should complain about not in sharded mode 1
// simpleFindOne(); // newer version is ok
-
s.stop();
+
+})();
diff --git a/jstests/sharding/zbigMapReduce.js b/jstests/sharding/zbigMapReduce.js
index 79813ff06fd..9a63268fac2 100644
--- a/jstests/sharding/zbigMapReduce.js
+++ b/jstests/sharding/zbigMapReduce.js
@@ -7,7 +7,7 @@ function setupTest() {
mongos : 1,
other: { rs: true,
numReplicas: 2,
- chunksize : 1,
+ chunkSize: 1,
rsOptions: { oplogSize : 50 },
enableBalancer : 1
} } );