From c6d86af073687d119a4d707bfc1a2d0f0df7419a Mon Sep 17 00:00:00 2001
From: Esha Maharishi
Date: Wed, 28 Sep 2016 13:51:37 -0400
Subject: SERVER-26309 Disable auto splitting in ShardingTest by default

---
 jstests/sharding/auto1.js | 79 -----------
 jstests/sharding/auto2.js | 152 ---------------------
 jstests/sharding/auto_rebalance.js | 2 +-
 jstests/sharding/autosplit.js | 79 +++++++++++
 jstests/sharding/autosplit_heuristics.js | 7 +-
 jstests/sharding/autosplit_with_balancer.js | 152 +++++++++++++++++++++
 jstests/sharding/conf_server_write_concern.js | 4 +-
 .../csrs_upgrade_mongod_using_movechunk.js | 2 +-
 jstests/sharding/cursor1.js | 3 +-
 jstests/sharding/disable_autosplit.js | 3 +-
 jstests/sharding/findandmodify2.js | 8 +-
 jstests/sharding/forget_mr_temp_ns.js | 2 +-
 jstests/sharding/in_memory_sort_limit.js | 2 +-
 .../sharding/movechunk_with_default_paranoia.js | 2 +-
 jstests/sharding/movechunk_with_moveParanoia.js | 7 +-
 jstests/sharding/movechunk_with_noMoveParanoia.js | 7 +-
 jstests/sharding/mrShardedOutput.js | 2 +-
 jstests/sharding/mrShardedOutputAuth.js | 2 +-
 jstests/sharding/sharding_balance4.js | 8 +-
 jstests/sharding/split_with_force.js | 3 +-
 jstests/sharding/split_with_force_small.js | 3 +-
 jstests/sharding/top_chunk_autosplit.js | 16 ++-
 jstests/sharding/write_cmd_auto_split.js | 2 +-
 jstests/sharding/zbigMapReduce.js | 2 +-
 jstests/ssl/libs/ssl_helpers.js | 8 +-
 jstests/tool/dumprestore9.js | 2 +-
 src/mongo/shell/shardingtest.js | 14 ++
 27 files changed, 307 insertions(+), 266 deletions(-)
 delete mode 100644 jstests/sharding/auto1.js
 delete mode 100644 jstests/sharding/auto2.js
 create mode 100644 jstests/sharding/autosplit.js
 create mode 100644 jstests/sharding/autosplit_with_balancer.js

diff --git a/jstests/sharding/auto1.js b/jstests/sharding/auto1.js
deleted file mode 100644
index c893902843e..00000000000
--- a/jstests/sharding/auto1.js
+++ /dev/null
@@ -1,79 +0,0 @@
-(function() {
-
-    var s = new ShardingTest({name: "auto1", shards: 2, mongos: 1, other: {enableBalancer: 1}});
-
-    s.adminCommand({enablesharding: "test"});
-    s.ensurePrimaryShard('test', 'shard0001');
-    s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
-
-    bigString = "";
-    while (bigString.length < 1024 * 50)
-        bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
-
-    db = s.getDB("test");
-    coll = db.foo;
-
-    var i = 0;
-
-    var bulk = coll.initializeUnorderedBulkOp();
-    for (; i < 100; i++) {
-        bulk.insert({num: i, s: bigString});
-    }
-    assert.writeOK(bulk.execute());
-
-    primary = s.getServer("test").getDB("test");
-
-    counts = [];
-
-    s.printChunks();
-    counts.push(s.config.chunks.count());
-    assert.eq(100, db.foo.find().itcount());
-
-    print("datasize: " +
-          tojson(s.getServer("test").getDB("admin").runCommand({datasize: "test.foo"})));
-
-    bulk = coll.initializeUnorderedBulkOp();
-    for (; i < 200; i++) {
-        bulk.insert({num: i, s: bigString});
-    }
-    assert.writeOK(bulk.execute());
-
-    s.printChunks();
-    s.printChangeLog();
-    counts.push(s.config.chunks.count());
-
-    bulk = coll.initializeUnorderedBulkOp();
-    for (; i < 400; i++) {
-        bulk.insert({num: i, s: bigString});
-    }
-    assert.writeOK(bulk.execute());
-
-    s.printChunks();
-    s.printChangeLog();
-    counts.push(s.config.chunks.count());
-
-    bulk = coll.initializeUnorderedBulkOp();
-    for (; i < 700; i++) {
-        bulk.insert({num: i, s: bigString});
-    }
-    assert.writeOK(bulk.execute());
-
-    s.printChunks();
-    s.printChangeLog();
-    counts.push(s.config.chunks.count());
-
-    assert(counts[counts.length - 1] > counts[0], "counts 1 : " + tojson(counts));
-    sorted = counts.slice(0);
-    // Sort doesn't sort numbers correctly by default, resulting in fail
-    sorted.sort(function(a, b) {
-        return a - b;
-    });
-    assert.eq(counts, sorted, "counts 2 : " + tojson(counts));
-
-    print(counts);
-
-    printjson(db.stats());
-
-    s.stop();
-
-})();
diff --git a/jstests/sharding/auto2.js b/jstests/sharding/auto2.js
deleted file mode 100644
index 9e88d667fbe..00000000000
--- a/jstests/sharding/auto2.js
+++ /dev/null
@@ -1,152 +0,0 @@
-(function() {
-
-    var s = new ShardingTest({name: "auto2", shards: 2, mongos: 2});
-
-    s.adminCommand({enablesharding: "test"});
-    s.ensurePrimaryShard('test', 'shard0001');
-    s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
-
-    var bigString = "";
-    while (bigString.length < 1024 * 50) {
-        bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
-    }
-
-    var db = s.getDB("test");
-    var coll = db.foo;
-
-    var i = 0;
-    for (var j = 0; j < 30; j++) {
-        print("j:" + j + " : " +
-              Date.timeFunc(function() {
-                  var bulk = coll.initializeUnorderedBulkOp();
-                  for (var k = 0; k < 100; k++) {
-                      bulk.insert({num: i, s: bigString});
-                      i++;
-                  }
-                  assert.writeOK(bulk.execute());
-              }));
-    }
-
-    s.startBalancer();
-
-    assert.eq(i, j * 100, "setup");
-
-    // Until SERVER-9715 is fixed, the sync command must be run on a diff connection
-    new Mongo(s.s.host).adminCommand("connpoolsync");
-
-    print("done inserting data");
-
-    print("datasize: " +
-          tojson(s.getServer("test").getDB("admin").runCommand({datasize: "test.foo"})));
-    s.printChunks();
-
-    function doCountsGlobal() {
-        counta = s._connections[0].getDB("test").foo.count();
-        countb = s._connections[1].getDB("test").foo.count();
-        return counta + countb;
-    }
-
-    // Wait for the chunks to distribute
-    assert.soon(function() {
-        doCountsGlobal();
-        print("Counts: " + counta + countb);
-
-        return counta > 0 && countb > 0;
-    });
-
-    print("checkpoint B");
-
-    var missing = [];
-
-    for (i = 0; i < j * 100; i++) {
-        var x = coll.findOne({num: i});
-        if (!x) {
-            missing.push(i);
-            print("can't find: " + i);
-            sleep(5000);
-            x = coll.findOne({num: i});
-            if (!x) {
-                print("still can't find: " + i);
-
-                for (var zzz = 0; zzz < s._connections.length; zzz++) {
-                    if (s._connections[zzz].getDB("test").foo.findOne({num: i})) {
-                        print("found on wrong server: " + s._connections[zzz]);
-                    }
-                }
-            }
-        }
-    }
-
-    s.printChangeLog();
-
-    print("missing: " + tojson(missing));
-    assert.soon(function(z) {
-        return doCountsGlobal() == j * 100;
-    }, "from each a:" + counta + " b:" + countb + " i:" + i);
-    print("checkpoint B.a");
-    s.printChunks();
-    assert.eq(j * 100, coll.find().limit(100000000).itcount(), "itcount A");
-    assert.eq(j * 100, counta + countb, "from each 2 a:" + counta + " b:" + countb + " i:" + i);
-    assert(missing.length == 0, "missing : " + tojson(missing));
-
-    print("checkpoint C");
-
-    assert(Array.unique(s.config.chunks.find().toArray().map(function(z) {
-                            return z.shard;
-                        })).length == 2,
-           "should be using both servers");
-
-    for (i = 0; i < 100; i++) {
-        cursor = coll.find().batchSize(5);
-        cursor.next();
-        cursor = null;
-        gc();
-    }
-
-    print("checkpoint D");
-
-    // test not-sharded cursors
-    db = s.getDB("test2");
-    t = db.foobar;
-    for (i = 0; i < 100; i++)
-        t.save({_id: i});
-    for (i = 0; i < 100; i++) {
-        t.find().batchSize(2).next();
-        assert.lt(0, db.serverStatus().metrics.cursor.open.total, "cursor1");
-        gc();
-    }
-
-    for (i = 0; i < 100; i++) {
-        gc();
-    }
-    assert.eq(0, db.serverStatus().metrics.cursor.open.total, "cursor2");
-
-    // Stop the balancer, otherwise it may grab some connections from the pool for itself
-    s.stopBalancer();
-
-    print("checkpoint E");
-
-    assert(t.findOne(), "check close 0");
-
-    for (i = 0; i < 20; i++) {
-        var conn = new Mongo(db.getMongo().host);
-        temp2 = conn.getDB("test2").foobar;
-        assert.eq(conn._fullNameSpace, t._fullNameSpace, "check close 1");
-        assert(temp2.findOne(), "check close 2");
-        conn = null;
-        gc();
-    }
-
-    print("checkpoint F");
-
-    assert.throws(function() {
-        s.getDB("test").foo.find().sort({s: 1}).forEach(function(x) {
-            printjsononeline(x.substring(0, x.length > 30 ? 30 : x.length));
-        });
-    });
-
-    print("checkpoint G");
-
-    s.stop();
-
-})();
diff --git a/jstests/sharding/auto_rebalance.js b/jstests/sharding/auto_rebalance.js
index 3153442e5d0..1c20c1ce89f 100644
--- a/jstests/sharding/auto_rebalance.js
+++ b/jstests/sharding/auto_rebalance.js
@@ -5,7 +5,7 @@
     'use strict';
 
     var st = new ShardingTest(
-        {name: 'auto_rebalance_rs', mongos: 1, shards: 2, chunksize: 1, rs: {nodes: 3}});
+        {name: 'auto_rebalance_rs', mongos: 1, shards: 2, chunksize: 1, enableAutoSplit: true, rs: {nodes: 3}});
 
     assert.writeOK(st.getDB("config").settings.update(
         {_id: "balancer"}, {$set: {"_secondaryThrottle": false}}, {upsert: true}));
diff --git a/jstests/sharding/autosplit.js b/jstests/sharding/autosplit.js
new file mode 100644
index 00000000000..8786e05a646
--- /dev/null
+++ b/jstests/sharding/autosplit.js
@@ -0,0 +1,79 @@
+(function() {
+
+    var s = new ShardingTest({name: "auto1", shards: 2, mongos: 1, other: {enableAutoSplit: true}});
+
+    s.adminCommand({enablesharding: "test"});
+    s.ensurePrimaryShard('test', 'shard0001');
+    s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
+
+    bigString = "";
+    while (bigString.length < 1024 * 50)
+        bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
+
+    db = s.getDB("test");
+    coll = db.foo;
+
+    var i = 0;
+
+    var bulk = coll.initializeUnorderedBulkOp();
+    for (; i < 100; i++) {
+        bulk.insert({num: i, s: bigString});
+    }
+    assert.writeOK(bulk.execute());
+
+    primary = s.getServer("test").getDB("test");
+
+    counts = [];
+
+    s.printChunks();
+    counts.push(s.config.chunks.count());
+    assert.eq(100, db.foo.find().itcount());
+
+    print("datasize: " +
+          tojson(s.getServer("test").getDB("admin").runCommand({datasize: "test.foo"})));
+
+    bulk = coll.initializeUnorderedBulkOp();
+    for (; i < 200; i++) {
+        bulk.insert({num: i, s: bigString});
+    }
+    assert.writeOK(bulk.execute());
+
+    s.printChunks();
+    s.printChangeLog();
+    counts.push(s.config.chunks.count());
+
+    bulk = coll.initializeUnorderedBulkOp();
+    for (; i < 400; i++) {
+        bulk.insert({num: i, s: bigString});
+    }
+    assert.writeOK(bulk.execute());
+
+    s.printChunks();
+    s.printChangeLog();
+    counts.push(s.config.chunks.count());
+
+    bulk = coll.initializeUnorderedBulkOp();
+    for (; i < 700; i++) {
+        bulk.insert({num: i, s: bigString});
+    }
+    assert.writeOK(bulk.execute());
+
+    s.printChunks();
+    s.printChangeLog();
+    counts.push(s.config.chunks.count());
+
+    assert(counts[counts.length - 1] > counts[0], "counts 1 : " + tojson(counts));
+    sorted = counts.slice(0);
+    // Sort doesn't sort numbers correctly by default, resulting in fail
+    sorted.sort(function(a, b) {
+        return a - b;
+    });
+    assert.eq(counts, sorted, "counts 2 : " + tojson(counts));
+
+    print(counts);
+
+    printjson(db.stats());
+
+    s.stop();
+
+})();
diff --git a/jstests/sharding/autosplit_heuristics.js b/jstests/sharding/autosplit_heuristics.js
index c4d415ce0de..618541e314b 100644
--- a/jstests/sharding/autosplit_heuristics.js
+++ b/jstests/sharding/autosplit_heuristics.js
@@ -3,8 +3,11 @@
 // works as expected even after splitting.
 //
 
-var st =
-    new ShardingTest({shards: 1, mongos: 1, other: {mongosOptions: {chunkSize: 1, verbose: 2}}});
+var st = new ShardingTest({
+    shards: 1,
+    mongos: 1,
+    other: {mongosOptions: {chunkSize: 1, verbose: 2}, enableAutoSplit: true}
+});
 
 // The balancer is by default stopped, thus it will NOT interfere unpredictably with the chunk
 // moves/splits depending on the timing.
diff --git a/jstests/sharding/autosplit_with_balancer.js b/jstests/sharding/autosplit_with_balancer.js
new file mode 100644
index 00000000000..09054dc9bd2
--- /dev/null
+++ b/jstests/sharding/autosplit_with_balancer.js
@@ -0,0 +1,152 @@
+(function() {
+
+    var s = new ShardingTest({name: "auto2", shards: 2, mongos: 2, other: {enableAutoSplit: true}});
+
+    s.adminCommand({enablesharding: "test"});
+    s.ensurePrimaryShard('test', 'shard0001');
+    s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
+
+    var bigString = "";
+    while (bigString.length < 1024 * 50) {
+        bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
+    }
+
+    var db = s.getDB("test");
+    var coll = db.foo;
+
+    var i = 0;
+    for (var j = 0; j < 30; j++) {
+        print("j:" + j + " : " +
+              Date.timeFunc(function() {
+                  var bulk = coll.initializeUnorderedBulkOp();
+                  for (var k = 0; k < 100; k++) {
+                      bulk.insert({num: i, s: bigString});
+                      i++;
+                  }
+                  assert.writeOK(bulk.execute());
+              }));
+    }
+
+    s.startBalancer();
+
+    assert.eq(i, j * 100, "setup");
+
+    // Until SERVER-9715 is fixed, the sync command must be run on a diff connection
+    new Mongo(s.s.host).adminCommand("connpoolsync");
+
+    print("done inserting data");
+
+    print("datasize: " +
+          tojson(s.getServer("test").getDB("admin").runCommand({datasize: "test.foo"})));
+    s.printChunks();
+
+    function doCountsGlobal() {
+        counta = s._connections[0].getDB("test").foo.count();
+        countb = s._connections[1].getDB("test").foo.count();
+        return counta + countb;
+    }
+
+    // Wait for the chunks to distribute
+    assert.soon(function() {
+        doCountsGlobal();
+        print("Counts: " + counta + countb);
+
+        return counta > 0 && countb > 0;
+    });
+
+    print("checkpoint B");
+
+    var missing = [];
+
+    for (i = 0; i < j * 100; i++) {
+        var x = coll.findOne({num: i});
+        if (!x) {
+            missing.push(i);
+            print("can't find: " + i);
+            sleep(5000);
+            x = coll.findOne({num: i});
+            if (!x) {
+                print("still can't find: " + i);
+
+                for (var zzz = 0; zzz < s._connections.length; zzz++) {
+                    if (s._connections[zzz].getDB("test").foo.findOne({num: i})) {
+                        print("found on wrong server: " + s._connections[zzz]);
+                    }
+                }
+            }
+        }
+    }
+
+    s.printChangeLog();
+
+    print("missing: " + tojson(missing));
+    assert.soon(function(z) {
+        return doCountsGlobal() == j * 100;
+    }, "from each a:" + counta + " b:" + countb + " i:" + i);
+    print("checkpoint B.a");
+    s.printChunks();
+    assert.eq(j * 100, coll.find().limit(100000000).itcount(), "itcount A");
+    assert.eq(j * 100, counta + countb, "from each 2 a:" + counta + " b:" + countb + " i:" + i);
+    assert(missing.length == 0, "missing : " + tojson(missing));
+
+    print("checkpoint C");
+
+    assert(Array.unique(s.config.chunks.find().toArray().map(function(z) {
+                            return z.shard;
+                        })).length == 2,
+           "should be using both servers");
+
+    for (i = 0; i < 100; i++) {
+        cursor = coll.find().batchSize(5);
+        cursor.next();
+        cursor = null;
+        gc();
+    }
+
+    print("checkpoint D");
+
+    // test not-sharded cursors
+    db = s.getDB("test2");
+    t = db.foobar;
+    for (i = 0; i < 100; i++)
+        t.save({_id: i});
+    for (i = 0; i < 100; i++) {
+        t.find().batchSize(2).next();
+        assert.lt(0, db.serverStatus().metrics.cursor.open.total, "cursor1");
+        gc();
+    }
+
+    for (i = 0; i < 100; i++) {
+        gc();
+    }
+    assert.eq(0, db.serverStatus().metrics.cursor.open.total, "cursor2");
+
+    // Stop the balancer, otherwise it may grab some connections from the pool for itself
+    s.stopBalancer();
+
+    print("checkpoint E");
+
+    assert(t.findOne(), "check close 0");
+
+    for (i = 0; i < 20; i++) {
+        var conn = new Mongo(db.getMongo().host);
+        temp2 = conn.getDB("test2").foobar;
+        assert.eq(conn._fullNameSpace, t._fullNameSpace, "check close 1");
+        assert(temp2.findOne(), "check close 2");
+        conn = null;
+        gc();
+    }
+
+    print("checkpoint F");
+
+    assert.throws(function() {
+        s.getDB("test").foo.find().sort({s: 1}).forEach(function(x) {
+            printjsononeline(x.substring(0, x.length > 30 ? 30 : x.length));
+        });
+    });
+
+    print("checkpoint G");
+
+    s.stop();
+
+})();
diff --git a/jstests/sharding/conf_server_write_concern.js b/jstests/sharding/conf_server_write_concern.js
index c4e08939548..f7c68e477e4 100644
--- a/jstests/sharding/conf_server_write_concern.js
+++ b/jstests/sharding/conf_server_write_concern.js
@@ -22,11 +22,11 @@ function writeToConfigTest() {
 
 /**
  * Test write concern with w parameter will not cause an error when writes to mongos
- * would trigger writes to config servers (in this test, split chunks is used).
+ * would trigger writes to config servers (in this test, autosplit is used).
  */
 function configTest() {
     jsTestLog("Testing metadata writes to config server with write concern");
 
-    var st = new ShardingTest({shards: 1, rs: true, other: {chunkSize: 1}});
+    var st = new ShardingTest({shards: 1, rs: true, other: {chunkSize: 1, enableAutoSplit: true}});
     var mongos = st.s;
     var testDB = mongos.getDB('test');
diff --git a/jstests/sharding/csrs_upgrade/csrs_upgrade_mongod_using_movechunk.js b/jstests/sharding/csrs_upgrade/csrs_upgrade_mongod_using_movechunk.js
index 9965d9c1b8d..57ca122ad5f 100644
--- a/jstests/sharding/csrs_upgrade/csrs_upgrade_mongod_using_movechunk.js
+++ b/jstests/sharding/csrs_upgrade/csrs_upgrade_mongod_using_movechunk.js
@@ -16,7 +16,7 @@ var st;
     var dataCollectionName = testDBName + ".data";
 
     jsTest.log("Setting up CSRS sharded cluster");
-    st = new ShardingTest({name: "csrs", mongos: 2, mongosOptions: {noAutoSplit: ""}, shards: 2});
+    st = new ShardingTest({name: "csrs", mongos: 2, shards: 2});
 
     jsTest.log("Enabling sharding on " + testDBName);
     assert.commandWorked(st.s0.adminCommand({enablesharding: testDBName}));
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index ab387870751..e7d54ac85e4 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -3,8 +3,7 @@
 (function() {
 
     // Turn off auto-splitting, because this test handles chunk splitting manually.
-    var s = new ShardingTest(
-        {name: "sharding_cursor1", shards: 2, other: {mongosOptions: {noAutoSplit: ""}}});
+    var s = new ShardingTest({name: "sharding_cursor1", shards: 2});
     s.config.settings.find().forEach(printjson);
 
     // create a sharded 'test.foo', for the moment with just one chunk
diff --git a/jstests/sharding/disable_autosplit.js b/jstests/sharding/disable_autosplit.js
index c6b7b7d5e1f..f5e5cce1f2c 100644
--- a/jstests/sharding/disable_autosplit.js
+++ b/jstests/sharding/disable_autosplit.js
@@ -4,8 +4,7 @@
 
     var chunkSize = 1;  // In MB
 
-    var st = new ShardingTest(
-        {shards: 1, mongos: 1, other: {chunksize: chunkSize, mongosOptions: {noAutoSplit: ""}}});
+    var st = new ShardingTest({shards: 1, mongos: 1, other: {chunksize: chunkSize}});
 
     var data = "x";
     while (data.length < chunkSize * 1024 * 1024) {
diff --git a/jstests/sharding/findandmodify2.js b/jstests/sharding/findandmodify2.js
index 525788e70f9..1de7f8556ee 100644
--- a/jstests/sharding/findandmodify2.js
+++ b/jstests/sharding/findandmodify2.js
@@ -1,5 +1,9 @@
-var s = new ShardingTest(
-    {name: "find_and_modify_sharded_2", shards: 2, mongos: 1, other: {chunkSize: 1}});
+var s = new ShardingTest({
+    name: "find_and_modify_sharded_2",
+    shards: 2,
+    mongos: 1,
+    other: {chunkSize: 1, enableAutoSplit: true}
+});
 s.adminCommand({enablesharding: "test"});
 
 var db = s.getDB("test");
diff --git a/jstests/sharding/forget_mr_temp_ns.js b/jstests/sharding/forget_mr_temp_ns.js
index fd950bcf43c..6043b300455 100644
--- a/jstests/sharding/forget_mr_temp_ns.js
+++ b/jstests/sharding/forget_mr_temp_ns.js
@@ -2,7 +2,7 @@
 // Tests whether we forget M/R's temporary namespaces for sharded output
 //
 
-var st = new ShardingTest({shards: 1, mongos: 1});
+var st = new ShardingTest({shards: 1, mongos: 1, other: {enableAutoSplit: true}});
 
 var mongos = st.s0;
 var admin = mongos.getDB("admin");
diff --git a/jstests/sharding/in_memory_sort_limit.js b/jstests/sharding/in_memory_sort_limit.js
index 1c7e8c73447..1dd9bc4e37c 100644
--- a/jstests/sharding/in_memory_sort_limit.js
+++ b/jstests/sharding/in_memory_sort_limit.js
@@ -4,7 +4,7 @@
 (function() {
     "use strict";
 
-    var st = new ShardingTest({shards: 2});
+    var st = new ShardingTest({shards: 2, other: {enableAutoSplit: true}});
     var db = st.s.getDB('test');
     var mongosCol = db.getCollection('skip');
     db.adminCommand({enableSharding: 'test'});
diff --git a/jstests/sharding/movechunk_with_default_paranoia.js b/jstests/sharding/movechunk_with_default_paranoia.js
index a6f4704ec90..a5cc5421093 100644
--- a/jstests/sharding/movechunk_with_default_paranoia.js
+++ b/jstests/sharding/movechunk_with_default_paranoia.js
@@ -2,7 +2,7 @@
  * This test checks that moveParanoia defaults to off (ie the moveChunk directory will not
  * be created).
  */
-var st = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1}});
+var st = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
 
 load("jstests/sharding/movechunk_include.js");
 setupMoveChunkTest(st);
diff --git a/jstests/sharding/movechunk_with_moveParanoia.js b/jstests/sharding/movechunk_with_moveParanoia.js
index 96348d827bf..73b5e37cd61 100644
--- a/jstests/sharding/movechunk_with_moveParanoia.js
+++ b/jstests/sharding/movechunk_with_moveParanoia.js
@@ -1,8 +1,11 @@
 /**
  * This test sets moveParanoia flag and then check that the directory is created with the moved data
  */
-var st = new ShardingTest(
-    {shards: 2, mongos: 1, other: {chunkSize: 1, shardOptions: {moveParanoia: ""}}});
+var st = new ShardingTest({
+    shards: 2,
+    mongos: 1,
+    other: {chunkSize: 1, enableAutoSplit: true, shardOptions: {moveParanoia: ""}}
+});
 
 load("jstests/sharding/movechunk_include.js");
 setupMoveChunkTest(st);
diff --git a/jstests/sharding/movechunk_with_noMoveParanoia.js b/jstests/sharding/movechunk_with_noMoveParanoia.js
index ae8ef5899a8..5d9cb8f090d 100644
--- a/jstests/sharding/movechunk_with_noMoveParanoia.js
+++ b/jstests/sharding/movechunk_with_noMoveParanoia.js
@@ -1,8 +1,11 @@
 /**
  * This test sets moveParanoia flag and then check that the directory is created with the moved data
 */
-var st = new ShardingTest(
-    {shards: 2, mongos: 1, other: {chunkSize: 1, shardOptions: {noMoveParanoia: ""}}});
+var st = new ShardingTest({
+    shards: 2,
+    mongos: 1,
+    other: {chunkSize: 1, enableAutoSplit: true, shardOptions: {noMoveParanoia: ""}}
+});
 
 load("jstests/sharding/movechunk_include.js");
 setupMoveChunkTest(st);
diff --git a/jstests/sharding/mrShardedOutput.js b/jstests/sharding/mrShardedOutput.js
index 6224b49f2dc..3ba3dacdaf9 100644
--- a/jstests/sharding/mrShardedOutput.js
+++ b/jstests/sharding/mrShardedOutput.js
@@ -4,7 +4,7 @@
 // collection input twice the size of the first and outputs it to the new sharded
 // collection created in the first pass.
 
-var st = new ShardingTest({shards: 2, other: {chunkSize: 1}});
+var st = new ShardingTest({shards: 2, other: {chunkSize: 1, enableAutoSplit: true}});
 
 var config = st.getDB("config");
 st.adminCommand({enablesharding: "test"});
diff --git a/jstests/sharding/mrShardedOutputAuth.js b/jstests/sharding/mrShardedOutputAuth.js
index 5e3b25e67cc..5fc1282d5b2 100644
--- a/jstests/sharding/mrShardedOutputAuth.js
+++ b/jstests/sharding/mrShardedOutputAuth.js
@@ -42,7 +42,7 @@
         name: "mrShardedOutputAuth",
         shards: 1,
         mongos: 1,
-        other: {extraOptions: {"keyFile": "jstests/libs/key1"}}
+        other: {enableAutoSplit: true, extraOptions: {"keyFile": "jstests/libs/key1"}}
     });
 
     // Setup the users to the input, output and admin databases
diff --git a/jstests/sharding/sharding_balance4.js b/jstests/sharding/sharding_balance4.js
index aaa9cbd02b2..5ca3c00ee5a 100644
--- a/jstests/sharding/sharding_balance4.js
+++ b/jstests/sharding/sharding_balance4.js
@@ -1,8 +1,12 @@
 // Check that doing updates done during a migrate all go to the right place
 (function() {
 
-    var s = new ShardingTest(
-        {name: "slow_sharding_balance4", shards: 2, mongos: 1, other: {chunkSize: 1}});
+    var s = new ShardingTest({
+        name: "slow_sharding_balance4",
+        shards: 2,
+        mongos: 1,
+        other: {chunkSize: 1, enableAutoSplit: true}
+    });
 
     s.adminCommand({enablesharding: "test"});
     s.ensurePrimaryShard('test', 'shard0001');
diff --git a/jstests/sharding/split_with_force.js b/jstests/sharding/split_with_force.js
index c66d2f145eb..3337e3e4a59 100644
--- a/jstests/sharding/split_with_force.js
+++ b/jstests/sharding/split_with_force.js
@@ -3,8 +3,7 @@
 //
 
 var options = {
-    chunkSize: 1,  // MB
-    mongosOptions: {noAutoSplit: ""}
+    chunkSize: 1  // MB
 };
 
 var st = new ShardingTest({shards: 1, mongos: 1, other: options});
diff --git a/jstests/sharding/split_with_force_small.js b/jstests/sharding/split_with_force_small.js
index ad14f8642cb..0148c924993 100644
--- a/jstests/sharding/split_with_force_small.js
+++ b/jstests/sharding/split_with_force_small.js
@@ -3,8 +3,7 @@
 //
 
 var options = {
-    chunkSize: 1,  // MB
-    mongosOptions: {noAutoSplit: ""}
+    chunkSize: 1  // MB
 };
 
 var st = new ShardingTest({shards: 1, mongos: 1, other: options});
diff --git a/jstests/sharding/top_chunk_autosplit.js b/jstests/sharding/top_chunk_autosplit.js
index 74a3e942cae..e91d5872b59 100644
--- a/jstests/sharding/top_chunk_autosplit.js
+++ b/jstests/sharding/top_chunk_autosplit.js
@@ -100,7 +100,8 @@ function runTest(test) {
 // Main
 var dbName = "test";
 var collName = "topchunk";
-var st = shardSetup({name: "topchunk", shards: 4, chunkSize: 1}, dbName, collName);
+var st = shardSetup(
+    {name: "topchunk", shards: 4, chunkSize: 1, other: {enableAutoSplit: true}}, dbName, collName);
 var db = st.getDB(dbName);
 var coll = db[collName];
 var configDB = st.s.getDB('config');
@@ -294,7 +295,9 @@ for (var i = 0; i < tests.length; i++) {
 st.stop();
 
 // Single node shard Tests
-st = shardSetup({name: "singleNode", shards: 1, chunkSize: 1}, dbName, collName);
+st = shardSetup({name: "singleNode", shards: 1, chunkSize: 1, other: {enableAutoSplit: true}},
+                dbName,
+                collName);
 db = st.getDB(dbName);
 coll = db[collName];
 configDB = st.s.getDB('config');
@@ -331,7 +334,14 @@ st.stop();
 // maxSize test
 // To set maxSize, must manually add the shards
 st = shardSetup(
-    {name: "maxSize", shards: 2, chunkSize: 1, other: {manualAddShard: true}}, dbName, collName);
+    {
+      name: "maxSize",
+      shards: 2,
+      chunkSize: 1,
+      other: {manualAddShard: true, enableAutoSplit: true}
+    },
+    dbName,
+    collName);
 db = st.getDB(dbName);
 coll = db[collName];
 configDB = st.s.getDB('config');
diff --git a/jstests/sharding/write_cmd_auto_split.js b/jstests/sharding/write_cmd_auto_split.js
index 110fa7ddd9f..ebd554b480d 100644
--- a/jstests/sharding/write_cmd_auto_split.js
+++ b/jstests/sharding/write_cmd_auto_split.js
@@ -4,7 +4,7 @@
 (function() {
     'use strict';
 
-    var st = new ShardingTest({shards: 1, other: {chunkSize: 1}});
+    var st = new ShardingTest({shards: 1, other: {chunkSize: 1, enableAutoSplit: true}});
 
     var configDB = st.s.getDB('config');
     assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
diff --git a/jstests/sharding/zbigMapReduce.js b/jstests/sharding/zbigMapReduce.js
index fda81e12df8..4872592e533 100644
--- a/jstests/sharding/zbigMapReduce.js
+++ b/jstests/sharding/zbigMapReduce.js
@@ -9,7 +9,7 @@ function setupTest() {
             numReplicas: 2,
             chunkSize: 1,
             rsOptions: {oplogSize: 50},
-            enableBalancer: 1
+            enableBalancer: true
         }
     });
 
diff --git a/jstests/ssl/libs/ssl_helpers.js b/jstests/ssl/libs/ssl_helpers.js
index 5fab2f1f030..5b9ad0142a5 100644
--- a/jstests/ssl/libs/ssl_helpers.js
+++ b/jstests/ssl/libs/ssl_helpers.js
@@ -62,8 +62,12 @@ var replShouldFail = function(name, opt1, opt2) {
  */
 function mixedShardTest(options1, options2, shouldSucceed) {
     try {
-        var st = new ShardingTest(
-            {mongos: [options1], config: [options1], shards: [options1, options2]});
+        var st = new ShardingTest({
+            mongos: [options1],
+            config: [options1],
+            shards: [options1, options2],
+            other: {enableAutoSplit: true}
+        });
         st.stopBalancer();
 
         // Test mongos talking to config servers
diff --git a/jstests/tool/dumprestore9.js b/jstests/tool/dumprestore9.js
index 69797603c16..fb0a9d694e3 100644
--- a/jstests/tool/dumprestore9.js
+++ b/jstests/tool/dumprestore9.js
@@ -13,7 +13,7 @@ if (0) {
         name: "dumprestore9a",
         shards: 2,
         mongos: 3,
-        other: {chunkSize: 1, enableBalancer: 1}
+        other: {chunkSize: 1, enableBalancer: true}
     });
 
     step("Shard collection");
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index 7f630ae01f4..e9df265f39b 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -952,6 +952,15 @@ var ShardingTest = function(params) {
     var numMongos = otherParams.hasOwnProperty('mongos') ? otherParams.mongos : 1;
     var numConfigs = otherParams.hasOwnProperty('config') ? otherParams.config : 3;
 
+    // Default enableBalancer to false.
+    otherParams.enableBalancer =
+        ("enableBalancer" in otherParams) && (otherParams.enableBalancer === true);
+
+    // Let autosplit behavior match that of the balancer if autosplit is not explicitly set.
+    if (!("enableAutoSplit" in otherParams)) {
+        otherParams.enableAutoSplit = otherParams.enableBalancer;
+    }
+
     // Allow specifying mixed-type options like this:
     // { mongos : [ { noprealloc : "" } ],
     //   config : [ { smallfiles : "" } ],
@@ -1347,6 +1356,11 @@
 
         options.port = options.port || allocatePort();
 
+        // Disable autosplitting unless it is explicitly turned on.
+        if (!otherParams.enableAutoSplit) {
+            options.noAutoSplit = "";
+        }
+
         if (otherParams.useBridge) {
             var bridgeOptions =
                 Object.merge(otherParams.bridgeOptions, options.bridgeOptions || {});
-- 
cgit v1.2.1
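
The net effect of the two shardingtest.js hunks above: ShardingTest now defaults `enableBalancer` to false, defaults `enableAutoSplit` to the value of `enableBalancer`, and passes `--noAutoSplit` to every mongos unless auto-splitting ends up enabled. A minimal sketch of what test authors see after this patch (shell JavaScript; the variable names are illustrative, not from the commit):

    // Autosplit and the balancer are now both off by default:
    var stDefault = new ShardingTest({shards: 1, mongos: 1});

    // Tests that exercise auto-splitting must opt in explicitly through 'other':
    var stAutoSplit =
        new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});

    // Enabling the balancer implies auto-splitting unless it is explicitly disabled:
    var stBalanced = new ShardingTest({shards: 2, mongos: 1, other: {enableBalancer: true}});

    stDefault.stop();
    stAutoSplit.stop();
    stBalanced.stop();

This is why tests such as disable_autosplit.js, cursor1.js, split_with_force.js, and split_with_force_small.js can drop their explicit `mongosOptions: {noAutoSplit: ""}`, while tests that depend on auto-splitting gain `enableAutoSplit: true`.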