author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2016-06-12 18:53:48 -0400
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2016-06-13 15:19:44 -0400
commit     4649ed8d47ac507d195d9fb4204f1cfa40e4e363 (patch)
tree       2a26d28f9de7bcdf8ffcd289db4849773287f99a
parent     97ad19ed5bc71835e9783b932411b0ea9f83d572 (diff)
download   mongo-4649ed8d47ac507d195d9fb4204f1cfa40e4e363.tar.gz
SERVER-24467 Add assert.commandWorked to unit-tests
Also makes them 'use strict'.
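
The change is mechanical but worth spelling out: every shell command whose
reply was previously discarded is wrapped in assert.commandWorked(), and each
test body is enclosed in an immediately-invoked function expression running in
strict mode. A minimal before/after sketch of the pattern (the database name
'mydb' is illustrative, not taken from the diff below):

    // Before: the reply is discarded, so a failed command goes unnoticed and
    // the test only breaks later, with a confusing unrelated error.
    var adminDB = st.s.getDB('admin');
    adminDB.runCommand({enableSharding: 'mydb'});

    // After: the test aborts at the first failed command, and 'use strict'
    // turns accidental globals and undeclared variables into errors.
    (function() {
        'use strict';

        assert.commandWorked(st.s0.adminCommand({enableSharding: 'mydb'}));
    })();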
-rw-r--r--  jstests/sharding/replmonitor_bad_seed.js          61
-rw-r--r--  jstests/sharding/shard2.js                       432
-rw-r--r--  jstests/sharding/shard_with_special_db_names.js   15
-rw-r--r--  jstests/sharding/top_chunk_autosplit.js          119
4 files changed, 319 insertions, 308 deletions
diff --git a/jstests/sharding/replmonitor_bad_seed.js b/jstests/sharding/replmonitor_bad_seed.js
index 95d6d7526a8..0bde640569c 100644
--- a/jstests/sharding/replmonitor_bad_seed.js
+++ b/jstests/sharding/replmonitor_bad_seed.js
@@ -12,47 +12,44 @@
* connPoolStats to synchronize the test and make sure that the monitor
* was able to refresh before proceeding to check.
*/
+(function() {
+ 'use strict';
-var rsOpt = {oplogSize: 10};
-var st = new ShardingTest({shards: 1, rs: rsOpt});
-var mongos = st.s;
-var replTest = st.rs0;
+ var st = new ShardingTest({shards: 1, rs: {oplogSize: 10}});
+ var replTest = st.rs0;
-var adminDB = mongos.getDB('admin');
-// adminDB.runCommand({ addShard: replTest.getURL() });
+ assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
+ assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
-adminDB.runCommand({enableSharding: 'test'});
-adminDB.runCommand({shardCollection: 'test.user', key: {x: 1}});
+ // The cluster now has the shard information. Then kill the replica set so when mongos restarts
+ // and tries to create a ReplSetMonitor for that shard, it will not be able to connect to any of
+ // the seed servers.
+ replTest.stopSet();
-/* The cluster now has the shard information. Then kill the replica set so
- * when mongos restarts and tries to create a ReplSetMonitor for that shard,
- * it will not be able to connect to any of the seed servers.
- */
-replTest.stopSet();
-st.restartMongos(0);
-mongos = st.s; // refresh mongos with the new one
+ st.restartMongos(0);
-var coll = mongos.getDB('test').user;
+ var coll = st.s0.getDB('test').user;
-var verifyInsert = function() {
- var beforeCount = coll.find().count();
- coll.insert({x: 1});
- var afterCount = coll.find().count();
+ var verifyInsert = function() {
+ var beforeCount = coll.find().count();
+ coll.insert({x: 1});
+ var afterCount = coll.find().count();
- assert.eq(beforeCount + 1, afterCount);
-};
+ assert.eq(beforeCount + 1, afterCount);
+ };
-jsTest.log('Insert to a downed replSet');
-assert.throws(verifyInsert);
+ jsTest.log('Insert to a downed replSet');
+ assert.throws(verifyInsert);
-replTest.startSet({oplogSize: 10});
-replTest.initiate();
-replTest.awaitSecondaryNodes();
+ replTest.startSet({oplogSize: 10});
+ replTest.initiate();
+ replTest.awaitSecondaryNodes();
-jsTest.log('Insert to an online replSet');
+ jsTest.log('Insert to an online replSet');
+ verifyInsert();
-// Verify that the replSetMonitor can reach the restarted set.
-ReplSetTest.awaitRSClientHosts(mongos, replTest.nodes, {ok: true});
-verifyInsert();
+ // Verify that the replSetMonitor can reach the restarted set.
+ ReplSetTest.awaitRSClientHosts(st.s0, replTest.nodes, {ok: true});
-st.stop();
+ st.stop();
+})();
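
This test shows what the helper buys: had enableSharding or shardCollection
silently failed, the test would have proceeded against an unsharded collection
and broken much later in a misleading way. Conceptually, assert.commandWorked()
just validates the 'ok' field of the command reply; a simplified sketch
(illustrative only; the real shell helper also handles undefined results,
write concern errors, and other edge cases):

    // Simplified sketch of assert.commandWorked(); not the actual implementation.
    function commandWorkedSketch(res, msg) {
        assert(res !== undefined, 'command result is undefined: ' + (msg || ''));
        // Command replies signal success through their 'ok' field.
        if (res.ok !== 1) {
            doassert('command failed: ' + tojson(res) + ' : ' + (msg || ''));
        }
        return res;
    }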
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index 6f563aebbb0..5f576b07dc3 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -1,238 +1,250 @@
-// shard2.js
+(function() {
+ 'use strict';
-/**
-* test basic sharding
-*/
-
-placeCheck = function(num) {
- print("shard2 step: " + num);
-};
-
-printAll = function() {
- print("****************");
- db.foo.find().forEach(printjsononeline);
- print("++++++++++++++++++");
- primary.foo.find().forEach(printjsononeline);
- print("++++++++++++++++++");
- secondary.foo.find().forEach(printjsononeline);
- print("---------------------");
-};
-
-s = new ShardingTest({name: "shard2", shards: 2});
-
-// We're doing a lot of checks here that can get screwed up by the balancer, now that
-// it moves small #s of chunks too
-s.stopBalancer();
-
-db = s.getDB("test");
-
-s.adminCommand({enablesharding: "test"});
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
-assert.eq(1, s.config.chunks.count(), "sanity check 1");
-
-s.adminCommand({split: "test.foo", middle: {num: 0}});
-assert.eq(2, s.config.chunks.count(), "should be 2 shards");
-chunks = s.config.chunks.find().toArray();
-assert.eq(chunks[0].shard, chunks[1].shard, "server should be the same after a split");
-
-db.foo.save({num: 1, name: "eliot"});
-db.foo.save({num: 2, name: "sara"});
-db.foo.save({num: -1, name: "joe"});
-
-assert.eq(
- 3, s.getPrimaryShard("test").getDB("test").foo.find().length(), "not right directly to db A");
-assert.eq(3, db.foo.find().length(), "not right on shard");
-
-primary = s.getPrimaryShard("test").getDB("test");
-secondary = s.getOther(primary).getDB("test");
-
-assert.eq(3, primary.foo.find().length(), "primary wrong B");
-assert.eq(0, secondary.foo.find().length(), "secondary wrong C");
-assert.eq(3, db.foo.find().sort({num: 1}).length());
-
-placeCheck(2);
-
-// NOTE: at this point we have 2 shard on 1 server
-
-// test move shard
-assert.throws(function() {
- s.adminCommand(
- {movechunk: "test.foo", find: {num: 1}, to: primary.getMongo().name, _waitForDelete: true});
-});
-assert.throws(function() {
- s.adminCommand({movechunk: "test.foo", find: {num: 1}, to: "adasd", _waitForDelete: true});
-});
-
-s.adminCommand(
- {movechunk: "test.foo", find: {num: 1}, to: secondary.getMongo().name, _waitForDelete: true});
-assert.eq(2, secondary.foo.find().length(), "secondary should have 2 after move shard");
-assert.eq(1, primary.foo.find().length(), "primary should only have 1 after move shard");
+ function placeCheck(num) {
+ print("shard2 step: " + num);
+ }
-assert.eq(
- 2, s.config.chunks.count(), "still should have 2 shards after move not:" + s.getChunksString());
-chunks = s.config.chunks.find().toArray();
-assert.neq(chunks[0].shard, chunks[1].shard, "servers should NOT be the same after the move");
+ function printAll() {
+ print("****************");
+ db.foo.find().forEach(printjsononeline);
+ print("++++++++++++++++++");
+ primary.foo.find().forEach(printjsononeline);
+ print("++++++++++++++++++");
+ secondary.foo.find().forEach(printjsononeline);
+ print("---------------------");
+ }
-placeCheck(3);
+ var s = new ShardingTest({shards: 2});
+ var db = s.getDB("test");
+
+ assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+ s.ensurePrimaryShard('test', s.shard1.shardName);
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
+ assert.eq(1, s.config.chunks.count(), "sanity check 1");
+
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 0}}));
+ assert.eq(2, s.config.chunks.count(), "should be 2 shards");
+ var chunks = s.config.chunks.find().toArray();
+ assert.eq(chunks[0].shard, chunks[1].shard, "server should be the same after a split");
+
+ assert.writeOK(db.foo.save({num: 1, name: "eliot"}));
+ assert.writeOK(db.foo.save({num: 2, name: "sara"}));
+ assert.writeOK(db.foo.save({num: -1, name: "joe"}));
+
+ assert.eq(3,
+ s.getPrimaryShard("test").getDB("test").foo.find().length(),
+ "not right directly to db A");
+ assert.eq(3, db.foo.find().length(), "not right on shard");
+
+ var primary = s.getPrimaryShard("test").getDB("test");
+ var secondary = s.getOther(primary).getDB("test");
+
+ assert.eq(3, primary.foo.find().length(), "primary wrong B");
+ assert.eq(0, secondary.foo.find().length(), "secondary wrong C");
+ assert.eq(3, db.foo.find().sort({num: 1}).length());
+
+ placeCheck(2);
+
+ // NOTE: at this point we have 2 shards on 1 server
+
+ // Test move to the same shard
+ assert.commandFailed(s.s0.adminCommand({
+ movechunk: "test.foo",
+ find: {num: 1},
+ to: primary.getMongo().name,
+ _waitForDelete: true
+ }));
+
+ // Test moving a chunk to a non-existent shard
+ assert.commandFailedWithCode(
+ s.s0.adminCommand(
+ {movechunk: "test.foo", find: {num: 1}, to: "adasd", _waitForDelete: true}),
+ ErrorCodes.ShardNotFound);
+
+ assert.commandWorked(s.s0.adminCommand({
+ movechunk: "test.foo",
+ find: {num: 1},
+ to: secondary.getMongo().name,
+ _waitForDelete: true
+ }));
+ assert.eq(2, secondary.foo.find().length(), "secondary should have 2 after move shard");
+ assert.eq(1, primary.foo.find().length(), "primary should only have 1 after move shard");
+
+ assert.eq(2,
+ s.config.chunks.count(),
+ "still should have 2 shards after move not:" + s.getChunksString());
+ var chunks = s.config.chunks.find().toArray();
+ assert.neq(chunks[0].shard, chunks[1].shard, "servers should NOT be the same after the move");
+
+ placeCheck(3);
+
+ // Test inserts go to right server/shard
+ assert.writeOK(db.foo.save({num: 3, name: "bob"}));
+ assert.eq(1, primary.foo.find().length(), "after move insert go wrong place?");
+ assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
+
+ assert.writeOK(db.foo.save({num: -2, name: "funny man"}));
+ assert.eq(2, primary.foo.find().length(), "after move insert go wrong place?");
+ assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
+
+ assert.writeOK(db.foo.save({num: 0, name: "funny guy"}));
+ assert.eq(2, primary.foo.find().length(), "boundary A");
+ assert.eq(4, secondary.foo.find().length(), "boundary B");
+
+ placeCheck(4);
+
+ // findOne
+ assert.eq("eliot", db.foo.findOne({num: 1}).name);
+ assert.eq("funny man", db.foo.findOne({num: -2}).name);
+
+ // getAll
+ function sumQuery(c) {
+ var sum = 0;
+ c.toArray().forEach(function(z) {
+ sum += z.num;
+ });
+ return sum;
+ }
+ assert.eq(6, db.foo.find().length(), "sharded query 1");
+ assert.eq(3, sumQuery(db.foo.find()), "sharded query 2");
-// test inserts go to right server/shard
+ placeCheck(5);
-assert.writeOK(db.foo.save({num: 3, name: "bob"}));
-assert.eq(1, primary.foo.find().length(), "after move insert go wrong place?");
-assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
+ // sort by num
-assert.writeOK(db.foo.save({num: -2, name: "funny man"}));
-assert.eq(2, primary.foo.find().length(), "after move insert go wrong place?");
-assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
+ assert.eq(3, sumQuery(db.foo.find().sort({num: 1})), "sharding query w/sort 1");
+ assert.eq(3, sumQuery(db.foo.find().sort({num: -1})), "sharding query w/sort 2");
-assert.writeOK(db.foo.save({num: 0, name: "funny guy"}));
-assert.eq(2, primary.foo.find().length(), "boundary A");
-assert.eq(4, secondary.foo.find().length(), "boundary B");
+ assert.eq(
+ "funny man", db.foo.find().sort({num: 1})[0].name, "sharding query w/sort 3 order wrong");
+ assert.eq(-2, db.foo.find().sort({num: 1})[0].num, "sharding query w/sort 4 order wrong");
-placeCheck(4);
+ assert.eq("bob", db.foo.find().sort({num: -1})[0].name, "sharding query w/sort 5 order wrong");
+ assert.eq(3, db.foo.find().sort({num: -1})[0].num, "sharding query w/sort 6 order wrong");
-// findOne
-assert.eq("eliot", db.foo.findOne({num: 1}).name);
-assert.eq("funny man", db.foo.findOne({num: -2}).name);
+ placeCheck(6);
-// getAll
-function sumQuery(c) {
- var sum = 0;
- c.toArray().forEach(function(z) {
- sum += z.num;
+ // Sort by name
+ function getNames(c) {
+ return c.toArray().map(function(z) {
+ return z.name;
+ });
+ }
+ var correct = getNames(db.foo.find()).sort();
+ assert.eq(correct, getNames(db.foo.find().sort({name: 1})));
+ correct = correct.reverse();
+ assert.eq(correct, getNames(db.foo.find().sort({name: -1})));
+
+ assert.eq(3, sumQuery(db.foo.find().sort({name: 1})), "sharding query w/non-shard sort 1");
+ assert.eq(3, sumQuery(db.foo.find().sort({name: -1})), "sharding query w/non-shard sort 2");
+
+ // sort by num multiple shards per server
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 2}}));
+ assert.eq("funny man",
+ db.foo.find().sort({num: 1})[0].name,
+ "sharding query w/sort and another split 1 order wrong");
+ assert.eq("bob",
+ db.foo.find().sort({num: -1})[0].name,
+ "sharding query w/sort and another split 2 order wrong");
+ assert.eq("funny man",
+ db.foo.find({num: {$lt: 100}}).sort({num: 1}).arrayAccess(0).name,
+ "sharding query w/sort and another split 3 order wrong");
+
+ placeCheck(7);
+
+ db.foo.find().sort({_id: 1}).forEach(function(z) {
+ print(z._id);
});
- return sum;
-}
-assert.eq(6, db.foo.find().length(), "sharded query 1");
-assert.eq(3, sumQuery(db.foo.find()), "sharded query 2");
-
-placeCheck(5);
-
-// sort by num
-assert.eq(3, sumQuery(db.foo.find().sort({num: 1})), "sharding query w/sort 1");
-assert.eq(3, sumQuery(db.foo.find().sort({num: -1})), "sharding query w/sort 2");
-
-assert.eq("funny man", db.foo.find().sort({num: 1})[0].name, "sharding query w/sort 3 order wrong");
-assert.eq(-2, db.foo.find().sort({num: 1})[0].num, "sharding query w/sort 4 order wrong");
-
-assert.eq("bob", db.foo.find().sort({num: -1})[0].name, "sharding query w/sort 5 order wrong");
-assert.eq(3, db.foo.find().sort({num: -1})[0].num, "sharding query w/sort 6 order wrong");
-
-placeCheck(6);
-// sory by name
-
-function getNames(c) {
- return c.toArray().map(function(z) {
- return z.name;
- });
-}
-correct = getNames(db.foo.find()).sort();
-assert.eq(correct, getNames(db.foo.find().sort({name: 1})));
-correct = correct.reverse();
-assert.eq(correct, getNames(db.foo.find().sort({name: -1})));
-
-assert.eq(3, sumQuery(db.foo.find().sort({name: 1})), "sharding query w/non-shard sort 1");
-assert.eq(3, sumQuery(db.foo.find().sort({name: -1})), "sharding query w/non-shard sort 2");
-
-// sort by num multiple shards per server
-s.adminCommand({split: "test.foo", middle: {num: 2}});
-assert.eq("funny man",
- db.foo.find().sort({num: 1})[0].name,
- "sharding query w/sort and another split 1 order wrong");
-assert.eq("bob",
- db.foo.find().sort({num: -1})[0].name,
- "sharding query w/sort and another split 2 order wrong");
-assert.eq("funny man",
- db.foo.find({num: {$lt: 100}}).sort({num: 1}).arrayAccess(0).name,
- "sharding query w/sort and another split 3 order wrong");
-
-placeCheck(7);
-
-db.foo.find().sort({_id: 1}).forEach(function(z) {
- print(z._id);
-});
-
-zzz = db.foo.find().explain("executionStats").executionStats;
-assert.eq(0, zzz.totalKeysExamined, "EX1a");
-assert.eq(6, zzz.nReturned, "EX1b");
-assert.eq(6, zzz.totalDocsExamined, "EX1c");
-
-zzz = db.foo.find().hint({_id: 1}).sort({_id: 1}).explain("executionStats").executionStats;
-assert.eq(6, zzz.totalKeysExamined, "EX2a");
-assert.eq(6, zzz.nReturned, "EX2b");
-assert.eq(6, zzz.totalDocsExamined, "EX2c");
-
-// getMore
-assert.eq(4, db.foo.find().limit(-4).toArray().length, "getMore 1");
-function countCursor(c) {
- var num = 0;
- while (c.hasNext()) {
- c.next();
- num++;
+ var zzz = db.foo.find().explain("executionStats").executionStats;
+ assert.eq(0, zzz.totalKeysExamined, "EX1a");
+ assert.eq(6, zzz.nReturned, "EX1b");
+ assert.eq(6, zzz.totalDocsExamined, "EX1c");
+
+ zzz = db.foo.find().hint({_id: 1}).sort({_id: 1}).explain("executionStats").executionStats;
+ assert.eq(6, zzz.totalKeysExamined, "EX2a");
+ assert.eq(6, zzz.nReturned, "EX2b");
+ assert.eq(6, zzz.totalDocsExamined, "EX2c");
+
+ // getMore
+ assert.eq(4, db.foo.find().limit(-4).toArray().length, "getMore 1");
+ function countCursor(c) {
+ var num = 0;
+ while (c.hasNext()) {
+ c.next();
+ num++;
+ }
+ return num;
}
- return num;
-}
-assert.eq(6, countCursor(db.foo.find()._exec()), "getMore 2");
-assert.eq(6, countCursor(db.foo.find().batchSize(1)._exec()), "getMore 3");
-
-// find by non-shard-key
-db.foo.find().forEach(function(z) {
- var y = db.foo.findOne({_id: z._id});
- assert(y, "_id check 1 : " + tojson(z));
- assert.eq(z.num, y.num, "_id check 2 : " + tojson(z));
-});
+ assert.eq(6, countCursor(db.foo.find()._exec()), "getMore 2");
+ assert.eq(6, countCursor(db.foo.find().batchSize(1)._exec()), "getMore 3");
+
+ // find by non-shard-key
+ db.foo.find().forEach(function(z) {
+ var y = db.foo.findOne({_id: z._id});
+ assert(y, "_id check 1 : " + tojson(z));
+ assert.eq(z.num, y.num, "_id check 2 : " + tojson(z));
+ });
-// update
-person = db.foo.findOne({num: 3});
-assert.eq("bob", person.name, "update setup 1");
-person.name = "bob is gone";
-db.foo.update({num: 3}, person);
-person = db.foo.findOne({num: 3});
-assert.eq("bob is gone", person.name, "update test B");
+ // update
+ var person = db.foo.findOne({num: 3});
+ assert.eq("bob", person.name, "update setup 1");
+ person.name = "bob is gone";
+ db.foo.update({num: 3}, person);
+ person = db.foo.findOne({num: 3});
+ assert.eq("bob is gone", person.name, "update test B");
-// remove
-assert(db.foo.findOne({num: 3}) != null, "remove test A");
-db.foo.remove({num: 3});
-assert.isnull(db.foo.findOne({num: 3}), "remove test B");
+ // remove
+ assert(db.foo.findOne({num: 3}) != null, "remove test A");
+ db.foo.remove({num: 3});
+ assert.isnull(db.foo.findOne({num: 3}), "remove test B");
-db.foo.save({num: 3, name: "eliot2"});
-person = db.foo.findOne({num: 3});
-assert(person, "remove test C");
-assert.eq(person.name, "eliot2");
+ db.foo.save({num: 3, name: "eliot2"});
+ person = db.foo.findOne({num: 3});
+ assert(person, "remove test C");
+ assert.eq(person.name, "eliot2");
-db.foo.remove({_id: person._id});
-assert.isnull(db.foo.findOne({num: 3}), "remove test E");
+ db.foo.remove({_id: person._id});
+ assert.isnull(db.foo.findOne({num: 3}), "remove test E");
-placeCheck(8);
+ placeCheck(8);
-// more update stuff
+ // more update stuff
-printAll();
-total = db.foo.find().count();
-var res = assert.writeOK(db.foo.update({}, {$inc: {x: 1}}, false, true));
-printAll();
-assert.eq(total, res.nModified, res.toString());
+ printAll();
+ var total = db.foo.find().count();
+ var res = assert.writeOK(db.foo.update({}, {$inc: {x: 1}}, false, true));
+ printAll();
+ assert.eq(total, res.nModified, res.toString());
-res = db.foo.update({num: -1}, {$inc: {x: 1}}, false, true);
-assert.eq(1, res.nModified, res.toString());
+ res = db.foo.update({num: -1}, {$inc: {x: 1}}, false, true);
+ assert.eq(1, res.nModified, res.toString());
-// ---- move all to the secondary
+ // ---- move all to the secondary
-assert.eq(2, s.onNumShards("foo"), "on 2 shards");
+ assert.eq(2, s.onNumShards("foo"), "on 2 shards");
-secondary.foo.insert({num: -3});
+ secondary.foo.insert({num: -3});
-s.adminCommand(
- {movechunk: "test.foo", find: {num: -2}, to: secondary.getMongo().name, _waitForDelete: true});
-assert.eq(1, s.onNumShards("foo"), "on 1 shards");
+ assert.commandWorked(s.s0.adminCommand({
+ movechunk: "test.foo",
+ find: {num: -2},
+ to: secondary.getMongo().name,
+ _waitForDelete: true
+ }));
+ assert.eq(1, s.onNumShards("foo"), "on 1 shards");
-s.adminCommand(
- {movechunk: "test.foo", find: {num: -2}, to: primary.getMongo().name, _waitForDelete: true});
-assert.eq(2, s.onNumShards("foo"), "on 2 shards again");
-assert.eq(3, s.config.chunks.count(), "only 3 chunks");
+ assert.commandWorked(s.s0.adminCommand({
+ movechunk: "test.foo",
+ find: {num: -2},
+ to: primary.getMongo().name,
+ _waitForDelete: true
+ }));
+ assert.eq(2, s.onNumShards("foo"), "on 2 shards again");
+ assert.eq(3, s.config.chunks.count(), "only 3 chunks");
-print("YO : " + tojson(db.runCommand("serverStatus")));
+ print("YO : " + tojson(db.runCommand("serverStatus")));
-s.stop();
+ s.stop();
+})();
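
Besides assert.commandWorked(), shard2.js now distinguishes expected failures:
assert.commandFailed() when a command merely has to be rejected, and
assert.commandFailedWithCode() when the error code itself matters, which is
stricter than the old assert.throws() wrappers. Writes get the analogous
assert.writeOK(), which inspects the WriteResult rather than a command reply.
A short usage sketch; the namespace is from the diff above, but
'ownerShardName' and 'noSuchShard' are illustrative:

    // Must fail for some reason: moving a chunk onto the shard that owns it.
    assert.commandFailed(
        s.s0.adminCommand({movechunk: 'test.foo', find: {num: 1}, to: ownerShardName}));

    // Must fail for one specific reason: the target shard does not exist.
    assert.commandFailedWithCode(
        s.s0.adminCommand({movechunk: 'test.foo', find: {num: 1}, to: 'noSuchShard'}),
        ErrorCodes.ShardNotFound);

    // Writes return a WriteResult rather than a command reply, so use writeOK().
    assert.writeOK(db.foo.save({num: 3, name: 'bob'}));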
diff --git a/jstests/sharding/shard_with_special_db_names.js b/jstests/sharding/shard_with_special_db_names.js
index e218a08609b..2eb6a3993da 100644
--- a/jstests/sharding/shard_with_special_db_names.js
+++ b/jstests/sharding/shard_with_special_db_names.js
@@ -1,18 +1,18 @@
(function() {
+ 'use strict';
- var s = new ShardingTest({name: "shard_with_special_db_names", shards: 2, mongos: 2});
+ var s = new ShardingTest({shards: 2, mongos: 2});
var specialDB = "[a-z]+";
var specialNS = specialDB + ".special";
- s.adminCommand({enablesharding: "test"});
+ assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', 'shard0001');
- s.adminCommand({shardcollection: "test.data", key: {num: 1}});
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.data", key: {num: 1}}));
- // Test that the database will not complain "cannot have 2 database names that
- // differs on case"
- s.adminCommand({enablesharding: specialDB});
+ // Test that the database will not complain "cannot have 2 database names that differs on case"
+ assert.commandWorked(s.s0.adminCommand({enablesharding: specialDB}));
s.ensurePrimaryShard(specialDB, 'shard0000');
- s.adminCommand({shardcollection: specialNS, key: {num: 1}});
+ assert.commandWorked(s.s0.adminCommand({shardcollection: specialNS, key: {num: 1}}));
var exists = s.getDB("config").collections.find({_id: specialNS}).count();
assert.eq(exists, 1);
@@ -25,4 +25,5 @@
assert.eq(cursor.count(), 1);
assert(cursor.next()["dropped"]);
+ s.stop();
})();
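
Both this test and top_chunk_autosplit.js below pin the primary shard through
ShardingTest.ensurePrimaryShard() instead of issuing a bare movePrimary
command. Conceptually the helper is little more than the following sketch
(illustrative; the real implementation lives in the shell's ShardingTest
support code, and the method name here is invented for the sketch):

    // Conceptual sketch only: pin dbName's primary to shardName, asserting success.
    ShardingTest.prototype.ensurePrimaryShardSketch = function(dbName, shardName) {
        assert.commandWorked(this.s0.adminCommand({movePrimary: dbName, to: shardName}));
    };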
diff --git a/jstests/sharding/top_chunk_autosplit.js b/jstests/sharding/top_chunk_autosplit.js
index b4bf2c96e60..5d23386230c 100644
--- a/jstests/sharding/top_chunk_autosplit.js
+++ b/jstests/sharding/top_chunk_autosplit.js
@@ -1,8 +1,6 @@
function shardSetup(shardConfig, dbName, collName) {
var st = new ShardingTest(shardConfig);
var db = st.getDB(dbName);
- var coll = db[collName];
- var configDB = st.s.getDB('config');
// Disable the balancer to not interfere with the test, but keep the balancer settings on
// (with default empty document) so the auto split logic will be able to move chunks around.
@@ -21,7 +19,7 @@ function getNumberOfChunks(configDB) {
}
function runTest(test) {
- jsTest.log(tojson(test));
+ jsTest.log('Running: ' + tojson(test));
// Setup
// Shard collection
@@ -37,15 +35,18 @@ function runTest(test) {
if (j + chunkSize >= MAXVAL) {
continue;
}
- db.adminCommand({split: coll + "", middle: {x: j + chunkSize}});
+ assert.commandWorked(db.adminCommand({split: coll + "", middle: {x: j + chunkSize}}));
db.adminCommand({moveChunk: coll + "", find: {x: j}, to: test.shards[i].name});
}
+
// Make sure to move chunk when there's only 1 chunk in shard
db.adminCommand({moveChunk: coll + "", find: {x: startRange}, to: test.shards[i].name});
+
// Make sure to move highest chunk
if (test.shards[i].range.max == MAXVAL) {
db.adminCommand({moveChunk: coll + "", find: {x: MAXVAL}, to: test.shards[i].name});
}
+
// Add tags to each shard
var tags = test.shards[i].tags || [];
for (j = 0; j < tags.length; j++) {
@@ -90,18 +91,11 @@ function runTest(test) {
sh.removeShardTag(test.shards[i].name, tags[j]);
}
}
- configDB.tags.remove({ns: db + "." + collName});
+
+ assert.writeOK(configDB.tags.remove({ns: db + "." + collName}));
// End of test cleanup
}
-// Main
-var dbName = "test";
-var collName = "topchunk";
-var st = shardSetup({name: "topchunk", shards: 4, chunkSize: 1}, dbName, collName);
-var db = st.getDB(dbName);
-var coll = db[collName];
-var configDB = st.s.getDB('config');
-
// Define shard key ranges for each of the shard nodes
var MINVAL = -500;
var MAXVAL = 1500;
@@ -250,25 +244,9 @@ var tests = [
{name: "shard0003", range: midChunkRange2, chunks: 5}
],
inserts: highChunkInserts
- }
+ },
];
-assert.commandWorked(db.adminCommand({enableSharding: dbName}));
-db.adminCommand({movePrimary: dbName, to: 'shard0000'});
-
-// Execute all test objects
-for (var i = 0; i < tests.length; i++) {
- runTest(tests[i]);
-}
-
-st.stop();
-
-// Single node shard Tests
-st = shardSetup({name: "singleNode", shards: 1, chunkSize: 1}, dbName, collName);
-db = st.getDB(dbName);
-coll = db[collName];
-configDB = st.s.getDB('config');
-
var singleNodeTests = [
{
// Test auto-split on the "low" top chunk on single node shard
@@ -285,35 +263,12 @@ var singleNodeTests = [
movedToShard: "shard0000",
shards: [{name: "shard0000", range: highChunkRange, chunks: 2}],
inserts: highChunkInserts
- }
+ },
];
-assert.commandWorked(db.adminCommand({enableSharding: dbName}));
-db.adminCommand({movePrimary: dbName, to: 'shard0000'});
-
-// Execute all test objects
-for (var i = 0; i < singleNodeTests.length; i++) {
- runTest(singleNodeTests[i]);
-}
-
-st.stop();
-
-// maxSize test
-// To set maxSize, must manually add the shards
-st = shardSetup(
- {name: "maxSize", shards: 2, chunkSize: 1, other: {manualAddShard: true}}, dbName, collName);
-db = st.getDB(dbName);
-coll = db[collName];
-configDB = st.s.getDB('config');
-
-// maxSize on shard0000 - 5MB, on shard0001 - 1MB
-st.adminCommand({addshard: st.getConnNames()[0], maxSize: 5});
-st.adminCommand({addshard: st.getConnNames()[1], maxSize: 1});
-
var maxSizeTests = [
{
- // Test auto-split on the "low" top chunk with maxSize on
- // destination shard
+ // Test auto-split on the "low" top chunk with maxSize on destination shard
name: "maxSize - low top chunk",
lowOrHigh: lowChunk,
movedToShard: "shard0000",
@@ -324,8 +279,7 @@ var maxSizeTests = [
inserts: lowChunkInserts
},
{
- // Test auto-split on the "high" top chunk with maxSize on
- // destination shard
+ // Test auto-split on the "high" top chunk with maxSize on destination shard
name: "maxSize - high top chunk",
lowOrHigh: highChunk,
movedToShard: "shard0000",
@@ -334,15 +288,62 @@ var maxSizeTests = [
{name: "shard0001", range: lowChunkRange, chunks: 1}
],
inserts: highChunkInserts
- }
+ },
];
+// Main
+var dbName = "TopChunkDB";
+var collName = "coll";
+
+var st = shardSetup({name: "topchunk", shards: 4, chunkSize: 1}, dbName, collName);
+var db = st.getDB(dbName);
+var coll = db[collName];
+var configDB = st.s.getDB('config');
+
+assert.commandWorked(db.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, 'shard0000');
+
+// Execute all test objects
+for (var i = 0; i < tests.length; i++) {
+ runTest(tests[i]);
+}
+
+st.stop();
+
+// Single node shard tests
+st = shardSetup({name: "singleNode", shards: 1, chunkSize: 1}, dbName, collName);
+db = st.getDB(dbName);
+coll = db[collName];
+configDB = st.s.getDB('config');
+
+assert.commandWorked(db.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, 'shard0000');
+
+// Execute all test objects
+for (var i = 0; i < singleNodeTests.length; i++) {
+ runTest(singleNodeTests[i]);
+}
+
+st.stop();
+
+// maxSize test
+// To set maxSize, must manually add the shards
+st = shardSetup(
+ {name: "maxSize", shards: 2, chunkSize: 1, other: {manualAddShard: true}}, dbName, collName);
+db = st.getDB(dbName);
+coll = db[collName];
+configDB = st.s.getDB('config');
+
+// maxSize on shard0000 - 5MB, on shard0001 - 1MB
+assert.commandWorked(db.adminCommand({addshard: st.getConnNames()[0], maxSize: 5}));
+assert.commandWorked(db.adminCommand({addshard: st.getConnNames()[1], maxSize: 1}));
+
// SERVER-17070 Auto split moves to shard node running WiredTiger, if exceeding maxSize
var unsupported = ["wiredTiger", "rocksdb", "inMemory", "ephemeralForTest"];
if (unsupported.indexOf(st.d0.adminCommand({serverStatus: 1}).storageEngine.name) == -1 &&
unsupported.indexOf(st.d1.adminCommand({serverStatus: 1}).storageEngine.name) == -1) {
assert.commandWorked(db.adminCommand({enableSharding: dbName}));
- db.adminCommand({movePrimary: dbName, to: 'shard0000'});
+ st.ensurePrimaryShard(dbName, 'shard0000');
// Execute all test objects
for (var i = 0; i < maxSizeTests.length; i++) {