author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2016-06-06 10:45:06 +0300
committer  Dianna Hohensee <dianna.hohensee@10gen.com>        2017-09-20 09:23:22 -0400
commit     41798bb7ecfa6d5d491f02f9dba68681da486355 (patch)
tree       3062eb61aae67a53b16521ba58fa67d22e529b2e
parent     1ebd782eccc7a48766a2f3b97271c09129cd7c4b (diff)
download   mongo-41798bb7ecfa6d5d491f02f9dba68681da486355.tar.gz
SERVER-22512 Remove unnecessary calls to stopBalancer
Modified some tests to work in v3.2 -- e.g., removed getShardPrimary, which does not exist in v3.2.
(cherry picked from commit 2477b8c33b2e8f26fcde47c38c19c3fbb8b99839)
-rw-r--r--  jstests/aggregation/bugs/server6118.js                     |  17
-rw-r--r--  jstests/aggregation/bugs/server6179.js                     |  19
-rw-r--r--  jstests/aggregation/bugs/server7781.js                     |  17
-rw-r--r--  jstests/aggregation/bugs/server9444.js                     | 154
-rw-r--r--  jstests/gle/gle_sharded_write.js                           | 376
-rw-r--r--  jstests/noPassthroughWithMongod/no_balance_collection.js  |   8
-rw-r--r--  jstests/sharding/balance_repl.js                           |  21
-rw-r--r--  jstests/sharding/balance_tags2.js                          |  15
-rw-r--r--  jstests/sharding/explain_cmd.js                            | 357
-rw-r--r--  jstests/sharding/explain_find_and_modify_sharded.js        |   2
-rw-r--r--  jstests/sharding/hash_shard_unique_compound.js             |  60
-rw-r--r--  jstests/sharding/migrateBig.js                             |  46
-rw-r--r--  jstests/sharding/printShardingStatus.js                    |   2
-rw-r--r--  jstests/sharding/shard3.js                                 |  10
-rw-r--r--  jstests/sharding/split_with_force_small.js                 |  99
-rw-r--r--  jstests/sharding/stale_version_write.js                    |  46
-rw-r--r--  jstests/slow2/mr_during_migrate.js                         | 159
-rw-r--r--  src/mongo/s/SConscript                                     |   2
-rw-r--r--  src/mongo/s/catalog/catalog_manager.h                      |   6
19 files changed, 712 insertions(+), 704 deletions(-)
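The recurring change across these tests is the same three-part cleanup: each file is wrapped in an IIFE with 'use strict', the explicit stopBalancer() call after constructing ShardingTest is dropped (tests that actually need the balancer request it with other: {enableBalancer: true}, as balance_tags2.js does), and bare adminCommand calls are wrapped in assert.commandWorked. A minimal sketch of the resulting test skeleton, assuming the usual mongo shell jstest harness, might look like this:

(function() {
    'use strict';

    // No explicit stopBalancer() call is needed after construction; tests that
    // want balancing pass other: {enableBalancer: true} instead.
    var st = new ShardingTest({shards: 2});

    // Every setup command is now checked rather than fire-and-forget.
    assert.commandWorked(st.s0.adminCommand({enablesharding: 'test'}));
    st.ensurePrimaryShard('test', 'shard0001');
    assert.commandWorked(st.s0.adminCommand({shardcollection: 'test.coll', key: {_id: 1}}));

    // ... test body ...

    st.stop();
})();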
diff --git a/jstests/aggregation/bugs/server6118.js b/jstests/aggregation/bugs/server6118.js
index f891135de72..3c55ae5ce33 100644
--- a/jstests/aggregation/bugs/server6118.js
+++ b/jstests/aggregation/bugs/server6118.js
@@ -1,12 +1,12 @@
// SERVER-6118: support for sharded sorts
(function() {
+ 'use strict';
- var s = new ShardingTest({name: "aggregation_sort1", shards: 2, mongos: 1});
- s.stopBalancer();
+ var s = new ShardingTest({shards: 2});
- s.adminCommand({enablesharding: "test"});
+ assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', 'shard0001');
- s.adminCommand({shardcollection: "test.data", key: {_id: 1}});
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.data", key: {_id: 1}}));
var d = s.getDB("test");
@@ -20,12 +20,12 @@
bulkOp.execute();
// Split the data into 3 chunks
- s.adminCommand({split: "test.data", middle: {_id: 33}});
- s.adminCommand({split: "test.data", middle: {_id: 66}});
+ assert.commandWorked(s.s0.adminCommand({split: "test.data", middle: {_id: 33}}));
+ assert.commandWorked(s.s0.adminCommand({split: "test.data", middle: {_id: 66}}));
// Migrate the middle chunk to another shard
- s.adminCommand(
- {movechunk: "test.data", find: {_id: 50}, to: s.getOther(s.getServer("test")).name});
+ assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.data", find: {_id: 50}, to: s.getOther(s.getServer("test")).name}));
// Check that the results are in order.
var result = d.data.aggregate({$sort: {_id: 1}}).toArray();
@@ -36,5 +36,4 @@
}
s.stop();
-
})();
diff --git a/jstests/aggregation/bugs/server6179.js b/jstests/aggregation/bugs/server6179.js
index 1109ddaa67e..503e91a70d1 100644
--- a/jstests/aggregation/bugs/server6179.js
+++ b/jstests/aggregation/bugs/server6179.js
@@ -1,12 +1,12 @@
// SERVER-6179: support for two $groups in sharded agg
(function() {
+ 'use strict';
- var s = new ShardingTest({name: "aggregation_multiple_group", shards: 2, mongos: 1});
- s.stopBalancer();
+ var s = new ShardingTest({shards: 2});
- s.adminCommand({enablesharding: "test"});
+ assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', 'shard0001');
- s.adminCommand({shardcollection: "test.data", key: {_id: 1}});
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.data", key: {_id: 1}}));
var d = s.getDB("test");
@@ -20,18 +20,18 @@
bulkOp.execute();
// Split the data into 3 chunks
- s.adminCommand({split: "test.data", middle: {_id: 33}});
- s.adminCommand({split: "test.data", middle: {_id: 66}});
+ assert.commandWorked(s.s0.adminCommand({split: "test.data", middle: {_id: 33}}));
+ assert.commandWorked(s.s0.adminCommand({split: "test.data", middle: {_id: 66}}));
// Migrate the middle chunk to another shard
- s.adminCommand(
- {movechunk: "test.data", find: {_id: 50}, to: s.getOther(s.getServer("test")).name});
+ assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.data", find: {_id: 50}, to: s.getOther(s.getServer("test")).name}));
// Check that we get results rather than an error
var result = d.data.aggregate({$group: {_id: '$_id', i: {$first: '$i'}}},
{$group: {_id: '$i', avg_id: {$avg: '$_id'}}},
{$sort: {_id: 1}}).toArray();
- expected = [
+ var expected = [
{"_id": 0, "avg_id": 45},
{"_id": 1, "avg_id": 46},
{"_id": 2, "avg_id": 47},
@@ -47,5 +47,4 @@
assert.eq(result, expected);
s.stop();
-
})();
diff --git a/jstests/aggregation/bugs/server7781.js b/jstests/aggregation/bugs/server7781.js
index 230a8a64c9f..c3918aeb8d2 100644
--- a/jstests/aggregation/bugs/server7781.js
+++ b/jstests/aggregation/bugs/server7781.js
@@ -1,5 +1,6 @@
// SERVER-7781 $geoNear pipeline stage
(function() {
+ 'use strict';
load('jstests/libs/geo_near_random.js');
load('jstests/aggregation/extras/utils.js');
@@ -59,10 +60,12 @@
shards.push(shard._id);
});
- db.adminCommand({shardCollection: db[coll].getFullName(), key: {rand: 1}});
+ assert.commandWorked(
+ db.adminCommand({shardCollection: db[coll].getFullName(), key: {rand: 1}}));
for (var i = 1; i < 10; i++) {
// split at 0.1, 0.2, ... 0.9
- db.adminCommand({split: db[coll].getFullName(), middle: {rand: i / 10}});
+ assert.commandWorked(
+ db.adminCommand({split: db[coll].getFullName(), middle: {rand: i / 10}}));
db.adminCommand({
moveChunk: db[coll].getFullName(),
find: {rand: i / 10},
@@ -87,13 +90,13 @@
// test with defaults
var queryPoint = pointMaker.mkPt(0.25); // stick to center of map
- geoCmd = {
+ var geoCmd = {
geoNear: coll,
near: queryPoint,
includeLocs: true,
spherical: true
};
- aggCmd = {
+ var aggCmd = {
$geoNear: {
near: queryPoint,
includeLocs: 'stats.loc',
@@ -134,7 +137,7 @@
geoCmd.num = 40;
geoCmd.near = queryPoint;
aggCmd.$geoNear.near = queryPoint;
- aggArr = [aggCmd, {$limit: 50}, {$limit: 60}, {$limit: 40}];
+ var aggArr = [aggCmd, {$limit: 50}, {$limit: 60}, {$limit: 40}];
checkOutput(db.runCommand(geoCmd), db[coll].aggregate(aggArr), 40);
// Test $geoNear with an initial batchSize of 0. Regression test for SERVER-20935.
@@ -157,13 +160,11 @@
test(db, false, '2dsphere');
var sharded = new ShardingTest({shards: 3, mongos: 1});
- sharded.stopBalancer();
- sharded.adminCommand({enablesharding: "test"});
+ assert.commandWorked(sharded.s0.adminCommand({enablesharding: "test"}));
sharded.ensurePrimaryShard('test', 'shard0001');
test(sharded.getDB('test'), true, '2d');
test(sharded.getDB('test'), true, '2dsphere');
sharded.stop();
-
})();
diff --git a/jstests/aggregation/bugs/server9444.js b/jstests/aggregation/bugs/server9444.js
index ad5f4b03ca6..f3dc2748b0a 100644
--- a/jstests/aggregation/bugs/server9444.js
+++ b/jstests/aggregation/bugs/server9444.js
@@ -1,76 +1,80 @@
// server-9444 support disk storage of intermediate results in aggregation
-
-var t = db.server9444;
-t.drop();
-
-var sharded = (typeof(RUNNING_IN_SHARDED_AGG_TEST) != 'undefined'); // see end of testshard1.js
-if (sharded) {
- db.adminCommand({shardcollection: t.getFullName(), key: {"_id": 'hashed'}});
-}
-
-var memoryLimitMB = sharded ? 200 : 100;
-
-function loadData() {
- var bigStr = Array(1024 * 1024 + 1).toString(); // 1MB of ','
- for (var i = 0; i < memoryLimitMB + 1; i++)
- t.insert({_id: i, bigStr: i + bigStr, random: Math.random()});
-
- assert.gt(t.stats().size, memoryLimitMB * 1024 * 1024);
-}
-loadData();
-
-function test(pipeline, outOfMemoryCode) {
- // ensure by default we error out if exceeding memory limit
- var res = t.runCommand('aggregate', {pipeline: pipeline});
- assert.commandFailed(res);
- assert.eq(res.code, outOfMemoryCode);
-
- // ensure allowDiskUse: false does what it says
- var res = t.runCommand('aggregate', {pipeline: pipeline, allowDiskUse: false});
- assert.commandFailed(res);
- assert.eq(res.code, outOfMemoryCode);
-
- // allowDiskUse only supports bool. In particular, numbers aren't allowed.
- var res = t.runCommand('aggregate', {pipeline: pipeline, allowDiskUse: 1});
- assert.commandFailed(res);
- assert.eq(res.code, 16949);
-
- // ensure we work when allowDiskUse === true
- var res = t.aggregate(pipeline, {allowDiskUse: true});
- assert.eq(res.itcount(), t.count()); // all tests output one doc per input doc
-}
-
-var groupCode = 16945;
-var sortCode = 16819;
-var sortLimitCode = 16820;
-
-test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}], groupCode);
-
-// sorting with _id would use index which doesn't require extsort
-test([{$sort: {random: 1}}], sortCode);
-test([{$sort: {bigStr: 1}}], sortCode); // big key and value
-
-// make sure sort + large limit won't crash the server (SERVER-10136)
-test([{$sort: {bigStr: 1}}, {$limit: 1000 * 1000 * 1000}], sortLimitCode);
-
-// test combining two extSorts in both same and different orders
-test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {_id: 1}}], groupCode);
-test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {_id: -1}}], groupCode);
-test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {random: 1}}], groupCode);
-test([{$sort: {random: 1}}, {$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}], sortCode);
-
-var origDB = db;
-if (sharded) {
- // Stop balancer first before dropping so there will be no contention on the ns lock.
- // It's alright to modify the global db variable since sharding tests never run in parallel.
- db = db.getSiblingDB('config');
- sh.stopBalancer();
-}
-
-// don't leave large collection laying around
-t.drop();
-
-if (sharded) {
- sh.startBalancer();
- db = origDB;
-}
+(function() {
+ 'use strict';
+
+ var t = db.server9444;
+ t.drop();
+
+ var sharded = (typeof(RUNNING_IN_SHARDED_AGG_TEST) != 'undefined'); // see end of testshard1.js
+ if (sharded) {
+ assert.commandWorked(
+ db.adminCommand({shardcollection: t.getFullName(), key: {"_id": 'hashed'}}));
+ }
+
+ var memoryLimitMB = sharded ? 200 : 100;
+
+ function loadData() {
+ var bigStr = Array(1024 * 1024 + 1).toString(); // 1MB of ','
+ for (var i = 0; i < memoryLimitMB + 1; i++)
+ t.insert({_id: i, bigStr: i + bigStr, random: Math.random()});
+
+ assert.gt(t.stats().size, memoryLimitMB * 1024 * 1024);
+ }
+ loadData();
+
+ function test(pipeline, outOfMemoryCode) {
+ // ensure by default we error out if exceeding memory limit
+ var res = t.runCommand('aggregate', {pipeline: pipeline});
+ assert.commandFailed(res);
+ assert.eq(res.code, outOfMemoryCode);
+
+ // ensure allowDiskUse: false does what it says
+ var res = t.runCommand('aggregate', {pipeline: pipeline, allowDiskUse: false});
+ assert.commandFailed(res);
+ assert.eq(res.code, outOfMemoryCode);
+
+ // allowDiskUse only supports bool. In particular, numbers aren't allowed.
+ var res = t.runCommand('aggregate', {pipeline: pipeline, allowDiskUse: 1});
+ assert.commandFailed(res);
+ assert.eq(res.code, 16949);
+
+ // ensure we work when allowDiskUse === true
+ var res = t.aggregate(pipeline, {allowDiskUse: true});
+ assert.eq(res.itcount(), t.count()); // all tests output one doc per input doc
+ }
+
+ var groupCode = 16945;
+ var sortCode = 16819;
+ var sortLimitCode = 16820;
+
+ test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}], groupCode);
+
+ // sorting with _id would use index which doesn't require extsort
+ test([{$sort: {random: 1}}], sortCode);
+ test([{$sort: {bigStr: 1}}], sortCode); // big key and value
+
+ // make sure sort + large limit won't crash the server (SERVER-10136)
+ test([{$sort: {bigStr: 1}}, {$limit: 1000 * 1000 * 1000}], sortLimitCode);
+
+ // test combining two extSorts in both same and different orders
+ test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {_id: 1}}], groupCode);
+ test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {_id: -1}}], groupCode);
+ test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {random: 1}}], groupCode);
+ test([{$sort: {random: 1}}, {$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}], sortCode);
+
+ var origDB = db;
+ if (sharded) {
+ // Stop balancer first before dropping so there will be no contention on the ns lock.
+ // It's alright to modify the global db variable since sharding tests never run in parallel.
+ db = db.getSiblingDB('config');
+ sh.stopBalancer();
+ }
+
+ // don't leave large collection laying around
+ t.drop();
+
+ if (sharded) {
+ sh.startBalancer();
+ db = origDB;
+ }
+})();
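server9444.js keeps its balancer handling because dropping a very large sharded collection can contend with a concurrent migration on the namespace lock. A condensed sketch of that guard, assuming the sharded passthrough defines RUNNING_IN_SHARDED_AGG_TEST and the collection t already exists:

var origDB = db;
if (sharded) {
    // sh.* helpers operate on the config database, so repoint the global db first.
    db = db.getSiblingDB('config');
    sh.stopBalancer();   // avoid ns-lock contention between the drop and a migration
}

t.drop();                // don't leave the large collection lying around

if (sharded) {
    sh.startBalancer();
    db = origDB;         // restore the global db for any following tests
}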
diff --git a/jstests/gle/gle_sharded_write.js b/jstests/gle/gle_sharded_write.js
index f1feffed5b2..8d2a21cd758 100644
--- a/jstests/gle/gle_sharded_write.js
+++ b/jstests/gle/gle_sharded_write.js
@@ -2,192 +2,192 @@
// Ensures GLE correctly reports basic write stats and failures
// Note that test should work correctly with and without write commands.
//
-
-var st = new ShardingTest({shards: 2, mongos: 1});
-st.stopBalancer();
-
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
-var config = mongos.getDB("config");
-var coll = mongos.getCollection(jsTestName() + ".coll");
-var shards = config.shards.find().toArray();
-
-assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
-printjson(admin.runCommand({movePrimary: coll.getDB().toString(), to: shards[0]._id}));
-assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}}));
-assert.commandWorked(admin.runCommand({split: coll.toString(), middle: {_id: 0}}));
-assert.commandWorked(
- admin.runCommand({moveChunk: coll.toString(), find: {_id: 0}, to: shards[1]._id}));
-
-st.printShardingStatus();
-
-var gle = null;
-
-//
-// Successful insert
-coll.remove({});
-coll.insert({_id: -1});
-printjson(gle = coll.getDB().runCommand({getLastError: 1}));
-assert(gle.ok);
-assert('err' in gle);
-assert(!gle.err);
-assert.eq(coll.count(), 1);
-
-//
-// Successful update
-coll.remove({});
-coll.insert({_id: 1});
-coll.update({_id: 1}, {$set: {foo: "bar"}});
-printjson(gle = coll.getDB().runCommand({getLastError: 1}));
-assert(gle.ok);
-assert('err' in gle);
-assert(!gle.err);
-assert(gle.updatedExisting);
-assert.eq(gle.n, 1);
-assert.eq(coll.count(), 1);
-
-//
-// Successful multi-update
-coll.remove({});
-coll.insert({_id: 1});
-coll.update({}, {$set: {foo: "bar"}}, false, true);
-printjson(gle = coll.getDB().runCommand({getLastError: 1}));
-assert(gle.ok);
-assert('err' in gle);
-assert(!gle.err);
-assert(gle.updatedExisting);
-assert.eq(gle.n, 1);
-assert.eq(coll.count(), 1);
-
-//
-// Successful upsert
-coll.remove({});
-coll.update({_id: 1}, {_id: 1}, true);
-printjson(gle = coll.getDB().runCommand({getLastError: 1}));
-assert(gle.ok);
-assert('err' in gle);
-assert(!gle.err);
-assert(!gle.updatedExisting);
-assert.eq(gle.n, 1);
-assert.eq(gle.upserted, 1);
-assert.eq(coll.count(), 1);
-
-//
-// Successful upserts
-coll.remove({});
-coll.update({_id: -1}, {_id: -1}, true);
-coll.update({_id: 1}, {_id: 1}, true);
-printjson(gle = coll.getDB().runCommand({getLastError: 1}));
-assert(gle.ok);
-assert('err' in gle);
-assert(!gle.err);
-assert(!gle.updatedExisting);
-assert.eq(gle.n, 1);
-assert.eq(gle.upserted, 1);
-assert.eq(coll.count(), 2);
-
-//
-// Successful remove
-coll.remove({});
-coll.insert({_id: 1});
-coll.remove({_id: 1});
-printjson(gle = coll.getDB().runCommand({getLastError: 1}));
-assert(gle.ok);
-assert('err' in gle);
-assert(!gle.err);
-assert.eq(gle.n, 1);
-assert.eq(coll.count(), 0);
-
-//
-// Error on one host during update
-coll.remove({});
-coll.update({_id: 1}, {$invalid: "xxx"}, true);
-printjson(gle = coll.getDB().runCommand({getLastError: 1}));
-assert(gle.ok);
-assert(gle.err);
-assert(gle.code);
-assert(!gle.errmsg);
-assert(gle.singleShard);
-assert.eq(coll.count(), 0);
-
-//
-// Error on two hosts during remove
-coll.remove({});
-coll.remove({$invalid: 'remove'});
-printjson(gle = coll.getDB().runCommand({getLastError: 1}));
-assert(gle.ok);
-assert(gle.err);
-assert(gle.code);
-assert(!gle.errmsg);
-assert(gle.shards);
-assert.eq(coll.count(), 0);
-
-//
-// Repeated calls to GLE should work
-coll.remove({});
-coll.update({_id: 1}, {$invalid: "xxx"}, true);
-printjson(gle = coll.getDB().runCommand({getLastError: 1}));
-assert(gle.ok);
-assert(gle.err);
-assert(gle.code);
-assert(!gle.errmsg);
-assert(gle.singleShard);
-printjson(gle = coll.getDB().runCommand({getLastError: 1}));
-assert(gle.ok);
-assert(gle.err);
-assert(gle.code);
-assert(!gle.errmsg);
-assert(gle.singleShard);
-assert.eq(coll.count(), 0);
-
-//
-// Geo $near is not supported on mongos
-coll.ensureIndex({loc: "2dsphere"});
-coll.remove({});
-var query = {
- loc: {
- $near: {
- $geometry: {type: "Point", coordinates: [0, 0]},
- $maxDistance: 1000,
+(function() {
+ 'use strict';
+
+ var st = new ShardingTest({shards: 2, mongos: 1});
+
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var config = mongos.getDB("config");
+ var coll = mongos.getCollection(jsTestName() + ".coll");
+ var shards = config.shards.find().toArray();
+
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
+ printjson(admin.runCommand({movePrimary: coll.getDB().toString(), to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({split: coll.toString(), middle: {_id: 0}}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: coll.toString(), find: {_id: 0}, to: shards[1]._id}));
+
+ st.printShardingStatus();
+
+ var gle = null;
+
+ //
+ // Successful insert
+ coll.remove({});
+ coll.insert({_id: -1});
+ printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+ assert(gle.ok);
+ assert('err' in gle);
+ assert(!gle.err);
+ assert.eq(coll.count(), 1);
+
+ //
+ // Successful update
+ coll.remove({});
+ coll.insert({_id: 1});
+ coll.update({_id: 1}, {$set: {foo: "bar"}});
+ printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+ assert(gle.ok);
+ assert('err' in gle);
+ assert(!gle.err);
+ assert(gle.updatedExisting);
+ assert.eq(gle.n, 1);
+ assert.eq(coll.count(), 1);
+
+ //
+ // Successful multi-update
+ coll.remove({});
+ coll.insert({_id: 1});
+ coll.update({}, {$set: {foo: "bar"}}, false, true);
+ printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+ assert(gle.ok);
+ assert('err' in gle);
+ assert(!gle.err);
+ assert(gle.updatedExisting);
+ assert.eq(gle.n, 1);
+ assert.eq(coll.count(), 1);
+
+ //
+ // Successful upsert
+ coll.remove({});
+ coll.update({_id: 1}, {_id: 1}, true);
+ printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+ assert(gle.ok);
+ assert('err' in gle);
+ assert(!gle.err);
+ assert(!gle.updatedExisting);
+ assert.eq(gle.n, 1);
+ assert.eq(gle.upserted, 1);
+ assert.eq(coll.count(), 1);
+
+ //
+ // Successful upserts
+ coll.remove({});
+ coll.update({_id: -1}, {_id: -1}, true);
+ coll.update({_id: 1}, {_id: 1}, true);
+ printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+ assert(gle.ok);
+ assert('err' in gle);
+ assert(!gle.err);
+ assert(!gle.updatedExisting);
+ assert.eq(gle.n, 1);
+ assert.eq(gle.upserted, 1);
+ assert.eq(coll.count(), 2);
+
+ //
+ // Successful remove
+ coll.remove({});
+ coll.insert({_id: 1});
+ coll.remove({_id: 1});
+ printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+ assert(gle.ok);
+ assert('err' in gle);
+ assert(!gle.err);
+ assert.eq(gle.n, 1);
+ assert.eq(coll.count(), 0);
+
+ //
+ // Error on one host during update
+ coll.remove({});
+ coll.update({_id: 1}, {$invalid: "xxx"}, true);
+ printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+ assert(gle.ok);
+ assert(gle.err);
+ assert(gle.code);
+ assert(!gle.errmsg);
+ assert(gle.singleShard);
+ assert.eq(coll.count(), 0);
+
+ //
+ // Error on two hosts during remove
+ coll.remove({});
+ coll.remove({$invalid: 'remove'});
+ printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+ assert(gle.ok);
+ assert(gle.err);
+ assert(gle.code);
+ assert(!gle.errmsg);
+ assert(gle.shards);
+ assert.eq(coll.count(), 0);
+
+ //
+ // Repeated calls to GLE should work
+ coll.remove({});
+ coll.update({_id: 1}, {$invalid: "xxx"}, true);
+ printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+ assert(gle.ok);
+ assert(gle.err);
+ assert(gle.code);
+ assert(!gle.errmsg);
+ assert(gle.singleShard);
+ printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+ assert(gle.ok);
+ assert(gle.err);
+ assert(gle.code);
+ assert(!gle.errmsg);
+ assert(gle.singleShard);
+ assert.eq(coll.count(), 0);
+
+ //
+ // Geo $near is not supported on mongos
+ coll.ensureIndex({loc: "2dsphere"});
+ coll.remove({});
+ var query = {
+ loc: {
+ $near: {
+ $geometry: {type: "Point", coordinates: [0, 0]},
+ $maxDistance: 1000,
+ }
}
- }
-};
-printjson(coll.remove(query));
-printjson(gle = coll.getDB().runCommand({getLastError: 1}));
-assert(gle.ok);
-assert(gle.err);
-assert(gle.code);
-assert(!gle.errmsg);
-assert(gle.shards);
-assert.eq(coll.count(), 0);
-
-//
-// First shard down
-//
-
-//
-// Successful bulk insert on two hosts, host dies before gle (error contacting host)
-coll.remove({});
-coll.insert([{_id: 1}, {_id: -1}]);
-// Wait for write to be written to shards before shutting it down.
-printjson(gle = coll.getDB().runCommand({getLastError: 1}));
-MongoRunner.stopMongod(st.shard0);
-printjson(gle = coll.getDB().runCommand({getLastError: 1}));
-// Should get an error about contacting dead host.
-assert(!gle.ok);
-assert(gle.errmsg);
-
-//
-// Failed insert on two hosts, first host dead
-// NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get
-// successful writes from.
-coll.remove({_id: 1});
-coll.insert([{_id: 1}, {_id: -1}]);
-printjson(gle = coll.getDB().runCommand({getLastError: 1}));
-assert(gle.ok);
-assert(gle.err);
-assert.eq(coll.count({_id: 1}), 1);
-
-jsTest.log("DONE!");
-
-st.stop();
+ };
+ printjson(coll.remove(query));
+ printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+ assert(gle.ok);
+ assert(gle.err);
+ assert(gle.code);
+ assert(!gle.errmsg);
+ assert(gle.shards);
+ assert.eq(coll.count(), 0);
+
+ //
+ // First shard down
+ //
+
+ //
+ // Successful bulk insert on two hosts, host dies before gle (error contacting host)
+ coll.remove({});
+ coll.insert([{_id: 1}, {_id: -1}]);
+ // Wait for write to be written to shards before shutting it down.
+ printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+ MongoRunner.stopMongod(st.shard0);
+ printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+ // Should get an error about contacting dead host.
+ assert(!gle.ok);
+ assert(gle.errmsg);
+
+ //
+ // Failed insert on two hosts, first host dead
+ // NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get
+ // successful writes from.
+ coll.remove({_id: 1});
+ coll.insert([{_id: 1}, {_id: -1}]);
+ printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+ assert(gle.ok);
+ assert(gle.err);
+ assert.eq(coll.count({_id: 1}), 1);
+
+ st.stop();
+})();
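The GLE checks in this file all follow one shape: issue a write through mongos, run getLastError on the same connection, and assert on the ok/err/n fields. A trimmed sketch of that pattern, assuming st and coll are set up as in the test:

coll.remove({});
coll.insert({_id: 1});
coll.update({_id: 1}, {$set: {foo: 'bar'}});

var gle = coll.getDB().runCommand({getLastError: 1});
printjson(gle);

assert(gle.ok);              // the GLE command itself succeeded
assert('err' in gle);        // the err field is always reported
assert(!gle.err);            // ...and is null for a successful write
assert(gle.updatedExisting); // the update matched an existing document
assert.eq(gle.n, 1);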
diff --git a/jstests/noPassthroughWithMongod/no_balance_collection.js b/jstests/noPassthroughWithMongod/no_balance_collection.js
index cfec6199ca2..1c2f1aae009 100644
--- a/jstests/noPassthroughWithMongod/no_balance_collection.js
+++ b/jstests/noPassthroughWithMongod/no_balance_collection.js
@@ -1,14 +1,11 @@
// Tests whether the noBalance flag disables balancing for collections
-var st = new ShardingTest({shards: 2, mongos: 1, verbose: 1});
+var st = new ShardingTest({shards: 2, mongos: 1});
// First, test that shell helpers require an argument
assert.throws(sh.disableBalancing, [], "sh.disableBalancing requires a collection");
assert.throws(sh.enableBalancing, [], "sh.enableBalancing requires a collection");
-// Initially stop balancing
-st.stopBalancer();
-
var shardAName = st._shardNames[0];
var shardBName = st._shardNames[1];
@@ -70,10 +67,11 @@ jsTest.log("Chunks for " + collB + " are balanced.");
// Re-disable balancing for collB
sh.disableBalancing(collB);
+
// Wait for the balancer to fully finish the last migration and write the changelog
// MUST set db var here, ugly but necessary
db = st.s0.getDB("config");
-sh.waitForBalancer(true);
+st.waitForBalancerRound();
// Make sure auto-migrates on insert don't move chunks
var lastMigration = sh._lastMigration(collB);
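The interesting substitution in this file is sh.waitForBalancer(true) giving way to st.waitForBalancerRound(), while the per-collection sh.disableBalancing / sh.enableBalancing helpers stay. A rough sketch of the flow, assuming collB names a sharded collection and db is pointed at the config database as the test requires:

// Balancing is toggled per collection, not cluster-wide.
sh.disableBalancing(collB);

// Let the balancer finish its in-flight migration and write the changelog.
db = st.s0.getDB('config');   // sh helpers read balancer state from the config db
st.waitForBalancerRound();

// From here on, auto-migrations triggered by inserts should leave collB alone.
var lastMigration = sh._lastMigration(collB);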
diff --git a/jstests/sharding/balance_repl.js b/jstests/sharding/balance_repl.js
index 46404646995..59cc694fc42 100644
--- a/jstests/sharding/balance_repl.js
+++ b/jstests/sharding/balance_repl.js
@@ -3,7 +3,7 @@
//
(function() {
- "use strict";
+ 'use strict';
// The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
// from stepping down during migrations on slow evergreen builders.
@@ -27,14 +27,16 @@
}
assert.writeOK(bulk.execute());
- s.adminCommand({enablesharding: "test"});
+ assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', 'test-rs0');
- s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- for (i = 0; i < 20; i++)
- s.adminCommand({split: "test.foo", middle: {_id: i * 100}});
+ for (i = 0; i < 20; i++) {
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {_id: i * 100}}));
+ }
assert.eq(2100, db.foo.find().itcount());
+
var coll = db.foo;
coll.setSlaveOk();
@@ -42,10 +44,9 @@
var other = s.config.shards.findOne({_id: {$ne: serverName}});
for (i = 0; i < 20; i++) {
- // Needs to waitForDelete because we'll be performing a slaveOk query,
- // and secondaries don't have a chunk manager so it doesn't know how to
- // filter out docs it doesn't own.
- assert(s.adminCommand({
+ // Needs to waitForDelete because we'll be performing a slaveOk query, and secondaries don't
+ // have a chunk manager so it doesn't know how to filter out docs it doesn't own.
+ assert.commandWorked(s.s0.adminCommand({
moveChunk: "test.foo",
find: {_id: i * 100},
to: other._id,
@@ -53,9 +54,9 @@
writeConcern: {w: 2},
_waitForDelete: true
}));
+
assert.eq(2100, coll.find().itcount());
}
s.stop();
-
}());
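balance_repl.js illustrates why _waitForDelete matters when a test follows a migration with a slaveOk read: secondaries have no chunk manager, so they cannot filter out documents that still belong to the donor shard. A sketch of that loop, assuming s, db, and other are set up as in the test:

var coll = db.foo;
coll.setSlaveOk();

for (var i = 0; i < 20; i++) {
    // _waitForDelete makes the donor finish its range deletion before we read,
    // since a slaveOk query cannot filter orphaned documents on a secondary.
    assert.commandWorked(s.s0.adminCommand({
        moveChunk: 'test.foo',
        find: {_id: i * 100},
        to: other._id,
        _secondaryThrottle: true,
        writeConcern: {w: 2},
        _waitForDelete: true
    }));

    assert.eq(2100, coll.find().itcount());
}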
diff --git a/jstests/sharding/balance_tags2.js b/jstests/sharding/balance_tags2.js
index e4bf370d1cd..a7f1161d6dc 100644
--- a/jstests/sharding/balance_tags2.js
+++ b/jstests/sharding/balance_tags2.js
@@ -1,27 +1,26 @@
// Test balancing all chunks to one shard by tagging the full shard-key range on that collection
-var s = new ShardingTest(
- {name: "balance_tags2", shards: 3, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
+var s = new ShardingTest({shards: 3, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-s.adminCommand({enablesharding: "test"});
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', 'shard0001');
var db = s.getDB("test");
var bulk = db.foo.initializeUnorderedBulkOp();
-for (i = 0; i < 21; i++) {
+for (var i = 0; i < 21; i++) {
bulk.insert({_id: i, x: i});
}
assert.writeOK(bulk.execute());
-sh.shardCollection("test.foo", {_id: 1});
+assert.commandWorked(s.s0.adminCommand({shardCollection: "test.foo", key: {_id: 1}}));
-sh.stopBalancer();
+s.stopBalancer();
-for (i = 0; i < 20; i++) {
+for (var i = 0; i < 20; i++) {
sh.splitAt("test.foo", {_id: i});
}
-sh.startBalancer();
+s.startBalancer();
sh.status(true);
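Because this test actually wants balancing, it asks for it up front with other: {enableBalancer: true} and then uses the ShardingTest helpers s.stopBalancer() / s.startBalancer() rather than the sh.* shell helpers to pause it around the manual splits. A condensed sketch under those assumptions:

var s = new ShardingTest({shards: 3, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});

assert.commandWorked(s.s0.adminCommand({shardCollection: 'test.foo', key: {_id: 1}}));

s.stopBalancer();                     // pause balancing while we pre-split
for (var i = 0; i < 20; i++) {
    sh.splitAt('test.foo', {_id: i});
}
s.startBalancer();                    // let the balancer move chunks to the tagged shard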
diff --git a/jstests/sharding/explain_cmd.js b/jstests/sharding/explain_cmd.js
index 3b8a8ef1240..b4ec0db35e9 100644
--- a/jstests/sharding/explain_cmd.js
+++ b/jstests/sharding/explain_cmd.js
@@ -1,174 +1,185 @@
// Tests for the mongos explain command.
-
-// Create a cluster with 3 shards.
-var st = new ShardingTest({shards: 2});
-st.stopBalancer();
-
-var db = st.s.getDB("test");
-var explain;
-
-// Setup a collection that will be sharded. The shard key will be 'a'. There's also an index on 'b'.
-var collSharded = db.getCollection("mongos_explain_cmd");
-collSharded.drop();
-collSharded.ensureIndex({a: 1});
-collSharded.ensureIndex({b: 1});
-
-// Enable sharding.
-assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
-st.ensurePrimaryShard(db.getName(), 'shard0001');
-db.adminCommand({shardCollection: collSharded.getFullName(), key: {a: 1}});
-
-// Pre-split the collection to ensure that both shards have chunks. Explicitly
-// move chunks since the balancer is disabled.
-for (var i = 1; i <= 2; i++) {
- assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: i}}));
-
- var shardName = "shard000" + (i - 1);
- printjson(db.adminCommand({moveChunk: collSharded.getFullName(), find: {a: i}, to: shardName}));
-}
-
-// Put data on each shard.
-for (var i = 0; i < 3; i++) {
- collSharded.insert({_id: i, a: i, b: 1});
-}
-
-printjson(sh.status());
-
-// Test a scatter-gather count command.
-assert.eq(3, collSharded.count({b: 1}));
-
-// Explain the scatter-gather count.
-explain = db.runCommand(
- {explain: {count: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"});
-
-// Validate some basic properties of the result.
-printjson(explain);
-assert.commandWorked(explain);
-assert("queryPlanner" in explain);
-assert("executionStats" in explain);
-assert.eq(2, explain.queryPlanner.winningPlan.shards.length);
-assert.eq(2, explain.executionStats.executionStages.shards.length);
-
-// An explain of a command that doesn't exist should fail gracefully.
-explain = db.runCommand({
- explain: {nonexistent: collSharded.getName(), query: {b: 1}},
- verbosity: "allPlansExecution"
-});
-printjson(explain);
-assert.commandFailed(explain);
-
-// -------
-
-// Setup a collection that is not sharded.
-var collUnsharded = db.getCollection("mongos_explain_cmd_unsharded");
-collUnsharded.drop();
-collUnsharded.ensureIndex({a: 1});
-collUnsharded.ensureIndex({b: 1});
-
-for (var i = 0; i < 3; i++) {
- collUnsharded.insert({_id: i, a: i, b: 1});
-}
-assert.eq(3, collUnsharded.count({b: 1}));
-
-explain = db.runCommand({
- explain: {
- group: {
- ns: collUnsharded.getName(),
- key: "a",
- cond: "b",
- $reduce: function(curr, result) {},
- initial: {}
- }
- },
- verbosity: "allPlansExecution"
-});
-
-// Basic validation: a group command can only be passed through to an unsharded collection,
-// so we should confirm that the mongos stage is always SINGLE_SHARD.
-printjson(explain);
-assert.commandWorked(explain);
-assert("queryPlanner" in explain);
-assert("executionStats" in explain);
-assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);
-
-// The same group should fail over the sharded collection, because group is only supported
-// if it is passed through to an unsharded collection.
-explain = db.runCommand({
- explain: {
- group: {
- ns: collSharded.getName(),
- key: "a",
- cond: "b",
- $reduce: function(curr, result) {},
- initial: {}
- }
- },
- verbosity: "allPlansExecution"
-});
-printjson(explain);
-assert.commandFailed(explain);
-
-// -------
-
-// Explain a delete operation and verify that it hits all shards without the shard key
-explain = db.runCommand({
- explain: {delete: collSharded.getName(), deletes: [{q: {b: 1}, limit: 0}]},
- verbosity: "allPlansExecution"
-});
-assert.commandWorked(explain, tojson(explain));
-assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
-assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
-assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "DELETE");
-assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "DELETE");
-// Check that the deletes didn't actually happen.
-assert.eq(3, collSharded.count({b: 1}));
-
-// Explain a delete operation and verify that it hits only one shard with the shard key
-explain = db.runCommand({
- explain: {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 0}]},
- verbosity: "allPlansExecution"
-});
-assert.commandWorked(explain, tojson(explain));
-assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
-// Check that the deletes didn't actually happen.
-assert.eq(3, collSharded.count({b: 1}));
-
-// Check that we fail gracefully if we try to do an explain of a write batch that has more
-// than one operation in it.
-explain = db.runCommand({
- explain:
- {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 1}, {q: {a: 2}, limit: 1}]},
- verbosity: "allPlansExecution"
-});
-assert.commandFailed(explain, tojson(explain));
-
-// Explain a multi upsert operation and verify that it hits all shards
-explain = db.runCommand({
- explain: {update: collSharded.getName(), updates: [{q: {}, u: {$set: {b: 10}}, multi: true}]},
- verbosity: "allPlansExecution"
-});
-assert.commandWorked(explain, tojson(explain));
-assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
-assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
-assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
-assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "UPDATE");
-assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "UPDATE");
-// Check that the update didn't actually happen.
-assert.eq(0, collSharded.count({b: 10}));
-
-// Explain an upsert operation and verify that it hits only a single shard
-explain = db.runCommand({
- explain: {update: collSharded.getName(), updates: [{q: {a: 10}, u: {a: 10}, upsert: true}]},
- verbosity: "allPlansExecution"
-});
-assert.commandWorked(explain, tojson(explain));
-assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
-// Check that the upsert didn't actually happen.
-assert.eq(0, collSharded.count({a: 10}));
-
-// Explain an upsert operation which cannot be targeted, ensure an error is thrown
-explain = db.runCommand({
- explain: {update: collSharded.getName(), updates: [{q: {b: 10}, u: {b: 10}, upsert: true}]},
- verbosity: "allPlansExecution"
-});
-assert.commandFailed(explain, tojson(explain));
+(function() {
+ 'use strict';
+
+ // Create a cluster with 3 shards.
+ var st = new ShardingTest({shards: 2});
+
+ var db = st.s.getDB("test");
+ var explain;
+
+ // Setup a collection that will be sharded. The shard key will be 'a'. There's also an index on
+ // 'b'.
+ var collSharded = db.getCollection("mongos_explain_cmd");
+ collSharded.drop();
+ collSharded.ensureIndex({a: 1});
+ collSharded.ensureIndex({b: 1});
+
+ // Enable sharding.
+ assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
+ st.ensurePrimaryShard(db.getName(), 'shard0001');
+ db.adminCommand({shardCollection: collSharded.getFullName(), key: {a: 1}});
+
+ // Pre-split the collection to ensure that both shards have chunks. Explicitly
+ // move chunks since the balancer is disabled.
+ for (var i = 1; i <= 2; i++) {
+ assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: i}}));
+
+ var shardName = "shard000" + (i - 1);
+ printjson(
+ db.adminCommand({moveChunk: collSharded.getFullName(), find: {a: i}, to: shardName}));
+ }
+
+ // Put data on each shard.
+ for (var i = 0; i < 3; i++) {
+ collSharded.insert({_id: i, a: i, b: 1});
+ }
+
+ st.printShardingStatus();
+
+ // Test a scatter-gather count command.
+ assert.eq(3, collSharded.count({b: 1}));
+
+ // Explain the scatter-gather count.
+ explain = db.runCommand(
+ {explain: {count: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"});
+
+ // Validate some basic properties of the result.
+ printjson(explain);
+ assert.commandWorked(explain);
+ assert("queryPlanner" in explain);
+ assert("executionStats" in explain);
+ assert.eq(2, explain.queryPlanner.winningPlan.shards.length);
+ assert.eq(2, explain.executionStats.executionStages.shards.length);
+
+ // An explain of a command that doesn't exist should fail gracefully.
+ explain = db.runCommand({
+ explain: {nonexistent: collSharded.getName(), query: {b: 1}},
+ verbosity: "allPlansExecution"
+ });
+ printjson(explain);
+ assert.commandFailed(explain);
+
+ // -------
+
+ // Setup a collection that is not sharded.
+ var collUnsharded = db.getCollection("mongos_explain_cmd_unsharded");
+ collUnsharded.drop();
+ collUnsharded.ensureIndex({a: 1});
+ collUnsharded.ensureIndex({b: 1});
+
+ for (var i = 0; i < 3; i++) {
+ collUnsharded.insert({_id: i, a: i, b: 1});
+ }
+ assert.eq(3, collUnsharded.count({b: 1}));
+
+ explain = db.runCommand({
+ explain: {
+ group: {
+ ns: collUnsharded.getName(),
+ key: "a",
+ cond: "b",
+ $reduce: function(curr, result) {},
+ initial: {}
+ }
+ },
+ verbosity: "allPlansExecution"
+ });
+
+ // Basic validation: a group command can only be passed through to an unsharded collection,
+ // so we should confirm that the mongos stage is always SINGLE_SHARD.
+ printjson(explain);
+ assert.commandWorked(explain);
+ assert("queryPlanner" in explain);
+ assert("executionStats" in explain);
+ assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);
+
+ // The same group should fail over the sharded collection, because group is only supported
+ // if it is passed through to an unsharded collection.
+ explain = db.runCommand({
+ explain: {
+ group: {
+ ns: collSharded.getName(),
+ key: "a",
+ cond: "b",
+ $reduce: function(curr, result) {},
+ initial: {}
+ }
+ },
+ verbosity: "allPlansExecution"
+ });
+ printjson(explain);
+ assert.commandFailed(explain);
+
+ // -------
+
+ // Explain a delete operation and verify that it hits all shards without the shard key
+ explain = db.runCommand({
+ explain: {delete: collSharded.getName(), deletes: [{q: {b: 1}, limit: 0}]},
+ verbosity: "allPlansExecution"
+ });
+ assert.commandWorked(explain, tojson(explain));
+ assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
+ assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
+ assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "DELETE");
+ assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "DELETE");
+ // Check that the deletes didn't actually happen.
+ assert.eq(3, collSharded.count({b: 1}));
+
+ // Explain a delete operation and verify that it hits only one shard with the shard key
+ explain = db.runCommand({
+ explain: {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 0}]},
+ verbosity: "allPlansExecution"
+ });
+ assert.commandWorked(explain, tojson(explain));
+ assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
+ // Check that the deletes didn't actually happen.
+ assert.eq(3, collSharded.count({b: 1}));
+
+ // Check that we fail gracefully if we try to do an explain of a write batch that has more
+ // than one operation in it.
+ explain = db.runCommand({
+ explain: {
+ delete: collSharded.getName(),
+ deletes: [{q: {a: 1}, limit: 1}, {q: {a: 2}, limit: 1}]
+ },
+ verbosity: "allPlansExecution"
+ });
+ assert.commandFailed(explain, tojson(explain));
+
+ // Explain a multi upsert operation and verify that it hits all shards
+ explain = db.runCommand({
+ explain:
+ {update: collSharded.getName(), updates: [{q: {}, u: {$set: {b: 10}}, multi: true}]},
+ verbosity: "allPlansExecution"
+ });
+ assert.commandWorked(explain, tojson(explain));
+ assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
+ assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
+ assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
+ assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "UPDATE");
+ assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "UPDATE");
+ // Check that the update didn't actually happen.
+ assert.eq(0, collSharded.count({b: 10}));
+
+ // Explain an upsert operation and verify that it hits only a single shard
+ explain = db.runCommand({
+ explain:
+ {update: collSharded.getName(), updates: [{q: {a: 10}, u: {a: 10}, upsert: true}]},
+ verbosity: "allPlansExecution"
+ });
+ assert.commandWorked(explain, tojson(explain));
+ assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
+ // Check that the upsert didn't actually happen.
+ assert.eq(0, collSharded.count({a: 10}));
+
+ // Explain an upsert operation which cannot be targeted, ensure an error is thrown
+ explain = db.runCommand({
+ explain:
+ {update: collSharded.getName(), updates: [{q: {b: 10}, u: {b: 10}, upsert: true}]},
+ verbosity: "allPlansExecution"
+ });
+ assert.commandFailed(explain, tojson(explain));
+
+ st.stop();
+})();
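The write explains in this file all check the same mongos-level plan shape: a SHARD_WRITE stage fanned out to per-shard winning plans, with the number of targeted shards depending on whether the predicate includes the shard key. A small sketch of one such check, assuming collSharded is the sharded collection from the test:

// Untargeted delete: no shard key in the predicate, so both shards are hit.
var explain = db.runCommand({
    explain: {delete: collSharded.getName(), deletes: [{q: {b: 1}, limit: 0}]},
    verbosity: 'allPlansExecution'
});
assert.commandWorked(explain, tojson(explain));
assert.eq(explain.queryPlanner.winningPlan.stage, 'SHARD_WRITE');
assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);

// Explain never executes the write, so the data is untouched.
assert.eq(3, collSharded.count({b: 1}));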
diff --git a/jstests/sharding/explain_find_and_modify_sharded.js b/jstests/sharding/explain_find_and_modify_sharded.js
index 40af14f6265..e8c69adc222 100644
--- a/jstests/sharding/explain_find_and_modify_sharded.js
+++ b/jstests/sharding/explain_find_and_modify_sharded.js
@@ -9,7 +9,6 @@
// Create a cluster with 2 shards.
var st = new ShardingTest({shards: 2});
- st.stopBalancer();
var testDB = st.s.getDB('test');
var shardKey = {
@@ -85,4 +84,5 @@
assert.commandWorked(res);
assertExplainResult(res, 'executionStats', 'executionStages', 'shard0001', 'DELETE');
+ st.stop();
})();
diff --git a/jstests/sharding/hash_shard_unique_compound.js b/jstests/sharding/hash_shard_unique_compound.js
index 5d6d466c1f9..abaf45260b9 100644
--- a/jstests/sharding/hash_shard_unique_compound.js
+++ b/jstests/sharding/hash_shard_unique_compound.js
@@ -2,44 +2,42 @@
// Does 2 things and checks for consistent error:
// 1.) shard collection on hashed "a", ensure unique index {a:1, b:1}
// 2.) reverse order
+(function() {
+ 'use strict';
-var s = new ShardingTest({name: jsTestName(), shards: 1, mongos: 1, verbose: 1});
-var dbName = "test";
-var collName = "foo";
-var ns = dbName + "." + collName;
-var db = s.getDB(dbName);
-var coll = db.getCollection(collName);
+ var s = new ShardingTest({shards: 1, mongos: 1});
+ var dbName = "test";
+ var collName = "foo";
+ var ns = dbName + "." + collName;
+ var db = s.getDB(dbName);
+ var coll = db.getCollection(collName);
-// Enable sharding on DB
-var res = db.adminCommand({enablesharding: dbName});
+ // Enable sharding on DB
+ assert.commandWorked(db.adminCommand({enablesharding: dbName}));
-// for simplicity start by turning off balancer
-var res = s.stopBalancer();
+ // Shard a fresh collection using a hashed shard key
+ assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}));
-// shard a fresh collection using a hashed shard key
-coll.drop();
-assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}));
-db.printShardingStatus();
+ // Create unique index
+ assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
-// Create unique index
-assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
+ jsTest.log("------ indexes -------");
+ jsTest.log(tojson(coll.getIndexes()));
-jsTest.log("------ indexes -------");
-jsTest.log(tojson(coll.getIndexes()));
+ // Second Part
+ jsTest.log("------ dropping sharded collection to start part 2 -------");
+ coll.drop();
-// Second Part
-jsTest.log("------ dropping sharded collection to start part 2 -------");
-coll.drop();
+ // Create unique index
+ assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
-// Create unique index
-assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
+ // shard a fresh collection using a hashed shard key
+ assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}),
+ "shardcollection didn't worked 2");
-// shard a fresh collection using a hashed shard key
-assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}),
- "shardcollection didn't worked 2");
+ s.printShardingStatus();
+ jsTest.log("------ indexes 2-------");
+ jsTest.log(tojson(coll.getIndexes()));
-db.printShardingStatus();
-jsTest.log("------ indexes 2-------");
-jsTest.log(tojson(coll.getIndexes()));
-
-s.stop();
+ s.stop();
+})();
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index e11782baed2..6e6be382795 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -1,64 +1,63 @@
(function() {
+ 'use strict';
var s = new ShardingTest({name: "migrateBig", shards: 2, other: {chunkSize: 1}});
- s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true);
- s.adminCommand({enablesharding: "test"});
+ assert.writeOK(
+ s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true));
+ assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', 'shard0001');
- s.adminCommand({shardcollection: "test.foo", key: {x: 1}});
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
- db = s.getDB("test");
- coll = db.foo;
+ var db = s.getDB("test");
+ var coll = db.foo;
- big = "";
+ var big = "";
while (big.length < 10000)
big += "eliot";
var bulk = coll.initializeUnorderedBulkOp();
- for (x = 0; x < 100; x++) {
+ for (var x = 0; x < 100; x++) {
bulk.insert({x: x, big: big});
}
assert.writeOK(bulk.execute());
- db.printShardingStatus();
-
- s.adminCommand({split: "test.foo", middle: {x: 30}});
- s.adminCommand({split: "test.foo", middle: {x: 66}});
- s.adminCommand(
- {movechunk: "test.foo", find: {x: 90}, to: s.getOther(s.getServer("test")).name});
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 30}}));
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 66}}));
+ assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.foo", find: {x: 90}, to: s.getOther(s.getServer("test")).name}));
db.printShardingStatus();
print("YO : " + s.getServer("test").host);
- direct = new Mongo(s.getServer("test").host);
+ var direct = new Mongo(s.getServer("test").host);
print("direct : " + direct);
- directDB = direct.getDB("test");
+ var directDB = direct.getDB("test");
- for (done = 0; done < 2 * 1024 * 1024; done += big.length) {
+ for (var done = 0; done < 2 * 1024 * 1024; done += big.length) {
assert.writeOK(directDB.foo.insert({x: 50 + Math.random(), big: big}));
}
db.printShardingStatus();
assert.throws(function() {
- s.adminCommand(
- {movechunk: "test.foo", find: {x: 50}, to: s.getOther(s.getServer("test")).name});
+ assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.foo", find: {x: 50}, to: s.getOther(s.getServer("test")).name}));
}, [], "move should fail");
- for (i = 0; i < 20; i += 2) {
+ for (var i = 0; i < 20; i += 2) {
try {
- s.adminCommand({split: "test.foo", middle: {x: i}});
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: i}}));
} catch (e) {
- // we may have auto split on some of these
- // which is ok
+ // We may have auto split on some of these, which is ok
print(e);
}
}
db.printShardingStatus();
- s.config.settings.update({_id: "balancer"}, {$set: {stopped: false}}, true);
+ s.startBalancer();
assert.soon(function() {
var x = s.chunkDiff("foo", "test");
@@ -73,5 +72,4 @@
assert.eq(coll.count(), coll.find().itcount());
s.stop();
-
})();
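Rather than flipping the stopped flag in config.settings by hand, the test now turns the balancer back on with s.startBalancer() and waits for convergence by polling the chunk imbalance. A sketch of that tail end, assuming s and coll come from this file; the threshold and timeout below are illustrative, not the test's exact values:

s.startBalancer();

// Poll until the two shards' chunk counts are close enough.
assert.soon(function() {
    var diff = s.chunkDiff('foo', 'test');
    print('chunk diff: ' + diff);
    return diff < 2;
}, 'chunks never balanced', 5 * 60 * 1000);

assert.eq(coll.count(), coll.find().itcount());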
diff --git a/jstests/sharding/printShardingStatus.js b/jstests/sharding/printShardingStatus.js
index 05e6eca0d4f..5bfa70c2d8f 100644
--- a/jstests/sharding/printShardingStatus.js
+++ b/jstests/sharding/printShardingStatus.js
@@ -3,6 +3,7 @@
// headings and the names of sharded collections and their shard keys.
(function() {
+ 'use strict';
var st = new ShardingTest({shards: 1, mongos: 2, config: 1, other: {smallfiles: true}});
@@ -233,5 +234,4 @@
assert(mongos.getDB("test").dropDatabase());
st.stop();
-
})();
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index 6800e3f4370..290a9f79719 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -1,5 +1,4 @@
(function() {
-
// Include helpers for analyzing explain output.
load("jstests/libs/analyze_plan.js");
@@ -17,11 +16,14 @@
}
assert(sh.getBalancerState(), "A1");
- sh.setBalancerState(false);
+
+ sh.stopBalancer();
assert(!sh.getBalancerState(), "A2");
- sh.setBalancerState(true);
+
+ sh.startBalancer();
assert(sh.getBalancerState(), "A3");
- sh.setBalancerState(false);
+
+ sh.stopBalancer();
assert(!sh.getBalancerState(), "A4");
s.config.databases.find().forEach(printjson);
diff --git a/jstests/sharding/split_with_force_small.js b/jstests/sharding/split_with_force_small.js
index 0148c924993..be21049650e 100644
--- a/jstests/sharding/split_with_force_small.js
+++ b/jstests/sharding/split_with_force_small.js
@@ -1,70 +1,69 @@
//
// Tests autosplit locations with force : true, for small collections
//
+(function() {
+ 'use strict';
-var options = {
- chunkSize: 1 // MB
-};
+ var st = new ShardingTest(
+ {shards: 1, mongos: 1, other: {chunkSize: 1, mongosOptions: {noAutoSplit: ""}}});
-var st = new ShardingTest({shards: 1, mongos: 1, other: options});
-st.stopBalancer();
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var config = mongos.getDB("config");
+ var shardAdmin = st.shard0.getDB("admin");
+ var coll = mongos.getCollection("foo.bar");
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
-var config = mongos.getDB("config");
-var shardAdmin = st.shard0.getDB("admin");
-var coll = mongos.getCollection("foo.bar");
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
-assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
-assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
-assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
+ jsTest.log("Insert a bunch of data into the low chunk of a collection," +
+ " to prevent relying on stats.");
-jsTest.log("Insert a bunch of data into the low chunk of a collection," +
- " to prevent relying on stats.");
+ var data128k = "x";
+ for (var i = 0; i < 7; i++)
+ data128k += data128k;
-var data128k = "x";
-for (var i = 0; i < 7; i++)
- data128k += data128k;
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < 1024; i++) {
+ bulk.insert({_id: -(i + 1)});
+ }
+ assert.writeOK(bulk.execute());
-var bulk = coll.initializeUnorderedBulkOp();
-for (var i = 0; i < 1024; i++) {
- bulk.insert({_id: -(i + 1)});
-}
-assert.writeOK(bulk.execute());
+ jsTest.log("Insert 32 docs into the high chunk of a collection");
-jsTest.log("Insert 32 docs into the high chunk of a collection");
+ bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < 32; i++) {
+ bulk.insert({_id: i});
+ }
+ assert.writeOK(bulk.execute());
-bulk = coll.initializeUnorderedBulkOp();
-for (var i = 0; i < 32; i++) {
- bulk.insert({_id: i});
-}
-assert.writeOK(bulk.execute());
+ jsTest.log("Split off MaxKey chunk...");
-jsTest.log("Split off MaxKey chunk...");
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 32}}));
-assert(admin.runCommand({split: coll + "", middle: {_id: 32}}).ok);
+ jsTest.log("Keep splitting chunk multiple times...");
-jsTest.log("Keep splitting chunk multiple times...");
-
-st.printShardingStatus();
-
-for (var i = 0; i < 5; i++) {
- assert(admin.runCommand({split: coll + "", find: {_id: 0}}).ok);
st.printShardingStatus();
-}
-// Make sure we can't split further than 5 (2^5) times
-assert(!admin.runCommand({split: coll + "", find: {_id: 0}}).ok);
+ for (var i = 0; i < 5; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", find: {_id: 0}}));
+ st.printShardingStatus();
+ }
+
+ // Make sure we can't split further than 5 (2^5) times
+ assert.commandFailed(admin.runCommand({split: coll + "", find: {_id: 0}}));
-var chunks = config.chunks.find({'min._id': {$gte: 0, $lt: 32}}).sort({min: 1}).toArray();
-printjson(chunks);
+ var chunks = config.chunks.find({'min._id': {$gte: 0, $lt: 32}}).sort({min: 1}).toArray();
+ printjson(chunks);
-// Make sure the chunks grow by 2x (except the first)
-var nextSize = 1;
-for (var i = 0; i < chunks.size; i++) {
- assert.eq(coll.count({_id: {$gte: chunks[i].min._id, $lt: chunks[i].max._id}}), nextSize);
- if (i != 0)
- nextSize += nextSize;
-}
+ // Make sure the chunks grow by 2x (except the first)
+ var nextSize = 1;
+ for (var i = 0; i < chunks.size; i++) {
+ assert.eq(coll.count({_id: {$gte: chunks[i].min._id, $lt: chunks[i].max._id}}), nextSize);
+ if (i != 0)
+ nextSize += nextSize;
+ }
-st.stop();
+ st.stop();
+})();
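Here the stopBalancer() call is replaced at construction time: the single mongos is started with noAutoSplit so the test's manual split commands are the only splits that happen, and chunkSize moves into the other block. A sketch of just the setup, under those assumptions:

// 1 MB chunks, and no mongos-driven auto-splitting to interfere with manual splits.
var st = new ShardingTest(
    {shards: 1, mongos: 1, other: {chunkSize: 1, mongosOptions: {noAutoSplit: ""}}});

var admin = st.s0.getDB('admin');
var coll = st.s0.getCollection('foo.bar');

assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ''}));
assert.commandWorked(admin.runCommand({shardCollection: coll + '', key: {_id: 1}}));
assert.commandWorked(admin.runCommand({split: coll + '', middle: {_id: 0}}));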
diff --git a/jstests/sharding/stale_version_write.js b/jstests/sharding/stale_version_write.js
index e5885dcfa41..bd603124548 100644
--- a/jstests/sharding/stale_version_write.js
+++ b/jstests/sharding/stale_version_write.js
@@ -1,37 +1,37 @@
// Tests whether a reset sharding version triggers errors
+(function() {
+ 'use strict';
-jsTest.log("Starting sharded cluster...");
+ var st = new ShardingTest({shards: 1, mongos: 2});
-var st = new ShardingTest({shards: 1, mongos: 2, verbose: 2});
+ var mongosA = st.s0;
+ var mongosB = st.s1;
-st.stopBalancer();
+ jsTest.log("Adding new collections...");
-var mongosA = st.s0;
-var mongosB = st.s1;
+ var collA = mongosA.getCollection(jsTestName() + ".coll");
+ assert.writeOK(collA.insert({hello: "world"}));
-jsTest.log("Adding new collections...");
+ var collB = mongosB.getCollection("" + collA);
+ assert.writeOK(collB.insert({hello: "world"}));
-var collA = mongosA.getCollection(jsTestName() + ".coll");
-assert.writeOK(collA.insert({hello: "world"}));
+ jsTest.log("Enabling sharding...");
-var collB = mongosB.getCollection("" + collA);
-assert.writeOK(collB.insert({hello: "world"}));
+ assert.commandWorked(mongosA.getDB("admin").adminCommand({enableSharding: "" + collA.getDB()}));
+ assert.commandWorked(
+ mongosA.getDB("admin").adminCommand({shardCollection: "" + collA, key: {_id: 1}}));
-jsTest.log("Enabling sharding...");
+ // MongoD doesn't know about the config shard version *until* MongoS tells it
+ collA.findOne();
-printjson(mongosA.getDB("admin").runCommand({enableSharding: "" + collA.getDB()}));
-printjson(mongosA.getDB("admin").runCommand({shardCollection: "" + collA, key: {_id: 1}}));
+ jsTest.log("Trigger shard version mismatch...");
-// MongoD doesn't know about the config shard version *until* MongoS tells it
-collA.findOne();
+ assert.writeOK(collB.insert({goodbye: "world"}));
-jsTest.log("Trigger shard version mismatch...");
+ print("Inserted...");
-assert.writeOK(collB.insert({goodbye: "world"}));
+ assert.eq(3, collA.find().itcount());
+ assert.eq(3, collB.find().itcount());
-print("Inserted...");
-
-assert.eq(3, collA.find().itcount());
-assert.eq(3, collB.find().itcount());
-
-st.stop();
+ st.stop();
+})();
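The point of this test is that the second mongos keeps writing with a stale notion of the collection's sharding state until a write forces a refresh. A compressed sketch of the trigger, assuming mongosA and mongosB are the two routers from the test:

var collA = mongosA.getCollection(jsTestName() + '.coll');
var collB = mongosB.getCollection('' + collA);

assert.writeOK(collA.insert({hello: 'world'}));
assert.writeOK(collB.insert({hello: 'world'}));

// Shard through mongosA only; mongosB still thinks the collection is unsharded.
assert.commandWorked(mongosA.getDB('admin').adminCommand({enableSharding: '' + collA.getDB()}));
assert.commandWorked(
    mongosA.getDB('admin').adminCommand({shardCollection: '' + collA, key: {_id: 1}}));

// A write through the stale mongosB must still succeed once the version mismatch is resolved.
assert.writeOK(collB.insert({goodbye: 'world'}));
assert.eq(3, collA.find().itcount());
assert.eq(3, collB.find().itcount());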
diff --git a/jstests/slow2/mr_during_migrate.js b/jstests/slow2/mr_during_migrate.js
index cb439aeb241..1b3f55721f4 100644
--- a/jstests/slow2/mr_during_migrate.js
+++ b/jstests/slow2/mr_during_migrate.js
@@ -1,113 +1,112 @@
// Do parallel ops with migrates occurring
+(function() {
+ 'use strict';
-var st = new ShardingTest({shards: 10, mongos: 2, verbose: 2});
+ var st = new ShardingTest({shards: 10, mongos: 2, verbose: 2});
-jsTest.log("Doing parallel operations...");
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var coll = st.s.getCollection(jsTest.name() + ".coll");
-// Stop balancer, since it'll just get in the way of these
-st.stopBalancer();
+ var numDocs = 1024 * 1024;
+ var dataSize = 1024; // bytes, must be power of 2
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
-var coll = st.s.getCollection(jsTest.name() + ".coll");
+ var data = "x";
+ while (data.length < dataSize)
+ data += data;
-var numDocs = 1024 * 1024;
-var dataSize = 1024; // bytes, must be power of 2
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, data: data});
+ }
+ assert.writeOK(bulk.execute());
-var data = "x";
-while (data.length < dataSize)
- data += data;
+ // Make sure everything got inserted
+ assert.eq(numDocs, coll.find().itcount());
-var bulk = coll.initializeUnorderedBulkOp();
-for (var i = 0; i < numDocs; i++) {
- bulk.insert({_id: i, data: data});
-}
-assert.writeOK(bulk.execute());
+ jsTest.log("Inserted " + sh._dataFormat(dataSize * numDocs) + " of data.");
-// Make sure everything got inserted
-assert.eq(numDocs, coll.find().itcount());
+ // Shard collection
+ st.shardColl(coll, {_id: 1}, false);
-jsTest.log("Inserted " + sh._dataFormat(dataSize * numDocs) + " of data.");
+ st.printShardingStatus();
-// Shard collection
-st.shardColl(coll, {_id: 1}, false);
+ jsTest.log("Sharded collection now initialized, starting migrations...");
-st.printShardingStatus();
-
-jsTest.log("Sharded collection now initialized, starting migrations...");
+ var checkMigrate = function() {
+ print("Result of migrate : ");
+ printjson(this);
+ };
-var checkMigrate = function() {
- print("Result of migrate : ");
- printjson(this);
-};
+ // Creates a number of migrations of random chunks to diff shard servers
+ var ops = [];
+ for (var i = 0; i < st._connections.length; i++) {
+ ops.push({
+ op: "command",
+ ns: "admin",
+ command: {
+ moveChunk: "" + coll,
+ find: {_id: {"#RAND_INT": [0, numDocs]}},
+ to: st._connections[i].shardName,
+ _waitForDelete: true
+ },
+ showResult: true
+ });
+ }
-// Creates a number of migrations of random chunks to diff shard servers
-var ops = [];
-for (var i = 0; i < st._connections.length; i++) {
- ops.push({
- op: "command",
- ns: "admin",
- command: {
- moveChunk: "" + coll,
- find: {_id: {"#RAND_INT": [0, numDocs]}},
- to: st._connections[i].shardName,
- _waitForDelete: true
- },
- showResult: true
- });
-}
+ // TODO: Also migrate output collection
-// TODO: Also migrate output collection
+ jsTest.log("Starting migrations now...");
-jsTest.log("Starting migrations now...");
+ var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});
-var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});
+ //#######################
+ // Tests during migration
-//#######################
-// Tests during migration
+ var numTests = 5;
-var numTests = 5;
+ for (var t = 0; t < numTests; t++) {
+ jsTest.log("Test #" + t);
-for (var t = 0; t < numTests; t++) {
- jsTest.log("Test #" + t);
+ var mongos = st.s1; // use other mongos so we get stale shard versions
+ var coll = mongos.getCollection(coll + "");
+ var outputColl = mongos.getCollection(coll + "_output");
- var mongos = st.s1; // use other mongos so we get stale shard versions
- var coll = mongos.getCollection(coll + "");
- var outputColl = mongos.getCollection(coll + "_output");
+ var numTypes = 32;
+ var map = function() {
+ emit(this._id % 32 /* must be hardcoded */, {c: 1});
+ };
- var numTypes = 32;
- var map = function() {
- emit(this._id % 32 /* must be hardcoded */, {c: 1});
- };
- var reduce = function(k, vals) {
- var total = 0;
- for (var i = 0; i < vals.length; i++)
- total += vals[i].c;
- return {
- c: total
+ var reduce = function(k, vals) {
+ var total = 0;
+ for (var i = 0; i < vals.length; i++)
+ total += vals[i].c;
+ return {
+ c: total
+ };
};
- };
- printjson(coll.find({_id: 0}).itcount());
+ printjson(coll.find({_id: 0}).itcount());
- jsTest.log("Starting new mapReduce run #" + t);
+ jsTest.log("Starting new mapReduce run #" + t);
- // assert.eq( coll.find().itcount(), numDocs )
+ // assert.eq( coll.find().itcount(), numDocs )
- coll.getMongo().getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
+ coll.getMongo().getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
- printjson(coll.mapReduce(
- map, reduce, {out: {replace: outputColl.getName(), db: outputColl.getDB() + ""}}));
+ printjson(coll.mapReduce(
+ map, reduce, {out: {replace: outputColl.getName(), db: outputColl.getDB() + ""}}));
- jsTest.log("MapReduce run #" + t + " finished.");
+ jsTest.log("MapReduce run #" + t + " finished.");
- assert.eq(outputColl.find().itcount(), numTypes);
+ assert.eq(outputColl.find().itcount(), numTypes);
- outputColl.find().forEach(function(x) {
- assert.eq(x.value.c, numDocs / numTypes);
- });
-}
+ outputColl.find().forEach(function(x) {
+ assert.eq(x.value.c, numDocs / numTypes);
+ });
+ }
-printjson(benchFinish(bid));
+ printjson(benchFinish(bid));
-st.stop();
+ st.stop();
+})();
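The concurrent load in this test comes from benchStart: an ops array of moveChunk commands with randomized find points keeps chunks moving while the foreground mapReduce runs. A trimmed sketch of how that background load is wired up, assuming st, coll, and numDocs from the test:

// One moveChunk op per shard, each targeting a random _id in the collection.
var ops = [];
for (var i = 0; i < st._connections.length; i++) {
    ops.push({
        op: 'command',
        ns: 'admin',
        command: {
            moveChunk: '' + coll,
            find: {_id: {'#RAND_INT': [0, numDocs]}},
            to: st._connections[i].shardName,
            _waitForDelete: true
        },
        showResult: true
    });
}

// Run the migrations in the background while the mapReduce tests execute...
var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});

// ...and collect the results once the foreground tests are done.
printjson(benchFinish(bid));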
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 34efb20cf88..cf8d04827a6 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -75,9 +75,9 @@ env.Library(
'$BUILD_DIR/mongo/s/catalog/forwarding_catalog_manager',
'$BUILD_DIR/mongo/s/catalog/replset/catalog_manager_replica_set',
'$BUILD_DIR/mongo/s/coreshard',
- '$BUILD_DIR/mongo/s/mongoscore',
'$BUILD_DIR/mongo/util/clock_source_mock',
'$BUILD_DIR/mongo/util/net/message_port_mock',
+ 'mongoscore',
],
LIBDEPS_TAGS=[
# Depends on coreshard, but that would be circular
diff --git a/src/mongo/s/catalog/catalog_manager.h b/src/mongo/s/catalog/catalog_manager.h
index 5257f53f7db..2f75ba2da69 100644
--- a/src/mongo/s/catalog/catalog_manager.h
+++ b/src/mongo/s/catalog/catalog_manager.h
@@ -451,9 +451,6 @@ public:
*/
virtual bool isMetadataConsistentFromLastCheck(OperationContext* txn) = 0;
-protected:
- CatalogManager() = default;
-
/**
* Obtains a reference to the distributed lock manager instance to use for synchronizing
* system-wide changes.
@@ -462,6 +459,9 @@ protected:
* be cached.
*/
virtual DistLockManager* getDistLockManager() = 0;
+
+protected:
+ CatalogManager() = default;
};
} // namespace mongo