author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2016-06-06 10:45:06 +0300
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2016-06-09 13:13:05 -0400
commit     2477b8c33b2e8f26fcde47c38c19c3fbb8b99839 (patch)
tree       da07b93547289dd370ba02434b0e82551dca9463 /jstests/sharding
parent     3f7dce2ea7a4692380e04d09da89388c23133635 (diff)
download   mongo-2477b8c33b2e8f26fcde47c38c19c3fbb8b99839.tar.gz
SERVER-22512 Remove unnecessary calls to stopBalancer
Diffstat (limited to 'jstests/sharding')
-rw-r--r--  jstests/sharding/balance_repl.js                        22
-rw-r--r--  jstests/sharding/balance_tags2.js                       15
-rw-r--r--  jstests/sharding/cursor_timeout.js                      86
-rw-r--r--  jstests/sharding/explain_cmd.js                        353
-rw-r--r--  jstests/sharding/explain_find_and_modify_sharded.js      2
-rw-r--r--  jstests/sharding/hash_shard_unique_compound.js          60
-rw-r--r--  jstests/sharding/migrateBig.js                          49
-rw-r--r--  jstests/sharding/printShardingStatus.js                  2
-rw-r--r--  jstests/sharding/shard3.js                              10
-rw-r--r--  jstests/sharding/split_with_force_small.js             100
-rw-r--r--  jstests/sharding/stale_version_write.js                 46
11 files changed, 421 insertions(+), 324 deletions(-)
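
The balancer-control pattern this patch converges on, shown as a minimal sketch (the "test.foo" namespace is a placeholder; the helpers are the same ShardingTest and assert.commandWorked calls used in the hunks below):

    // Minimal sketch, not taken verbatim from any one test in this patch.
    var st = new ShardingTest({shards: 2, other: {enableBalancer: true}});

    assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
    assert.commandWorked(st.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));

    // Pause the balancer only around manual chunk operations, using the
    // ShardingTest helpers rather than sh.stopBalancer() or direct
    // config.settings updates.
    st.stopBalancer();
    assert.commandWorked(st.s0.adminCommand({split: "test.foo", middle: {_id: 0}}));
    st.startBalancer();

    st.stop();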
diff --git a/jstests/sharding/balance_repl.js b/jstests/sharding/balance_repl.js
index 39c28b46448..a3c9eefdca8 100644
--- a/jstests/sharding/balance_repl.js
+++ b/jstests/sharding/balance_repl.js
@@ -3,7 +3,7 @@
//
(function() {
- "use strict";
+ 'use strict';
// The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
// from stepping down during migrations on slow evergreen builders.
@@ -33,25 +33,27 @@
}
assert.writeOK(bulk.execute());
- s.adminCommand({enablesharding: "test"});
+ assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', 'test-rs0');
- s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- for (i = 0; i < 20; i++)
- s.adminCommand({split: "test.foo", middle: {_id: i * 100}});
+ for (i = 0; i < 20; i++) {
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {_id: i * 100}}));
+ }
assert.eq(2100, db.foo.find().itcount());
+
var coll = db.foo;
coll.setSlaveOk();
+ assert.eq(2100, coll.find().itcount());
var dbPrimaryShardId = s.getPrimaryShardIdForDatabase("test");
var other = s.config.shards.findOne({_id: {$ne: dbPrimaryShardId}});
for (i = 0; i < 20; i++) {
- // Needs to waitForDelete because we'll be performing a slaveOk query,
- // and secondaries don't have a chunk manager so it doesn't know how to
- // filter out docs it doesn't own.
- assert(s.adminCommand({
+ // Needs to waitForDelete because we'll be performing a slaveOk query, and secondaries don't
+ // have a chunk manager so it doesn't know how to filter out docs it doesn't own.
+ assert.commandWorked(s.s0.adminCommand({
moveChunk: "test.foo",
find: {_id: i * 100},
to: other._id,
@@ -59,9 +61,9 @@
writeConcern: {w: 2},
_waitForDelete: true
}));
+
assert.eq(2100, coll.find().itcount());
}
s.stop();
-
}());
diff --git a/jstests/sharding/balance_tags2.js b/jstests/sharding/balance_tags2.js
index 8c54b2f3fc6..58ce0fa5ccc 100644
--- a/jstests/sharding/balance_tags2.js
+++ b/jstests/sharding/balance_tags2.js
@@ -1,27 +1,26 @@
// Test balancing all chunks to one shard by tagging the full shard-key range on that collection
-var s = new ShardingTest(
- {name: "balance_tags2", shards: 3, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
+var s = new ShardingTest({shards: 3, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-s.adminCommand({enablesharding: "test"});
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', 'shard0001');
var db = s.getDB("test");
var bulk = db.foo.initializeUnorderedBulkOp();
-for (i = 0; i < 21; i++) {
+for (var i = 0; i < 21; i++) {
bulk.insert({_id: i, x: i});
}
assert.writeOK(bulk.execute());
-sh.shardCollection("test.foo", {_id: 1});
+assert.commandWorked(s.s0.adminCommand({shardCollection: "test.foo", key: {_id: 1}}));
-sh.stopBalancer();
+s.stopBalancer();
-for (i = 0; i < 20; i++) {
+for (var i = 0; i < 20; i++) {
sh.splitAt("test.foo", {_id: i});
}
-sh.startBalancer();
+s.startBalancer();
s.printShardingStatus(true);
diff --git a/jstests/sharding/cursor_timeout.js b/jstests/sharding/cursor_timeout.js
new file mode 100644
index 00000000000..cea17d93dcc
--- /dev/null
+++ b/jstests/sharding/cursor_timeout.js
@@ -0,0 +1,86 @@
+// Basic integration tests for the background job that periodically kills idle cursors, in both
+// mongod and mongos. This test creates the following four cursors:
+//
+// 1. A no-timeout cursor through mongos.
+// 2. A no-timeout cursor through mongod.
+// 3. A normal cursor through mongos.
+// 4. A normal cursor through mongod.
+//
+// After a period of inactivity, the test asserts that cursors #1 and #2 are still alive, and that
+// #3 and #4 have been killed.
+(function() {
+ 'use strict';
+
+ var st = new ShardingTest({
+ shards: 2,
+ other: {
+ chunkSize: 1,
+ shardOptions: {setParameter: "cursorTimeoutMillis=1000"},
+ mongosOptions: {setParameter: "cursorTimeoutMillis=1000"}
+ }
+ });
+
+ var adminDB = st.admin;
+ var configDB = st.config;
+ var coll = st.s.getDB('test').user;
+
+ assert.commandWorked(adminDB.runCommand({enableSharding: coll.getDB().getName()}));
+ st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
+ assert.commandWorked(adminDB.runCommand({shardCollection: coll.getFullName(), key: {x: 1}}));
+
+ var data = 'c';
+ for (var x = 0; x < 18; x++) {
+ data += data;
+ }
+
+ for (x = 0; x < 200; x++) {
+ coll.insert({x: x, v: data});
+ }
+
+ var chunkDoc = configDB.chunks.findOne();
+ var chunkOwner = chunkDoc.shard;
+ var toShard = configDB.shards.findOne({_id: {$ne: chunkOwner}})._id;
+ var cmd =
+ {moveChunk: coll.getFullName(), find: chunkDoc.min, to: toShard, _waitForDelete: true};
+ var res = adminDB.runCommand(cmd);
+
+ jsTest.log('move result: ' + tojson(res));
+
+ var shardedCursorWithTimeout = coll.find();
+ var shardedCursorWithNoTimeout = coll.find();
+ shardedCursorWithNoTimeout.addOption(DBQuery.Option.noTimeout);
+
+ // Query directly to mongod
+ var shardHost = configDB.shards.findOne({_id: chunkOwner}).host;
+ var mongod = new Mongo(shardHost);
+ var shardColl = mongod.getCollection(coll.getFullName());
+
+ var cursorWithTimeout = shardColl.find();
+ var cursorWithNoTimeout = shardColl.find();
+ cursorWithNoTimeout.addOption(DBQuery.Option.noTimeout);
+
+ shardedCursorWithTimeout.next();
+ shardedCursorWithNoTimeout.next();
+
+ cursorWithTimeout.next();
+ cursorWithNoTimeout.next();
+
+ // Wait until the idle cursor background job has killed the cursors that do not have the "no
+ // timeout" flag set. We use the "cursorTimeoutMillis" setParameter above to reduce the amount
+ // of time we need to wait here.
+ sleep(5000);
+
+ assert.throws(function() {
+ shardedCursorWithTimeout.itcount();
+ });
+ assert.throws(function() {
+ cursorWithTimeout.itcount();
+ });
+
+ // +1 because we already advanced once
+ assert.eq(coll.count(), shardedCursorWithNoTimeout.itcount() + 1);
+
+ assert.eq(shardColl.count(), cursorWithNoTimeout.itcount() + 1);
+
+ st.stop();
+})();
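
For reference, the no-timeout mechanism the new test exercises, as a minimal sketch (assumes a connection `conn` to a server started with --setParameter cursorTimeoutMillis=1000, as in the test above, and a populated test.user collection):

    // Sketch only; mirrors the cursor calls used in cursor_timeout.js above.
    var coll = conn.getDB("test").user;

    var normalCursor = coll.find();                       // subject to the idle-cursor job
    var immortalCursor = coll.find();
    immortalCursor.addOption(DBQuery.Option.noTimeout);   // exempt from the idle-cursor job

    normalCursor.next();       // open both cursors on the server
    immortalCursor.next();

    sleep(5000);               // well past cursorTimeoutMillis, so the job has run

    assert.throws(function() { normalCursor.itcount(); });    // killed while idle
    assert.eq(coll.count(), immortalCursor.itcount() + 1);    // +1: already advanced once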
diff --git a/jstests/sharding/explain_cmd.js b/jstests/sharding/explain_cmd.js
index c638fccbced..3293c167db1 100644
--- a/jstests/sharding/explain_cmd.js
+++ b/jstests/sharding/explain_cmd.js
@@ -1,172 +1,183 @@
// Tests for the mongos explain command.
-
-// Create a cluster with 3 shards.
-var st = new ShardingTest({shards: 2});
-st.stopBalancer();
-
-var db = st.s.getDB("test");
-var explain;
-
-// Setup a collection that will be sharded. The shard key will be 'a'. There's also an index on 'b'.
-var collSharded = db.getCollection("mongos_explain_cmd");
-collSharded.drop();
-collSharded.ensureIndex({a: 1});
-collSharded.ensureIndex({b: 1});
-
-// Enable sharding.
-assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
-st.ensurePrimaryShard(db.getName(), 'shard0001');
-db.adminCommand({shardCollection: collSharded.getFullName(), key: {a: 1}});
-
-// Pre-split the collection to ensure that both shards have chunks. Explicitly
-// move chunks since the balancer is disabled.
-for (var i = 1; i <= 2; i++) {
- assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: i}}));
-
- var shardName = "shard000" + (i - 1);
- printjson(db.adminCommand({moveChunk: collSharded.getFullName(), find: {a: i}, to: shardName}));
-}
-
-// Put data on each shard.
-for (var i = 0; i < 3; i++) {
- collSharded.insert({_id: i, a: i, b: 1});
-}
-
-st.printShardingStatus();
-
-// Test a scatter-gather count command.
-assert.eq(3, collSharded.count({b: 1}));
-
-// Explain the scatter-gather count.
-explain = db.runCommand(
- {explain: {count: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"});
-
-// Validate some basic properties of the result.
-printjson(explain);
-assert.commandWorked(explain);
-assert("queryPlanner" in explain);
-assert("executionStats" in explain);
-assert.eq(2, explain.queryPlanner.winningPlan.shards.length);
-assert.eq(2, explain.executionStats.executionStages.shards.length);
-
-// An explain of a command that doesn't exist should fail gracefully.
-explain = db.runCommand(
- {explain: {nonexistent: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"});
-printjson(explain);
-assert.commandFailed(explain);
-
-// -------
-
-// Setup a collection that is not sharded.
-var collUnsharded = db.getCollection("mongos_explain_cmd_unsharded");
-collUnsharded.drop();
-collUnsharded.ensureIndex({a: 1});
-collUnsharded.ensureIndex({b: 1});
-
-for (var i = 0; i < 3; i++) {
- collUnsharded.insert({_id: i, a: i, b: 1});
-}
-assert.eq(3, collUnsharded.count({b: 1}));
-
-explain = db.runCommand({
- explain: {
- group: {
- ns: collUnsharded.getName(),
- key: "a",
- cond: "b",
- $reduce: function(curr, result) {},
- initial: {}
- }
- },
- verbosity: "allPlansExecution"
-});
-
-// Basic validation: a group command can only be passed through to an unsharded collection,
-// so we should confirm that the mongos stage is always SINGLE_SHARD.
-printjson(explain);
-assert.commandWorked(explain);
-assert("queryPlanner" in explain);
-assert("executionStats" in explain);
-assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);
-
-// The same group should fail over the sharded collection, because group is only supported
-// if it is passed through to an unsharded collection.
-explain = db.runCommand({
- explain: {
- group: {
- ns: collSharded.getName(),
- key: "a",
- cond: "b",
- $reduce: function(curr, result) {},
- initial: {}
- }
- },
- verbosity: "allPlansExecution"
-});
-printjson(explain);
-assert.commandFailed(explain);
-
-// -------
-
-// Explain a delete operation and verify that it hits all shards without the shard key
-explain = db.runCommand({
- explain: {delete: collSharded.getName(), deletes: [{q: {b: 1}, limit: 0}]},
- verbosity: "allPlansExecution"
-});
-assert.commandWorked(explain, tojson(explain));
-assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
-assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
-assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "DELETE");
-assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "DELETE");
-// Check that the deletes didn't actually happen.
-assert.eq(3, collSharded.count({b: 1}));
-
-// Explain a delete operation and verify that it hits only one shard with the shard key
-explain = db.runCommand({
- explain: {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 0}]},
- verbosity: "allPlansExecution"
-});
-assert.commandWorked(explain, tojson(explain));
-assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
-// Check that the deletes didn't actually happen.
-assert.eq(3, collSharded.count({b: 1}));
-
-// Check that we fail gracefully if we try to do an explain of a write batch that has more
-// than one operation in it.
-explain = db.runCommand({
- explain:
- {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 1}, {q: {a: 2}, limit: 1}]},
- verbosity: "allPlansExecution"
-});
-assert.commandFailed(explain, tojson(explain));
-
-// Explain a multi upsert operation and verify that it hits all shards
-explain = db.runCommand({
- explain: {update: collSharded.getName(), updates: [{q: {}, u: {$set: {b: 10}}, multi: true}]},
- verbosity: "allPlansExecution"
-});
-assert.commandWorked(explain, tojson(explain));
-assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
-assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
-assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
-assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "UPDATE");
-assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "UPDATE");
-// Check that the update didn't actually happen.
-assert.eq(0, collSharded.count({b: 10}));
-
-// Explain an upsert operation and verify that it hits only a single shard
-explain = db.runCommand({
- explain: {update: collSharded.getName(), updates: [{q: {a: 10}, u: {a: 10}, upsert: true}]},
- verbosity: "allPlansExecution"
-});
-assert.commandWorked(explain, tojson(explain));
-assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
-// Check that the upsert didn't actually happen.
-assert.eq(0, collSharded.count({a: 10}));
-
-// Explain an upsert operation which cannot be targeted, ensure an error is thrown
-explain = db.runCommand({
- explain: {update: collSharded.getName(), updates: [{q: {b: 10}, u: {b: 10}, upsert: true}]},
- verbosity: "allPlansExecution"
-});
-assert.commandFailed(explain, tojson(explain));
+(function() {
+ 'use strict';
+
+ // Create a cluster with 3 shards.
+ var st = new ShardingTest({shards: 2});
+
+ var db = st.s.getDB("test");
+ var explain;
+
+ // Setup a collection that will be sharded. The shard key will be 'a'. There's also an index on
+ // 'b'.
+ var collSharded = db.getCollection("mongos_explain_cmd");
+ collSharded.drop();
+ collSharded.ensureIndex({a: 1});
+ collSharded.ensureIndex({b: 1});
+
+ // Enable sharding.
+ assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
+ st.ensurePrimaryShard(db.getName(), 'shard0001');
+ db.adminCommand({shardCollection: collSharded.getFullName(), key: {a: 1}});
+
+ // Pre-split the collection to ensure that both shards have chunks. Explicitly
+ // move chunks since the balancer is disabled.
+ for (var i = 1; i <= 2; i++) {
+ assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: i}}));
+
+ var shardName = "shard000" + (i - 1);
+ printjson(
+ db.adminCommand({moveChunk: collSharded.getFullName(), find: {a: i}, to: shardName}));
+ }
+
+ // Put data on each shard.
+ for (var i = 0; i < 3; i++) {
+ collSharded.insert({_id: i, a: i, b: 1});
+ }
+
+ st.printShardingStatus();
+
+ // Test a scatter-gather count command.
+ assert.eq(3, collSharded.count({b: 1}));
+
+ // Explain the scatter-gather count.
+ explain = db.runCommand(
+ {explain: {count: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"});
+
+ // Validate some basic properties of the result.
+ printjson(explain);
+ assert.commandWorked(explain);
+ assert("queryPlanner" in explain);
+ assert("executionStats" in explain);
+ assert.eq(2, explain.queryPlanner.winningPlan.shards.length);
+ assert.eq(2, explain.executionStats.executionStages.shards.length);
+
+ // An explain of a command that doesn't exist should fail gracefully.
+ explain = db.runCommand({
+ explain: {nonexistent: collSharded.getName(), query: {b: 1}},
+ verbosity: "allPlansExecution"
+ });
+ printjson(explain);
+ assert.commandFailed(explain);
+
+ // -------
+
+ // Setup a collection that is not sharded.
+ var collUnsharded = db.getCollection("mongos_explain_cmd_unsharded");
+ collUnsharded.drop();
+ collUnsharded.ensureIndex({a: 1});
+ collUnsharded.ensureIndex({b: 1});
+
+ for (var i = 0; i < 3; i++) {
+ collUnsharded.insert({_id: i, a: i, b: 1});
+ }
+ assert.eq(3, collUnsharded.count({b: 1}));
+
+ explain = db.runCommand({
+ explain: {
+ group: {
+ ns: collUnsharded.getName(),
+ key: "a",
+ cond: "b",
+ $reduce: function(curr, result) {},
+ initial: {}
+ }
+ },
+ verbosity: "allPlansExecution"
+ });
+
+ // Basic validation: a group command can only be passed through to an unsharded collection,
+ // so we should confirm that the mongos stage is always SINGLE_SHARD.
+ printjson(explain);
+ assert.commandWorked(explain);
+ assert("queryPlanner" in explain);
+ assert("executionStats" in explain);
+ assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);
+
+ // The same group should fail over the sharded collection, because group is only supported
+ // if it is passed through to an unsharded collection.
+ explain = db.runCommand({
+ explain: {
+ group: {
+ ns: collSharded.getName(),
+ key: "a",
+ cond: "b",
+ $reduce: function(curr, result) {},
+ initial: {}
+ }
+ },
+ verbosity: "allPlansExecution"
+ });
+ printjson(explain);
+ assert.commandFailed(explain);
+
+ // -------
+
+ // Explain a delete operation and verify that it hits all shards without the shard key
+ explain = db.runCommand({
+ explain: {delete: collSharded.getName(), deletes: [{q: {b: 1}, limit: 0}]},
+ verbosity: "allPlansExecution"
+ });
+ assert.commandWorked(explain, tojson(explain));
+ assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
+ assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
+ assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "DELETE");
+ assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "DELETE");
+ // Check that the deletes didn't actually happen.
+ assert.eq(3, collSharded.count({b: 1}));
+
+ // Explain a delete operation and verify that it hits only one shard with the shard key
+ explain = db.runCommand({
+ explain: {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 0}]},
+ verbosity: "allPlansExecution"
+ });
+ assert.commandWorked(explain, tojson(explain));
+ assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
+ // Check that the deletes didn't actually happen.
+ assert.eq(3, collSharded.count({b: 1}));
+
+ // Check that we fail gracefully if we try to do an explain of a write batch that has more
+ // than one operation in it.
+ explain = db.runCommand({
+ explain: {
+ delete: collSharded.getName(),
+ deletes: [{q: {a: 1}, limit: 1}, {q: {a: 2}, limit: 1}]
+ },
+ verbosity: "allPlansExecution"
+ });
+ assert.commandFailed(explain, tojson(explain));
+
+ // Explain a multi upsert operation and verify that it hits all shards
+ explain = db.runCommand({
+ explain:
+ {update: collSharded.getName(), updates: [{q: {}, u: {$set: {b: 10}}, multi: true}]},
+ verbosity: "allPlansExecution"
+ });
+ assert.commandWorked(explain, tojson(explain));
+ assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
+ assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
+ assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
+ assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "UPDATE");
+ assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "UPDATE");
+ // Check that the update didn't actually happen.
+ assert.eq(0, collSharded.count({b: 10}));
+
+ // Explain an upsert operation and verify that it hits only a single shard
+ explain = db.runCommand({
+ explain: {update: collSharded.getName(), updates: [{q: {a: 10}, u: {a: 10}, upsert: true}]},
+ verbosity: "allPlansExecution"
+ });
+ assert.commandWorked(explain, tojson(explain));
+ assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
+ // Check that the upsert didn't actually happen.
+ assert.eq(0, collSharded.count({a: 10}));
+
+ // Explain an upsert operation which cannot be targeted, ensure an error is thrown
+ explain = db.runCommand({
+ explain: {update: collSharded.getName(), updates: [{q: {b: 10}, u: {b: 10}, upsert: true}]},
+ verbosity: "allPlansExecution"
+ });
+ assert.commandFailed(explain, tojson(explain));
+
+ st.stop();
+})();
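
A small hedged helper for reading these explain results (the helper name is the editor's, not part of the commit); it assumes the `db` and `collSharded` variables from the test above and touches only fields the existing assertions already read:

    // Count how many shards a mongos explain targeted.
    function numShardsTargeted(explainResult) {
        assert.commandWorked(explainResult);
        return explainResult.queryPlanner.winningPlan.shards.length;
    }

    // Example: a delete without the shard key is scatter-gather and hits both shards.
    var res = db.runCommand({
        explain: {delete: collSharded.getName(), deletes: [{q: {b: 1}, limit: 0}]},
        verbosity: "allPlansExecution"
    });
    assert.eq(2, numShardsTargeted(res));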
diff --git a/jstests/sharding/explain_find_and_modify_sharded.js b/jstests/sharding/explain_find_and_modify_sharded.js
index 62ffa2d35f8..b4bb1a5ccb9 100644
--- a/jstests/sharding/explain_find_and_modify_sharded.js
+++ b/jstests/sharding/explain_find_and_modify_sharded.js
@@ -9,7 +9,6 @@
// Create a cluster with 2 shards.
var st = new ShardingTest({shards: 2});
- st.stopBalancer();
var testDB = st.s.getDB('test');
var shardKey = {a: 1};
@@ -84,4 +83,5 @@
assert.commandWorked(res);
assertExplainResult(res, 'executionStats', 'executionStages', 'shard0001', 'DELETE');
+ st.stop();
})();
diff --git a/jstests/sharding/hash_shard_unique_compound.js b/jstests/sharding/hash_shard_unique_compound.js
index 3d82c2452b5..abaf45260b9 100644
--- a/jstests/sharding/hash_shard_unique_compound.js
+++ b/jstests/sharding/hash_shard_unique_compound.js
@@ -2,44 +2,42 @@
// Does 2 things and checks for consistent error:
// 1.) shard collection on hashed "a", ensure unique index {a:1, b:1}
// 2.) reverse order
+(function() {
+ 'use strict';
-var s = new ShardingTest({name: jsTestName(), shards: 1, mongos: 1, verbose: 1});
-var dbName = "test";
-var collName = "foo";
-var ns = dbName + "." + collName;
-var db = s.getDB(dbName);
-var coll = db.getCollection(collName);
+ var s = new ShardingTest({shards: 1, mongos: 1});
+ var dbName = "test";
+ var collName = "foo";
+ var ns = dbName + "." + collName;
+ var db = s.getDB(dbName);
+ var coll = db.getCollection(collName);
-// Enable sharding on DB
-var res = db.adminCommand({enablesharding: dbName});
+ // Enable sharding on DB
+ assert.commandWorked(db.adminCommand({enablesharding: dbName}));
-// for simplicity start by turning off balancer
-var res = s.stopBalancer();
+ // Shard a fresh collection using a hashed shard key
+ assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}));
-// shard a fresh collection using a hashed shard key
-coll.drop();
-assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}));
-s.printShardingStatus();
+ // Create unique index
+ assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
-// Create unique index
-assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
+ jsTest.log("------ indexes -------");
+ jsTest.log(tojson(coll.getIndexes()));
-jsTest.log("------ indexes -------");
-jsTest.log(tojson(coll.getIndexes()));
+ // Second Part
+ jsTest.log("------ dropping sharded collection to start part 2 -------");
+ coll.drop();
-// Second Part
-jsTest.log("------ dropping sharded collection to start part 2 -------");
-coll.drop();
+ // Create unique index
+ assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
-// Create unique index
-assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
+ // shard a fresh collection using a hashed shard key
+ assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}),
+ "shardcollection didn't worked 2");
-// shard a fresh collection using a hashed shard key
-assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}),
- "shardcollection didn't worked 2");
+ s.printShardingStatus();
+ jsTest.log("------ indexes 2-------");
+ jsTest.log(tojson(coll.getIndexes()));
-s.printShardingStatus();
-jsTest.log("------ indexes 2-------");
-jsTest.log(tojson(coll.getIndexes()));
-
-s.stop();
+ s.stop();
+})();
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index 01260123b67..6166682bd83 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -1,64 +1,66 @@
(function() {
+ 'use strict';
var s = new ShardingTest({name: "migrateBig", shards: 2, other: {chunkSize: 1}});
- s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true);
- s.adminCommand({enablesharding: "test"});
+ assert.writeOK(
+ s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true));
+ assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', 'shard0001');
- s.adminCommand({shardcollection: "test.foo", key: {x: 1}});
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
- db = s.getDB("test");
- coll = db.foo;
+ var db = s.getDB("test");
+ var coll = db.foo;
- big = "";
+ var big = "";
while (big.length < 10000)
big += "eliot";
var bulk = coll.initializeUnorderedBulkOp();
- for (x = 0; x < 100; x++) {
+ for (var x = 0; x < 100; x++) {
bulk.insert({x: x, big: big});
}
assert.writeOK(bulk.execute());
- s.printShardingStatus();
-
- s.adminCommand({split: "test.foo", middle: {x: 30}});
- s.adminCommand({split: "test.foo", middle: {x: 66}});
- s.adminCommand(
- {movechunk: "test.foo", find: {x: 90}, to: s.getOther(s.getPrimaryShard("test")).name});
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 30}}));
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 66}}));
+ assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.foo", find: {x: 90}, to: s.getOther(s.getPrimaryShard("test")).name}));
s.printShardingStatus();
print("YO : " + s.getPrimaryShard("test").host);
- direct = new Mongo(s.getPrimaryShard("test").host);
+ var direct = new Mongo(s.getPrimaryShard("test").host);
print("direct : " + direct);
- directDB = direct.getDB("test");
+ var directDB = direct.getDB("test");
- for (done = 0; done < 2 * 1024 * 1024; done += big.length) {
+ for (var done = 0; done < 2 * 1024 * 1024; done += big.length) {
assert.writeOK(directDB.foo.insert({x: 50 + Math.random(), big: big}));
}
s.printShardingStatus();
assert.throws(function() {
- s.adminCommand(
- {movechunk: "test.foo", find: {x: 50}, to: s.getOther(s.getPrimaryShard("test")).name});
+ assert.commandWorked(s.s0.adminCommand({
+ movechunk: "test.foo",
+ find: {x: 50},
+ to: s.getOther(s.getPrimaryShard("test")).name
+ }));
}, [], "move should fail");
- for (i = 0; i < 20; i += 2) {
+ for (var i = 0; i < 20; i += 2) {
try {
- s.adminCommand({split: "test.foo", middle: {x: i}});
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: i}}));
} catch (e) {
- // we may have auto split on some of these
- // which is ok
+ // We may have auto split on some of these, which is ok
print(e);
}
}
s.printShardingStatus();
- s.config.settings.update({_id: "balancer"}, {$set: {stopped: false}}, true);
+ s.startBalancer();
assert.soon(function() {
var x = s.chunkDiff("foo", "test");
@@ -73,5 +75,4 @@
assert.eq(coll.count(), coll.find().itcount());
s.stop();
-
})();
diff --git a/jstests/sharding/printShardingStatus.js b/jstests/sharding/printShardingStatus.js
index 63b5ef3090c..798338c39c0 100644
--- a/jstests/sharding/printShardingStatus.js
+++ b/jstests/sharding/printShardingStatus.js
@@ -3,6 +3,7 @@
// headings and the names of sharded collections and their shard keys.
(function() {
+ 'use strict';
var st = new ShardingTest({shards: 1, mongos: 2, config: 1, other: {smallfiles: true}});
@@ -230,5 +231,4 @@
assert(mongos.getDB("test").dropDatabase());
st.stop();
-
})();
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index 926b350c7e9..64716ede81e 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -1,5 +1,4 @@
(function() {
-
// Include helpers for analyzing explain output.
load("jstests/libs/analyze_plan.js");
@@ -17,11 +16,14 @@
}
assert(sh.getBalancerState(), "A1");
- sh.setBalancerState(false);
+
+ sh.stopBalancer();
assert(!sh.getBalancerState(), "A2");
- sh.setBalancerState(true);
+
+ sh.startBalancer();
assert(sh.getBalancerState(), "A3");
- sh.setBalancerState(false);
+
+ sh.stopBalancer();
assert(!sh.getBalancerState(), "A4");
s.config.databases.find().forEach(printjson);
diff --git a/jstests/sharding/split_with_force_small.js b/jstests/sharding/split_with_force_small.js
index ad14f8642cb..be21049650e 100644
--- a/jstests/sharding/split_with_force_small.js
+++ b/jstests/sharding/split_with_force_small.js
@@ -1,71 +1,69 @@
//
// Tests autosplit locations with force : true, for small collections
//
+(function() {
+ 'use strict';
-var options = {
- chunkSize: 1, // MB
- mongosOptions: {noAutoSplit: ""}
-};
+ var st = new ShardingTest(
+ {shards: 1, mongos: 1, other: {chunkSize: 1, mongosOptions: {noAutoSplit: ""}}});
-var st = new ShardingTest({shards: 1, mongos: 1, other: options});
-st.stopBalancer();
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var config = mongos.getDB("config");
+ var shardAdmin = st.shard0.getDB("admin");
+ var coll = mongos.getCollection("foo.bar");
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
-var config = mongos.getDB("config");
-var shardAdmin = st.shard0.getDB("admin");
-var coll = mongos.getCollection("foo.bar");
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
-assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
-assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
-assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
+ jsTest.log("Insert a bunch of data into the low chunk of a collection," +
+ " to prevent relying on stats.");
-jsTest.log("Insert a bunch of data into the low chunk of a collection," +
- " to prevent relying on stats.");
+ var data128k = "x";
+ for (var i = 0; i < 7; i++)
+ data128k += data128k;
-var data128k = "x";
-for (var i = 0; i < 7; i++)
- data128k += data128k;
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < 1024; i++) {
+ bulk.insert({_id: -(i + 1)});
+ }
+ assert.writeOK(bulk.execute());
-var bulk = coll.initializeUnorderedBulkOp();
-for (var i = 0; i < 1024; i++) {
- bulk.insert({_id: -(i + 1)});
-}
-assert.writeOK(bulk.execute());
+ jsTest.log("Insert 32 docs into the high chunk of a collection");
-jsTest.log("Insert 32 docs into the high chunk of a collection");
+ bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < 32; i++) {
+ bulk.insert({_id: i});
+ }
+ assert.writeOK(bulk.execute());
-bulk = coll.initializeUnorderedBulkOp();
-for (var i = 0; i < 32; i++) {
- bulk.insert({_id: i});
-}
-assert.writeOK(bulk.execute());
+ jsTest.log("Split off MaxKey chunk...");
-jsTest.log("Split off MaxKey chunk...");
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 32}}));
-assert(admin.runCommand({split: coll + "", middle: {_id: 32}}).ok);
+ jsTest.log("Keep splitting chunk multiple times...");
-jsTest.log("Keep splitting chunk multiple times...");
-
-st.printShardingStatus();
-
-for (var i = 0; i < 5; i++) {
- assert(admin.runCommand({split: coll + "", find: {_id: 0}}).ok);
st.printShardingStatus();
-}
-// Make sure we can't split further than 5 (2^5) times
-assert(!admin.runCommand({split: coll + "", find: {_id: 0}}).ok);
+ for (var i = 0; i < 5; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", find: {_id: 0}}));
+ st.printShardingStatus();
+ }
+
+ // Make sure we can't split further than 5 (2^5) times
+ assert.commandFailed(admin.runCommand({split: coll + "", find: {_id: 0}}));
-var chunks = config.chunks.find({'min._id': {$gte: 0, $lt: 32}}).sort({min: 1}).toArray();
-printjson(chunks);
+ var chunks = config.chunks.find({'min._id': {$gte: 0, $lt: 32}}).sort({min: 1}).toArray();
+ printjson(chunks);
-// Make sure the chunks grow by 2x (except the first)
-var nextSize = 1;
-for (var i = 0; i < chunks.size; i++) {
- assert.eq(coll.count({_id: {$gte: chunks[i].min._id, $lt: chunks[i].max._id}}), nextSize);
- if (i != 0)
- nextSize += nextSize;
-}
+ // Make sure the chunks grow by 2x (except the first)
+ var nextSize = 1;
+ for (var i = 0; i < chunks.size; i++) {
+ assert.eq(coll.count({_id: {$gte: chunks[i].min._id, $lt: chunks[i].max._id}}), nextSize);
+ if (i != 0)
+ nextSize += nextSize;
+ }
-st.stop();
+ st.stop();
+})();
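
The arithmetic behind the "can't split further than 5 (2^5) times" assertion, as a worked sketch under the assumption that each forced split cuts the chunk containing {_id: 0} at its median:

    // 32 documents sit in [0, 32). Each forced split on {_id: 0} halves that chunk:
    // 32 -> 16 -> 8 -> 4 -> 2 -> 1, so a sixth forced split has nothing left to cut.
    // The resulting chunk sizes, low key to high, double except for the first two:
    var expectedSizes = [1, 1, 2, 4, 8, 16];
    assert.eq(32, expectedSizes.reduce(function(sum, n) { return sum + n; }, 0));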
diff --git a/jstests/sharding/stale_version_write.js b/jstests/sharding/stale_version_write.js
index e5885dcfa41..bd603124548 100644
--- a/jstests/sharding/stale_version_write.js
+++ b/jstests/sharding/stale_version_write.js
@@ -1,37 +1,37 @@
// Tests whether a reset sharding version triggers errors
+(function() {
+ 'use strict';
-jsTest.log("Starting sharded cluster...");
+ var st = new ShardingTest({shards: 1, mongos: 2});
-var st = new ShardingTest({shards: 1, mongos: 2, verbose: 2});
+ var mongosA = st.s0;
+ var mongosB = st.s1;
-st.stopBalancer();
+ jsTest.log("Adding new collections...");
-var mongosA = st.s0;
-var mongosB = st.s1;
+ var collA = mongosA.getCollection(jsTestName() + ".coll");
+ assert.writeOK(collA.insert({hello: "world"}));
-jsTest.log("Adding new collections...");
+ var collB = mongosB.getCollection("" + collA);
+ assert.writeOK(collB.insert({hello: "world"}));
-var collA = mongosA.getCollection(jsTestName() + ".coll");
-assert.writeOK(collA.insert({hello: "world"}));
+ jsTest.log("Enabling sharding...");
-var collB = mongosB.getCollection("" + collA);
-assert.writeOK(collB.insert({hello: "world"}));
+ assert.commandWorked(mongosA.getDB("admin").adminCommand({enableSharding: "" + collA.getDB()}));
+ assert.commandWorked(
+ mongosA.getDB("admin").adminCommand({shardCollection: "" + collA, key: {_id: 1}}));
-jsTest.log("Enabling sharding...");
+ // MongoD doesn't know about the config shard version *until* MongoS tells it
+ collA.findOne();
-printjson(mongosA.getDB("admin").runCommand({enableSharding: "" + collA.getDB()}));
-printjson(mongosA.getDB("admin").runCommand({shardCollection: "" + collA, key: {_id: 1}}));
+ jsTest.log("Trigger shard version mismatch...");
-// MongoD doesn't know about the config shard version *until* MongoS tells it
-collA.findOne();
+ assert.writeOK(collB.insert({goodbye: "world"}));
-jsTest.log("Trigger shard version mismatch...");
+ print("Inserted...");
-assert.writeOK(collB.insert({goodbye: "world"}));
+ assert.eq(3, collA.find().itcount());
+ assert.eq(3, collB.find().itcount());
-print("Inserted...");
-
-assert.eq(3, collA.find().itcount());
-assert.eq(3, collB.find().itcount());
-
-st.stop();
+ st.stop();
+})();