Diffstat (limited to 'jstests/sharding')
-rw-r--r--  jstests/sharding/auth.js                     2
-rw-r--r--  jstests/sharding/auth2.js                   37
-rw-r--r--  jstests/sharding/authCommands.js            37
-rw-r--r--  jstests/sharding/authConnectionHook.js      68
-rw-r--r--  jstests/sharding/auth_add_shard.js           4
-rw-r--r--  jstests/sharding/auth_copydb.js             10
-rw-r--r--  jstests/sharding/auth_no_config_primary.js   4
-rw-r--r--  jstests/sharding/auth_slaveok_routing.js   194
-rw-r--r--  jstests/sharding/authmr.js                   8
-rw-r--r--  jstests/sharding/authwhere.js                8
-rw-r--r--  jstests/sharding/cleanup_orphaned_auth.js    5
-rw-r--r--  jstests/sharding/localhostAuthBypass.js    404
-rw-r--r--  jstests/sharding/migrateBig.js               4
-rw-r--r--  jstests/sharding/mrShardedOutputAuth.js      8
-rw-r--r--  jstests/sharding/remove2.js                  7
-rw-r--r--  jstests/sharding/sharding_rs2.js            45
16 files changed, 409 insertions(+), 436 deletions(-)
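
The recurring change in the hunks below is structural: each test is wrapped in an immediately-invoked function expression with 'use strict', and ShardingTest options such as keyFile and chunkSize move from the top level (or the removed extraOptions wrapper) into the `other` sub-document. A minimal sketch of the resulting shape, not taken from the diff itself (the shard count and option values here are illustrative):

(function() {
    'use strict';

    // keyFile and chunkSize now live under `other` rather than at the
    // top level or inside `extraOptions`.
    var st = new ShardingTest({
        shards: 2,
        other: {keyFile: 'jstests/libs/key1', chunkSize: 1, useHostname: true},
    });

    // ... test body ...

    st.stop();
})();
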
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index e8521a3b0d8..f85ad22c6d5 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -37,7 +37,7 @@
name: "auth",
mongos: 1,
shards: 0,
- other: {extraOptions: {"keyFile": "jstests/libs/key1"}, chunkSize: 1},
+ other: {keyFile: "jstests/libs/key1", chunkSize: 1},
});
if (s.getDB('admin').runCommand('buildInfo').bits < 64) {
diff --git a/jstests/sharding/auth2.js b/jstests/sharding/auth2.js
index e26c58dccf1..41e18aa9c80 100644
--- a/jstests/sharding/auth2.js
+++ b/jstests/sharding/auth2.js
@@ -1,23 +1,24 @@
-var st = new ShardingTest({
- keyFile: 'jstests/libs/key1',
- shards: 2,
- chunkSize: 1,
- verbose: 2,
- other: {nopreallocj: 1, verbose: 2, useHostname: true, configOptions: {verbose: 2}}
-});
+(function() {
+ 'use strict';
-var mongos = st.s;
-var adminDB = mongos.getDB('admin');
-var db = mongos.getDB('test');
+ var st = new ShardingTest({
+ shards: 2,
+ other: {chunkSize: 1, useHostname: true, keyFile: 'jstests/libs/key1'},
+ });
-adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
+ var mongos = st.s;
+ var adminDB = mongos.getDB('admin');
+ var db = mongos.getDB('test');
-jsTestLog("Add user was successful");
+ adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
-// Test for SERVER-6549, make sure that repeatedly logging in always passes.
-for (var i = 0; i < 100; i++) {
- adminDB = new Mongo(mongos.host).getDB('admin');
- assert(adminDB.auth('admin', 'password'), "Auth failed on attempt #: " + i);
-}
+ jsTestLog("Add user was successful");
-st.stop();
+ // Test for SERVER-6549, make sure that repeatedly logging in always passes.
+ for (var i = 0; i < 100; i++) {
+ adminDB = new Mongo(mongos.host).getDB('admin');
+ assert(adminDB.auth('admin', 'password'), "Auth failed on attempt #: " + i);
+ }
+
+ st.stop();
+})();
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index deb6512a6b0..ba015c556af 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -1,15 +1,17 @@
/**
* This tests using DB commands with authentication enabled when sharded.
*/
-var doTest = function() {
+(function() {
+ 'use strict';
- var rsOpts = {oplogSize: 10, useHostname: false};
var st = new ShardingTest({
- keyFile: 'jstests/libs/key1',
shards: 2,
- chunkSize: 2,
- rs: rsOpts,
- other: {useHostname: false},
+ rs: {oplogSize: 10, useHostname: false},
+ other: {
+ keyFile: 'jstests/libs/key1',
+ useHostname: false,
+ chunkSize: 2,
+ },
});
var mongos = st.s;
@@ -35,7 +37,7 @@ var doTest = function() {
testDB.createUser({user: rwUser, pwd: password, roles: jsTest.basicUserRoles});
testDB.createUser({user: roUser, pwd: password, roles: jsTest.readOnlyUserRoles});
- authenticatedConn = new Mongo(mongos.host);
+ var authenticatedConn = new Mongo(mongos.host);
authenticatedConn.getDB('admin').auth(rwUser, password);
// Add user to shards to prevent localhost connections from having automatic full access
@@ -85,6 +87,7 @@ var doTest = function() {
var map = function() {
emit(this.i, this.j);
};
+
var reduce = function(key, values) {
var jCount = 0;
values.forEach(function(j) {
@@ -96,7 +99,7 @@ var doTest = function() {
var checkCommandSucceeded = function(db, cmdObj) {
print("Running command that should succeed: ");
printjson(cmdObj);
- resultObj = db.runCommand(cmdObj);
+ var resultObj = db.runCommand(cmdObj);
printjson(resultObj);
assert(resultObj.ok);
return resultObj;
@@ -105,7 +108,7 @@ var doTest = function() {
var checkCommandFailed = function(db, cmdObj) {
print("Running command that should fail: ");
printjson(cmdObj);
- resultObj = db.runCommand(cmdObj);
+ var resultObj = db.runCommand(cmdObj);
printjson(resultObj);
assert(!resultObj.ok);
return resultObj;
@@ -154,7 +157,7 @@ var doTest = function() {
if (hasWriteAuth) {
print("Checking write operations, should work");
testDB.foo.insert({a: 1, i: 1, j: 1});
- res = checkCommandSucceeded(
+ var res = checkCommandSucceeded(
testDB, {findAndModify: "foo", query: {a: 1, i: 1, j: 1}, update: {$set: {b: 1}}});
assert.eq(1, res.value.a);
assert.eq(null, res.value.b);
@@ -187,11 +190,11 @@ var doTest = function() {
{mapreduce: 'foo', map: map, reduce: reduce, out: 'mrOutput'});
checkCommandFailed(testDB, {drop: 'foo'});
checkCommandFailed(testDB, {dropDatabase: 1});
- passed = true;
+ var passed = true;
try {
// For some reason when create fails it throws an exception instead of just
// returning ok:0
- res = testDB.runCommand({create: 'baz'});
+ var res = testDB.runCommand({create: 'baz'});
if (!res.ok) {
passed = false;
}
@@ -213,7 +216,7 @@ var doTest = function() {
checkCommandSucceeded(adminDB, {isdbgrid: 1});
checkCommandSucceeded(adminDB, {ismaster: 1});
checkCommandSucceeded(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
- chunk = configDB.chunks.findOne({shard: st.rs0.name});
+ var chunk = configDB.chunks.findOne({shard: st.rs0.name});
checkCommandSucceeded(
adminDB,
{moveChunk: 'test.foo', find: chunk.min, to: st.rs1.name, _waitForDelete: true});
@@ -226,7 +229,7 @@ var doTest = function() {
checkCommandSucceeded(adminDB, {isdbgrid: 1});
checkCommandSucceeded(adminDB, {ismaster: 1});
checkCommandFailed(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
- chunkKey = {i: {$minKey: 1}, j: {$minKey: 1}};
+ var chunkKey = {i: {$minKey: 1}, j: {$minKey: 1}};
checkCommandFailed(
adminDB,
{moveChunk: 'test.foo', find: chunkKey, to: st.rs1.name, _waitForDelete: true});
@@ -239,7 +242,7 @@ var doTest = function() {
checkCommandSucceeded(adminDB, {removeshard: st.rs1.name});
// Wait for shard to be completely removed
checkRemoveShard = function() {
- res = checkCommandSucceeded(adminDB, {removeshard: st.rs1.name});
+ var res = checkCommandSucceeded(adminDB, {removeshard: st.rs1.name});
return res.msg == 'removeshard completed successfully';
};
assert.soon(checkRemoveShard, "failed to remove shard");
@@ -297,6 +300,4 @@ var doTest = function() {
st.printShardingStatus();
st.stop();
-};
-
-doTest();
+})();
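
Many of the hunks above also add `var` in front of names that were previously assigned without declaration (resultObj, res, passed, chunk, chunkKey). Under the new 'use strict' wrapper that is required: strict mode turns assignment to an undeclared identifier into a ReferenceError instead of silently creating a global. A small illustrative sketch, not part of the diff:

(function() {
    'use strict';

    var declared = 1;   // fine: declared with var
    declared = 2;       // fine: reassignment of a declared name

    try {
        undeclared = 3; // throws in strict mode rather than creating a global
    } catch (e) {
        print('strict mode rejected the implicit global: ' + e);
    }
})();
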
diff --git a/jstests/sharding/authConnectionHook.js b/jstests/sharding/authConnectionHook.js
index 516b0d34554..da9c6949d74 100644
--- a/jstests/sharding/authConnectionHook.js
+++ b/jstests/sharding/authConnectionHook.js
@@ -1,47 +1,47 @@
// Test for SERVER-8786 - if the first operation on an authenticated shard is moveChunk, it breaks
// the cluster.
-var st = new ShardingTest({
- keyFile: 'jstests/libs/key1',
- shards: 2,
- chunkSize: 1,
- verbose: 2,
- other: {nopreallocj: 1, verbose: 2, useHostname: true, configOptions: {verbose: 2}}
-});
+(function() {
+ 'use strict';
-var mongos = st.s;
-var adminDB = mongos.getDB('admin');
-var db = mongos.getDB('test');
+ var st = new ShardingTest(
+ {shards: 2, other: {keyFile: 'jstests/libs/key1', useHostname: true, chunkSize: 1}});
-adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
+ var mongos = st.s;
+ var adminDB = mongos.getDB('admin');
+ var db = mongos.getDB('test');
-adminDB.auth('admin', 'password');
+ adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
-adminDB.runCommand({enableSharding: "test"});
-st.ensurePrimaryShard('test', 'shard0001');
-adminDB.runCommand({shardCollection: "test.foo", key: {x: 1}});
+ adminDB.auth('admin', 'password');
-for (var i = 0; i < 100; i++) {
- db.foo.insert({x: i});
-}
+ adminDB.runCommand({enableSharding: "test"});
+ st.ensurePrimaryShard('test', 'shard0001');
+ adminDB.runCommand({shardCollection: "test.foo", key: {x: 1}});
-adminDB.runCommand({split: "test.foo", middle: {x: 50}});
-var curShard = st.getShard("test.foo", {x: 75});
-var otherShard = st.getOther(curShard).name;
-adminDB.runCommand({moveChunk: "test.foo", find: {x: 25}, to: otherShard, _waitForDelete: true});
+ for (var i = 0; i < 100; i++) {
+ db.foo.insert({x: i});
+ }
-st.printShardingStatus();
+ adminDB.runCommand({split: "test.foo", middle: {x: 50}});
+ var curShard = st.getShard("test.foo", {x: 75});
+ var otherShard = st.getOther(curShard).name;
+ adminDB.runCommand(
+ {moveChunk: "test.foo", find: {x: 25}, to: otherShard, _waitForDelete: true});
-MongoRunner.stopMongod(st.shard0);
-st.shard0 = MongoRunner.runMongod({restart: st.shard0});
+ st.printShardingStatus();
-// May fail the first couple times due to socket exceptions
-assert.soon(function() {
- var res = adminDB.runCommand({moveChunk: "test.foo", find: {x: 75}, to: otherShard});
- printjson(res);
- return res.ok;
-});
+ MongoRunner.stopMongod(st.shard0);
+ st.shard0 = MongoRunner.runMongod({restart: st.shard0});
-printjson(db.foo.findOne({x: 25}));
-printjson(db.foo.findOne({x: 75}));
+ // May fail the first couple times due to socket exceptions
+ assert.soon(function() {
+ var res = adminDB.runCommand({moveChunk: "test.foo", find: {x: 75}, to: otherShard});
+ printjson(res);
+ return res.ok;
+ });
-st.stop();
+ printjson(db.foo.findOne({x: 25}));
+ printjson(db.foo.findOne({x: 75}));
+
+ st.stop();
+})();
diff --git a/jstests/sharding/auth_add_shard.js b/jstests/sharding/auth_add_shard.js
index b24afd0172c..f1cf6eccea4 100644
--- a/jstests/sharding/auth_add_shard.js
+++ b/jstests/sharding/auth_add_shard.js
@@ -14,8 +14,7 @@
var adminUser = {db: "admin", username: "foo", password: "bar"};
// set up a 2 shard cluster with keyfile
- var st = new ShardingTest(
- {name: "auth_add_shard1", shards: 1, mongos: 1, keyFile: "jstests/libs/key1"});
+ var st = new ShardingTest({shards: 1, mongos: 1, other: {keyFile: 'jstests/libs/key1'}});
var mongos = st.s0;
var admin = mongos.getDB("admin");
@@ -97,5 +96,4 @@
MongoRunner.stopMongod(conn);
st.stop();
-
})();
diff --git a/jstests/sharding/auth_copydb.js b/jstests/sharding/auth_copydb.js
index 8c73214019e..4c215f666f7 100644
--- a/jstests/sharding/auth_copydb.js
+++ b/jstests/sharding/auth_copydb.js
@@ -1,7 +1,8 @@
// Tests the copydb command on mongos with auth
-var runTest = function() {
+(function() {
+ 'use strict';
- var st = new ShardingTest({shards: 1, mongos: 1, keyFile: "jstests/libs/key1"});
+ var st = new ShardingTest({shards: 1, mongos: 1, other: {keyFile: 'jstests/libs/key1'}});
var mongos = st.s0;
var destAdminDB = mongos.getDB('admin');
var destTestDB = mongos.getDB('test');
@@ -36,7 +37,4 @@ var runTest = function() {
assert.eq(1, destTestDB.foo.findOne().a);
st.stop();
-
-};
-
-runTest();
+})();
diff --git a/jstests/sharding/auth_no_config_primary.js b/jstests/sharding/auth_no_config_primary.js
index 3bb1ea1cf4c..0fdd2f31a89 100644
--- a/jstests/sharding/auth_no_config_primary.js
+++ b/jstests/sharding/auth_no_config_primary.js
@@ -6,9 +6,9 @@
* @tags: [requires_persistence]
*/
(function() {
- "use strict";
+ 'use strict';
- var st = new ShardingTest({shards: 1, keyFile: 'jstests/libs/key1'});
+ var st = new ShardingTest({shards: 1, other: {keyFile: 'jstests/libs/key1'}});
st.s.getDB('admin').createUser({user: 'root', pwd: 'pass', roles: ['root']});
st.s.getDB('admin').auth('root', 'pass');
diff --git a/jstests/sharding/auth_slaveok_routing.js b/jstests/sharding/auth_slaveok_routing.js
index c950730c799..42f34955d5a 100644
--- a/jstests/sharding/auth_slaveok_routing.js
+++ b/jstests/sharding/auth_slaveok_routing.js
@@ -9,101 +9,99 @@
* config and will be unable to elect a primary.
* @tags: [requires_persistence]
*/
-
-/**
- * Checks if a query to the given collection will be routed to the secondary.
- *
- * @param {DBCollection} coll
- * @param {Object} query
- *
- * @return {boolean} true if query was routed to a secondary node.
- */
-function doesRouteToSec(coll, query) {
- var explain = coll.find(query).explain();
- assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);
- var serverInfo = explain.queryPlanner.winningPlan.shards[0].serverInfo;
- var conn = new Mongo(serverInfo.host + ":" + serverInfo.port.toString());
- var cmdRes = conn.getDB('admin').runCommand({isMaster: 1});
-
- jsTest.log('isMaster: ' + tojson(cmdRes));
-
- return cmdRes.secondary;
-}
-
-var rsOpts = {oplogSize: 50};
-var st = new ShardingTest(
- {keyFile: 'jstests/libs/key1', shards: 1, rs: rsOpts, other: {nopreallocj: 1}});
-
-var mongos = st.s;
-var replTest = st.rs0;
-var testDB = mongos.getDB('AAAAA');
-var coll = testDB.user;
-var nodeCount = replTest.nodes.length;
-
-/* Add an admin user to the replica member to simulate connecting from
- * remote location. This is because mongod allows unautheticated
- * connections to access the server from localhost connections if there
- * is no admin user.
- */
-var adminDB = mongos.getDB('admin');
-adminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles});
-adminDB.auth('user', 'password');
-var priAdminDB = replTest.getPrimary().getDB('admin');
-priAdminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: 30000});
-
-coll.drop();
-coll.setSlaveOk(true);
-
-/* Secondaries should be up here, but they can still be in RECOVERY
- * state, which will make the ReplicaSetMonitor mark them as
- * ok = false and not eligible for slaveOk queries.
- */
-ReplSetTest.awaitRSClientHosts(mongos, replTest.getSecondaries(), {ok: true, secondary: true});
-
-var bulk = coll.initializeUnorderedBulkOp();
-for (var x = 0; x < 20; x++) {
- bulk.insert({v: x, k: 10});
-}
-assert.writeOK(bulk.execute({w: nodeCount}));
-
-/* Although mongos never caches query results, try to do a different query
- * everytime just to be sure.
- */
-var vToFind = 0;
-
-jsTest.log('First query to SEC');
-assert(doesRouteToSec(coll, {v: vToFind++}));
-
-var SIG_TERM = 15;
-replTest.stopSet(SIG_TERM, true, {auth: {user: 'user', pwd: 'password'}});
-
-for (var n = 0; n < nodeCount; n++) {
- replTest.restart(n, rsOpts);
-}
-
-replTest.awaitSecondaryNodes();
-
-coll.setSlaveOk(true);
-
-/* replSetMonitor does not refresh the nodes information when getting secondaries.
- * A node that is previously labeled as secondary can now be a primary, so we
- * wait for the replSetMonitorWatcher thread to refresh the nodes information.
- */
-ReplSetTest.awaitRSClientHosts(mongos, replTest.getSecondaries(), {ok: true, secondary: true});
-//
-// We also need to wait for the primary, it's possible that the mongos may think a node is a
-// secondary but it actually changed to a primary before we send our final query.
-//
-ReplSetTest.awaitRSClientHosts(mongos, replTest.getPrimary(), {ok: true, ismaster: true});
-
-// Recheck if we can still query secondaries after refreshing connections.
-jsTest.log('Final query to SEC');
-assert(doesRouteToSec(coll, {v: vToFind++}));
-
-// Cleanup auth so Windows will be able to shutdown gracefully
-priAdminDB = replTest.getPrimary().getDB('admin');
-priAdminDB.auth('user', 'password');
-priAdminDB.dropUser('user');
-
-st.stop();
+(function() {
+ 'use strict';
+
+ /**
+ * Checks if a query to the given collection will be routed to the secondary. Returns true if
+ * query was routed to a secondary node.
+ */
+ function doesRouteToSec(coll, query) {
+ var explain = coll.find(query).explain();
+ assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);
+ var serverInfo = explain.queryPlanner.winningPlan.shards[0].serverInfo;
+ var conn = new Mongo(serverInfo.host + ":" + serverInfo.port.toString());
+ var cmdRes = conn.getDB('admin').runCommand({isMaster: 1});
+
+ jsTest.log('isMaster: ' + tojson(cmdRes));
+
+ return cmdRes.secondary;
+ }
+
+ var rsOpts = {oplogSize: 50};
+ var st = new ShardingTest({shards: 1, rs: rsOpts, other: {keyFile: 'jstests/libs/key1'}});
+
+ var mongos = st.s;
+ var replTest = st.rs0;
+ var testDB = mongos.getDB('AAAAA');
+ var coll = testDB.user;
+ var nodeCount = replTest.nodes.length;
+
+ /* Add an admin user to the replica member to simulate connecting from
+ * remote location. This is because mongod allows unauthenticated
+ * connections to access the server from localhost connections if there
+ * is no admin user.
+ */
+ var adminDB = mongos.getDB('admin');
+ adminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles});
+ adminDB.auth('user', 'password');
+ var priAdminDB = replTest.getPrimary().getDB('admin');
+ priAdminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: 30000});
+
+ coll.drop();
+ coll.setSlaveOk(true);
+
+ /* Secondaries should be up here, but they can still be in RECOVERY
+ * state, which will make the ReplicaSetMonitor mark them as
+ * ok = false and not eligible for slaveOk queries.
+ */
+ ReplSetTest.awaitRSClientHosts(mongos, replTest.getSecondaries(), {ok: true, secondary: true});
+
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var x = 0; x < 20; x++) {
+ bulk.insert({v: x, k: 10});
+ }
+ assert.writeOK(bulk.execute({w: nodeCount}));
+
+ /* Although mongos never caches query results, try to do a different query
+ * every time just to be sure.
+ */
+ var vToFind = 0;
+
+ jsTest.log('First query to SEC');
+ assert(doesRouteToSec(coll, {v: vToFind++}));
+
+ var SIG_TERM = 15;
+ replTest.stopSet(SIG_TERM, true, {auth: {user: 'user', pwd: 'password'}});
+
+ for (var n = 0; n < nodeCount; n++) {
+ replTest.restart(n, rsOpts);
+ }
+
+ replTest.awaitSecondaryNodes();
+
+ coll.setSlaveOk(true);
+
+ /* replSetMonitor does not refresh the nodes information when getting secondaries.
+ * A node that is previously labeled as secondary can now be a primary, so we
+ * wait for the replSetMonitorWatcher thread to refresh the nodes information.
+ */
+ ReplSetTest.awaitRSClientHosts(mongos, replTest.getSecondaries(), {ok: true, secondary: true});
+ //
+ // We also need to wait for the primary, it's possible that the mongos may think a node is a
+ // secondary but it actually changed to a primary before we send our final query.
+ //
+ ReplSetTest.awaitRSClientHosts(mongos, replTest.getPrimary(), {ok: true, ismaster: true});
+
+ // Recheck if we can still query secondaries after refreshing connections.
+ jsTest.log('Final query to SEC');
+ assert(doesRouteToSec(coll, {v: vToFind++}));
+
+ // Cleanup auth so Windows will be able to shutdown gracefully
+ priAdminDB = replTest.getPrimary().getDB('admin');
+ priAdminDB.auth('user', 'password');
+ priAdminDB.dropUser('user');
+
+ st.stop();
+})();
diff --git a/jstests/sharding/authmr.js b/jstests/sharding/authmr.js
index 67f87fc59b4..fb5b5a94486 100644
--- a/jstests/sharding/authmr.js
+++ b/jstests/sharding/authmr.js
@@ -28,12 +28,8 @@
assert.writeOK(collection.insert(obj));
}
- var cluster = new ShardingTest({
- name: "authmr",
- shards: 1,
- mongos: 1,
- other: {extraOptions: {keyFile: "jstests/libs/key1"}}
- });
+ var cluster = new ShardingTest(
+ {name: "authmr", shards: 1, mongos: 1, other: {keyFile: "jstests/libs/key1"}});
// Set up the test data.
(function() {
diff --git a/jstests/sharding/authwhere.js b/jstests/sharding/authwhere.js
index f9af413f470..7443a91005a 100644
--- a/jstests/sharding/authwhere.js
+++ b/jstests/sharding/authwhere.js
@@ -28,12 +28,8 @@
assert.writeOK(collection.insert(obj));
}
- var cluster = new ShardingTest({
- name: "authwhere",
- shards: 1,
- mongos: 1,
- other: {extraOptions: {keyFile: "jstests/libs/key1"}}
- });
+ var cluster = new ShardingTest(
+ {name: "authwhere", shards: 1, mongos: 1, other: {keyFile: 'jstests/libs/key1'}});
// Set up the test data.
(function() {
diff --git a/jstests/sharding/cleanup_orphaned_auth.js b/jstests/sharding/cleanup_orphaned_auth.js
index 0b50742ad70..834ad613a38 100644
--- a/jstests/sharding/cleanup_orphaned_auth.js
+++ b/jstests/sharding/cleanup_orphaned_auth.js
@@ -3,7 +3,7 @@
//
(function() {
- "use strict";
+ 'use strict';
function assertUnauthorized(res, msg) {
if (assert._debug && msg)
@@ -20,7 +20,7 @@
}
var st =
- new ShardingTest({auth: true, keyFile: 'jstests/libs/key1', other: {useHostname: false}});
+ new ShardingTest({auth: true, other: {keyFile: 'jstests/libs/key1', useHostname: false}});
var shardAdmin = st.shard0.getDB('admin');
shardAdmin.createUser(
@@ -52,5 +52,4 @@
assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
st.stop();
-
})();
diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js
index b36972da685..26e2c677dbf 100644
--- a/jstests/sharding/localhostAuthBypass.js
+++ b/jstests/sharding/localhostAuthBypass.js
@@ -2,66 +2,145 @@
//
// This test is to ensure that localhost authentication works correctly against a sharded
// cluster whether they are hosted with "localhost" or a hostname.
-
-var replSetName = "replsets_server-6591";
-var keyfile = "jstests/libs/key1";
-var numShards = 2;
-var username = "foo";
-var password = "bar";
-
-var createUser = function(mongo) {
- print("============ adding a user.");
- mongo.getDB("admin").createUser({user: username, pwd: password, roles: jsTest.adminUserRoles});
-};
-
-var addUsersToEachShard = function(st) {
- for (i = 0; i < numShards; i++) {
- print("============ adding a user to shard " + i);
- var d = st["shard" + i];
- d.getDB("admin").createUser({user: username, pwd: password, roles: jsTest.adminUserRoles});
- }
-};
-
-var addShard = function(st, shouldPass) {
- var m = MongoRunner.runMongod({auth: "", keyFile: keyfile, useHostname: false});
- var res = st.getDB("admin").runCommand({addShard: m.host});
- if (shouldPass) {
- assert.commandWorked(res, "Add shard");
- } else {
- assert.commandFailed(res, "Add shard");
- }
- return m.port;
-};
-
-var findEmptyShard = function(st, ns) {
- var counts = st.chunkCounts("foo");
-
- for (shard in counts) {
- if (counts[shard] == 0) {
- return shard;
+(function() {
+ 'use strict';
+
+ var replSetName = "replsets_server-6591";
+ var keyfile = "jstests/libs/key1";
+ var numShards = 2;
+ var username = "foo";
+ var password = "bar";
+
+ var createUser = function(mongo) {
+ print("============ adding a user.");
+ mongo.getDB("admin").createUser(
+ {user: username, pwd: password, roles: jsTest.adminUserRoles});
+ };
+
+ var addUsersToEachShard = function(st) {
+ for (var i = 0; i < numShards; i++) {
+ print("============ adding a user to shard " + i);
+ var d = st["shard" + i];
+ d.getDB("admin").createUser(
+ {user: username, pwd: password, roles: jsTest.adminUserRoles});
+ }
+ };
+
+ var addShard = function(st, shouldPass) {
+ var m = MongoRunner.runMongod({auth: "", keyFile: keyfile, useHostname: false});
+ var res = st.getDB("admin").runCommand({addShard: m.host});
+ if (shouldPass) {
+ assert.commandWorked(res, "Add shard");
+ } else {
+ assert.commandFailed(res, "Add shard");
}
- }
+ return m.port;
+ };
- return null;
-};
+ var findEmptyShard = function(st, ns) {
+ var counts = st.chunkCounts("foo");
-var assertCannotRunCommands = function(mongo, st) {
- print("============ ensuring that commands cannot be run.");
+ for (var shard in counts) {
+ if (counts[shard] == 0) {
+ return shard;
+ }
+ }
- // CRUD
- var test = mongo.getDB("test");
- assert.throws(function() {
+ return null;
+ };
+
+ var assertCannotRunCommands = function(mongo, st) {
+ print("============ ensuring that commands cannot be run.");
+
+ // CRUD
+ var test = mongo.getDB("test");
+ assert.throws(function() {
+ test.system.users.findOne();
+ });
+ assert.writeError(test.foo.save({_id: 0}));
+ assert.throws(function() {
+ test.foo.findOne({_id: 0});
+ });
+ assert.writeError(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.writeError(test.foo.remove({_id: 0}));
+
+ // Multi-shard
+ assert.throws(function() {
+ test.foo.mapReduce(
+ function() {
+ emit(1, 1);
+ },
+ function(id, count) {
+ return Array.sum(count);
+ },
+ {out: "other"});
+ });
+
+ // Config
+ assert.throws(function() {
+ mongo.getDB("config").shards.findOne();
+ });
+
+ var authorizeErrorCode = 13;
+ var res = mongo.getDB("admin").runCommand({
+ moveChunk: "test.foo",
+ find: {_id: 1},
+ to: "shard0000" // Arbitrary shard.
+ });
+ assert.commandFailedWithCode(res, authorizeErrorCode, "moveChunk");
+ assert.commandFailedWithCode(mongo.getDB("test").copyDatabase("admin", "admin2"),
+ authorizeErrorCode,
+ "copyDatabase");
+ // Create collection
+ assert.commandFailedWithCode(
+ mongo.getDB("test").createCollection("log", {capped: true, size: 5242880, max: 5000}),
+ authorizeErrorCode,
+ "createCollection");
+ // Set/Get system parameters
+ var params = [
+ {param: "journalCommitInterval", val: 200},
+ {param: "logLevel", val: 2},
+ {param: "logUserIds", val: 1},
+ {param: "notablescan", val: 1},
+ {param: "quiet", val: 1},
+ {param: "replApplyBatchSize", val: 10},
+ {param: "replIndexPrefetch", val: "none"},
+ {param: "syncdelay", val: 30},
+ {param: "traceExceptions", val: true},
+ {param: "sslMode", val: "preferSSL"},
+ {param: "clusterAuthMode", val: "sendX509"},
+ {param: "userCacheInvalidationIntervalSecs", val: 300}
+ ];
+ params.forEach(function(p) {
+ var cmd = {setParameter: 1};
+ cmd[p.param] = p.val;
+ assert.commandFailedWithCode(mongo.getDB("admin").runCommand(cmd),
+ authorizeErrorCode,
+ "setParameter: " + p.param);
+ });
+ params.forEach(function(p) {
+ var cmd = {getParameter: 1};
+ cmd[p.param] = 1;
+ assert.commandFailedWithCode(mongo.getDB("admin").runCommand(cmd),
+ authorizeErrorCode,
+ "getParameter: " + p.param);
+ });
+ };
+
+ var assertCanRunCommands = function(mongo, st) {
+ print("============ ensuring that commands can be run.");
+
+ // CRUD
+ var test = mongo.getDB("test");
+
+ // this will throw if it fails
test.system.users.findOne();
- });
- assert.writeError(test.foo.save({_id: 0}));
- assert.throws(function() {
- test.foo.findOne({_id: 0});
- });
- assert.writeError(test.foo.update({_id: 0}, {$set: {x: 20}}));
- assert.writeError(test.foo.remove({_id: 0}));
- // Multi-shard
- assert.throws(function() {
+ assert.writeOK(test.foo.save({_id: 0}));
+ assert.writeOK(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.writeOK(test.foo.remove({_id: 0}));
+
+ // Multi-shard
test.foo.mapReduce(
function() {
emit(1, 1);
@@ -70,153 +149,80 @@ var assertCannotRunCommands = function(mongo, st) {
return Array.sum(count);
},
{out: "other"});
- });
- // Config
- assert.throws(function() {
+ // Config
+ // this will throw if it fails
mongo.getDB("config").shards.findOne();
- });
- var authorizeErrorCode = 13;
- var res = mongo.getDB("admin").runCommand({
- moveChunk: "test.foo",
- find: {_id: 1},
- to: "shard0000" // Arbitrary shard.
- });
- assert.commandFailedWithCode(res, authorizeErrorCode, "moveChunk");
- assert.commandFailedWithCode(
- mongo.getDB("test").copyDatabase("admin", "admin2"), authorizeErrorCode, "copyDatabase");
- // Create collection
- assert.commandFailedWithCode(
- mongo.getDB("test").createCollection("log", {capped: true, size: 5242880, max: 5000}),
- authorizeErrorCode,
- "createCollection");
- // Set/Get system parameters
- var params = [
- {param: "journalCommitInterval", val: 200},
- {param: "logLevel", val: 2},
- {param: "logUserIds", val: 1},
- {param: "notablescan", val: 1},
- {param: "quiet", val: 1},
- {param: "replApplyBatchSize", val: 10},
- {param: "replIndexPrefetch", val: "none"},
- {param: "syncdelay", val: 30},
- {param: "traceExceptions", val: true},
- {param: "sslMode", val: "preferSSL"},
- {param: "clusterAuthMode", val: "sendX509"},
- {param: "userCacheInvalidationIntervalSecs", val: 300}
- ];
- params.forEach(function(p) {
- var cmd = {setParameter: 1};
- cmd[p.param] = p.val;
- assert.commandFailedWithCode(
- mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "setParameter: " + p.param);
- });
- params.forEach(function(p) {
- var cmd = {getParameter: 1};
- cmd[p.param] = 1;
- assert.commandFailedWithCode(
- mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "getParameter: " + p.param);
- });
-};
-
-var assertCanRunCommands = function(mongo, st) {
- print("============ ensuring that commands can be run.");
-
- // CRUD
- var test = mongo.getDB("test");
-
- // this will throw if it fails
- test.system.users.findOne();
-
- assert.writeOK(test.foo.save({_id: 0}));
- assert.writeOK(test.foo.update({_id: 0}, {$set: {x: 20}}));
- assert.writeOK(test.foo.remove({_id: 0}));
-
- // Multi-shard
- test.foo.mapReduce(
- function() {
- emit(1, 1);
- },
- function(id, count) {
- return Array.sum(count);
- },
- {out: "other"});
-
- // Config
- // this will throw if it fails
- mongo.getDB("config").shards.findOne();
-
- to = findEmptyShard(st, "test.foo");
- var res = mongo.getDB("admin").runCommand({moveChunk: "test.foo", find: {_id: 1}, to: to});
- assert.commandWorked(res);
-};
-
-var authenticate = function(mongo) {
- print("============ authenticating user.");
- mongo.getDB("admin").auth(username, password);
-};
-
-var setupSharding = function(shardingTest) {
- var mongo = shardingTest.s;
-
- print("============ enabling sharding on test.foo.");
- mongo.getDB("admin").runCommand({enableSharding: "test"});
- shardingTest.ensurePrimaryShard('test', 'shard0001');
- mongo.getDB("admin").runCommand({shardCollection: "test.foo", key: {_id: 1}});
-
- var test = mongo.getDB("test");
- for (i = 1; i < 20; i++) {
- test.foo.insert({_id: i});
- }
-};
-
-var start = function() {
- return new ShardingTest({
- auth: "",
- keyFile: keyfile,
- shards: numShards,
- chunkSize: 1,
- other: {
- nopreallocj: 1,
- useHostname: false // Must use localhost to take advantage of the localhost auth bypass
+ var to = findEmptyShard(st, "test.foo");
+ var res = mongo.getDB("admin").runCommand({moveChunk: "test.foo", find: {_id: 1}, to: to});
+ assert.commandWorked(res);
+ };
+
+ var authenticate = function(mongo) {
+ print("============ authenticating user.");
+ mongo.getDB("admin").auth(username, password);
+ };
+
+ var setupSharding = function(shardingTest) {
+ var mongo = shardingTest.s;
+
+ print("============ enabling sharding on test.foo.");
+ mongo.getDB("admin").runCommand({enableSharding: "test"});
+ shardingTest.ensurePrimaryShard('test', 'shard0001');
+ mongo.getDB("admin").runCommand({shardCollection: "test.foo", key: {_id: 1}});
+
+ var test = mongo.getDB("test");
+ for (var i = 1; i < 20; i++) {
+ test.foo.insert({_id: i});
}
- });
-};
-
-var shutdown = function(st) {
- print("============ shutting down.");
-
- // SERVER-8445
- // Unlike MongoRunner.stopMongod and ReplSetTest.stopSet,
- // ShardingTest.stop does not have a way to provide auth
- // information. Therefore, we'll do this manually for now.
-
- for (i = 0; i < st._mongos.length; i++) {
- var port = st["s" + i].port;
- MongoRunner.stopMongos(port,
- /*signal*/ false,
- {auth: {user: username, pwd: password}});
- }
-
- for (i = 0; i < st._connections.length; i++) {
- var port = st["shard" + i].port;
- MongoRunner.stopMongod(port,
- /*signal*/ false,
- {auth: {user: username, pwd: password}});
- }
-
- for (i = 0; i < st._configServers.length; i++) {
- var c = st["config" + i].port;
- MongoRunner.stopMongod(port,
- /*signal*/ false,
- {auth: {user: username, pwd: password}});
- }
-
- st.stop();
-};
-
-var runTest = function() {
+ };
+
+ var start = function() {
+ return new ShardingTest({
+ auth: "",
+ shards: numShards,
+ other: {
+ keyFile: keyfile,
+ chunkSize: 1,
+ useHostname:
+ false // Must use localhost to take advantage of the localhost auth bypass
+ }
+ });
+ };
+
+ var shutdown = function(st) {
+ print("============ shutting down.");
+
+ // SERVER-8445
+ // Unlike MongoRunner.stopMongod and ReplSetTest.stopSet,
+ // ShardingTest.stop does not have a way to provide auth
+ // information. Therefore, we'll do this manually for now.
+
+ for (var i = 0; i < st._mongos.length; i++) {
+ var port = st["s" + i].port;
+ MongoRunner.stopMongos(port,
+ /*signal*/ false,
+ {auth: {user: username, pwd: password}});
+ }
+
+ for (var i = 0; i < st._connections.length; i++) {
+ var port = st["shard" + i].port;
+ MongoRunner.stopMongod(port,
+ /*signal*/ false,
+ {auth: {user: username, pwd: password}});
+ }
+
+ for (var i = 0; i < st._configServers.length; i++) {
+ var c = st["config" + i].port;
+ MongoRunner.stopMongod(port,
+ /*signal*/ false,
+ {auth: {user: username, pwd: password}});
+ }
+
+ st.stop();
+ };
+
print("=====================");
print("starting shards");
print("=====================");
@@ -259,6 +265,4 @@ var runTest = function() {
extraShards.forEach(function(sh) {
MongoRunner.stopMongod(sh);
});
-};
-
-runTest();
+})();
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index 6166682bd83..68f8b20b2c7 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -68,10 +68,6 @@
return x < 2;
}, "no balance happened", 8 * 60 * 1000, 2000);
- assert.soon(function() {
- return !s.isAnyBalanceInFlight();
- });
-
assert.eq(coll.count(), coll.find().itcount());
s.stop();
diff --git a/jstests/sharding/mrShardedOutputAuth.js b/jstests/sharding/mrShardedOutputAuth.js
index acbb01f6794..93164e5e128 100644
--- a/jstests/sharding/mrShardedOutputAuth.js
+++ b/jstests/sharding/mrShardedOutputAuth.js
@@ -36,12 +36,8 @@
assert.eq(outputDb.numbers_out.count(), 0, "map/reduce should not have succeeded");
}
- var st = new ShardingTest({
- name: "mrShardedOutputAuth",
- shards: 1,
- mongos: 1,
- other: {extraOptions: {"keyFile": "jstests/libs/key1"}}
- });
+ var st = new ShardingTest(
+ {name: "mrShardedOutputAuth", shards: 1, mongos: 1, other: {keyFile: 'jstests/libs/key1'}});
// Setup the users to the input, output and admin databases
var mongos = st.s;
diff --git a/jstests/sharding/remove2.js b/jstests/sharding/remove2.js
index b8c8d2f1b9e..4e1a1578a3b 100644
--- a/jstests/sharding/remove2.js
+++ b/jstests/sharding/remove2.js
@@ -20,13 +20,6 @@ removeShard = function(st, replTest) {
};
assert.soon(checkRemoveShard, "failed to remove shard", 5 * 60000);
- // Need to wait for migration to be over... only works for inline deletes
- checkNSLock = function() {
- printjson(st.s.getDB("config").locks.find().toArray());
- return !st.isAnyBalanceInFlight();
- };
- assert.soon(checkNSLock, "migrations did not end?");
-
sleep(2000);
var directdb = replTest.getPrimary().getDB("admin");
diff --git a/jstests/sharding/sharding_rs2.js b/jstests/sharding/sharding_rs2.js
index ed15592a9b2..58129d9fe46 100644
--- a/jstests/sharding/sharding_rs2.js
+++ b/jstests/sharding/sharding_rs2.js
@@ -36,8 +36,8 @@
var db = s.getDB("test");
var t = db.foo;
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', 'test-rs0');
+ assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+ s.ensurePrimaryShard('test', s.shard0.shardName);
// -------------------------------------------------------------------------------------------
// ---------- test that config server updates when replica set config changes ----------------
@@ -49,19 +49,16 @@
s.config.databases.find().forEach(printjson);
s.config.shards.find().forEach(printjson);
- var dbPrimaryShardId = s.getPrimaryShardIdForDatabase("test");
-
function countNodes() {
- var x = s.config.shards.findOne({_id: dbPrimaryShardId});
- return x.host.split(",").length;
+ return s.config.shards.findOne({_id: s.shard0.shardName}).host.split(",").length;
}
assert.eq(2, countNodes(), "A1");
- var rs = s.getRSEntry(dbPrimaryShardId);
- rs.test.add();
+ var rs = s.rs0;
+ rs.add();
try {
- rs.test.reInitiate();
+ rs.reInitiate();
} catch (e) {
// this is ok as rs's may close connections on a change of master
print(e);
@@ -69,7 +66,7 @@
assert.soon(function() {
try {
- printjson(rs.test.getPrimary().getDB("admin").runCommand("isMaster"));
+ printjson(rs.getPrimary().getDB("admin").runCommand("isMaster"));
s.config.shards.find().forEach(printjsononeline);
return countNodes() == 3;
} catch (e) {
@@ -87,12 +84,12 @@
jsTest.log(
"Awaiting replication of all nodes, so spurious sync'ing queries don't upset our counts...");
- rs.test.awaitReplication();
+ rs.awaitReplication();
// Make sure we wait for secondaries here - otherwise a secondary could come online later and be
// used for the
// count command before being fully replicated
jsTest.log("Awaiting secondary status of all nodes");
- rs.test.waitForState(rs.test.getSecondaries(), ReplSetTest.State.SECONDARY, 180 * 1000);
+ rs.waitForState(rs.getSecondaries(), ReplSetTest.State.SECONDARY, 180 * 1000);
// -------------------------------------------------------------------------------------------
// ---------- test routing to slaves ----------------
@@ -103,7 +100,7 @@
var m = new Mongo(s.s.name);
var ts = m.getDB("test").foo;
- var before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ var before = rs.getPrimary().adminCommand("serverStatus").opcounters;
for (var i = 0; i < 10; i++) {
assert.eq(17, ts.findOne().x, "B1");
@@ -115,7 +112,7 @@
assert.eq(17, ts.findOne().x, "B2");
}
- var after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ var after = rs.getPrimary().adminCommand("serverStatus").opcounters;
printjson(before);
printjson(after);
@@ -136,7 +133,7 @@
// Counts pass the options of the connection - which is slaveOk'd, so we need to wait for
// replication for this and future tests to pass
- rs.test.awaitReplication();
+ rs.awaitReplication();
assert.eq(100, ts.count(), "B4");
assert.eq(100, ts.find().itcount(), "B5");
@@ -161,7 +158,7 @@
s.printShardingStatus();
- var other = s.config.shards.findOne({_id: {$ne: dbPrimaryShardId}});
+ var other = s.config.shards.findOne({_id: {$ne: s.shard0.shardName}});
assert.commandWorked(s.getDB('admin').runCommand({
moveChunk: "test.foo",
find: {x: 10},
@@ -172,14 +169,14 @@
}));
assert.eq(100, t.count(), "C3");
- assert.eq(50, rs.test.getPrimary().getDB("test").foo.count(), "C4");
+ assert.eq(50, rs.getPrimary().getDB("test").foo.count(), "C4");
// by non-shard key
m = new Mongo(s.s.name);
ts = m.getDB("test").foo;
- before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ before = rs.getPrimary().adminCommand("serverStatus").opcounters;
for (var i = 0; i < 10; i++) {
assert.eq(17, ts.findOne({_id: 5}).x, "D1");
@@ -190,7 +187,7 @@
assert.eq(17, ts.findOne({_id: 5}).x, "D2");
}
- after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ after = rs.getPrimary().adminCommand("serverStatus").opcounters;
assert.lte(before.query + 10, after.query, "D3");
@@ -203,7 +200,7 @@
ts = m.getDB("test").foo;
- before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ before = rs.getPrimary().adminCommand("serverStatus").opcounters;
for (var i = 0; i < 10; i++) {
assert.eq(57, ts.findOne({x: 57}).x, "E1");
@@ -214,7 +211,7 @@
assert.eq(57, ts.findOne({x: 57}).x, "E2");
}
- after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ after = rs.getPrimary().adminCommand("serverStatus").opcounters;
assert.lte(before.query + 10, after.query, "E3");
@@ -223,9 +220,10 @@
printjson(ts.find().batchSize(5).explain());
// fsyncLock the secondaries
- rs.test.getSecondaries().forEach(function(secondary) {
+ rs.getSecondaries().forEach(function(secondary) {
assert.commandWorked(secondary.getDB("test").fsyncLock());
});
+
// Modify data only on the primary replica of the primary shard.
// { x: 60 } goes to the shard of "rs", which is the primary shard.
assert.writeOK(ts.insert({primaryOnly: true, x: 60}));
@@ -233,7 +231,7 @@
// But we can guarantee not to read from primary.
assert.eq(0, ts.find({primaryOnly: true, x: 60}).itcount());
// Unlock the secondaries
- rs.test.getSecondaries().forEach(function(secondary) {
+ rs.getSecondaries().forEach(function(secondary) {
secondary.getDB("test").fsyncUnlock();
});
// Clean up the data
@@ -255,5 +253,4 @@
printjson(db.adminCommand("getShardMap"));
s.stop();
-
})();