author     Marcos José Grillo Ramirez <marcos.grillo@mongodb.com>  2021-07-07 13:36:09 +0200
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-07-07 12:17:09 +0000
commit     4a20641fe71e2a9a559ca25c9377d537f6ad182a (patch)
tree       ab460d7f701b14da62fce37b02fade80be6055c7 /jstests
parent     8f42e5e827aa2a39ab7fe8345b124d5c115f840e (diff)
Revert "SERVER-32531 Disalow standalone nodes as shards if no queryableBackupMode is enabled"
This reverts commit 92a0de6ce7e8dfb312d93f499ca5e3c0f6caa945.
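Context for the revert: the reverted change had made mongod refuse to act as a shard server unless it was a replica set member (or running in queryableBackupMode), and the tests below are being restored to their standalone-shard form. A minimal sketch of the flow these restored tests exercise — adding a standalone --shardsvr mongod to a cluster — built from the shell helpers that appear in the diff itself (the shard name "standaloneShard" is illustrative, not part of the commit):

(function() {
'use strict';

// Bring up a cluster with one mongos and no initial shards.
var st = new ShardingTest({shards: 0, mongos: 1});

// Start a standalone mongod as a shard server; with this revert it no
// longer has to be part of a replica set to be accepted as a shard.
var conn = MongoRunner.runMongod({shardsvr: ""});

// Register the standalone with the cluster through mongos.
assert.commandWorked(st.s.adminCommand({addShard: conn.host, name: "standaloneShard"}));

// The standalone should now appear in the shard registry.
assert.eq(1, st.s.getDB('config').shards.count({_id: "standaloneShard"}));

MongoRunner.stopMongod(conn);
st.stop();
})();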
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/libs/command_line/test_parsed_options.js                                | 12
-rw-r--r--  jstests/libs/config_files/set_shardingrole_shardsvr.json                        |  8
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js                 |  8
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js   | 25
-rw-r--r--  jstests/noPassthrough/require_api_version.js                                    | 14
-rw-r--r--  jstests/sharding/addshard1.js                                                   | 24
-rw-r--r--  jstests/sharding/addshard2.js                                                   | 61
-rw-r--r--  jstests/sharding/addshard_idempotent.js                                         | 42
-rw-r--r--  jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js | 15
-rw-r--r--  jstests/sharding/implicit_db_creation.js                                        | 21
-rw-r--r--  jstests/sharding/listshards.js                                                  | 33
-rw-r--r--  jstests/sharding/localhostAuthBypass.js                                         | 47
-rw-r--r--  jstests/sharding/remove1.js                                                     |  8
-rw-r--r--  jstests/sharding/shard_aware_init.js                                            |  6
-rw-r--r--  jstests/sharding/shard_aware_on_add_shard.js                                    | 44
-rw-r--r--  jstests/sharding/sharding_options.js                                            | 20
16 files changed, 217 insertions(+), 171 deletions(-)
diff --git a/jstests/libs/command_line/test_parsed_options.js b/jstests/libs/command_line/test_parsed_options.js
index de11488e88a..50e2733c520 100644
--- a/jstests/libs/command_line/test_parsed_options.js
+++ b/jstests/libs/command_line/test_parsed_options.js
@@ -204,15 +204,3 @@ function testGetCmdLineOptsMongos(mongoRunnerConfig, expectedResult) {
// Make sure the options are equal to what we expect
assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
}
-
-// Tests that the passed configuration will not start a new mongod instance. Mainly used to test
-// conflicting parameters at startup.
-//
-// Arguments:
-// mongoRunnerConfig - Configuration object to pass to the mongo runner
-// Example:
-//
-// testGetCmdLineOptsMongodFailed({ shardsvr : "" });
-function testGetCmdLineOptsMongodFailed(mongoRunnerConfig) {
- assert.throws(() => MongoRunner.runMongod(mongoRunnerConfig));
-}
diff --git a/jstests/libs/config_files/set_shardingrole_shardsvr.json b/jstests/libs/config_files/set_shardingrole_shardsvr.json
deleted file mode 100644
index c605dce50cc..00000000000
--- a/jstests/libs/config_files/set_shardingrole_shardsvr.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "sharding" : {
- "clusterRole" : "shardsvr"
- },
- "replication" : {
- "replSetName" : "dummy"
- }
-}
\ No newline at end of file
diff --git a/jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js b/jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js
index 27085eea249..b1500598680 100644
--- a/jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js
+++ b/jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js
@@ -221,12 +221,12 @@ let standaloneTest = function(nodeOptions, downgradeVersion) {
//
// Replica set tests.
//
-let replicaSetTest = function(nodeOptions, downgradeVersion, numNodes = 3) {
+let replicaSetTest = function(nodeOptions, downgradeVersion) {
jsTestLog("Running replica set test with 'downgradeVersion': " + downgradeVersion);
const downgradeFCV = binVersionToFCV(downgradeVersion);
// New latest binary version replica set.
jsTest.log("Starting a latest binVersion ReplSetTest");
- let rst = new ReplSetTest({nodes: numNodes, nodeOptions: nodeOptions});
+ let rst = new ReplSetTest({nodes: 3, nodeOptions: nodeOptions});
rst.startSet();
rst.initiate();
let primaryAdminDB = rst.getPrimary().getDB("admin");
@@ -362,7 +362,9 @@ standaloneTest({}, 'last-lts');
replicaSetTest({}, 'last-continuous');
replicaSetTest({}, 'last-lts');
-// Do tests for replica sets started with --shardsvr.
+// Do tests for standalones and replica sets started with --shardsvr.
+standaloneTest({shardsvr: ""}, 'last-continuous');
+standaloneTest({shardsvr: ""}, 'last-lts');
replicaSetTest({shardsvr: ""}, 'last-continuous');
replicaSetTest({shardsvr: ""}, 'last-lts');
diff --git a/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js b/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
index 3b52a77005f..a96a607f953 100644
--- a/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
+++ b/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
@@ -237,6 +237,15 @@ function runStandaloneTest(downgradeVersion) {
adminDB = conn.getDB("admin");
checkFCV(adminDB, downgradeFCV);
MongoRunner.stopMongod(conn);
+
+ // A 'latest' binary mongod started with --shardsvr and clean data files defaults to
+ // lastLTSFCV.
+ conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest, shardsvr: ""});
+ assert.neq(
+ null, conn, "mongod was unable to start up with version=" + latest + " and no data files");
+ adminDB = conn.getDB("admin");
+ checkFCV(adminDB, lastLTSFCV);
+ MongoRunner.stopMongod(conn);
}
function runReplicaSetTest(downgradeVersion) {
@@ -439,27 +448,11 @@ function runShardingTest(downgradeVersion) {
const unsupportedOldFCV = (parseFloat(downgradeFCV) - 1).toFixed(1);
const unsupportedFutureFCV = (parseFloat(latestFCV) + 0.1).toFixed(1);
- let singleNodeShard;
- let conn;
let st;
let mongosAdminDB;
let configPrimaryAdminDB;
let shardPrimaryAdminDB;
- // A 'latest' binary single node replica set and clean data files defaults to lastLTSFCV.
- singleNodeShard =
- new ReplSetTest({dbpath: dbpath, binVersion: latest, noCleanData: true, nodes: 1});
- singleNodeShard.startSet({shardsvr: ""});
- singleNodeShard.initiate();
- conn = singleNodeShard.getPrimary();
- assert.neq(
- null,
- conn,
- "Single node replSet was unable to start up with version=" + latest + " and no data files");
- shardPrimaryAdminDB = conn.getDB("admin");
- checkFCV(shardPrimaryAdminDB, lastLTSFCV);
- singleNodeShard.stopSet();
-
// A 'latest' binary cluster started with clean data files will set FCV to 'latestFCV'.
st = new ShardingTest({
shards: {rs0: {nodes: [{binVersion: latest}, {binVersion: latest}]}},
diff --git a/jstests/noPassthrough/require_api_version.js b/jstests/noPassthrough/require_api_version.js
index 7e5e5fb5369..661f33ea886 100644
--- a/jstests/noPassthrough/require_api_version.js
+++ b/jstests/noPassthrough/require_api_version.js
@@ -127,8 +127,7 @@ function runTest(db, supportsTransctions, writeConcern = {}, secondaries = []) {
function requireApiVersionOnShardOrConfigServerTest() {
assert.throws(
- () => MongoRunner.runMongod(
- {shardsvr: "", replSet: "dummy", setParameter: {"requireApiVersion": true}}),
+ () => MongoRunner.runMongod({shardsvr: "", setParameter: {"requireApiVersion": true}}),
[],
"mongod should not be able to start up with --shardsvr and requireApiVersion=true");
@@ -137,15 +136,12 @@ function requireApiVersionOnShardOrConfigServerTest() {
[],
"mongod should not be able to start up with --configsvr and requireApiVersion=true");
- const rs = new ReplSetTest({nodes: 1});
- rs.startSet({shardsvr: ""});
- rs.initiate();
- const singleNodeShard = rs.getPrimary();
- assert.neq(null, singleNodeShard, "mongod was not able to start up");
+ const shardsvrMongod = MongoRunner.runMongod({shardsvr: ""});
+ assert.neq(null, shardsvrMongod, "mongod was not able to start up");
assert.commandFailed(
- singleNodeShard.adminCommand({setParameter: 1, requireApiVersion: true}),
+ shardsvrMongod.adminCommand({setParameter: 1, requireApiVersion: true}),
"should not be able to set requireApiVersion=true on mongod that was started with --shardsvr");
- rs.stopSet();
+ MongoRunner.stopMongod(shardsvrMongod);
const configsvrMongod = MongoRunner.runMongod({configsvr: ""});
assert.neq(null, configsvrMongod, "mongod was not able to start up");
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
index 81a161fcc84..2fa38bb0cbf 100644
--- a/jstests/sharding/addshard1.js
+++ b/jstests/sharding/addshard1.js
@@ -7,11 +7,8 @@ var s = new ShardingTest({name: "add_shard1", shards: 1, useHostname: false});
// Create a shard and add a database; if the database is not duplicated the mongod should accept
// it as shard
-var rs1 = new ReplSetTest({name: "addshard1-1", host: 'localhost', nodes: 1});
-rs1.startSet({shardsvr: ""});
-rs1.initiate();
-
-var db1 = rs1.getPrimary().getDB("testDB");
+var conn1 = MongoRunner.runMongod({'shardsvr': ""});
+var db1 = conn1.getDB("testDB");
var numObjs = 3;
for (var i = 0; i < numObjs; i++) {
@@ -22,7 +19,8 @@ var configDB = s.s.getDB('config');
assert.eq(null, configDB.databases.findOne({_id: 'testDB'}));
var newShard = "myShard";
-assert.commandWorked(s.admin.runCommand({addShard: rs1.getURL(), name: newShard, maxSize: 1024}));
+assert.commandWorked(
+ s.admin.runCommand({addshard: "localhost:" + conn1.port, name: newShard, maxSize: 1024}));
assert.neq(null, configDB.databases.findOne({_id: 'testDB'}));
@@ -31,20 +29,18 @@ assert.eq(1024, newShardDoc.maxSize);
assert(newShardDoc.topologyTime instanceof Timestamp);
// a mongod with an existing database name should not be allowed to become a shard
-var rs2 = new ReplSetTest({name: "addshard1-2", nodes: 1});
-rs2.startSet({shardsvr: ""});
-rs2.initiate();
+var conn2 = MongoRunner.runMongod({'shardsvr': ""});
-var db2 = rs2.getPrimary().getDB("otherDB");
+var db2 = conn2.getDB("otherDB");
assert.commandWorked(db2.foo.save({a: 1}));
-var db3 = rs2.getPrimary().getDB("testDB");
+var db3 = conn2.getDB("testDB");
assert.commandWorked(db3.foo.save({a: 1}));
s.config.databases.find().forEach(printjson);
var rejectedShard = "rejectedShard";
-assert(!s.admin.runCommand({addShard: rs2.getURL(), name: rejectedShard}).ok,
+assert(!s.admin.runCommand({addshard: "localhost:" + conn2.port, name: rejectedShard}).ok,
"accepted mongod with duplicate db");
// Check that all collection that were local to the mongod's are accessible through the mongos
@@ -77,8 +73,8 @@ assert.eq(2,
"wrong chunk number after splitting collection that existed before");
assert.eq(numObjs, sdb1.foo.count(), "wrong count after splitting collection that existed before");
-rs1.stopSet();
-rs2.stopSet();
+MongoRunner.stopMongod(conn1);
+MongoRunner.stopMongod(conn2);
s.stop();
})();
diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js
index 878b5043ec0..64d5300c3c0 100644
--- a/jstests/sharding/addshard2.js
+++ b/jstests/sharding/addshard2.js
@@ -4,9 +4,9 @@
*/
(function() {
-let addShardRes;
+var addShardRes;
-const assertAddShardSucceeded = function(res, shardName) {
+var assertAddShardSucceeded = function(res, shardName) {
assert.commandWorked(res);
// If a shard name was specified, make sure that the name the addShard command reports the
@@ -26,7 +26,7 @@ const assertAddShardSucceeded = function(res, shardName) {
// Note: this method expects that the failure is *not* that the specified shardName is already
// the shardName of an existing shard.
-const assertAddShardFailed = function(res, shardName) {
+var assertAddShardFailed = function(res, shardName) {
assert.commandFailed(res);
// If a shard name was specified in the addShard, make sure no shard with its name shows up
@@ -39,8 +39,8 @@ const assertAddShardFailed = function(res, shardName) {
}
};
-const removeShardWithName = function(shardName) {
- let res = st.s.adminCommand({removeShard: shardName});
+var removeShardWithName = function(shardName) {
+ var res = st.s.adminCommand({removeShard: shardName});
assert.commandWorked(res);
assert.eq('started', res.state);
assert.soon(function() {
@@ -50,27 +50,48 @@ const removeShardWithName = function(shardName) {
}, "removeShard never completed for shard " + shardName);
};
-const st = new ShardingTest({
+var st = new ShardingTest({
shards: 0,
mongos: 1,
});
// Add one shard since the last shard cannot be removed.
-const normalShard = new ReplSetTest({name: "addshard2-1", nodes: 1, nodeOptions: {shardsvr: ""}});
-normalShard.startSet();
-normalShard.initiate();
-
-st.s.adminCommand({addShard: normalShard.getURL(), name: 'normalShard'});
+var normalShard = MongoRunner.runMongod({shardsvr: ''});
+st.s.adminCommand({addShard: normalShard.name, name: 'normalShard'});
// Allocate a port that can be used to test adding invalid hosts.
-const portWithoutHostRunning = allocatePort();
+var portWithoutHostRunning = allocatePort();
-// 1. Test adding a *replica set* with an ordinary set name
+// 1. Test adding a *standalone*
// 1.a. with or without specifying the shardName.
+jsTest.log("Adding a standalone *without* a specified shardName should succeed.");
+let standalone1 = MongoRunner.runMongod({shardsvr: ''});
+addShardRes = st.s.adminCommand({addshard: standalone1.name});
+assertAddShardSucceeded(addShardRes);
+removeShardWithName(addShardRes.shardAdded);
+MongoRunner.stopMongod(standalone1);
+
+jsTest.log("Adding a standalone *with* a specified shardName should succeed.");
+let standalone2 = MongoRunner.runMongod({shardsvr: ''});
+addShardRes = st.s.adminCommand({addshard: standalone2.name, name: "shardName"});
+assertAddShardSucceeded(addShardRes, "shardName");
+removeShardWithName(addShardRes.shardAdded);
+MongoRunner.stopMongod(standalone2);
+
+// 1.b. with an invalid hostname.
+
+jsTest.log("Adding a standalone with a non-existing host should fail.");
+addShardRes = st.s.adminCommand({addShard: getHostName() + ":" + portWithoutHostRunning});
+assertAddShardFailed(addShardRes);
+
+// 2. Test adding a *replica set* with an ordinary set name
+
+// 2.a. with or without specifying the shardName.
+
jsTest.log("Adding a replica set without a specified shardName should succeed.");
-const rst1 = new ReplSetTest({nodes: 1});
+let rst1 = new ReplSetTest({nodes: 1});
rst1.startSet({shardsvr: ''});
rst1.initiate();
addShardRes = st.s.adminCommand({addShard: rst1.getURL()});
@@ -81,7 +102,7 @@ rst1.stopSet();
jsTest.log(
"Adding a replica set with a specified shardName that matches the set's name should succeed.");
-const rst2 = new ReplSetTest({nodes: 1});
+let rst2 = new ReplSetTest({nodes: 1});
rst2.startSet({shardsvr: ''});
rst2.initiate();
addShardRes = st.s.adminCommand({addShard: rst2.getURL(), name: rst2.name});
@@ -103,7 +124,7 @@ jsTest.log("Adding a replica with a specified shardName of 'config' should fail.
addShardRes = st.s.adminCommand({addShard: rst3.getURL(), name: "config"});
assertAddShardFailed(addShardRes, "config");
-// 1.b. with invalid hostnames.
+// 2.b. with invalid hostnames.
jsTest.log("Adding a replica set with only non-existing hosts should fail.");
addShardRes =
@@ -119,7 +140,7 @@ assertAddShardFailed(addShardRes);
rst3.stopSet();
-// 2. Test adding a replica set whose *set name* is "config" with or without specifying the
+// 3. Test adding a replica set whose *set name* is "config" with or without specifying the
// shardName.
let rst4 = new ReplSetTest({name: "config", nodes: 1});
@@ -144,7 +165,7 @@ removeShardWithName(addShardRes.shardAdded);
rst4.stopSet();
-// 3. Test that a replica set whose *set name* is "admin" can be written to (SERVER-17232).
+// 4. Test that a replica set whose *set name* is "admin" can be written to (SERVER-17232).
let rst5 = new ReplSetTest({name: "admin", nodes: 1});
rst5.startSet({shardsvr: ''});
@@ -157,7 +178,7 @@ assertAddShardSucceeded(addShardRes);
// Ensure the write goes to the newly added shard.
assert.commandWorked(st.s.getDB('test').runCommand({create: "foo"}));
-const res = st.s.getDB('config').getCollection('databases').findOne({_id: 'test'});
+var res = st.s.getDB('config').getCollection('databases').findOne({_id: 'test'});
assert.neq(null, res);
if (res.primary != addShardRes.shardAdded) {
assert.commandWorked(st.s.adminCommand({movePrimary: 'test', to: addShardRes.shardAdded}));
@@ -173,5 +194,5 @@ removeShardWithName(addShardRes.shardAdded);
rst5.stopSet();
st.stop();
-normalShard.stopSet();
+MongoRunner.stopMongod(normalShard);
})();
diff --git a/jstests/sharding/addshard_idempotent.js b/jstests/sharding/addshard_idempotent.js
index e6420f5f0e5..85d3a072ca3 100644
--- a/jstests/sharding/addshard_idempotent.js
+++ b/jstests/sharding/addshard_idempotent.js
@@ -2,15 +2,31 @@
(function() {
'use strict';
-const st = new ShardingTest({name: "add_shard_idempotent", shards: 1});
+var st = new ShardingTest({name: "add_shard_idempotent", shards: 0});
+
+jsTestLog("Testing adding a standalone shard multiple times");
+var shard1 = MongoRunner.runMongod({'shardsvr': ""});
+assert.commandWorked(
+ st.admin.runCommand({addshard: shard1.host, name: "newShard1", maxSize: 1024}));
+
+// Running the identical addShard command should succeed.
+assert.commandWorked(
+ st.admin.runCommand({addshard: shard1.host, name: "newShard1", maxSize: 1024}));
+
+// Trying to add the same shard with different options should fail
+assert.commandFailed(
+ st.admin.runCommand({addshard: shard1.host, name: "newShard1"})); // No maxSize
+
+assert.commandFailed(
+ st.admin.runCommand({addshard: shard1.host, name: "a different shard name", maxSize: 1024}));
jsTestLog("Testing adding a replica set shard multiple times");
-const shard2 = new ReplSetTest({name: 'rsShard', nodes: 3});
-shard2.startSet({shardsvr: ""});
+var shard2 = new ReplSetTest({name: 'rsShard', nodes: 3, nodeOptions: {shardsvr: ""}});
+shard2.startSet();
shard2.initiate();
shard2.getPrimary(); // Wait for there to be a primary
-const shard2SeedList1 = shard2.name + "/" + shard2.nodes[0].host;
-const shard2SeedList2 = shard2.name + "/" + shard2.nodes[2].host;
+var shard2SeedList1 = shard2.name + "/" + shard2.nodes[0].host;
+var shard2SeedList2 = shard2.name + "/" + shard2.nodes[2].host;
assert.commandWorked(st.admin.runCommand({addshard: shard2SeedList1, name: "newShard2"}));
@@ -22,22 +38,24 @@ assert.commandWorked(st.admin.runCommand({addshard: shard2SeedList1, name: "newS
assert.commandWorked(st.admin.runCommand({addshard: shard2SeedList2, name: "newShard2"}));
// Verify that the config.shards collection looks right.
-const shards = st.s.getDB('config').shards.find().toArray();
+var shards = st.s.getDB('config').shards.find().toArray();
assert.eq(2, shards.length);
-let shard1TopologyTime, shard2TopologyTime;
-for (let i = 0; i < shards.length; i++) {
- let shard = shards[i];
- if (shard._id != 'newShard2') {
+for (var i = 0; i < shards.length; i++) {
+ var shard = shards[i];
+ if (shard._id == 'newShard1') {
+ assert.eq(shard1.host, shard.host);
+ assert.eq(1024, shard.maxSize);
assert(shard.topologyTime instanceof Timestamp);
- shard1TopologyTime = shard.topologyTime;
+ var shard1TopologyTime = shard.topologyTime;
} else {
assert.eq('newShard2', shard._id);
assert.eq(shard2.getURL(), shard.host);
assert(shard.topologyTime instanceof Timestamp);
- shard2TopologyTime = shard.topologyTime;
+ var shard2TopologyTime = shard.topologyTime;
}
}
assert.gt(shard2TopologyTime, shard1TopologyTime);
+MongoRunner.stopMongod(shard1);
shard2.stopSet();
st.stop();
})();
diff --git a/jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js b/jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js
index a66173929ff..8ff3cd65745 100644
--- a/jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js
+++ b/jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js
@@ -11,7 +11,6 @@ const dbName = "test";
const collName = "foo";
const ns = dbName + "." + collName;
const newShardName = "newShard";
-let newShard;
// Commands sent directly to the config server should fail with WC < majority.
const unacceptableWCsForConfig = [
@@ -51,7 +50,7 @@ const setupFuncs = {
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
},
addShard: function() {
- assert.commandWorked(st.s.adminCommand({addShard: newShard.getURL(), name: newShardName}));
+ assert.commandWorked(st.s.adminCommand({addShard: newShard.name, name: newShardName}));
},
};
@@ -126,7 +125,7 @@ function checkCommandConfigSvr(command, setupFunc, cleanupFunc) {
cleanupFunc);
}
-let st = new ShardingTest({shards: 1});
+var st = new ShardingTest({shards: 1});
// enableSharding
checkCommandMongos({enableSharding: dbName}, setupFuncs.noop, cleanupFuncs.dropDatabase);
@@ -156,13 +155,11 @@ checkCommandConfigSvr({_configsvrCreateDatabase: dbName, to: st.shard0.name},
cleanupFuncs.dropDatabase);
// addShard
-newShard = new ReplSetTest({nodes: 1});
-newShard.startSet({shardsvr: ''});
-newShard.initiate();
-checkCommandMongos({addShard: newShard.getURL(), name: newShardName},
+var newShard = MongoRunner.runMongod({shardsvr: ""});
+checkCommandMongos({addShard: newShard.name, name: newShardName},
setupFuncs.noop,
cleanupFuncs.removeShardIfExists);
-checkCommandConfigSvr({_configsvrAddShard: newShard.getURL(), name: newShardName},
+checkCommandConfigSvr({_configsvrAddShard: newShard.name, name: newShardName},
setupFuncs.noop,
cleanupFuncs.removeShardIfExists);
@@ -193,6 +190,6 @@ checkCommand(st.s.getDB(dbName),
setupFuncs.createDatabase,
cleanupFuncs.dropDatabase);
-newShard.stopSet();
+MongoRunner.stopMongos(newShard);
st.stop();
})();
diff --git a/jstests/sharding/implicit_db_creation.js b/jstests/sharding/implicit_db_creation.js
index 54bee11225c..03c460bdeff 100644
--- a/jstests/sharding/implicit_db_creation.js
+++ b/jstests/sharding/implicit_db_creation.js
@@ -4,12 +4,12 @@
(function() {
"use strict";
-const st = new ShardingTest({shards: 2});
-const configDB = st.s.getDB('config');
+var st = new ShardingTest({shards: 2});
+var configDB = st.s.getDB('config');
assert.eq(null, configDB.databases.findOne());
-const testDB = st.s.getDB('test');
+var testDB = st.s.getDB('test');
// Test that reads will not result into a new config.databases entry.
assert.eq(null, testDB.user.findOne());
@@ -17,7 +17,7 @@ assert.eq(null, configDB.databases.findOne({_id: 'test'}));
assert.commandWorked(testDB.user.insert({x: 1}));
-const testDBDoc = configDB.databases.findOne();
+var testDBDoc = configDB.databases.findOne();
assert.eq('test', testDBDoc._id, tojson(testDBDoc));
// Test that inserting to another collection in the same database will not modify the existing
@@ -26,23 +26,20 @@ assert.commandWorked(testDB.bar.insert({y: 1}));
assert.eq(testDBDoc, configDB.databases.findOne());
st.s.adminCommand({enableSharding: 'foo'});
-const fooDBDoc = configDB.databases.findOne({_id: 'foo'});
+var fooDBDoc = configDB.databases.findOne({_id: 'foo'});
assert.neq(null, fooDBDoc);
assert(fooDBDoc.partitioned);
-const newShard = new ReplSetTest({nodes: 1});
-newShard.startSet({shardsvr: ""});
-newShard.initiate();
-
-const unshardedDB = newShard.getPrimary().getDB('unshardedDB');
+var newShardConn = MongoRunner.runMongod({'shardsvr': ""});
+var unshardedDB = newShardConn.getDB('unshardedDB');
unshardedDB.user.insert({z: 1});
-assert.commandWorked(st.s.adminCommand({addShard: newShard.getURL()}));
+assert.commandWorked(st.s.adminCommand({addShard: newShardConn.name}));
assert.neq(null, configDB.databases.findOne({_id: 'unshardedDB'}));
-newShard.stopSet();
+MongoRunner.stopMongod(newShardConn);
st.stop();
})();
diff --git a/jstests/sharding/listshards.js b/jstests/sharding/listshards.js
index a0e07e5e44d..0960746fe7c 100644
--- a/jstests/sharding/listshards.js
+++ b/jstests/sharding/listshards.js
@@ -4,29 +4,39 @@
(function() {
'use strict';
-const checkShardName = function(shardName, shardsArray) {
+function checkShardName(shardName, shardsArray) {
var found = false;
- shardsArray.forEach((shardObj) => {
+ shardsArray.forEach(function(shardObj) {
if (shardObj._id === shardName) {
found = true;
return;
}
});
return found;
-};
+}
-const shardTest =
+var shardTest =
new ShardingTest({name: 'listShardsTest', shards: 1, mongos: 1, other: {useHostname: true}});
-const mongos = shardTest.s0;
-let res = mongos.adminCommand('listShards');
+var mongos = shardTest.s0;
+var res = mongos.adminCommand('listShards');
assert.commandWorked(res, 'listShards command failed');
-let shardsArray = res.shards;
+var shardsArray = res.shards;
assert.eq(shardsArray.length, 1);
+// add standalone mongod
+var standaloneShard = MongoRunner.runMongod({useHostName: true, shardsvr: ""});
+res = shardTest.admin.runCommand({addShard: standaloneShard.host, name: 'standalone'});
+assert.commandWorked(res, 'addShard command failed');
+res = mongos.adminCommand('listShards');
+assert.commandWorked(res, 'listShards command failed');
+shardsArray = res.shards;
+assert.eq(shardsArray.length, 2);
+assert(checkShardName('standalone', shardsArray),
+ 'listShards command didn\'t return standalone shard: ' + tojson(shardsArray));
+
// add replica set named 'repl'
-const rs1 =
- new ReplSetTest({name: 'repl', nodes: 1, useHostName: true, nodeOptions: {shardsvr: ""}});
+var rs1 = new ReplSetTest({name: 'repl', nodes: 1, useHostName: true, nodeOptions: {shardsvr: ""}});
rs1.startSet();
rs1.initiate();
res = shardTest.admin.runCommand({addShard: rs1.getURL()});
@@ -34,7 +44,7 @@ assert.commandWorked(res, 'addShard command failed');
res = mongos.adminCommand('listShards');
assert.commandWorked(res, 'listShards command failed');
shardsArray = res.shards;
-assert.eq(shardsArray.length, 2);
+assert.eq(shardsArray.length, 3);
assert(checkShardName('repl', shardsArray),
'listShards command didn\'t return replica set shard: ' + tojson(shardsArray));
@@ -57,10 +67,11 @@ assert.soon(function() {
res = mongos.adminCommand('listShards');
assert.commandWorked(res, 'listShards command failed');
shardsArray = res.shards;
-assert.eq(shardsArray.length, 1);
+assert.eq(shardsArray.length, 2);
assert(!checkShardName('repl', shardsArray),
'listShards command returned removed replica set shard: ' + tojson(shardsArray));
rs1.stopSet();
shardTest.stop();
+MongoRunner.stopMongod(standaloneShard);
})();
diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js
index 449c928e467..eb9f3f771c8 100644
--- a/jstests/sharding/localhostAuthBypass.js
+++ b/jstests/sharding/localhostAuthBypass.js
@@ -18,7 +18,6 @@ var keyfile = "jstests/libs/key1";
var numShards = 2;
var username = "foo";
var password = "bar";
-var adhocShard = 0;
var createUser = function(mongo) {
print("============ adding a user.");
@@ -34,19 +33,14 @@ var addUsersToEachShard = function(st) {
};
var addShard = function(st, shouldPass) {
- adhocShard++;
- var rs =
- new ReplSetTest({nodes: 1, host: 'localhost', name: 'localhostAuthShard-' + adhocShard});
- rs.startSet({shardsvr: "", keyFile: keyfile, auth: ""});
- rs.initiate();
-
- var res = st.getDB("admin").runCommand({addShard: rs.getURL()});
+ var m = MongoRunner.runMongod({auth: "", keyFile: keyfile, useHostname: false, 'shardsvr': ''});
+ var res = st.getDB("admin").runCommand({addShard: m.host});
if (shouldPass) {
assert.commandWorked(res, "Add shard");
} else {
assert.commandFailed(res, "Add shard");
}
- return rs;
+ return m;
};
var findEmptyShard = function(st, ns) {
@@ -198,6 +192,35 @@ var start = function() {
});
};
+var shutdown = function(st) {
+ print("============ shutting down.");
+
+ // SERVER-8445
+ // Unlike MongoRunner.stopMongod and ReplSetTest.stopSet,
+ // ShardingTest.stop does not have a way to provide auth
+ // information. Therefore, we'll do this manually for now.
+
+ for (var i = 0; i < st._mongos.length; i++) {
+ var conn = st["s" + i];
+ MongoRunner.stopMongos(conn,
+ /*signal*/ false,
+ {auth: {user: username, pwd: password}});
+ }
+
+ for (var i = 0; i < st._connections.length; i++) {
+ st["rs" + i].stopSet(/*signal*/ false, {auth: {user: username, pwd: password}});
+ }
+
+ for (var i = 0; i < st._configServers.length; i++) {
+ var conn = st["config" + i];
+ MongoRunner.stopMongod(conn,
+ /*signal*/ false,
+ {auth: {user: username, pwd: password}});
+ }
+
+ st.stop();
+};
+
print("=====================");
print("starting shards");
print("=====================");
@@ -236,8 +259,8 @@ assertCanRunCommands(mongo, st);
extraShards.push(addShard(mongo, 1));
st.printShardingStatus();
-extraShards.forEach(function(rs) {
- rs.stopSet();
+shutdown(st);
+extraShards.forEach(function(sh) {
+ MongoRunner.stopMongod(sh);
});
-st.stop();
})();
diff --git a/jstests/sharding/remove1.js b/jstests/sharding/remove1.js
index c53b6327b88..9b8b045fdc4 100644
--- a/jstests/sharding/remove1.js
+++ b/jstests/sharding/remove1.js
@@ -44,12 +44,10 @@ assert.gt(topologyTime2, topologyTime1);
assert.commandFailed(s.s0.adminCommand({removeshard: s.shard1.shardName}));
// Should create a shard0002 shard
-var rs = new ReplSetTest({nodes: 1});
-rs.startSet({shardsvr: ""});
-rs.initiate();
-assert.commandWorked(s.s0.adminCommand({addshard: rs.getURL()}));
+var conn = MongoRunner.runMongod({shardsvr: ""});
+assert.commandWorked(s.s0.adminCommand({addshard: conn.host}));
assert.eq(2, s.config.shards.count(), "new server does not appear in count");
-rs.stopSet();
+MongoRunner.stopMongod(conn);
s.stop();
})();
diff --git a/jstests/sharding/shard_aware_init.js b/jstests/sharding/shard_aware_init.js
index 20d15d07f6c..5b36acbffd5 100644
--- a/jstests/sharding/shard_aware_init.js
+++ b/jstests/sharding/shard_aware_init.js
@@ -166,6 +166,12 @@ var runTest = function(mongodConn, configConnStr, awaitVersionUpdate) {
var st = new ShardingTest({shards: 1});
{
+ var mongod = MongoRunner.runMongod({shardsvr: ''});
+ runTest(mongod, st.configRS.getURL(), function() {});
+ MongoRunner.stopMongod(mongod);
+}
+
+{
var replTest = new ReplSetTest({nodes: 1});
replTest.startSet({shardsvr: ''});
replTest.initiate();
diff --git a/jstests/sharding/shard_aware_on_add_shard.js b/jstests/sharding/shard_aware_on_add_shard.js
index 0b9260c42d3..e524f2cfe64 100644
--- a/jstests/sharding/shard_aware_on_add_shard.js
+++ b/jstests/sharding/shard_aware_on_add_shard.js
@@ -6,8 +6,15 @@
(function() {
"use strict";
-const checkShardingStateInitialized = function(conn, configConnStr, shardName, clusterId) {
- const res = conn.getDB('admin').runCommand({shardingState: 1});
+var waitForPrimary = function(conn) {
+ assert.soon(function() {
+ var res = conn.getDB('admin').runCommand({hello: 1});
+ return res.isWritablePrimary;
+ });
+};
+
+var checkShardingStateInitialized = function(conn, configConnStr, shardName, clusterId) {
+ var res = conn.getDB('admin').runCommand({shardingState: 1});
assert.commandWorked(res);
assert(res.enabled);
assert.eq(shardName, res.shardName);
@@ -16,25 +23,40 @@ const checkShardingStateInitialized = function(conn, configConnStr, shardName, c
assert.soon(() => configConnStr == conn.adminCommand({shardingState: 1}).configServer);
};
-const checkShardMarkedAsShardAware = function(mongosConn, shardName) {
- const res = mongosConn.getDB('config').getCollection('shards').findOne({_id: shardName});
+var checkShardMarkedAsShardAware = function(mongosConn, shardName) {
+ var res = mongosConn.getDB('config').getCollection('shards').findOne({_id: shardName});
assert.neq(null, res, "Could not find new shard " + shardName + " in config.shards");
assert.eq(1, res.state);
};
// Create the cluster to test adding shards to.
-const st = new ShardingTest({shards: 1});
-const clusterId = st.s.getDB('config').getCollection('version').findOne().clusterId;
-const newShardName = "newShard";
+var st = new ShardingTest({shards: 1});
+var clusterId = st.s.getDB('config').getCollection('version').findOne().clusterId;
+
+// Add a shard that is a standalone mongod.
+
+var standaloneConn = MongoRunner.runMongod({shardsvr: ''});
+waitForPrimary(standaloneConn);
+
+jsTest.log("Going to add standalone as shard: " + standaloneConn);
+var newShardName = "newShard";
+assert.commandWorked(st.s.adminCommand({addShard: standaloneConn.name, name: newShardName}));
+checkShardingStateInitialized(standaloneConn, st.configRS.getURL(), newShardName, clusterId);
+checkShardMarkedAsShardAware(st.s, newShardName);
+
+MongoRunner.stopMongod(standaloneConn);
+
+// Add a shard that is a replica set.
-// Add a shard and ensure awareness.
-const replTest = new ReplSetTest({nodes: 1});
+var replTest = new ReplSetTest({nodes: 1});
replTest.startSet({shardsvr: ''});
replTest.initiate();
+waitForPrimary(replTest.getPrimary());
jsTest.log("Going to add replica set as shard: " + tojson(replTest));
-assert.commandWorked(st.s.adminCommand({addShard: replTest.getURL(), name: newShardName}));
-checkShardingStateInitialized(replTest.getPrimary(), st.configRS.getURL(), newShardName, clusterId);
+assert.commandWorked(st.s.adminCommand({addShard: replTest.getURL(), name: replTest.getURL()}));
+checkShardingStateInitialized(
+ replTest.getPrimary(), st.configRS.getURL(), replTest.getURL(), clusterId);
checkShardMarkedAsShardAware(st.s, newShardName);
replTest.stopSet();
diff --git a/jstests/sharding/sharding_options.js b/jstests/sharding/sharding_options.js
index 8e44934b64f..6acf2361729 100644
--- a/jstests/sharding/sharding_options.js
+++ b/jstests/sharding/sharding_options.js
@@ -35,22 +35,11 @@ testGetCmdLineOptsMongod({configsvr: "", journal: ""}, expectedResult);
jsTest.log("Testing \"shardsvr\" command line option");
expectedResult = {
- "parsed": {"sharding": {"clusterRole": "shardsvr"}, "replication": {"replSet": "dummy"}}
+ "parsed": {"sharding": {"clusterRole": "shardsvr"}}
};
-testGetCmdLineOptsMongod({shardsvr: "", replSet: "dummy"}, expectedResult);
+testGetCmdLineOptsMongod({shardsvr: ""}, expectedResult);
-jsTest.log("Testing \"sharding.clusterRole = shardsvr\" config file option");
-expectedResult = {
- "parsed": {
- "config": "jstests/libs/config_files/set_shardingrole_shardsvr.json",
- "sharding": {"clusterRole": "shardsvr"},
- "replication": {"replSetName": "dummy"}
- }
-};
-testGetCmdLineOptsMongod({config: "jstests/libs/config_files/set_shardingrole_shardsvr.json"},
- expectedResult);
-
-jsTest.log("Testing \"sharding.clusterRole = configsvr\" config file option");
+jsTest.log("Testing \"sharding.clusterRole\" config file option");
expectedResult = {
"parsed": {
"config": "jstests/libs/config_files/set_shardingrole.json",
@@ -81,7 +70,4 @@ expectedResult = {
testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_nomoveparanoia.ini"},
expectedResult);
-jsTest.log("Ensure starting a standalone with --shardsvr fails");
-testGetCmdLineOptsMongodFailed({shardsvr: ""});
-
print(baseName + " succeeded.");