 buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml | 2
 jstests/aggregation/sharded_agg_cleanup_on_error.js | 3
 jstests/noPassthrough/exit_logging.js | 2
 jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js | 2
 jstests/readonly/lib/read_only_test.js | 2
 jstests/sharding/allow_partial_results.js | 2
 jstests/sharding/auth_slaveok_routing.js | 2
 jstests/sharding/basic_sharding_params.js | 32
 jstests/sharding/bouncing_count.js | 2
 jstests/sharding/bulk_insert.js | 2
 jstests/sharding/change_streams_primary_shard_unaware.js | 4
 jstests/sharding/cursor_timeout.js | 9
 jstests/sharding/forget_mr_temp_ns.js | 2
 jstests/sharding/geo_near_sharded.js | 2
 jstests/sharding/kill_pinned_cursor.js | 3
 jstests/sharding/mapReduce_inSharded_outSharded.js | 8
 jstests/sharding/mapReduce_outSharded.js | 2
 jstests/sharding/migrateBig_balancer.js | 5
 jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js | 12
 jstests/sharding/mongos_validate_writes.js | 4
 jstests/sharding/movePrimary1.js | 3
 jstests/sharding/move_stale_mongos.js | 2
 jstests/sharding/movechunk_with_moveParanoia.js | 2
 jstests/sharding/mrShardedOutput.js | 12
 jstests/sharding/mr_noscripting.js | 3
 jstests/sharding/prefix_shard_key.js | 2
 jstests/sharding/return_partial_shards_down.js | 2
 jstests/sharding/sharding_balance2.js | 1
 jstests/sharding/startup_with_all_configs_down.js | 2
 jstests/sharding/write_commands_sharding_state.js | 40
 src/mongo/shell/shardingtest.js | 50
 31 files changed, 131 insertions(+), 90 deletions(-)
diff --git a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
index fd71d1a18f4..2727dbfb7c2 100644
--- a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
@@ -120,6 +120,8 @@ selector:
- jstests/sharding/pending_chunk.js
# New failpoint in v4.0 mongos.
- jstests/sharding/crash_mongos_against_upgraded_cluster.js
+ # New failpoint in v4.0 mongod.
+ - jstests/sharding/move_primary_clone_test.js
# New waitForClusterTime
- jstests/sharding/auth_slaveok_routing.js
# This test should not be run with a mixed cluster environment.
diff --git a/jstests/aggregation/sharded_agg_cleanup_on_error.js b/jstests/aggregation/sharded_agg_cleanup_on_error.js
index 2403a8e1d01..f8b3aad263d 100644
--- a/jstests/aggregation/sharded_agg_cleanup_on_error.js
+++ b/jstests/aggregation/sharded_agg_cleanup_on_error.js
@@ -13,8 +13,7 @@
const kFailPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
const kFailpointOptions = {shouldCheckForInterrupt: true};
- // TODO: SERVER-33444 remove shardAsReplicaSet: false
- const st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
+ const st = new ShardingTest({shards: 2});
const kDBName = "test";
const kDivideByZeroErrCode = 16608;
const mongosDB = st.s.getDB(kDBName);
diff --git a/jstests/noPassthrough/exit_logging.js b/jstests/noPassthrough/exit_logging.js
index 781cf2d4f0d..4e6be5c811f 100644
--- a/jstests/noPassthrough/exit_logging.js
+++ b/jstests/noPassthrough/exit_logging.js
@@ -91,7 +91,7 @@
(function testMongos() {
print("********************\nTesting exit logging in mongos\n********************");
- var st = new ShardingTest({shards: 1, other: {shardOptions: {nojournal: ""}}});
+ var st = new ShardingTest({shards: 1});
var mongosLauncher = {
start: function(opts) {
var actualOpts = {configdb: st._configDB};
diff --git a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
index 46838744251..38542deed0a 100644
--- a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
+++ b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
@@ -3,7 +3,7 @@
// @tags: [requires_sharding]
//
-// TODO: SERVER-33444 remove shardAsReplicaSet: false
+// TODO: SERVER-33601 remove shardAsReplicaSet: false
var st = new ShardingTest({shards: 2, mongos: 1, other: {shardAsReplicaSet: false}});
var mongos = st.s0;
diff --git a/jstests/readonly/lib/read_only_test.js b/jstests/readonly/lib/read_only_test.js
index ab9db5e14cf..cc94a2fbe89 100644
--- a/jstests/readonly/lib/read_only_test.js
+++ b/jstests/readonly/lib/read_only_test.js
@@ -52,7 +52,7 @@ var StandaloneFixture, ShardedFixture, runReadOnlyTest, zip2, cycleN;
};
ShardedFixture.prototype.runLoadPhase = function runLoadPhase(test) {
- // TODO: SERVER-33444 remove shardAsReplicaSet: false
+ // TODO: SERVER-33830 remove shardAsReplicaSet: false
this.shardingTest = new ShardingTest({
nopreallocj: true,
mongos: 1,
diff --git a/jstests/sharding/allow_partial_results.js b/jstests/sharding/allow_partial_results.js
index e7bc96ea151..6490720cb80 100644
--- a/jstests/sharding/allow_partial_results.js
+++ b/jstests/sharding/allow_partial_results.js
@@ -12,7 +12,7 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
const collName = "foo";
const ns = dbName + "." + collName;
- // TODO: SERVER-33444 remove shardAsReplicaSet: false
+ // TODO: SERVER-33597 remove shardAsReplicaSet: false
const st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
jsTest.log("Insert some data.");
diff --git a/jstests/sharding/auth_slaveok_routing.js b/jstests/sharding/auth_slaveok_routing.js
index c2cb71baf38..51810f76d3d 100644
--- a/jstests/sharding/auth_slaveok_routing.js
+++ b/jstests/sharding/auth_slaveok_routing.js
@@ -41,7 +41,7 @@
var rsOpts = {oplogSize: 50};
// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
var st = new ShardingTest(
- {shards: 1, rs: rsOpts, other: {keyFile: 'jstests/libs/key1', ShardAsReplicaSet: false}});
+ {shards: 1, rs: rsOpts, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
var mongos = st.s;
var replTest = st.rs0;
diff --git a/jstests/sharding/basic_sharding_params.js b/jstests/sharding/basic_sharding_params.js
index 3e71167cdd6..c1076ff8941 100644
--- a/jstests/sharding/basic_sharding_params.js
+++ b/jstests/sharding/basic_sharding_params.js
@@ -6,12 +6,10 @@
'use strict';
function shardingTestUsingObjects() {
- // TODO: SERVER-33444 remove shardAsReplicaSet: false
var st = new ShardingTest({
mongos: {s0: {verbose: 6}, s1: {verbose: 5}},
config: {c0: {verbose: 4}},
- shards: {d0: {verbose: 3}, rs1: {nodes: {d0: {verbose: 2}, a1: {verbose: 1}}}},
- other: {shardAsReplicaSet: false}
+ shards: {d0: {verbose: 3}, rs1: {nodes: {d0: {verbose: 2}, a1: {verbose: 1}}}}
});
var s0 = st.s0;
@@ -23,19 +21,21 @@
var c0 = st.c0;
assert.eq(c0, st._configServers[0]);
- var d0 = st.d0;
- assert.eq(d0, st._connections[0]);
+ var rs0 = st.rs0;
+ assert.eq(rs0, st._rsObjects[0]);
var rs1 = st.rs1;
assert.eq(rs1, st._rsObjects[1]);
+ var rs0_d0 = rs0.nodes[0];
+
var rs1_d0 = rs1.nodes[0];
var rs1_a1 = rs1.nodes[1];
assert(s0.commandLine.hasOwnProperty("vvvvvv"));
assert(s1.commandLine.hasOwnProperty("vvvvv"));
assert(c0.commandLine.hasOwnProperty("vvvv"));
- assert(d0.commandLine.hasOwnProperty("vvv"));
+ assert(rs0_d0.commandLine.hasOwnProperty("vvv"));
assert(rs1_d0.commandLine.hasOwnProperty("vv"));
assert(rs1_a1.commandLine.hasOwnProperty("v"));
@@ -43,12 +43,10 @@
}
function shardingTestUsingArrays() {
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
var st = new ShardingTest({
mongos: [{verbose: 5}, {verbose: 4}],
config: [{verbose: 3}],
- shards: [{verbose: 2}, {verbose: 1}],
- other: {shardAsReplicaSet: false}
+ shards: [{verbose: 2}, {verbose: 1}]
});
var s0 = st.s0;
@@ -60,17 +58,21 @@
var c0 = st.c0;
assert.eq(c0, st._configServers[0]);
- var d0 = st.d0;
- assert.eq(d0, st._connections[0]);
+ var rs0 = st.rs0;
+ assert.eq(rs0, st._rsObjects[0]);
+
+ var rs1 = st.rs1;
+ assert.eq(rs1, st._rsObjects[1]);
+
+ var rs0_d0 = rs0.nodes[0];
- var d1 = st.d1;
- assert.eq(d1, st._connections[1]);
+ var rs1_d0 = rs1.nodes[0];
assert(s0.commandLine.hasOwnProperty("vvvvv"));
assert(s1.commandLine.hasOwnProperty("vvvv"));
assert(c0.commandLine.hasOwnProperty("vvv"));
- assert(d0.commandLine.hasOwnProperty("vv"));
- assert(d1.commandLine.hasOwnProperty("v"));
+ assert(rs0_d0.commandLine.hasOwnProperty("vv"));
+ assert(rs1_d0.commandLine.hasOwnProperty("v"));
st.stop();
}
diff --git a/jstests/sharding/bouncing_count.js b/jstests/sharding/bouncing_count.js
index 4df4a735228..090c6f7f627 100644
--- a/jstests/sharding/bouncing_count.js
+++ b/jstests/sharding/bouncing_count.js
@@ -8,7 +8,7 @@
(function() {
'use strict';
- // TODO: SERVER-33444 remove shardAsReplicaSet: false
+ // TODO: SERVER-33830 remove shardAsReplicaSet: false
var st = new ShardingTest({shards: 10, mongos: 3, other: {shardAsReplicaSet: false}});
var mongosA = st.s0;
diff --git a/jstests/sharding/bulk_insert.js b/jstests/sharding/bulk_insert.js
index 02cb8e47226..d50830a4665 100644
--- a/jstests/sharding/bulk_insert.js
+++ b/jstests/sharding/bulk_insert.js
@@ -2,7 +2,7 @@
(function() {
'use strict';
- // TODO: SERVER-33444 remove shardAsReplicaSet: false
+ // TODO: SERVER-33601 remove shardAsReplicaSet: false
var st = new ShardingTest({shards: 2, mongos: 2, other: {shardAsReplicaSet: false}});
var mongos = st.s;
diff --git a/jstests/sharding/change_streams_primary_shard_unaware.js b/jstests/sharding/change_streams_primary_shard_unaware.js
index 4c7329c85e3..528f96edb1d 100644
--- a/jstests/sharding/change_streams_primary_shard_unaware.js
+++ b/jstests/sharding/change_streams_primary_shard_unaware.js
@@ -59,7 +59,7 @@
// Restart the primary shard and ensure that it is no longer aware that the collection is
// sharded.
- st.rs0.restart(0);
+ st.restartShardRS(0);
assert.eq(false, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
const mongos1DB = st.s1.getDB(testName);
@@ -141,7 +141,7 @@
// Restart the primary shard and ensure that it is no longer aware that the collection is
// sharded.
- st.rs0.restart(0);
+ st.restartShardRS(0);
assert.eq(false, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
// Establish change stream cursor on mongos2 using the resume token from the change steam on
diff --git a/jstests/sharding/cursor_timeout.js b/jstests/sharding/cursor_timeout.js
index 340868e06f4..a6be1762245 100644
--- a/jstests/sharding/cursor_timeout.js
+++ b/jstests/sharding/cursor_timeout.js
@@ -26,7 +26,6 @@
const cursorMonitorFrequencySecs = 1;
- // TODO: SERVER-33444 remove shardAsReplicaSet: false
const st = new ShardingTest({
shards: 2,
other: {
@@ -44,7 +43,6 @@
clientCursorMonitorFrequencySecs: cursorMonitorFrequencySecs
}
},
- shardAsReplicaSet: false
},
enableBalancer: false
});
@@ -52,19 +50,20 @@
const adminDB = st.admin;
const routerColl = st.s.getDB('test').user;
- const shardHost = st.config.shards.findOne({_id: "shard0001"}).host;
+ const shardHost = st.config.shards.findOne({_id: st.shard1.shardName}).host;
const mongod = new Mongo(shardHost);
const shardColl = mongod.getCollection(routerColl.getFullName());
assert.commandWorked(adminDB.runCommand({enableSharding: routerColl.getDB().getName()}));
- st.ensurePrimaryShard(routerColl.getDB().getName(), "shard0000");
+ st.ensurePrimaryShard(routerColl.getDB().getName(), st.shard0.shardName);
+
assert.commandWorked(
adminDB.runCommand({shardCollection: routerColl.getFullName(), key: {x: 1}}));
assert.commandWorked(adminDB.runCommand({split: routerColl.getFullName(), middle: {x: 10}}));
assert.commandWorked(adminDB.runCommand({
moveChunk: routerColl.getFullName(),
find: {x: 11},
- to: "shard0001",
+ to: st.shard1.shardName,
_waitForDelete: true
}));
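
The changes above illustrate a pattern used throughout this commit: shard identifiers are resolved from the test fixture rather than hardcoded as "shard0000"/"shard0001", since those names no longer apply once shards start as replica sets. A minimal sketch of the lookup, assuming a running ShardingTest st (variable names are illustrative):

    // Resolve the shard's registered name, then its host string from config.shards.
    const shardName = st.shard1.shardName;
    const shardHost = st.config.shards.findOne({_id: shardName}).host;
    const shardConn = new Mongo(shardHost);  // direct connection to that shard
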
diff --git a/jstests/sharding/forget_mr_temp_ns.js b/jstests/sharding/forget_mr_temp_ns.js
index d3403dd7fee..bb587c487f0 100644
--- a/jstests/sharding/forget_mr_temp_ns.js
+++ b/jstests/sharding/forget_mr_temp_ns.js
@@ -2,7 +2,7 @@
// Tests whether we forget M/R's temporary namespaces for sharded output
//
-// TODO: SERVER-33444 remove shardAsReplicaSet: false
+// TODO: SERVER-33599 remove shardAsReplicaSet: false
var st = new ShardingTest({shards: 1, mongos: 1, other: {shardAsReplicaSet: false}});
var mongos = st.s0;
diff --git a/jstests/sharding/geo_near_sharded.js b/jstests/sharding/geo_near_sharded.js
index a92b579ef7e..f297b789747 100644
--- a/jstests/sharding/geo_near_sharded.js
+++ b/jstests/sharding/geo_near_sharded.js
@@ -46,7 +46,7 @@
tojson({sharded: sharded, indexType: indexType}));
}
- // TODO: SERVER-33444 remove shardAsReplicaSet: false
+ // TODO: SERVER-33954 Remove shardAsReplicaSet: false
var st = new ShardingTest({shards: 3, mongos: 1, other: {shardAsReplicaSet: false}});
assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
st.ensurePrimaryShard('test', st.shard1.shardName);
diff --git a/jstests/sharding/kill_pinned_cursor.js b/jstests/sharding/kill_pinned_cursor.js
index 0ba46f59b8f..f636f981082 100644
--- a/jstests/sharding/kill_pinned_cursor.js
+++ b/jstests/sharding/kill_pinned_cursor.js
@@ -12,8 +12,7 @@
const kFailPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
const kFailpointOptions = {shouldCheckForInterrupt: true};
- // TODO: SERVER-33444 remove shardAsReplicaSet: false
- const st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
+ const st = new ShardingTest({shards: 2});
const kDBName = "test";
const mongosDB = st.s.getDB(kDBName);
const shard0DB = st.shard0.getDB(kDBName);
diff --git a/jstests/sharding/mapReduce_inSharded_outSharded.js b/jstests/sharding/mapReduce_inSharded_outSharded.js
index d16fe3f9214..92dae92f5f0 100644
--- a/jstests/sharding/mapReduce_inSharded_outSharded.js
+++ b/jstests/sharding/mapReduce_inSharded_outSharded.js
@@ -9,12 +9,8 @@
assert.eq(out.counts.output, 512, "output count is wrong");
};
- var st = new ShardingTest({
- shards: 2,
- verbose: 1,
- mongos: 1,
- other: {chunkSize: 1, enableBalancer: true, shardAsReplicaSet: false}
- });
+ var st = new ShardingTest(
+ {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
var admin = st.s0.getDB('admin');
diff --git a/jstests/sharding/mapReduce_outSharded.js b/jstests/sharding/mapReduce_outSharded.js
index 3a95fb9aa65..300cb6e53c1 100644
--- a/jstests/sharding/mapReduce_outSharded.js
+++ b/jstests/sharding/mapReduce_outSharded.js
@@ -6,7 +6,7 @@ var verifyOutput = function(out) {
assert.eq(out.counts.output, 512, "output count is wrong");
};
-// TODO: SERVER-33444 remove shardAsReplicaSet: false
+// TODO: SERVER-33599 remove shardAsReplicaSet: false
var st = new ShardingTest({
shards: 2,
verbose: 1,
diff --git a/jstests/sharding/migrateBig_balancer.js b/jstests/sharding/migrateBig_balancer.js
index 990b76019c0..2d28244cba7 100644
--- a/jstests/sharding/migrateBig_balancer.js
+++ b/jstests/sharding/migrateBig_balancer.js
@@ -7,14 +7,13 @@
(function() {
"use strict";
- // TODO: SERVER-33444 remove shardAsReplicaSet: false
+ // TODO: SERVER-33830 remove shardAsReplicaSet: false
var st = new ShardingTest({
name: 'migrateBig_balancer',
shards: 2,
other: {enableBalancer: true, shardAsReplicaSet: false}
});
var mongos = st.s;
-
var admin = mongos.getDB("admin");
var db = mongos.getDB("test");
var coll = db.getCollection("stuff");
@@ -37,8 +36,8 @@
for (var i = 0; i < 40; i++) {
bulk.insert({data: dataObj});
}
- assert.writeOK(bulk.execute());
+ assert.writeOK(bulk.execute());
assert.eq(40, coll.count(), "prep1");
assert.commandWorked(admin.runCommand({shardcollection: "" + coll, key: {_id: 1}}));
diff --git a/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js b/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js
index 6af13213a46..ab125e12e21 100644
--- a/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js
+++ b/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js
@@ -5,8 +5,7 @@
(function() {
"use strict";
- // TODO: SERVER-33444 remove shardAsReplicaSet: false
- var st = new ShardingTest({shards: 1, other: {shardAsReplicaSet: false}});
+ var st = new ShardingTest({shards: 1});
// Insert a recovery doc with non-zero minOpTimeUpdaters to simulate a migration
// process that crashed in the middle of the critical section.
@@ -19,20 +18,21 @@
minOpTimeUpdaters: 2
};
- assert.writeOK(st.d0.getDB('admin').system.version.insert(recoveryDoc));
+ assert.writeOK(st.shard0.getDB('admin').system.version.insert(recoveryDoc));
// Make sure test is setup correctly.
var minOpTimeRecoveryDoc =
- st.d0.getDB('admin').system.version.findOne({_id: 'minOpTimeRecovery'});
+ st.shard0.getDB('admin').system.version.findOne({_id: 'minOpTimeRecovery'});
assert.neq(null, minOpTimeRecoveryDoc);
assert.eq(0, minOpTimeRecoveryDoc.minOpTime.ts.getTime());
assert.eq(2, minOpTimeRecoveryDoc.minOpTimeUpdaters);
- st.restartMongod(0);
+ st.restartShardRS(0);
// After the restart, the shard should have updated the opTime and reset minOpTimeUpdaters.
- minOpTimeRecoveryDoc = st.d0.getDB('admin').system.version.findOne({_id: 'minOpTimeRecovery'});
+ minOpTimeRecoveryDoc =
+ st.shard0.getDB('admin').system.version.findOne({_id: 'minOpTimeRecovery'});
assert.neq(null, minOpTimeRecoveryDoc);
assert.gt(minOpTimeRecoveryDoc.minOpTime.ts.getTime(), 0);
diff --git a/jstests/sharding/mongos_validate_writes.js b/jstests/sharding/mongos_validate_writes.js
index 85b7dbb136f..c8c38411279 100644
--- a/jstests/sharding/mongos_validate_writes.js
+++ b/jstests/sharding/mongos_validate_writes.js
@@ -6,7 +6,9 @@
(function() {
'use strict';
- var st = new ShardingTest({shards: 2, mongos: 3, other: {shardOptions: {verbose: 2}}});
+ // TODO: SERVER-34093 Remove shardAsReplicaSet: false
+ var st = new ShardingTest(
+ {shards: 2, mongos: 3, other: {shardOptions: {verbose: 2}}, shardAsReplicaSet: false});
var mongos = st.s0;
var staleMongosA = st.s1;
diff --git a/jstests/sharding/movePrimary1.js b/jstests/sharding/movePrimary1.js
index 80928677984..9c36d8fbf88 100644
--- a/jstests/sharding/movePrimary1.js
+++ b/jstests/sharding/movePrimary1.js
@@ -1,7 +1,8 @@
(function() {
'use strict';
- var s = new ShardingTest({shards: 2});
+ // TODO: SERVER-34093 Remove shardAsReplicaSet: false
+ var s = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
var db = s.getDB('test1');
var c = db.foo;
diff --git a/jstests/sharding/move_stale_mongos.js b/jstests/sharding/move_stale_mongos.js
index ab0643b128b..18288803378 100644
--- a/jstests/sharding/move_stale_mongos.js
+++ b/jstests/sharding/move_stale_mongos.js
@@ -2,7 +2,7 @@
// Tests that stale mongoses can properly move chunks.
//
-// TODO: SERVER-33444 remove shardAsReplicaSet: false
+// TODO: SERVER-33830 remove shardAsReplicaSet: false
var st = new ShardingTest({shards: 2, mongos: 2, other: {shardAsReplicaSet: false}});
var admin = st.s0.getDB('admin');
var testDb = 'test';
diff --git a/jstests/sharding/movechunk_with_moveParanoia.js b/jstests/sharding/movechunk_with_moveParanoia.js
index 18a2f31658f..b3b203af32e 100644
--- a/jstests/sharding/movechunk_with_moveParanoia.js
+++ b/jstests/sharding/movechunk_with_moveParanoia.js
@@ -2,7 +2,6 @@
* This test sets moveParanoia flag and then check that the directory is created with the moved data
*/
-// TODO: SERVER-33444 remove shardAsReplicaSet: false
var st = new ShardingTest({
shards: 2,
mongos: 1,
@@ -10,7 +9,6 @@ var st = new ShardingTest({
chunkSize: 1,
enableAutoSplit: true,
shardOptions: {moveParanoia: ""},
- shardAsReplicaSet: false
}
});
diff --git a/jstests/sharding/mrShardedOutput.js b/jstests/sharding/mrShardedOutput.js
index b3981cacb36..fcc8f2a7e19 100644
--- a/jstests/sharding/mrShardedOutput.js
+++ b/jstests/sharding/mrShardedOutput.js
@@ -4,12 +4,12 @@
// collection input twice the size of the first and outputs it to the new sharded
// collection created in the first pass.
-// TODO: SERVER-33444 remove shardAsReplicaSet: false
+// TODO: SERVER-33599 remove shardAsReplicaSet: false
var st = new ShardingTest({shards: 2, other: {chunkSize: 1, shardAsReplicaSet: false}});
var config = st.getDB("config");
st.adminCommand({enablesharding: "test"});
-st.ensurePrimaryShard("test", "shard0001");
+st.ensurePrimaryShard("test", st.shard1.shardName);
st.adminCommand({shardcollection: "test.foo", key: {"a": 1}});
var testDB = st.getDB("test");
@@ -30,7 +30,7 @@ var str = new Array(1024).join('a');
// collections in the database. The upshot is that we need a sharded collection on
// both shards in order to ensure M/R will output to two shards.
st.adminCommand({split: 'test.foo', middle: {a: numDocs + numBatch / 2}});
-st.adminCommand({moveChunk: 'test.foo', find: {a: numDocs}, to: 'shard0000'});
+st.adminCommand({moveChunk: 'test.foo', find: {a: numDocs}, to: st.shard0.shardName});
// Add some more data for input so that chunks will get split further
for (var splitPoint = 0; splitPoint < numBatch; splitPoint += 400) {
@@ -82,15 +82,15 @@ config.chunks.find({ns: testDB.mrShardedOut.getFullName()}).forEach(function(chu
// Check that chunks for the newly created sharded output collection are well distributed.
var shard0Chunks =
- config.chunks.find({ns: testDB.mrShardedOut._fullName, shard: 'shard0000'}).count();
+ config.chunks.find({ns: testDB.mrShardedOut._fullName, shard: st.shard0.shardName}).count();
var shard1Chunks =
- config.chunks.find({ns: testDB.mrShardedOut._fullName, shard: 'shard0001'}).count();
+ config.chunks.find({ns: testDB.mrShardedOut._fullName, shard: st.shard1.shardName}).count();
assert.lte(Math.abs(shard0Chunks - shard1Chunks), 1);
jsTest.log('Starting second pass');
st.adminCommand({split: 'test.foo', middle: {a: numDocs + numBatch / 2}});
-st.adminCommand({moveChunk: 'test.foo', find: {a: numDocs}, to: 'shard0000'});
+st.adminCommand({moveChunk: 'test.foo', find: {a: numDocs}, to: st.shard0.shardName});
// Add some more data for input so that chunks will get split further
for (splitPoint = 0; splitPoint < numBatch; splitPoint += 400) {
diff --git a/jstests/sharding/mr_noscripting.js b/jstests/sharding/mr_noscripting.js
index d5781e8fcea..6bf196c587e 100644
--- a/jstests/sharding/mr_noscripting.js
+++ b/jstests/sharding/mr_noscripting.js
@@ -3,8 +3,7 @@ var shardOpts = [
{} // just use default params
];
-// TODO: SERVER-33444 remove shardAsReplicaSet: false
-var st = new ShardingTest({shards: shardOpts, other: {nopreallocj: 1, shardAsReplicaSet: false}});
+var st = new ShardingTest({shards: shardOpts, other: {nopreallocj: 1}});
var mongos = st.s;
st.shardColl('bar', {x: 1});
diff --git a/jstests/sharding/prefix_shard_key.js b/jstests/sharding/prefix_shard_key.js
index d633669e28b..6dd3b30344f 100644
--- a/jstests/sharding/prefix_shard_key.js
+++ b/jstests/sharding/prefix_shard_key.js
@@ -9,7 +9,7 @@
(function() {
'use strict';
- // TODO: SERVER-33444 remove shardAsReplicaSet: false
+ // TODO: SERVER-33601 remove shardAsReplicaSet: false
var s = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
var db = s.getDB("test");
diff --git a/jstests/sharding/return_partial_shards_down.js b/jstests/sharding/return_partial_shards_down.js
index 37ecb758653..edf537d4ed1 100644
--- a/jstests/sharding/return_partial_shards_down.js
+++ b/jstests/sharding/return_partial_shards_down.js
@@ -5,7 +5,7 @@
// Checking UUID consistency involves talking to shards, but this test shuts down shards.
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
-// TODO: SERVER-33444 remove shardAsReplicaSet: false
+// TODO: SERVER-33597 remove shardAsReplicaSet: false
var st = new ShardingTest(
{shards: 3, mongos: 1, other: {mongosOptions: {verbose: 2}, shardAsReplicaSet: false}});
diff --git a/jstests/sharding/sharding_balance2.js b/jstests/sharding/sharding_balance2.js
index b2befe5dd22..7bba7e25bf3 100644
--- a/jstests/sharding/sharding_balance2.js
+++ b/jstests/sharding/sharding_balance2.js
@@ -13,7 +13,6 @@
assert.eq(2, names.length);
assert.commandWorked(s.s0.adminCommand({addshard: names[0]}));
assert.commandWorked(s.s0.adminCommand({addshard: names[1], maxSize: MaxSizeMB}));
-
assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', names[0]);
diff --git a/jstests/sharding/startup_with_all_configs_down.js b/jstests/sharding/startup_with_all_configs_down.js
index 72ff9238bfc..2dc78e07d76 100644
--- a/jstests/sharding/startup_with_all_configs_down.js
+++ b/jstests/sharding/startup_with_all_configs_down.js
@@ -23,7 +23,7 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
MongoRunner.runMongod(shard);
}
- // TODO: SERVER-33444 remove shardAsReplicaSet: false
+ // TODO: SERVER-33830 remove shardAsReplicaSet: false
var st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
jsTestLog("Setting up initial data");
diff --git a/jstests/sharding/write_commands_sharding_state.js b/jstests/sharding/write_commands_sharding_state.js
index 479a3c1f687..395d328a138 100644
--- a/jstests/sharding/write_commands_sharding_state.js
+++ b/jstests/sharding/write_commands_sharding_state.js
@@ -5,9 +5,7 @@
(function() {
'use strict';
- // TODO: SERVER-33444 remove shardAsReplicaSet: false
- var st = new ShardingTest(
- {name: "write_commands", mongos: 2, shards: 2, other: {shardAsReplicaSet: false}});
+ var st = new ShardingTest({name: "write_commands", mongos: 2, shards: 2});
var dbTestName = 'WriteCommandsTestDB';
var collName = dbTestName + '.TestColl';
@@ -38,27 +36,27 @@
assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 21}));
// Make sure the documents are correctly placed
- printjson(st.d0.getDB(dbTestName).TestColl.find().toArray());
- printjson(st.d1.getDB(dbTestName).TestColl.find().toArray());
+ printjson(st.shard0.getDB(dbTestName).TestColl.find().toArray());
+ printjson(st.shard1.getDB(dbTestName).TestColl.find().toArray());
- assert.eq(1, st.d0.getDB(dbTestName).TestColl.count());
- assert.eq(2, st.d1.getDB(dbTestName).TestColl.count());
+ assert.eq(1, st.shard0.getDB(dbTestName).TestColl.count());
+ assert.eq(2, st.shard1.getDB(dbTestName).TestColl.count());
- assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({Key: 1}).count());
- assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({Key: 11}).count());
- assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({Key: 21}).count());
+ assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 1}).count());
+ assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 11}).count());
+ assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 21}).count());
// Move chunk [0, 19] to st.shard0.shardName and make sure the documents are correctly placed
assert.commandWorked(st.s0.adminCommand(
{moveChunk: collName, find: {Key: 19}, _waitForDelete: true, to: st.shard0.shardName}));
printjson(st.config.getSiblingDB('config').chunks.find().toArray());
- printjson(st.d0.getDB(dbTestName).TestColl.find({}).toArray());
- printjson(st.d1.getDB(dbTestName).TestColl.find({}).toArray());
+ printjson(st.shard0.getDB(dbTestName).TestColl.find({}).toArray());
+ printjson(st.shard1.getDB(dbTestName).TestColl.find({}).toArray());
// Now restart all mongod instances, so they don't know yet that they are sharded
- st.restartMongod(0);
- st.restartMongod(1);
+ st.restartShardRS(0);
+ st.restartShardRS(1);
// Now that both mongod shards are restarted, they don't know yet that they are part of a
// sharded
@@ -73,15 +71,15 @@
// information, see SERVER-19395).
st.s1.getDB(dbTestName).TestColl.update({Key: 11}, {$inc: {Counter: 1}}, {upsert: true});
- printjson(st.d0.getDB(dbTestName).TestColl.find({}).toArray());
- printjson(st.d1.getDB(dbTestName).TestColl.find({}).toArray());
+ printjson(st.shard0.getDB(dbTestName).TestColl.find({}).toArray());
+ printjson(st.shard1.getDB(dbTestName).TestColl.find({}).toArray());
- assert.eq(2, st.d0.getDB(dbTestName).TestColl.count());
- assert.eq(1, st.d1.getDB(dbTestName).TestColl.count());
+ assert.eq(2, st.shard0.getDB(dbTestName).TestColl.count());
+ assert.eq(1, st.shard1.getDB(dbTestName).TestColl.count());
- assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({Key: 1}).count());
- assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({Key: 11}).count());
- assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({Key: 21}).count());
+ assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 1}).count());
+ assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 11}).count());
+ assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 21}).count());
st.stop();
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index 3a63f8192f4..29f710386b0 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -876,9 +876,31 @@ var ShardingTest = function(params) {
};
/**
+ * Restarts each node in a particular shard replica set using the shard's original startup
+ * options by default.
+ *
+ * Option { startClean : true } forces clearing the data directory.
+ * Option { auth : Object } is an object that contains the auth details for admin
+ * credentials; it should contain the fields 'user' and 'pwd'.
+ *
+ * @param {int} n the shard number (0, 1, 2, ...) whose replica set is to be restarted
+ */
+ this.restartShardRS = function(n, options, signal, wait) {
+ for (let i = 0; i < this["rs" + n].nodeList().length; i++) {
+ this["rs" + n].restart(i);
+ }
+
+ this["rs" + n].awaitSecondaryNodes();
+ this._connections[n] = new Mongo(this["rs" + n].getURL());
+ this["shard" + n] = this._connections[n];
+ };
+
+ /**
* Stops and restarts a config server mongod process.
*
- * If opts is specified, the new mongod is started using those options. Otherwise, it is started
+ * If opts is specified, the new mongod is started using those options.
+ * Otherwise, it is started
* with its previous parameters.
*
* Warning: Overwrites the old cn/confign member variables.
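
The restartShardRS helper added above is what tests like change_streams_primary_shard_unaware.js and write_commands_sharding_state.js now call in place of st.rs0.restart(0) or st.restartMongod(n). A minimal usage sketch, assuming a running ShardingTest st (the final assertion just illustrates the connection refresh the helper performs):

    st.restartShardRS(0);                      // restart every node of shard 0's replica set
    const primary = st.rs0.getPrimary();       // the ReplSetTest object remains usable
    assert.eq(st.shard0, st._connections[0]);  // st.shard0 now points at a fresh Mongo connection
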
@@ -1053,6 +1075,7 @@ var ShardingTest = function(params) {
var tempCount = 0;
for (var i in numShards) {
otherParams[i] = numShards[i];
+
tempCount++;
}
@@ -1143,6 +1166,31 @@ var ShardingTest = function(params) {
rsDefaults.nodes = rsDefaults.nodes || otherParams.numReplicas;
}
+ if (startShardsAsRS && !(otherParams.rs || otherParams["rs" + i])) {
+ if (jsTestOptions().shardMixedBinVersions) {
+ if (!otherParams.shardOptions) {
+ otherParams.shardOptions = {};
+ }
+ // If the test doesn't depend on specific shard binVersions, create a
+ // mixed version shard cluster that randomly assigns shard binVersions,
+ // half "latest" and half "last-stable".
+ if (!otherParams.shardOptions.binVersion) {
+ Random.setRandomSeed();
+ otherParams.shardOptions.binVersion =
+ MongoRunner.versionIterator(["latest", "last-stable"], true);
+ }
+ }
+
+ if (otherParams.shardOptions && otherParams.shardOptions.binVersion) {
+ otherParams.shardOptions.binVersion =
+ MongoRunner.versionIterator(otherParams.shardOptions.binVersion);
+ }
+
+ rsDefaults = Object.merge(rsDefaults, otherParams["d" + i]);
+ rsDefaults = Object.merge(rsDefaults, otherParams.shardOptions);
+ }
+
var rsSettings = rsDefaults.settings;
delete rsDefaults.settings;
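
A condensed sketch of the binVersion assignment added above, with MongoRunner.versionIterator semantics assumed from its use in this hunk (random order when the second argument is true):

    // Roughly half of the shard replica sets come up on each binary.
    Random.setRandomSeed();
    var binVersion = MongoRunner.versionIterator(["latest", "last-stable"], true);
    // Merged into each shard's rsDefaults via otherParams.shardOptions, so all
    // nodes of a given shard start on the version the iterator hands out.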