author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-10-11 12:51:05 -0400
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-10-11 18:45:31 -0400
commit     d40e4b4104060ae92d618e32d49eccf30f4be73c (patch)
tree       8ce114e444286238510fd21101487ef07f1e54a2
parent     8ae0a291325cccbf8dc94124c8fef88565065275 (diff)
download   mongo-d40e4b4104060ae92d618e32d49eccf30f4be73c.tar.gz
SERVER-5128/SERVER-5130 Modify js test to validate both tickets are fixed
Also gets rid of one unnecessary noPassthroughWithMongod test.
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml   2
-rw-r--r--  jstests/noPassthroughWithMongod/replica_set_shard_version.js               64
-rw-r--r--  jstests/sharding/all_config_hosts_down.js                                  49
-rw-r--r--  jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js     78
4 files changed, 79 insertions, 114 deletions
diff --git a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
index f0c180017c7..ac3245c8c92 100644
--- a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
@@ -107,7 +107,7 @@ selector:
- jstests/sharding/stale_mongos_updates_and_removes.js
- jstests/sharding/zero_shard_version.js
 # Already stops or blackholes the primary of the CSRS config shard
- - jstests/sharding/all_config_hosts_down.js
+ - jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
- jstests/sharding/all_config_servers_blackholed_from_mongos.js
- jstests/sharding/batch_write_command_sharded.js
- jstests/sharding/config_rs_no_primary.js
diff --git a/jstests/noPassthroughWithMongod/replica_set_shard_version.js b/jstests/noPassthroughWithMongod/replica_set_shard_version.js
deleted file mode 100644
index 73c520c14ac..00000000000
--- a/jstests/noPassthroughWithMongod/replica_set_shard_version.js
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Tests whether a Replica Set in a mongos cluster can cause versioning problems.
- */
-
-// Checking UUID consistency involves talking to a shard node, which in this test has been
-// stepped down
-TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
-
-(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 1, mongos: 2, other: {rs: true, enableBalancer: true}});
-
- var mongosA = st.s0;
- var mongosB = st.s1;
- var shard = st.shard0;
-
- var coll = mongosA.getCollection(jsTestName() + ".coll");
-
- // Wait for the primary and then initialize the shard (SERVER-5130)
- st.rs0.getPrimary();
- coll.findOne();
-
- var sadmin = shard.getDB("admin");
- assert.throws(function() {
- sadmin.runCommand({replSetStepDown: 3000, force: true});
- });
-
- st.rs0.getPrimary();
-
- mongosA.getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
-
- try {
- // This _almost_ always fails, unless the new primary is already detected. If it fails, it
- // should mark the master as bad, so mongos will reload the replica set master on the next request.
- //
- // TODO: Can we just retry and succeed here? (A retry sketch follows this file's diff.)
- coll.findOne();
- } catch (e) {
- print("This error is expected : ");
- printjson(e);
- }
-
- jsTest.log("Running query which should succeed...");
-
- // This should always succeed without throwing an error
- coll.findOne();
-
- mongosA.getDB("admin").runCommand({setParameter: 1, traceExceptions: false});
-
- // Now check secondary
- assert.throws(function() {
- sadmin.runCommand({replSetStepDown: 3000, force: true});
- });
-
- // Can't use mongosB (SERVER-5128)
- var other = new Mongo(mongosA.host);
- other.setSlaveOk(true);
- other = other.getCollection(jsTestName() + ".coll");
-
- print("eliot: " + tojson(other.findOne()));
-
- st.stop();
-})();
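The TODO in the deleted test above asks whether the failing post-stepdown query could
simply be retried. A minimal sketch of that pattern (not part of this commit), using
assert.soon, the standard jstests polling helper:

    // Retry the query until mongos has refreshed its view of the new primary,
    // instead of catching a single expected failure. The timeout is illustrative.
    assert.soon(function() {
        try {
            coll.findOne();
            return true;  // Succeeded once mongos targeted the new primary
        } catch (e) {
            print('Retrying after expected error: ' + tojson(e));
            return false;
        }
    }, 'findOne never succeeded after stepdown', 30 * 1000);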
diff --git a/jstests/sharding/all_config_hosts_down.js b/jstests/sharding/all_config_hosts_down.js
deleted file mode 100644
index 712b9b2dc19..00000000000
--- a/jstests/sharding/all_config_hosts_down.js
+++ /dev/null
@@ -1,49 +0,0 @@
-//
-// Tests what happens when the config servers are down and the database config is loaded.
-// It should fail sanely.
-//
-
-// Checking UUID consistency involves talking to the config servers, which are shut down in this
-// test.
-TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
-
-(function() {
- "use strict";
-
- var st = new ShardingTest({shards: 1, mongos: 1});
-
- var mongos = st.s;
- var coll = mongos.getCollection("foo.bar");
-
- jsTestLog("Stopping config servers");
- for (var i = 0; i < st._configServers.length; i++) {
- MongoRunner.stopMongod(st._configServers[i]);
- }
-
- // Make sure mongos has no database info currently loaded
- mongos.getDB("admin").runCommand({flushRouterConfig: 1});
-
- jsTestLog("Config flushed and config servers down!");
-
- // Throws a transport error the first and subsequent times the config data is loaded, not a no-primary error
- for (var i = 0; i < 2; i++) {
- try {
- coll.findOne();
- // Should always throw
- assert(false);
- } catch (e) {
- printjson(e);
-
- // Make sure we get a transport error, and not a no-primary error
- assert(e.code == 8002 || // SCCC config down, for v3.0 compatibility.
- e.code == 10276 || // Transport error
- e.code == 13328 || // Connect error
- e.code == ErrorCodes.HostUnreachable ||
- e.code == ErrorCodes.FailedToSatisfyReadPreference ||
- e.code == ErrorCodes.ReplicaSetNotFound);
- }
- }
-
- st.stop();
-
-}());
diff --git a/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
new file mode 100644
index 00000000000..6a89bf1508c
--- /dev/null
+++ b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
@@ -0,0 +1,78 @@
+/**
+ * Shuts down config server and shard replica set nodes one by one and ensures correct behaviour.
+ */
+
+// Checking UUID consistency involves talking to the config servers, which are shut down in this
+// test.
+TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
+
+(function() {
+ 'use strict';
+
+ var st = new ShardingTest({shards: {rs0: {nodes: 2}}});
+
+ jsTest.log('Config nodes up: 3 of 3, shard nodes up: 2 of 2: ' +
+ 'Insert test data to work with');
+ assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
+ {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
+ assert.eq([{_id: 0, count: 1}], st.s0.getDB('TestDB').TestColl.find().toArray());
+
+ jsTest.log('Config nodes up: 2 of 3, shard nodes up: 2 of 2: ' +
+ 'Inserts and queries must work');
+ st.configRS.stop(0);
+ st.restartMongos(0);
+ assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
+ {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
+ assert.eq([{_id: 0, count: 2}], st.s0.getDB('TestDB').TestColl.find().toArray());
+
+ jsTest.log('Config nodes up: 1 of 3, shard nodes up: 2 of 2: ' +
+ 'Inserts and queries must work');
+ st.configRS.stop(1);
+ st.restartMongos(0);
+ assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
+ {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
+ assert.eq([{_id: 0, count: 3}], st.s0.getDB('TestDB').TestColl.find().toArray());
+
+ jsTest.log('Config nodes up: 1 of 3, shard nodes up: 1 of 2: ' +
+ 'Only queries will work (no shard primary)');
+ st.rs0.stop(0);
+ st.restartMongos(0);
+ st.s0.setSlaveOk(true);
+ assert.eq([{_id: 0, count: 3}], st.s0.getDB('TestDB').TestColl.find().toArray());
+
+ jsTest.log('Config nodes up: 1 of 3, shard nodes up: 0 of 2: ' +
+ 'MongoS must start, but no operations will work (no shard nodes available)');
+ st.rs0.stop(1);
+ st.restartMongos(0);
+ assert.throws(function() {
+ st.s0.getDB('TestDB').TestColl.find().toArray();
+ });
+
+ jsTest.log('Config nodes up: 0 of 3, shard nodes up: 0 of 2: ' +
+ 'Metadata cannot be loaded at all, no operations will work');
+ st.configRS.stop(1);
+
+ // Instead of restarting mongos, ensure it has no metadata
+ assert.commandWorked(st.s0.adminCommand({flushRouterConfig: 1}));
+
+ // Throws a transport error the first and subsequent times the config data is loaded, not a no-primary error
+ for (var i = 0; i < 2; i++) {
+ try {
+ st.s0.getDB('TestDB').TestColl.findOne();
+
+ // Must always throw
+ assert(false);
+ } catch (e) {
+ printjson(e);
+
+ // Make sure we get a transport error, and not a no-primary error
+ assert(e.code == 10276 || // Transport error
+ e.code == 13328 || // Connect error
+ e.code == ErrorCodes.HostUnreachable ||
+ e.code == ErrorCodes.FailedToSatisfyReadPreference ||
+ e.code == ErrorCodes.ReplicaSetNotFound);
+ }
+ }
+
+ st.stop();
+}());
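Both the deleted all_config_hosts_down.js and the new test repeat the same try/catch
error-code check. A hedged sketch (not part of this commit; the helper name is
hypothetical) of how it could be factored out, relying on assert.throws returning the
thrown exception:

    // Runs fn, asserts that it throws, and verifies the failure is a transport-level
    // error rather than a no-primary error.
    function assertThrowsTransportError(fn) {
        var error = assert.throws(fn);
        assert(error.code == 10276 ||  // Transport error
                   error.code == 13328 ||  // Connect error
                   error.code == ErrorCodes.HostUnreachable ||
                   error.code == ErrorCodes.FailedToSatisfyReadPreference ||
                   error.code == ErrorCodes.ReplicaSetNotFound,
               'Unexpected error: ' + tojson(error));
    }

    // Usage, mirroring the final loop of the new test:
    for (var i = 0; i < 2; i++) {
        assertThrowsTransportError(function() {
            st.s0.getDB('TestDB').TestColl.findOne();
        });
    }

To run only this test, an invocation along the lines of
python buildscripts/resmoke.py --suites=sharding jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
should work, assuming the test remains in the sharding suite.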