summaryrefslogtreecommitdiff
path: root/jstests/sharding/recovering_slaveok.js
diff options
context:
space:
mode:
author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2015-10-12 12:00:46 -0400
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2015-10-13 17:25:13 -0400
commit66c0dda7e9f97d03383139f7e62e4534934b5ecc (patch)
tree0b3fff121c9a0a4076c103edf2b99a239e286382 /jstests/sharding/recovering_slaveok.js
parent6abdb820a8259591cc43ad4ed52dbf7c3509d04b (diff)
downloadmongo-66c0dda7e9f97d03383139f7e62e4534934b5ecc.tar.gz
SERVER-20646 Cluster find command per-host retry logic
This change makes the cluster 'find' command to retry on a per-host basis instead of the entire operation. Reverts commit c433c8157f988a377c1cf9646078450ecd68c297. Reverts commit 5ab3290f8796f2143acd5011ab0baae70ed5cece.
Diffstat (limited to 'jstests/sharding/recovering_slaveok.js')
-rw-r--r--  jstests/sharding/recovering_slaveok.js | 129 +++++++++++++++++++++
1 file changed, 129 insertions(+), 0 deletions(-)
diff --git a/jstests/sharding/recovering_slaveok.js b/jstests/sharding/recovering_slaveok.js
new file mode 100644
index 00000000000..b446c80918c
--- /dev/null
+++ b/jstests/sharding/recovering_slaveok.js
@@ -0,0 +1,129 @@
// This tests that slaveOk'd queries in sharded setups get correctly routed when a slave goes into
// RECOVERING state, and don't break.

(function() {

'use strict';

var shardTest = new ShardingTest({ name: "recovering_slaveok",
                                   shards: 2,
                                   mongos: 2,
                                   other: { rs: true } });

// s0 issues normal (primary-targeted) reads; s1 is marked slaveOk so its
// reads may be routed to secondaries.
var mongos = shardTest.s0;
var mongosSOK = shardTest.s1;
mongosSOK.setSlaveOk();

var admin = mongos.getDB("admin");
var config = mongos.getDB("config");

// The same test namespace viewed through both routers.
var dbase = mongos.getDB("test");
var coll = dbase.getCollection("foo");
var dbaseSOk = mongosSOK.getDB("" + dbase);
var collSOk = mongosSOK.getCollection("" + coll);

var rsA = shardTest._rs[0].test;
var rsB = shardTest._rs[1].test;

// Seed each shard's replica set with an unrelated write so replication is
// flowing before the test proper starts.
rsA.getMaster().getDB("test_a").dummy.insert({ x: 1 });
rsB.getMaster().getDB("test_b").dummy.insert({ x: 1 });

rsA.awaitReplication();
rsB.awaitReplication();

print("1: initial insert");

coll.save({ _id: -1, a: "a", date: new Date() });
coll.save({ _id: 1, b: "b", date: new Date() });

print("2: shard collection");

shardTest.shardGo(coll, /* shardBy */ { _id: 1 }, /* splitAt */ { _id: 0 });

print("3: test normal and slaveOk queries");

// Make shardA and rsA refer to the same replica set (the one owning the
// {_id: -1} chunk), swapping the rs handles if necessary.
var shardA = shardTest.getShard(coll, { _id: -1 });
var shardAColl = shardA.getCollection("" + coll);
var shardB = shardTest.getShard(coll, { _id: 1 });

if (shardA.name === rsB.getURL()) {
    var swap = rsB;
    rsB = rsA;
    rsA = swap;
}

rsA.awaitReplication();
rsB.awaitReplication();

// Because of async migration cleanup, we need to wait for this condition to be true
assert.soon(function() { return coll.find().itcount() == collSOk.find().itcount(); });

assert.eq(shardAColl.find().itcount(), 1);
assert.eq(shardAColl.findOne()._id, -1);

print("5: make one of the secondaries RECOVERING");

var secs = rsA.getSecondaries();
var goodSec = secs[0];
var badSec = secs[1];

assert.commandWorked(badSec.adminCommand("replSetMaintenance"));
rsA.waitForState(badSec, ReplSetTest.State.RECOVERING);

print("6: stop non-RECOVERING secondary");

rsA.stop(goodSec);

print("7: check our regular and slaveOk query");

// With one secondary RECOVERING and the other down, both read paths must
// still see the full collection (slaveOk reads fall back to the primary).
assert.eq(2, coll.find().itcount());
assert.eq(2, collSOk.find().itcount());

print("8: restart both our secondaries clean");

rsA.restart(rsA.getSecondaries(),
            { remember: true, startClean: true },
            undefined,
            5 * 60 * 1000);

print("9: wait for recovery");

// Use ReplSetTest.State.SECONDARY for consistency with the RECOVERING wait
// above (rsA.SECONDARY was the legacy instance-level alias for the same value).
rsA.waitForState(rsA.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);

print("10: check our regular and slaveOk query");

// We need to make sure our nodes are considered accessible from mongos - otherwise we fail
// See SERVER-7274
ReplSetTest.awaitRSClientHosts(coll.getMongo(), rsA.nodes, { ok: true });
ReplSetTest.awaitRSClientHosts(coll.getMongo(), rsB.nodes, { ok: true });

// We need to make sure at least one secondary is accessible from mongos - otherwise we fail
// See SERVER-7699
ReplSetTest.awaitRSClientHosts(collSOk.getMongo(), [rsA.getSecondaries()[0]],
                               { secondary: true, ok: true });
ReplSetTest.awaitRSClientHosts(collSOk.getMongo(), [rsB.getSecondaries()[0]],
                               { secondary: true, ok: true });

print("SlaveOK Query...");
var sOKCount = collSOk.find().itcount();

var collCount = null;
try {
    print("Normal query...");
    collCount = coll.find().itcount();
}
catch (e) {
    printjson(e);

    // There may have been a stepdown caused by step 8, so we run this twice in a row. The first
    // time can error out.
    print("Error may have been caused by stepdown, try again.");
    collCount = coll.find().itcount();
}

assert.eq(collCount, sOKCount);

shardTest.stop();

})();