author     Cheahuychou Mao <cheahuychou.mao@mongodb.com>     2019-10-01 23:11:40 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-02-09 19:38:25 +0000
commit     7838ec6d5e17b19fc409660fd4ed4ec1f9395509 (patch)
tree       56001fc8978ccf45679cae730d401b0a85069eeb
parent     5db2fa159cd860d987eac02e233a8d028a3b9b3f (diff)
SERVER-33597 Make allow_partial_results.js, return_partial_shards_down.js start shards as replica sets
(cherry picked from commit e6bbc9d860d126978ecda8707872cfdcc6b57fa8)
-rw-r--r--  jstests/sharding/allow_partial_results.js       | 25
-rw-r--r--  jstests/sharding/return_partial_shards_down.js  | 55
-rw-r--r--  src/mongo/s/query/establish_cursors.cpp         | 17
3 files changed, 63 insertions(+), 34 deletions(-)
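The change switches both tests from standalone mongod shards to replica-set shards. A minimal sketch of the pattern (not part of this patch; namespace and names are illustrative) showing how a ShardingTest is started with replica-set shards and how a whole shard is taken down with stopSet() instead of MongoRunner.stopMongod():

// Minimal sketch, assuming a vanilla jstest environment; 'test.foo' is an arbitrary namespace.
const st = new ShardingTest({shards: 2, other: {rs: true}});
const coll = st.s.getDB("test").getCollection("foo");
assert.writeOK(coll.insert({_id: 1}));

// With replica-set shards, a shard is stopped by shutting down its whole set.
st.rs0.stopSet();

// Reads that must target shard0 now fail with FailedToSatisfyReadPreference
// unless mongos is allowed to return partial results.
st.stop();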
diff --git a/jstests/sharding/allow_partial_results.js b/jstests/sharding/allow_partial_results.js
index 58e5aab6e06..7361cd475a9 100644
--- a/jstests/sharding/allow_partial_results.js
+++ b/jstests/sharding/allow_partial_results.js
@@ -12,7 +12,7 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
const collName = "foo";
const ns = dbName + "." + collName;
- const st = new ShardingTest({shards: 2});
+ const st = new ShardingTest({shards: 2, other: {rs: true}});
jsTest.log("Insert some data.");
const nDocs = 100;
@@ -49,7 +49,7 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
assert.eq(nDocs, findRes.cursor.firstBatch.length);
jsTest.log("Stopping " + st.shard0.shardName);
- MongoRunner.stopMongod(st.shard0);
+ st.rs0.stopSet();
jsTest.log("Without 'allowPartialResults', if some shard down, find fails.");
assert.commandFailed(coll.runCommand({find: collName}));
@@ -57,11 +57,22 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
jsTest.log("With 'allowPartialResults: false', if some shard down, find fails.");
assert.commandFailed(coll.runCommand({find: collName, allowPartialResults: false}));
- jsTest.log(
- "With 'allowPartialResults: true', if some shard down, find succeeds with partial results");
- findRes = assert.commandWorked(coll.runCommand({find: collName, allowPartialResults: true}));
- assert.commandWorked(findRes);
- assert.eq(nDocs / 2, findRes.cursor.firstBatch.length);
+ if (jsTestOptions().mongosBinVersion == "last-stable") {
+ // In v3.6, mongos was updated to swallow FailedToSatisfyReadPreference errors if
+ // allowPartialResults is true, allowing allowPartialResults to work with replica set shards
+ // (see SERVER-33597 for details). So when the mongos version is v3.4, the command should
+ // fail.
+ jsTest.log(
+ "With 'allowPartialResults: true', if some shard down and mongos version is v3.4, find fails");
+ assert.commandFailedWithCode(coll.runCommand({find: collName, allowPartialResults: true}),
+ ErrorCodes.FailedToSatisfyReadPreference);
+ } else {
+ jsTest.log(
+ "With 'allowPartialResults: true', if some shard down, find succeeds with partial results");
+ findRes = coll.runCommand({find: collName, allowPartialResults: true});
+ assert.commandWorked(findRes);
+ assert.eq(nDocs / 2, findRes.cursor.firstBatch.length);
+ }
jsTest.log("The allowPartialResults option does not currently apply to aggregation.");
assert.commandFailedWithCode(coll.runCommand({
diff --git a/jstests/sharding/return_partial_shards_down.js b/jstests/sharding/return_partial_shards_down.js
index c3e0f3f2e27..f07a2dc5cee 100644
--- a/jstests/sharding/return_partial_shards_down.js
+++ b/jstests/sharding/return_partial_shards_down.js
@@ -5,7 +5,22 @@
// Checking UUID consistency involves talking to shards, but this test shuts down shards.
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
-var st = new ShardingTest({shards: 3, mongos: 1, other: {mongosOptions: {verbose: 2}}});
+var checkDocCount = function(coll, returnPartialFlag, shardsDown, expectedCount) {
+ if (jsTestOptions().mongosBinVersion == "last-stable" && shardsDown) {
+ // In v3.6, mongos was updated to swallow FailedToSatisfyReadPreference errors if
+ // allowPartialResults is true, allowing allowPartialResults to work with replica set shards
+ // (see SERVER-33597 for details). So when the mongos version is v3.4, the command should
+ // fail.
+ var error = assert.throws(function() {
+ coll.find({}, {}, 0, 0, 0, returnPartialFlag).itcount();
+ });
+ assert(ErrorCodes.FailedToSatisfyReadPreference == error.code);
+ } else {
+ assert.eq(expectedCount, coll.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
+ }
+};
+
+var st = new ShardingTest({shards: 3, mongos: 1, other: {mongosOptions: {verbose: 2}, rs: true}});
// Stop balancer, we're doing our own manual chunk distribution
st.stopBalancer();
@@ -16,19 +31,19 @@ var admin = mongos.getDB("admin");
var collOneShard = mongos.getCollection("foo.collOneShard");
var collAllShards = mongos.getCollection("foo.collAllShards");
-printjson(admin.runCommand({enableSharding: collOneShard.getDB() + ""}));
+assert.commandWorked(admin.runCommand({enableSharding: collOneShard.getDB() + ""}));
printjson(admin.runCommand({movePrimary: collOneShard.getDB() + "", to: st.shard0.shardName}));
-printjson(admin.runCommand({shardCollection: collOneShard + "", key: {_id: 1}}));
-printjson(admin.runCommand({shardCollection: collAllShards + "", key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({shardCollection: collOneShard + "", key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({shardCollection: collAllShards + "", key: {_id: 1}}));
-// Split and move the "both shard" collection to both shards
+// Split and move the "all shard" collection to all shards
-printjson(admin.runCommand({split: collAllShards + "", middle: {_id: 0}}));
-printjson(admin.runCommand({split: collAllShards + "", middle: {_id: 1000}}));
-printjson(
+assert.commandWorked(admin.runCommand({split: collAllShards + "", middle: {_id: 0}}));
+assert.commandWorked(admin.runCommand({split: collAllShards + "", middle: {_id: 1000}}));
+assert.commandWorked(
admin.runCommand({moveChunk: collAllShards + "", find: {_id: 0}, to: st.shard1.shardName}));
-printjson(
+assert.commandWorked(
admin.runCommand({moveChunk: collAllShards + "", find: {_id: 1000}, to: st.shard2.shardName}));
// Collections are now distributed correctly
@@ -47,35 +62,35 @@ jsTest.log("All shards up!");
assert.eq(3, collOneShard.find().itcount());
assert.eq(3, collAllShards.find().itcount());
-assert.eq(3, collOneShard.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
-assert.eq(3, collAllShards.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
+checkDocCount(collOneShard, returnPartialFlag, false, 3);
+checkDocCount(collAllShards, returnPartialFlag, false, 3);
jsTest.log("One shard down!");
-MongoRunner.stopMongod(st.shard2);
+st.rs2.stopSet();
jsTest.log("done.");
-assert.eq(3, collOneShard.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
-assert.eq(2, collAllShards.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
+checkDocCount(collOneShard, returnPartialFlag, false, 3);
+checkDocCount(collAllShards, returnPartialFlag, true, 2);
jsTest.log("Two shards down!");
-MongoRunner.stopMongod(st.shard1);
+st.rs1.stopSet();
jsTest.log("done.");
-assert.eq(3, collOneShard.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
-assert.eq(1, collAllShards.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
+checkDocCount(collOneShard, returnPartialFlag, false, 3);
+checkDocCount(collAllShards, returnPartialFlag, true, 1);
jsTest.log("All shards down!");
-MongoRunner.stopMongod(st.shard0);
+st.rs0.stopSet();
jsTest.log("done.");
-assert.eq(0, collOneShard.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
-assert.eq(0, collAllShards.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
+checkDocCount(collOneShard, returnPartialFlag, true, 0);
+checkDocCount(collAllShards, returnPartialFlag, true, 0);
jsTest.log("DONE!");
diff --git a/src/mongo/s/query/establish_cursors.cpp b/src/mongo/s/query/establish_cursors.cpp
index cb62cef73ac..dbb3725eb86 100644
--- a/src/mongo/s/query/establish_cursors.cpp
+++ b/src/mongo/s/query/establish_cursors.cpp
@@ -118,13 +118,16 @@ StatusWith<std::vector<ClusterClientCursorParams::RemoteCursor>> establishCursor
break;
}
-
- // Retriable errors are swallowed if 'allowPartialResults' is true.
- if (allowPartialResults &&
- std::find(RemoteCommandRetryScheduler::kAllRetriableErrors.begin(),
- RemoteCommandRetryScheduler::kAllRetriableErrors.end(),
- swCursorResponse.getStatus().code()) !=
- RemoteCommandRetryScheduler::kAllRetriableErrors.end()) {
+ // Retriable errors are swallowed if 'allowPartialResults' is true. Targeting shard
+ // replica sets can also throw FailedToSatisfyReadPreference, so we swallow it too.
+ bool isEligibleException =
+ (std::find(RemoteCommandRetryScheduler::kAllRetriableErrors.begin(),
+ RemoteCommandRetryScheduler::kAllRetriableErrors.end(),
+ swCursorResponse.getStatus().code()) !=
+ RemoteCommandRetryScheduler::kAllRetriableErrors.end() ||
+ swCursorResponse.getStatus().code() == ErrorCodes::FailedToSatisfyReadPreference);
+
+ if (allowPartialResults && isEligibleException) {
continue;
}
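Net effect of the establish_cursors.cpp change: when a whole shard replica set is down, mongos now also swallows FailedToSatisfyReadPreference (in addition to the usual retriable errors) for cursors established with allowPartialResults, so the find succeeds with data from the live shards only. A minimal shell sketch of the observable behavior, assuming the two-shard setup from allow_partial_results.js above (st, coll, nDocs as in that test):

// Sketch only; assumes half of nDocs documents live on each shard.
st.rs0.stopSet();  // take one shard's replica set fully down

// Without allowPartialResults, targeting the dead shard makes the find fail.
assert.commandFailed(coll.runCommand({find: coll.getName()}));

// With allowPartialResults, mongos swallows FailedToSatisfyReadPreference and
// returns whatever the remaining shard holds.
const res = assert.commandWorked(
    coll.runCommand({find: coll.getName(), allowPartialResults: true}));
assert.eq(nDocs / 2, res.cursor.firstBatch.length);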