author     Cheahuychou Mao <cheahuychou.mao@mongodb.com>  2019-12-19 20:28:41 +0000
committer  evergreen <evergreen@mongodb.com>              2019-12-19 20:28:41 +0000
commit     aff2e09e657691ba4764c8a6cfb4630c364abdb0 (patch)
tree       8ef387601c404a67e7bd5564d220ea97704651c0
parent     6e274136bea6ad0dee5874b2085d941f28e831df (diff)
download   mongo-aff2e09e657691ba4764c8a6cfb4630c364abdb0.tar.gz
SERVER-33597 Make allow_partial_results.js, return_partial_shards_down.js start shards as replica sets
(cherry picked from commit e6bbc9d860d126978ecda8707872cfdcc6b57fa8)
-rw-r--r--  jstests/sharding/allow_partial_results.js       26
-rw-r--r--  jstests/sharding/return_partial_shards_down.js   64
-rw-r--r--  src/mongo/s/query/establish_cursors.cpp           9
3 files changed, 66 insertions(+), 33 deletions(-)
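
For context: the patch below does two things. It lets both tests start their shards as replica sets (the ShardingTest default) instead of standalone mongods, and it makes mongos swallow FailedToSatisfyReadPreference per shard when a find runs with allowPartialResults: true. The following shell sketch of the resulting behavior is not part of the patch; the database and collection names are illustrative.

    // Two replica-set shards, one chunk on each, then stop one shard entirely.
    TestData.skipCheckingUUIDsConsistentAcrossCluster = true;  // a shard is down at st.stop()
    const st = new ShardingTest({shards: 2});
    const db = st.s.getDB("test");
    assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
    st.ensurePrimaryShard("test", st.shard0.shardName);
    assert.commandWorked(st.s.adminCommand({shardCollection: "test.foo", key: {_id: 1}}));
    assert.commandWorked(st.s.adminCommand({split: "test.foo", middle: {_id: 0}}));
    assert.commandWorked(
        st.s.adminCommand({moveChunk: "test.foo", find: {_id: 1}, to: st.shard1.shardName}));
    assert.commandWorked(db.foo.insert([{_id: -1}, {_id: 1}]));
    st.rs1.stopSet();  // shards are replica sets, so stop the whole set
    // Without the flag, the find fails because one shard is unreachable.
    assert.commandFailed(db.foo.runCommand({find: "foo"}));
    // With the flag (and a mongos carrying this fix), it succeeds with partial results.
    const res = assert.commandWorked(db.foo.runCommand({find: "foo", allowPartialResults: true}));
    assert.eq(1, res.cursor.firstBatch.length);  // only the live shard's document
    st.stop();
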
diff --git a/jstests/sharding/allow_partial_results.js b/jstests/sharding/allow_partial_results.js
index 7ecbbb1dc7b..3fa82b6e9e2 100644
--- a/jstests/sharding/allow_partial_results.js
+++ b/jstests/sharding/allow_partial_results.js
@@ -12,8 +12,7 @@ const dbName = "test";
const collName = "foo";
const ns = dbName + "." + collName;
-// TODO: SERVER-33597 remove shardAsReplicaSet: false
-const st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
+const st = new ShardingTest({shards: 2});
jsTest.log("Insert some data.");
const nDocs = 100;
@@ -49,7 +48,7 @@ assert.commandWorked(findRes);
assert.eq(nDocs, findRes.cursor.firstBatch.length);
jsTest.log("Stopping " + st.shard0.shardName);
-MongoRunner.stopMongod(st.shard0);
+st.rs0.stopSet();
jsTest.log("Without 'allowPartialResults', if some shard down, find fails.");
assert.commandFailed(coll.runCommand({find: collName}));
@@ -57,11 +56,22 @@ assert.commandFailed(coll.runCommand({find: collName}));
jsTest.log("With 'allowPartialResults: false', if some shard down, find fails.");
assert.commandFailed(coll.runCommand({find: collName, allowPartialResults: false}));
-jsTest.log(
- "With 'allowPartialResults: true', if some shard down, find succeeds with partial results");
-findRes = assert.commandWorked(coll.runCommand({find: collName, allowPartialResults: true}));
-assert.commandWorked(findRes);
-assert.eq(nDocs / 2, findRes.cursor.firstBatch.length);
+if (jsTestOptions().mongosBinVersion == "last-stable") {
+ // In v4.2, mongos was updated to swallow FailedToSatisfyReadPreference errors if
+ // allowPartialResults is true, allowing allowPartialResults to work with replica set shards
+ // (see SERVER-33597 for details). So when the mongos version is v4.0, the command should
+ // fail.
+ jsTest.log(
+ "With 'allowPartialResults: true', if some shard down and mongos version is v4.2, find fails");
+ assert.commandFailedWithCode(coll.runCommand({find: collName, allowPartialResults: true}),
+ ErrorCodes.FailedToSatisfyReadPreference);
+} else {
+ jsTest.log(
+ "With 'allowPartialResults: true', if some shard down, find succeeds with partial results");
+ findRes = coll.runCommand({find: collName, allowPartialResults: true});
+ assert.commandWorked(findRes);
+ assert.eq(nDocs / 2, findRes.cursor.firstBatch.length);
+}
jsTest.log("The allowPartialResults option does not currently apply to aggregation.");
assert.commandFailedWithCode(coll.runCommand({
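
A note on the gated assertion above: assert.commandFailedWithCode matches on the numeric code field of the command reply. A sketch of the reply shape it checks, reusing coll and collName from the test (illustrative; 133 is the numeric value of ErrorCodes.FailedToSatisfyReadPreference):

    // A failed find reply carries ok: 0 plus a code and codeName;
    // commandFailedWithCode asserts ok: 0 and that the code matches.
    const res = coll.runCommand({find: collName, allowPartialResults: true});
    if (res.ok === 0) {
        // e.g. {ok: 0, errmsg: "...", code: 133, codeName: "FailedToSatisfyReadPreference"}
        assert.eq(ErrorCodes.FailedToSatisfyReadPreference, res.code);
    }
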
diff --git a/jstests/sharding/return_partial_shards_down.js b/jstests/sharding/return_partial_shards_down.js
index edf537d4ed1..76a73e9c52b 100644
--- a/jstests/sharding/return_partial_shards_down.js
+++ b/jstests/sharding/return_partial_shards_down.js
@@ -5,9 +5,22 @@
// Checking UUID consistency involves talking to shards, but this test shuts down shards.
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
-// TODO: SERVER-33597 remove shardAsReplicaSet: false
-var st = new ShardingTest(
- {shards: 3, mongos: 1, other: {mongosOptions: {verbose: 2}, shardAsReplicaSet: false}});
+var checkDocCount = function(coll, returnPartialFlag, shardsDown, expectedCount) {
+ if (jsTestOptions().mongosBinVersion == "last-stable" && shardsDown) {
+ // In v4.2, mongos was updated to swallow FailedToSatisfyReadPreference errors if
+ // allowPartialResults is true, allowing allowPartialResults to work with replica set shards
+ // (see SERVER-33597 for details). So when the mongos version is v4.0, the command should
+ // fail.
+ var error = assert.throws(function() {
+ coll.find({}, {}, 0, 0, 0, returnPartialFlag).itcount();
+ });
+ assert.eq(ErrorCodes.FailedToSatisfyReadPreference, error.code);
+ } else {
+ assert.eq(expectedCount, coll.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
+ }
+};
+
+var st = new ShardingTest({shards: 3, mongos: 1, other: {mongosOptions: {verbose: 2}}});
// Stop balancer, we're doing our own manual chunk distribution
st.stopBalancer();
@@ -18,19 +31,20 @@ var admin = mongos.getDB("admin");
var collOneShard = mongos.getCollection("foo.collOneShard");
var collAllShards = mongos.getCollection("foo.collAllShards");
-printjson(admin.runCommand({enableSharding: collOneShard.getDB() + ""}));
-printjson(admin.runCommand({movePrimary: collOneShard.getDB() + "", to: st.shard0.shardName}));
+assert.commandWorked(admin.runCommand({enableSharding: collOneShard.getDB() + ""}));
+assert.commandWorked(
+ admin.runCommand({movePrimary: collOneShard.getDB() + "", to: st.shard0.shardName}));
-printjson(admin.runCommand({shardCollection: collOneShard + "", key: {_id: 1}}));
-printjson(admin.runCommand({shardCollection: collAllShards + "", key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({shardCollection: collOneShard + "", key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({shardCollection: collAllShards + "", key: {_id: 1}}));
-// Split and move the "both shard" collection to both shards
+// Split and move the "all shard" collection to all shards
-printjson(admin.runCommand({split: collAllShards + "", middle: {_id: 0}}));
-printjson(admin.runCommand({split: collAllShards + "", middle: {_id: 1000}}));
-printjson(
+assert.commandWorked(admin.runCommand({split: collAllShards + "", middle: {_id: 0}}));
+assert.commandWorked(admin.runCommand({split: collAllShards + "", middle: {_id: 1000}}));
+assert.commandWorked(
admin.runCommand({moveChunk: collAllShards + "", find: {_id: 0}, to: st.shard1.shardName}));
-printjson(
+assert.commandWorked(
admin.runCommand({moveChunk: collAllShards + "", find: {_id: 1000}, to: st.shard2.shardName}));
// Collections are now distributed correctly
@@ -39,8 +53,8 @@ st.printShardingStatus();
var inserts = [{_id: -1}, {_id: 1}, {_id: 1000}];
-collOneShard.insert(inserts);
-assert.writeOK(collAllShards.insert(inserts));
+assert.commandWorked(collOneShard.insert(inserts));
+assert.commandWorked(collAllShards.insert(inserts));
var returnPartialFlag = 1 << 7;
@@ -49,35 +63,35 @@ jsTest.log("All shards up!");
assert.eq(3, collOneShard.find().itcount());
assert.eq(3, collAllShards.find().itcount());
-assert.eq(3, collOneShard.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
-assert.eq(3, collAllShards.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
+checkDocCount(collOneShard, returnPartialFlag, false, 3);
+checkDocCount(collAllShards, returnPartialFlag, false, 3);
jsTest.log("One shard down!");
-MongoRunner.stopMongod(st.shard2);
+st.rs2.stopSet();
jsTest.log("done.");
-assert.eq(3, collOneShard.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
-assert.eq(2, collAllShards.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
+checkDocCount(collOneShard, returnPartialFlag, false, 3);
+checkDocCount(collAllShards, returnPartialFlag, true, 2);
jsTest.log("Two shards down!");
-MongoRunner.stopMongod(st.shard1);
+st.rs1.stopSet();
jsTest.log("done.");
-assert.eq(3, collOneShard.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
-assert.eq(1, collAllShards.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
+checkDocCount(collOneShard, returnPartialFlag, false, 3);
+checkDocCount(collAllShards, returnPartialFlag, true, 1);
jsTest.log("All shards down!");
-MongoRunner.stopMongod(st.shard0);
+st.rs0.stopSet();
jsTest.log("done.");
-assert.eq(0, collOneShard.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
-assert.eq(0, collAllShards.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
+checkDocCount(collOneShard, returnPartialFlag, true, 0);
+checkDocCount(collAllShards, returnPartialFlag, true, 0);
jsTest.log("DONE!");
diff --git a/src/mongo/s/query/establish_cursors.cpp b/src/mongo/s/query/establish_cursors.cpp
index a0ff0f6cb07..829c38246d1 100644
--- a/src/mongo/s/query/establish_cursors.cpp
+++ b/src/mongo/s/query/establish_cursors.cpp
@@ -101,6 +101,15 @@ std::vector<RemoteCursor> establishCursors(OperationContext* opCtx,
continue;
}
throw; // Fail this loop.
+ } catch (const ExceptionFor<ErrorCodes::FailedToSatisfyReadPreference>&) {
+ // The errors marked as retriable errors are meant to correspond to the driver's
+ // spec (see SERVER-42908), but targeting a replica set shard can fail with
+ // FailedToSatisfyReadPreference, which is not a retriable error in the driver's
+ // spec, so we swallow it separately here if allowPartialResults is true.
+ if (allowPartialResults) {
+ continue;
+ }
+ throw; // Fail this loop.
}
}
return remoteCursors;
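
Finally, the failure mode the new catch block handles, seen from the shell. This sketch assumes a sharded collection (coll, collName as in the tests above) with one replica-set shard fully stopped; it is illustrative, not part of the patch.

    // Targeting a fully-stopped replica-set shard fails with
    // FailedToSatisfyReadPreference, which the driver spec does not class as retriable.
    assert.commandFailedWithCode(coll.runCommand({find: collName}),
                                 ErrorCodes.FailedToSatisfyReadPreference);
    // With the flag set, establishCursors swallows that error per shard and the
    // find returns results from the remaining shards.
    assert.commandWorked(coll.runCommand({find: collName, allowPartialResults: true}));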