author     Martin Neupauer <martin.neupauer@mongodb.com>  2019-01-03 10:45:41 -0500
committer  Martin Neupauer <martin.neupauer@mongodb.com>  2019-01-23 10:09:44 -0500
commit     9388a033c308f29e3629f1184403b578792a8e21 (patch)
tree       c1707a3ec1ba1091b0c52962674f793b76be4a8e /jstests
parent     a5862c03848b0c6cd5844be1ac3b8d3228217d39 (diff)
download   mongo-9388a033c308f29e3629f1184403b578792a8e21.tar.gz
SERVER-38364 Test cluster configuration changes during $out
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/sharding/out_with_drop_shard.js    102
-rw-r--r--  jstests/sharding/out_with_move_primary.js  128
2 files changed, 230 insertions, 0 deletions
diff --git a/jstests/sharding/out_with_drop_shard.js b/jstests/sharding/out_with_drop_shard.js
new file mode 100644
index 00000000000..b61c74def98
--- /dev/null
+++ b/jstests/sharding/out_with_drop_shard.js
@@ -0,0 +1,102 @@
+// Tests that the $out aggregation stage is resilient to a shard being removed from or added to
+// the cluster while the aggregation is running, with both the source and output collections
+// sharded.
+(function() {
+ 'use strict';
+
+ const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
+ // The balancer must be running so that a removed shard's chunks can be drained.
+ st.startBalancer();
+
+ const mongosDB = st.s.getDB(jsTestName());
+ const sourceColl = mongosDB["source"];
+ const targetColl = mongosDB["target"];
+
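+ // Enables or disables the failpoint that makes $out hang while building a write batch. The
+ // failpoint is set on both shards because either of them may end up executing the $out stage.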
+ function setAggHang(mode) {
+ assert.commandWorked(st.shard0.adminCommand(
+ {configureFailPoint: "hangWhileBuildingDocumentSourceOutBatch", mode: mode}));
+ assert.commandWorked(st.shard1.adminCommand(
+ {configureFailPoint: "hangWhileBuildingDocumentSourceOutBatch", mode: mode}));
+ }
+
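+ // Removes the given shard. The removeShard command only starts the drain, so poll until the
+ // shard reports the 'completed' state.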
+ function removeShard(shardName) {
+ let res = st.s.adminCommand({removeShard: shardName});
+ assert.commandWorked(res);
+ assert.eq('started', res.state);
+ assert.soon(function() {
+ res = st.s.adminCommand({removeShard: shardName});
+ assert.commandWorked(res);
+ return ('completed' === res.state);
+ }, "removeShard never completed for shard " + shardName);
+ }
+
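+ // Adds the previously removed shard back into the cluster and moves one chunk of the source
+ // collection onto it, so that the data is again spread across both shards.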
+ function addShard(shard) {
+ assert.commandWorked(st.s.adminCommand({addShard: shard}));
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: sourceColl.getFullName(), find: {shardKey: 0}, to: shard}));
+ }
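+
+ // Runs a single $out with the given mode while changing the cluster topology mid-aggregation:
+ // if 'dropShard' is true, shard0 is removed while the aggregation hangs on the failpoint;
+ // otherwise the previously removed shard is added back.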
+ function runOutWithMode(outMode, shardedColl, dropShard) {
+ // Enable the failpoint so that $out hangs while building its first write batch.
+ setAggHang("alwaysOn");
+
+ let comment = outMode + "_" + shardedColl.getName() + "_1";
+ let outFn = `
+ const sourceDB = db.getSiblingDB(jsTestName());
+ const sourceColl = sourceDB["${sourceColl.getName()}"];
+ let cmdRes = sourceDB.runCommand({
+ aggregate: "${sourceColl.getName()}",
+ pipeline: [{$out: {to: "${targetColl.getName()}", mode: "${outMode}"}}],
+ cursor: {},
+ comment: "${comment}"
+ });
+ assert.commandWorked(cmdRes);
+ `;
+
+ // Start the $out aggregation in a parallel shell.
+ let outShell = startParallelShell(outFn, st.s.port);
+
+ // Wait for the parallel shell to hit the failpoint.
+ assert.soon(
+ () => mongosDB
+ .currentOp({
+ $or: [
+ {op: "command", "command.comment": comment},
+ {op: "getmore", "cursor.originatingCommand.comment": comment}
+ ]
+ })
+ .inprog.length >= 1,
+ () => tojson(mongosDB.currentOp().inprog));
+
+ if (dropShard) {
+ removeShard(st.shard0.shardName);
+ } else {
+ addShard(st.rs0.getURL());
+ }
+ // Unset the failpoint to unblock the $out and join with the parallel shell.
+ setAggHang("off");
+ outShell();
+
+ // Verify that the $out succeeded.
+ assert.eq(2, targetColl.find().itcount());
+
+ assert.commandWorked(targetColl.remove({}));
+ }
+
+ // Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
+ st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+
+ // Shard the output collection with shard key {shardKey: 1} and split into 2 chunks.
+ st.shardColl(targetColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+
+ // Write two documents in the source collection that should target the two chunks in the target
+ // collection.
+ assert.commandWorked(sourceColl.insert({shardKey: -1}));
+ assert.commandWorked(sourceColl.insert({shardKey: 1}));
+
+ // Note that mode "replaceCollection" is not supported with an existing sharded output
+ // collection.
+ runOutWithMode("insertDocuments", targetColl, true);
+ runOutWithMode("insertDocuments", targetColl, false);
+ runOutWithMode("replaceDocuments", targetColl, true);
+
+ st.stop();
+})();
diff --git a/jstests/sharding/out_with_move_primary.js b/jstests/sharding/out_with_move_primary.js
new file mode 100644
index 00000000000..f3f2f5568a1
--- /dev/null
+++ b/jstests/sharding/out_with_move_primary.js
@@ -0,0 +1,128 @@
+// Tests that the $out aggregation stage is resilient to the database's primary shard being moved
+// while the aggregation is running, for various combinations of sharded and unsharded source and
+// output collections.
+(function() {
+ 'use strict';
+
+ const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
+
+ const mongosDB = st.s.getDB(jsTestName());
+ const sourceColl = mongosDB["source"];
+ const targetColl = mongosDB["target"];
+
+ function setAggHang(mode) {
+ assert.commandWorked(st.shard0.adminCommand(
+ {configureFailPoint: "hangWhileBuildingDocumentSourceOutBatch", mode: mode}));
+ assert.commandWorked(st.shard1.adminCommand(
+ {configureFailPoint: "hangWhileBuildingDocumentSourceOutBatch", mode: mode}));
+ }
+
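+ // Runs a single $out with the given mode while moving the database's primary shard from shard0
+ // to shard1 mid-aggregation. If 'expectFailCode' is given, the aggregate is expected to fail
+ // with that code; otherwise it must succeed.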
+ function runOutWithMode(outMode, shardedColl, expectFailCode) {
+ // Enable the failpoint so that $out hangs while building its first write batch.
+ setAggHang("alwaysOn");
+
+ // Set the primary shard.
+ st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+
+ let comment = outMode + "_" + shardedColl.getName() + "_1";
+ let outFn = `
+ const sourceDB = db.getSiblingDB(jsTestName());
+ const sourceColl = sourceDB["${sourceColl.getName()}"];
+ let cmdRes = sourceDB.runCommand({
+ aggregate: "${sourceColl.getName()}",
+ pipeline: [{$out: {to: "${targetColl.getName()}", mode: "${outMode}"}}],
+ cursor: {},
+ comment: "${comment}"
+ });
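+ // Note that expectFailCode is interpolated into this parallel shell snippet as a literal;
+ // when the caller omits it, the token "undefined" is substituted and the success branch runs.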
+ if (${expectFailCode} !== undefined) {
+ assert.commandFailedWithCode(cmdRes, ${expectFailCode});
+ } else {
+ assert.commandWorked(cmdRes);
+ }
+ `;
+
+ // Start the $out aggregation in a parallel shell.
+ let outShell = startParallelShell(outFn, st.s.port);
+
+ // Wait for the parallel shell to hit the failpoint.
+ assert.soon(
+ () => mongosDB
+ .currentOp({
+ $or: [
+ {op: "command", "command.comment": comment},
+ {op: "getmore", "cursor.originatingCommand.comment": comment}
+ ]
+ })
+ .inprog.length == 1,
+ () => tojson(mongosDB.currentOp().inprog));
+
+ // Migrate the primary shard from shard0 to shard1.
+ st.ensurePrimaryShard(mongosDB.getName(), st.shard1.shardName);
+
+ // Unset the failpoint to unblock the $out and join with the parallel shell.
+ setAggHang("off");
+ outShell();
+
+ // Verify that the $out succeeded.
+ if (expectFailCode === undefined) {
+ assert.eq(2, targetColl.find().itcount());
+ }
+
+ assert.commandWorked(targetColl.remove({}));
+ }
+
+ // The source collection is unsharded.
+ assert.commandWorked(sourceColl.insert({shardKey: -1}));
+ assert.commandWorked(sourceColl.insert({shardKey: 1}));
+
+ // Note that the actual error is NamespaceNotFound but it is wrapped in a generic error code by
+ // mistake.
+ runOutWithMode("replaceCollection", sourceColl, ErrorCodes.CommandFailed);
+ runOutWithMode("replaceDocuments", sourceColl);
+ runOutWithMode("insertDocuments", sourceColl);
+
+ sourceColl.drop();
+
+ // Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
+ st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+
+ // Write a document to each chunk of the source collection.
+ assert.commandWorked(sourceColl.insert({shardKey: -1}));
+ assert.commandWorked(sourceColl.insert({shardKey: 1}));
+
+ runOutWithMode("replaceCollection", sourceColl, ErrorCodes.CommandFailed);
+ runOutWithMode("replaceDocuments", sourceColl);
+ runOutWithMode("insertDocuments", sourceColl);
+
+ sourceColl.drop();
+
+ // Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
+ st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+
+ // Write a document to each chunk of the source collection.
+ assert.commandWorked(sourceColl.insert({shardKey: -1}));
+ assert.commandWorked(sourceColl.insert({shardKey: 1}));
+
+ runOutWithMode("replaceCollection", targetColl, ErrorCodes.CommandFailed);
+ runOutWithMode("replaceDocuments", targetColl);
+ runOutWithMode("insertDocuments", targetColl);
+
+ sourceColl.drop();
+ targetColl.drop();
+
+ // Shard the collections with shard key {shardKey: 1} and split into 2 chunks.
+ st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+ st.shardColl(targetColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+
+ // Write two documents in the source collection that should target the two chunks in the target
+ // collection.
+ assert.commandWorked(sourceColl.insert({shardKey: -1}));
+ assert.commandWorked(sourceColl.insert({shardKey: 1}));
+
+ // Note that mode "replaceCollection" is not supported with an existing sharded output
+ // collection.
+ runOutWithMode("replaceDocuments", targetColl);
+ runOutWithMode("insertDocuments", targetColl);
+
+ st.stop();
+})();