summaryrefslogtreecommitdiff
path: root/jstests/sharding/out_does_not_force_merge.js
diff options
context:
space:
mode:
author: Charlie Swanson <charlie.swanson@mongodb.com> 2018-11-29 17:39:33 -0500
committer: Charlie Swanson <charlie.swanson@mongodb.com> 2019-01-16 10:09:59 -0500
commit0cb2195939494660334db8e9d0a52509caeb621c (patch)
tree2d71e10270020f2e7ea8bf3bfb812b13f83abbca /jstests/sharding/out_does_not_force_merge.js
parentd29f140ae0e69057d8b1801f4d2a993612fa5bcb (diff)
downloadmongo-0cb2195939494660334db8e9d0a52509caeb621c.tar.gz
SERVER-38311 Adjust $out merging strategy
Diffstat (limited to 'jstests/sharding/out_does_not_force_merge.js')
-rw-r--r-- jstests/sharding/out_does_not_force_merge.js | 62
1 files changed, 62 insertions, 0 deletions
diff --git a/jstests/sharding/out_does_not_force_merge.js b/jstests/sharding/out_does_not_force_merge.js
new file mode 100644
index 00000000000..01585fbe518
--- /dev/null
+++ b/jstests/sharding/out_does_not_force_merge.js
@@ -0,0 +1,62 @@
// Tests that an $out stage does not force a pipeline to split into a "shards part" and a "merging
// part" if no other stage in the pipeline would force such a split.
(function() {
    "use strict";

    const st = new ShardingTest({shards: 2, rs: {nodes: 1}});

    const mongosDB = st.s.getDB("test_db");

    const inColl = mongosDB["inColl"];
    // Two different output collections will be sharded by different keys.
    const outCollById = mongosDB["outCollById"];
    const outCollBySK = mongosDB["outCollBySK"];
    st.shardColl(outCollById, {_id: 1}, {_id: 500}, {_id: 500}, mongosDB.getName());
    st.shardColl(outCollBySK, {sk: 1}, {sk: 500}, {sk: 500}, mongosDB.getName());

    const numDocs = 1000;

    // Shard the input collection.
    st.shardColl(inColl, {_id: 1}, {_id: 500}, {_id: 500}, mongosDB.getName());

    // Seed the input collection with 'numDocs' documents. The 'sk' field runs in the opposite
    // direction of '_id' so the two output shard keys route documents differently.
    const bulkOp = inColl.initializeUnorderedBulkOp();
    for (let doc = 0; doc < numDocs; doc++) {
        bulkOp.insert({_id: doc, sk: numDocs - doc});
    }
    assert.commandWorked(bulkOp.execute());

    // Asserts via explain output that the $out ran entirely on the shards: the pipeline is still
    // reported as split, but the merger half is empty and $out appears exactly once in the shards
    // half.
    function assertOutRunsOnShards(explain) {
        assert(explain.hasOwnProperty("splitPipeline"), tojson(explain));
        assert(explain.splitPipeline.hasOwnProperty("shardsPart"), tojson(explain));
        const numOutStages =
            explain.splitPipeline.shardsPart.filter(stage => stage.hasOwnProperty("$out")).length;
        assert.eq(numOutStages, 1, tojson(explain));
        assert(explain.splitPipeline.hasOwnProperty("mergerPart"), tojson(explain));
        assert.eq([], explain.splitPipeline.mergerPart, tojson(explain));
    }

    // Runs a bare $out targeting 'targetColl': first through explain(), checking that it does not
    // force a merging part, then for real, checking that every input document arrives.
    function testOutToCollection(targetColl) {
        const pipeline = [{$out: {to: targetColl.getName(), mode: "insertDocuments"}}];

        // Note that we still expect a 'splitPipeline' in the explain output, but the merging half
        // should be empty to indicate that the entire thing is executing in parallel on the shards.
        const explain = inColl.explain().aggregate(pipeline);
        assertOutRunsOnShards(explain);

        // Actually execute the pipeline and make sure it works as expected.
        assert.eq(targetColl.find().itcount(), 0);
        inColl.aggregate(pipeline);
        assert.eq(targetColl.find().itcount(), numDocs);
    }

    // Test that a simple $out can run in parallel when the output collection shares the input
    // collection's shard key.
    testOutToCollection(outCollById);

    // Test the same thing but where the output collection's shard key differs from the input
    // collection's.
    testOutToCollection(outCollBySK);

    st.stop();
}());