author     Charlie Swanson <charlie.swanson@mongodb.com>  2019-09-05 21:01:36 +0000
committer  evergreen <evergreen@mongodb.com>              2019-09-05 21:01:36 +0000
commit     47bc0d69fd8822f5e58b6bd7f8799307ee7159f6 (patch)
tree       31ee34054322aed7dffdb34464612ca5f811d3fc /jstests
parent     5164447331290ea11bfcad48194317517821c78c (diff)
download   mongo-47bc0d69fd8822f5e58b6bd7f8799307ee7159f6.tar.gz
SERVER-42691 Modernize mapReduce_inSharded.js and friends
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/sharding/mapReduce_inSharded.js              87
-rw-r--r--  jstests/sharding/mapReduce_inSharded_outSharded.js   69
-rw-r--r--  jstests/sharding/mapReduce_nonSharded.js             64
-rw-r--r--  jstests/sharding/mapReduce_outSharded.js             62
-rw-r--r--  jstests/sharding/mapReduce_outSharded_checkUUID.js  271
-rw-r--r--  jstests/sharding/mr_output_options.js               117
6 files changed, 262 insertions, 408 deletions
diff --git a/jstests/sharding/mapReduce_inSharded.js b/jstests/sharding/mapReduce_inSharded.js
deleted file mode 100644
index 9abe9c922c2..00000000000
--- a/jstests/sharding/mapReduce_inSharded.js
+++ /dev/null
@@ -1,87 +0,0 @@
-(function() {
-"use strict";
-
-var verifyOutput = function(out) {
- printjson(out);
- assert.commandWorked(out);
- assert.eq(out.counts.input, 51200, "input count is wrong");
- assert.eq(out.counts.emit, 51200, "emit count is wrong");
- assert.gt(out.counts.reduce, 99, "reduce count is wrong");
- assert.eq(out.counts.output, 512, "output count is wrong");
-};
-
-var st = new ShardingTest(
- {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-
-st.adminCommand({enablesharding: "mrShard"});
-st.ensurePrimaryShard('mrShard', st.shard1.shardName);
-st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}});
-
-var db = st.getDB("mrShard");
-
-var bulk = db.srcSharded.initializeUnorderedBulkOp();
-for (var j = 0; j < 100; j++) {
- for (var i = 0; i < 512; i++) {
- bulk.insert({j: j, i: i});
- }
-}
-assert.commandWorked(bulk.execute());
-
-function map() {
- emit(this.i, 1);
-}
-function reduce(key, values) {
- return Array.sum(values);
-}
-
-// sharded src
-var suffix = "InSharded";
-
-var out = db.srcSharded.mapReduce(map, reduce, "mrBasic" + suffix);
-verifyOutput(out);
-
-out = db.srcSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix}});
-verifyOutput(out);
-
-out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix}});
-verifyOutput(out);
-
-out = db.srcSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix}});
-verifyOutput(out);
-
-out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}});
-verifyOutput(out);
-assert(out.results != 'undefined', "no results for inline");
-
-// Ensure that mapReduce with a sharded input collection can accept the collation option.
-out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}, collation: {locale: "en_US"}});
-verifyOutput(out);
-assert(out.results != 'undefined', "no results for inline with collation");
-
-out = db.srcSharded.mapReduce(
- map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB"}});
-verifyOutput(out);
-
-out = db.runCommand({
- mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
- map: map,
- reduce: reduce,
- out: "mrBasic" +
- "srcSharded",
-});
-verifyOutput(out);
-
-// Ensure that the collation option is propagated to the shards. This uses a case-insensitive
-// collation, and the query seeding the mapReduce should only match the document if the
-// collation is passed along to the shards.
-assert.commandWorked(db.srcSharded.remove({}));
-assert.eq(db.srcSharded.find().itcount(), 0);
-assert.commandWorked(db.srcSharded.insert({i: 0, j: 0, str: "FOO"}));
-out = db.srcSharded.mapReduce(
- map,
- reduce,
- {out: {inline: 1}, query: {str: "foo"}, collation: {locale: "en_US", strength: 2}});
-assert.commandWorked(out);
-assert.eq(out.counts.input, 1);
-st.stop();
-})();
diff --git a/jstests/sharding/mapReduce_inSharded_outSharded.js b/jstests/sharding/mapReduce_inSharded_outSharded.js
deleted file mode 100644
index e8d2c44b94e..00000000000
--- a/jstests/sharding/mapReduce_inSharded_outSharded.js
+++ /dev/null
@@ -1,69 +0,0 @@
-(function() {
-"use strict";
-
-var verifyOutput = function(out) {
- printjson(out);
- assert.eq(out.counts.input, 51200, "input count is wrong");
- assert.eq(out.counts.emit, 51200, "emit count is wrong");
- assert.gt(out.counts.reduce, 99, "reduce count is wrong");
- assert.eq(out.counts.output, 512, "output count is wrong");
-};
-
-var st = new ShardingTest(
- {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-
-var admin = st.s0.getDB('admin');
-
-assert.commandWorked(admin.runCommand({enablesharding: "mrShard"}));
-st.ensurePrimaryShard('mrShard', st.shard1.shardName);
-assert.commandWorked(admin.runCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}}));
-
-var db = st.s0.getDB("mrShard");
-
-var bulk = db.srcSharded.initializeUnorderedBulkOp();
-for (var j = 0; j < 100; j++) {
- for (var i = 0; i < 512; i++) {
- bulk.insert({j: j, i: i});
- }
-}
-assert.commandWorked(bulk.execute());
-
-function map() {
- emit(this.i, 1);
-}
-function reduce(key, values) {
- return Array.sum(values);
-}
-
-// sharded src sharded dst
-var suffix = "InShardedOutSharded";
-
-var out =
- db.srcSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix, sharded: true}});
-verifyOutput(out);
-
-out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix, sharded: true}});
-verifyOutput(out);
-
-out = db.srcSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix, sharded: true}});
-verifyOutput(out);
-
-out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}});
-verifyOutput(out);
-assert(out.results != 'undefined', "no results for inline");
-
-out = db.srcSharded.mapReduce(
- map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB", sharded: true}});
-verifyOutput(out);
-
-out = db.runCommand({
- mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
- map: map,
- reduce: reduce,
- out: "mrBasic" +
- "srcSharded",
-});
-verifyOutput(out);
-
-st.stop();
-})();
diff --git a/jstests/sharding/mapReduce_nonSharded.js b/jstests/sharding/mapReduce_nonSharded.js
deleted file mode 100644
index 004db315f97..00000000000
--- a/jstests/sharding/mapReduce_nonSharded.js
+++ /dev/null
@@ -1,64 +0,0 @@
-var verifyOutput = function(out) {
- printjson(out);
- assert.eq(out.counts.input, 51200, "input count is wrong");
- assert.eq(out.counts.emit, 51200, "emit count is wrong");
- assert.gt(out.counts.reduce, 99, "reduce count is wrong");
- assert.eq(out.counts.output, 512, "output count is wrong");
-};
-
-var st = new ShardingTest(
- {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-
-st.adminCommand({enablesharding: "mrShard"});
-st.ensurePrimaryShard('mrShard', st.shard1.shardName);
-st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}});
-
-var db = st.getDB("mrShard");
-
-var bulk = db.srcNonSharded.initializeUnorderedBulkOp();
-for (j = 0; j < 100; j++) {
- for (i = 0; i < 512; i++) {
- bulk.insert({j: j, i: i});
- }
-}
-assert.commandWorked(bulk.execute());
-
-function map() {
- emit(this.i, 1);
-}
-function reduce(key, values) {
- return Array.sum(values);
-}
-
-// non-sharded in/out
-var suffix = "";
-
-out = db.srcNonSharded.mapReduce(map, reduce, "mrBasic" + suffix);
-verifyOutput(out);
-
-out = db.srcNonSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix}});
-verifyOutput(out);
-
-out = db.srcNonSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix}});
-verifyOutput(out);
-
-out = db.srcNonSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix}});
-verifyOutput(out);
-
-out = db.srcNonSharded.mapReduce(map, reduce, {out: {inline: 1}});
-verifyOutput(out);
-assert(out.results != 'undefined', "no results for inline");
-
-out = db.srcNonSharded.mapReduce(
- map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB"}});
-verifyOutput(out);
-
-out = db.runCommand({
- mapReduce: "srcNonSharded", // use new name mapReduce rather than mapreduce
- map: map,
- reduce: reduce,
- out: "mrBasic" +
- "srcNonSharded",
-});
-verifyOutput(out);
-st.stop();
diff --git a/jstests/sharding/mapReduce_outSharded.js b/jstests/sharding/mapReduce_outSharded.js
deleted file mode 100644
index 60a6ab8c0d0..00000000000
--- a/jstests/sharding/mapReduce_outSharded.js
+++ /dev/null
@@ -1,62 +0,0 @@
-var verifyOutput = function(out) {
- printjson(out);
- assert.eq(out.counts.input, 51200, "input count is wrong");
- assert.eq(out.counts.emit, 51200, "emit count is wrong");
- assert.gt(out.counts.reduce, 99, "reduce count is wrong");
- assert.eq(out.counts.output, 512, "output count is wrong");
-};
-
-var st = new ShardingTest(
- {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-
-st.adminCommand({enablesharding: "mrShard"});
-st.ensurePrimaryShard('mrShard', st.shard1.shardName);
-st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}});
-
-var db = st.getDB("mrShard");
-
-var bulk = db.srcNonSharded.initializeUnorderedBulkOp();
-for (j = 0; j < 100; j++) {
- for (i = 0; i < 512; i++) {
- bulk.insert({j: j, i: i});
- }
-}
-assert.commandWorked(bulk.execute());
-
-function map() {
- emit(this.i, 1);
-}
-function reduce(key, values) {
- return Array.sum(values);
-}
-
-// non sharded src sharded dst
-var suffix = "OutSharded";
-
-out =
- db.srcNonSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix, sharded: true}});
-verifyOutput(out);
-
-out = db.srcNonSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix, sharded: true}});
-verifyOutput(out);
-
-out = db.srcNonSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix, sharded: true}});
-verifyOutput(out);
-
-out = db.srcNonSharded.mapReduce(map, reduce, {out: {inline: 1}});
-verifyOutput(out);
-assert(out.results != 'undefined', "no results for inline");
-
-out = db.srcNonSharded.mapReduce(
- map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB", sharded: true}});
-verifyOutput(out);
-
-out = db.runCommand({
- mapReduce: "srcNonSharded", // use new name mapReduce rather than mapreduce
- map: map,
- reduce: reduce,
- out: "mrBasic" +
- "srcNonSharded",
-});
-verifyOutput(out);
-st.stop();
diff --git a/jstests/sharding/mapReduce_outSharded_checkUUID.js b/jstests/sharding/mapReduce_outSharded_checkUUID.js
index 5e4386da1d2..03d3fcf59a2 100644
--- a/jstests/sharding/mapReduce_outSharded_checkUUID.js
+++ b/jstests/sharding/mapReduce_outSharded_checkUUID.js
@@ -2,148 +2,167 @@
"use strict";
load("jstests/libs/uuid_util.js");
-var verifyOutput = function(out, output) {
- printjson(out);
- assert.eq(out.counts.input, 51200, "input count is wrong");
- assert.eq(out.counts.emit, 51200, "emit count is wrong");
- assert.gt(out.counts.reduce, 99, "reduce count is wrong");
- assert.eq(out.counts.output, output, "output count is wrong");
-};
-
-var assertCollectionNotOnShard = function(db, coll) {
- var listCollsRes = db.runCommand({listCollections: 1, filter: {name: coll}});
+function assertCollectionNotOnShard(db, coll) {
+ const listCollsRes = db.runCommand({listCollections: 1, filter: {name: coll}});
assert.commandWorked(listCollsRes);
assert.neq(undefined, listCollsRes.cursor);
assert.neq(undefined, listCollsRes.cursor.firstBatch);
assert.eq(0, listCollsRes.cursor.firstBatch.length);
-};
-
-var st = new ShardingTest({shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1}});
+}
-var admin = st.s0.getDB('admin');
+const st = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1}});
+const testDB = st.s0.getDB("mrShard");
+const inputColl = testDB.srcSharded;
-assert.commandWorked(admin.runCommand({enablesharding: "mrShard"}));
-st.ensurePrimaryShard('mrShard', st.shard1.shardName);
-assert.commandWorked(admin.runCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}}));
+st.adminCommand({enableSharding: testDB.getName()});
+st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName);
+st.adminCommand({shardCollection: inputColl.getFullName(), key: {_id: 1}});
-var db = st.s0.getDB("mrShard");
+const nDistinctKeys = 512;
+const nValuesPerKey = 100;
+const nTotalDocs = nDistinctKeys * nValuesPerKey;
-var bulk = db.srcSharded.initializeUnorderedBulkOp();
-for (var j = 0; j < 100; j++) {
- for (var i = 0; i < 512; i++) {
- bulk.insert({j: j, i: i});
+const bulk = inputColl.initializeUnorderedBulkOp();
+for (let key = 0; key < nDistinctKeys; key++) {
+ for (let value = 0; value < nValuesPerKey; value++) {
+ bulk.insert({key: key, value: value});
}
}
assert.commandWorked(bulk.execute());
-function map() {
- emit(this.i, 1);
+function verifyOutput(mrOutput, expectedNOutputDocs) {
+ assert.commandWorked(mrOutput);
+ assert.eq(mrOutput.counts.input, nTotalDocs, `input count is wrong: ${tojson(mrOutput)}`);
+ assert.eq(mrOutput.counts.emit, nTotalDocs, `emit count is wrong: ${tojson(mrOutput)}`);
+ assert.gt(
+ mrOutput.counts.reduce, nValuesPerKey - 1, `reduce count is wrong: ${tojson(mrOutput)}`);
+ assert.eq(
+ mrOutput.counts.output, expectedNOutputDocs, `output count is wrong: ${tojson(mrOutput)}`);
+}
+
+function mapFn() {
+ emit(this.key, 1);
}
-function reduce(key, values) {
+function reduceFn(key, values) {
return Array.sum(values);
}
-// sharded src sharded dst
-var suffix = "InShardedOutSharded";
-
-// Check that merge to an existing empty sharded collection works and creates a new UUID after
-// M/R
-st.adminCommand({shardcollection: "mrShard.outSharded", key: {"_id": 1}});
-var origUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
-var out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "outSharded", sharded: true}});
-verifyOutput(out, 512);
-var newUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
-assert.neq(origUUID, newUUID);
-
-// Shard1 is the primary shard and only one chunk should have been written, so the chunk with
-// the new UUID should have been written to it.
-assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "outSharded"));
-
-// Shard0 should not have any chunks from the output collection because all shards should have
-// returned an empty split point list in the first phase of the mapReduce, since the reduced
-// data size is far less than the chunk size setting of 1MB.
-assertCollectionNotOnShard(st.shard0.getDB("mrShard"), "outSharded");
-
-// Check that merge to an existing sharded collection that has data on all shards works and that
-// the collection uses the same UUID after M/R
-assert.commandWorked(admin.runCommand({split: "mrShard.outSharded", middle: {"_id": 2000}}));
-assert.commandWorked(admin.runCommand(
- {moveChunk: "mrShard.outSharded", find: {"_id": 2000}, to: st.shard0.shardName}));
-assert.commandWorked(st.s.getCollection("mrShard.outSharded").insert({_id: 1000}));
-assert.commandWorked(st.s.getCollection("mrShard.outSharded").insert({_id: 2001}));
-origUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
-
-out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "outSharded", sharded: true}});
-verifyOutput(out, 514);
-
-newUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
-assert.eq(origUUID, newUUID);
-assert.eq(newUUID, getUUIDFromListCollections(st.shard0.getDB("mrShard"), "outSharded"));
-assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "outSharded"));
-
-// Check that replace to an existing sharded collection has data on all shards works and that
-// the collection creates a new UUID after M/R.
-origUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
-out = db.srcSharded.mapReduce(map, reduce, {out: {replace: "outSharded", sharded: true}});
-verifyOutput(out, 512);
-
-newUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
-assert.neq(origUUID, newUUID);
-
-// Shard1 is the primary shard and only one chunk should have been written, so the chunk with
-// the new UUID should have been written to it.
-assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "outSharded"));
-
-// Shard0 should not have any chunks from the output collection because all shards should have
-// returned an empty split point list in the first phase of the mapReduce, since the reduced
-// data size is far less than the chunk size setting of 1MB.
-assertCollectionNotOnShard(st.shard0.getDB("mrShard"), "outSharded");
-
-// Check that reduce to an existing unsharded collection fails when `sharded: true`.
-assert.commandWorked(db.runCommand({create: "reduceUnsharded"}));
-assert.commandFailed(db.runCommand({
- mapReduce: "srcSharded",
- map: map,
- reduce: reduce,
- out: {reduce: "reduceUnsharded", sharded: true}
-}));
-
-assert.commandWorked(db.reduceUnsharded.insert({x: 1}));
-assert.commandFailed(db.runCommand({
- mapReduce: "srcSharded",
- map: map,
- reduce: reduce,
- out: {reduce: "reduceUnsharded", sharded: true}
-}));
-
-// Check that replace to an existing unsharded collection works when `sharded: true`.
-assert.commandWorked(db.runCommand({create: "replaceUnsharded"}));
-origUUID = getUUIDFromListCollections(st.s.getDB("mrShard"), "replaceUnsharded");
-
-assert.commandWorked(db.runCommand({
- mapReduce: "srcSharded",
- map: map,
- reduce: reduce,
- out: {replace: "replaceUnsharded", sharded: true}
-}));
-
-newUUID = getUUIDFromConfigCollections(st.s, "mrShard.replaceUnsharded");
-assert.neq(origUUID, newUUID);
-assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "replaceUnsharded"));
-
-assert.commandWorked(db.replaceUnsharded.insert({x: 1}));
-origUUID = getUUIDFromListCollections(st.s.getDB("mrShard"), "replaceUnsharded");
-
-assert.commandWorked(db.runCommand({
- mapReduce: "srcSharded",
- map: map,
- reduce: reduce,
- out: {replace: "replaceUnsharded", sharded: true}
-}));
-
-newUUID = getUUIDFromConfigCollections(st.s, "mrShard.replaceUnsharded");
-assert.neq(origUUID, newUUID);
-assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "replaceUnsharded"));
+(function testShardedOutput() {
+ // Check that merge to an existing empty sharded collection works and creates a new UUID after
+ // M/R
+ const outputColl = testDB[inputColl.getName() + "Out"];
+ st.adminCommand({shardCollection: outputColl.getFullName(), key: {_id: 1}});
+ let origUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
+ let out = testDB.srcSharded.mapReduce(
+ mapFn, reduceFn, {out: {merge: outputColl.getName(), sharded: true}});
+ verifyOutput(out, nDistinctKeys);
+ let newUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
+ assert.neq(origUUID, newUUID);
+
+ // Shard1 is the primary shard and only one chunk should have been written, so the chunk with
+ // the new UUID should have been written to it.
+ assert.eq(newUUID,
+ getUUIDFromListCollections(st.shard1.getDB(testDB.getName()), outputColl.getName()));
+
+ // Shard0 should not have any chunks from the output collection because all shards should have
+ // returned an empty split point list in the first phase of the mapReduce, since the reduced
+ // data size is far less than the chunk size setting of 1MB.
+ assertCollectionNotOnShard(st.shard0.getDB(testDB.getName()), outputColl.getName());
+
+ // Check that merge to an existing sharded collection that has data on all shards works and that
+ // the collection uses the same UUID after M/R
+ st.adminCommand({split: outputColl.getFullName(), middle: {"_id": 2000}});
+ st.adminCommand(
+ {moveChunk: outputColl.getFullName(), find: {"_id": 2000}, to: st.shard0.shardName});
+ assert.commandWorked(outputColl.insert([{_id: 1000}, {_id: 2001}]));
+ origUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
+
+ out = testDB.srcSharded.mapReduce(
+ mapFn, reduceFn, {out: {merge: outputColl.getName(), sharded: true}});
+ verifyOutput(out, nDistinctKeys + 2);
+
+ newUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
+ assert.eq(origUUID, newUUID);
+ assert.eq(newUUID,
+ getUUIDFromListCollections(st.shard0.getDB(testDB.getName()), outputColl.getName()));
+ assert.eq(newUUID,
+ getUUIDFromListCollections(st.shard1.getDB(testDB.getName()), outputColl.getName()));
+
+ // Check that replace to an existing sharded collection that has data on all shards works
+ // and that the collection creates a new UUID after M/R.
+ origUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
+ out = testDB.srcSharded.mapReduce(
+ mapFn, reduceFn, {out: {replace: outputColl.getName(), sharded: true}});
+ verifyOutput(out, nDistinctKeys);
+
+ newUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
+ assert.neq(origUUID, newUUID);
+
+ // Shard1 is the primary shard and only one chunk should have been written, so the chunk with
+ // the new UUID should have been written to it.
+ assert.eq(newUUID,
+ getUUIDFromListCollections(st.shard1.getDB(testDB.getName()), outputColl.getName()));
+
+ // Shard0 should not have any chunks from the output collection because all shards should have
+ // returned an empty split point list in the first phase of the mapReduce, since the reduced
+ // data size is far less than the chunk size setting of 1MB.
+ assertCollectionNotOnShard(st.shard0.getDB(testDB.getName()), outputColl.getName());
+}());
+
+(function testUnshardedOutputColl() {
+ // Check that reduce to an existing unsharded collection fails when `sharded: true`.
+ const reduceOutput = testDB.reduceUnsharded;
+ assert.commandWorked(testDB.runCommand({create: reduceOutput.getName()}));
+ assert.commandFailed(testDB.runCommand({
+ mapReduce: inputColl.getName(),
+ map: mapFn,
+ reduce: reduceFn,
+ out: {reduce: reduceOutput.getName(), sharded: true}
+ }));
+
+ assert.commandWorked(testDB.reduceUnsharded.insert({x: 1}));
+ assert.commandFailed(testDB.runCommand({
+ mapReduce: inputColl.getName(),
+ map: mapFn,
+ reduce: reduceFn,
+ out: {reduce: reduceOutput.getName(), sharded: true}
+ }));
+
+ // Check that replace to an existing unsharded collection works when `sharded: true`.
+ const replaceOutput = testDB.replaceUnsharded;
+ assert.commandWorked(testDB.runCommand({create: replaceOutput.getName()}));
+ let origUUID =
+ getUUIDFromListCollections(st.s.getDB(testDB.getName()), replaceOutput.getName());
+
+ assert.commandWorked(testDB.runCommand({
+ mapReduce: inputColl.getName(),
+ map: mapFn,
+ reduce: reduceFn,
+ out: {replace: replaceOutput.getName(), sharded: true}
+ }));
+
+ let newUUID = getUUIDFromConfigCollections(st.s, replaceOutput.getFullName());
+ assert.neq(origUUID, newUUID);
+ assert.eq(
+ newUUID,
+ getUUIDFromListCollections(st.shard1.getDB(testDB.getName()), replaceOutput.getName()));
+
+ assert.commandWorked(testDB.replaceUnsharded.insert({x: 1}));
+ origUUID = getUUIDFromListCollections(st.s.getDB(testDB.getName()), replaceOutput.getName());
+
+ assert.commandWorked(testDB.runCommand({
+ mapReduce: inputColl.getName(),
+ map: mapFn,
+ reduce: reduceFn,
+ out: {replace: replaceOutput.getName(), sharded: true}
+ }));
+
+ newUUID = getUUIDFromConfigCollections(st.s, replaceOutput.getFullName());
+ assert.neq(origUUID, newUUID);
+ assert.eq(
+ newUUID,
+ getUUIDFromListCollections(st.shard1.getDB(testDB.getName()), replaceOutput.getName()));
+}());
st.stop();
})();
diff --git a/jstests/sharding/mr_output_options.js b/jstests/sharding/mr_output_options.js
new file mode 100644
index 00000000000..fb992f219cb
--- /dev/null
+++ b/jstests/sharding/mr_output_options.js
@@ -0,0 +1,117 @@
+// Tests that the mapReduce command works correctly under all combinations of the input and output
+// collections being sharded or unsharded.
+(function() {
+"use strict";
+
+const st = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
+
+const testDB = st.getDB("mrShard");
+const inputColl = testDB.srcSharded;
+
+st.adminCommand({enableSharding: testDB.getName()});
+st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName);
+
+const nDistinctKeys = 512;
+const nValuesPerKey = 100;
+
+function seedCollection() {
+ const bulk = inputColl.initializeUnorderedBulkOp();
+ for (let key = 0; key < nDistinctKeys; key++) {
+ for (let value = 0; value < nValuesPerKey; value++) {
+ bulk.insert({key: key, value: value});
+ }
+ }
+ assert.commandWorked(bulk.execute());
+}
+
+function verifyOutput(mrOutput) {
+ assert.commandWorked(mrOutput);
+ const nTotalDocs = nDistinctKeys * nValuesPerKey;
+ assert.eq(mrOutput.counts.input, nTotalDocs, `input count is wrong: ${tojson(mrOutput)}`);
+ assert.eq(mrOutput.counts.emit, nTotalDocs, `emit count is wrong: ${tojson(mrOutput)}`);
+ assert.gt(
+ mrOutput.counts.reduce, nValuesPerKey - 1, `reduce count is wrong: ${tojson(mrOutput)}`);
+ assert.eq(mrOutput.counts.output, nDistinctKeys, `output count is wrong: ${tojson(mrOutput)}`);
+}
+
+function mapFn() {
+ emit(this.key, 1);
+}
+function reduceFn(key, values) {
+ return Array.sum(values);
+}
+
+function testMrOutput({inputSharded, outputSharded}) {
+ inputColl.drop();
+ if (inputSharded) {
+ st.adminCommand({shardCollection: inputColl.getFullName(), key: {_id: "hashed"}});
+ }
+ seedCollection();
+ const outputColl = testDB[inputColl.getName() + "Out"];
+ outputColl.drop();
+ if (outputSharded) {
+ st.adminCommand({shardCollection: outputColl.getFullName(), key: {_id: "hashed"}});
+ }
+
+ function runMRTestWithOutput(outOptions) {
+ verifyOutput(inputColl.mapReduce(mapFn, reduceFn, outOptions));
+ }
+
+ runMRTestWithOutput({out: {merge: outputColl.getName(), sharded: outputSharded}});
+
+ assert.commandWorked(outputColl.remove({}));
+ runMRTestWithOutput({out: {reduce: outputColl.getName(), sharded: outputSharded}});
+ // Test the same thing using runCommand directly.
+ verifyOutput(testDB.runCommand({
+ mapReduce: inputColl.getName(),
+ map: mapFn,
+ reduce: reduceFn,
+ out: {reduce: outputColl.getName(), sharded: outputSharded}
+ }));
+
+ const out = inputColl.mapReduce(mapFn, reduceFn, {out: {inline: 1}});
+ verifyOutput(out);
+ assert(out.results != 'undefined', "no results for inline");
+
+ if (!outputSharded) {
+ // We don't support replacing an existing sharded collection.
+ runMRTestWithOutput(outputColl.getName());
+ runMRTestWithOutput({out: {replace: outputColl.getName(), sharded: outputSharded}});
+ runMRTestWithOutput(
+ {out: {replace: outputColl.getName(), sharded: outputSharded, db: "mrShardOtherDB"}});
+ verifyOutput(testDB.runCommand({
+ mapReduce: inputColl.getName(),
+ map: mapFn,
+ reduce: reduceFn,
+ out: {replace: outputColl.getName(), sharded: outputSharded}
+ }));
+ }
+}
+
+testMrOutput({inputSharded: false, outputSharded: false});
+testMrOutput({inputSharded: false, outputSharded: true});
+testMrOutput({inputSharded: true, outputSharded: false});
+testMrOutput({inputSharded: true, outputSharded: true});
+
+// Ensure that mapReduce with a sharded input collection can accept the collation option.
+let out = inputColl.mapReduce(mapFn, reduceFn, {out: {inline: 1}, collation: {locale: "en_US"}});
+verifyOutput(out);
+assert(out.results != 'undefined', "no results for inline with collation");
+
+assert.commandWorked(inputColl.remove({}));
+
+// Ensure that the collation option is propagated to the shards. This uses a case-insensitive
+// collation, and the query seeding the mapReduce should only match the document if the
+// collation is passed along to the shards.
+assert.eq(inputColl.find().itcount(), 0);
+assert.commandWorked(inputColl.insert({key: 0, value: 0, str: "FOO"}));
+out = inputColl.mapReduce(
+ mapFn,
+ reduceFn,
+ {out: {inline: 1}, query: {str: "foo"}, collation: {locale: "en_US", strength: 2}});
+assert.commandWorked(out);
+assert.eq(out.counts.input, 1);
+assert.eq(out.results, [{_id: 0, value: 1}]);
+
+st.stop();
+})();