author    Jacob Evans <jacob.evans@mongodb.com>  2019-12-11 21:33:49 +0000
committer evergreen <evergreen@mongodb.com>      2019-12-11 21:33:49 +0000
commit    57acc8b666b8c9dfc34eaf03c226ab26ac225781 (patch)
tree      1a0971f0cce77fc5178fc48952770dc164818b22 /jstests
parent    3da6513067131a50323f3388b8dc2918da885732 (diff)
download  mongo-57acc8b666b8c9dfc34eaf03c226ab26ac225781.tar.gz
SERVER-44475 Remove Query Knob
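This change removes the internalQueryUseAggMapReduce query knob from the jstests now that
mapReduce is translated to an aggregation pipeline, and drops the related TODO SERVER-42511
tag comments. A minimal sketch of the shell call these tests previously issued and no longer
need, followed by an ordinary mapReduce invocation that is unchanged for callers ("someColl"
and the map/reduce bodies below are placeholders, not taken from this patch):

    // Previously required so the tests exercised the agg-based implementation:
    assert.commandWorked(
        db.adminCommand({setParameter: 1, internalQueryUseAggMapReduce: true}));

    // mapReduce itself is invoked the same way as before, e.g. an inline run:
    const res = db.someColl.mapReduce(
        function() {
            emit(this.key, 1);             // map: emit a count of 1 per document key
        },
        function(key, values) {
            return Array.sum(values);      // reduce: sum the emitted counts
        },
        {out: {inline: 1}});
    assert.commandWorked(res);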
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_inline.js | 8
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_interrupt.js | 5
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_merge.js | 5
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_reduce.js | 3
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_replace.js | 5
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js | 5
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js | 5
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_with_chunk_migrations.js | 3
-rw-r--r--  jstests/core/collation.js | 14
-rw-r--r--  jstests/core/constructors.js | 26
-rw-r--r--  jstests/core/find_and_modify2.js | 18
-rw-r--r--  jstests/core/function_string_representations.js | 11
-rw-r--r--  jstests/core/index_stats.js | 2
-rw-r--r--  jstests/core/map_reduce_validation.js | 10
-rw-r--r--  jstests/core/mr_agg_explain.js | 51
-rw-r--r--  jstests/core/mr_comments.js | 26
-rw-r--r--  jstests/core/mr_compute_avg.js | 9
-rw-r--r--  jstests/core/mr_correctness.js | 52
-rw-r--r--  jstests/core/mr_fail_invalid_js.js | 20
-rw-r--r--  jstests/core/mr_killop.js | 3
-rw-r--r--  jstests/core/mr_multikey_deduping.js | 44
-rw-r--r--  jstests/core/mr_output_other_db.js | 9
-rw-r--r--  jstests/core/mr_scope.js | 4
-rw-r--r--  jstests/core/mr_use_this_object.js | 4
-rw-r--r--  jstests/core/or4.js | 26
-rw-r--r--  jstests/core/recursion.js | 2
-rw-r--r--  jstests/core/slice1.js | 8
-rw-r--r--  jstests/libs/override_methods/implicitly_shard_accessed_collections.js | 2
-rw-r--r--  jstests/libs/profiler.js | 10
-rw-r--r--  jstests/multiVersion/map_reduce_multiversion_cluster.js | 9
-rw-r--r--  jstests/multiVersion/map_reduce_multiversion_repl_set.js | 5
-rw-r--r--  jstests/noPassthrough/currentop_query.js | 18
-rw-r--r--  jstests/noPassthrough/index_partial_no_explain_cmds.js | 10
-rw-r--r--  jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js | 88
-rw-r--r--  jstests/noPassthroughWithMongod/mr_noscripting.js | 5
-rw-r--r--  jstests/replsets/prepare_conflict_read_concern_behavior.js | 17
-rw-r--r--  jstests/sharding/collation_targeting.js | 110
-rw-r--r--  jstests/sharding/collation_targeting_inherited.js | 111
-rw-r--r--  jstests/sharding/database_versioning_all_commands.js | 8
-rw-r--r--  jstests/sharding/features2.js | 2
-rw-r--r--  jstests/sharding/mapReduce_outSharded_checkUUID.js | 168
-rw-r--r--  jstests/sharding/map_reduce_invalid_output_collection.js | 36
-rw-r--r--  jstests/sharding/map_reduce_invalid_result_set.js | 6
-rw-r--r--  jstests/sharding/mrShardedOutput.js | 135
-rw-r--r--  jstests/sharding/mrShardedOutputAuth.js | 1
-rw-r--r--  jstests/sharding/mr_noscripting.js | 4
-rw-r--r--  jstests/sharding/mr_output_options.js | 51
-rw-r--r--  jstests/sharding/read_pref_cmd.js | 35
-rw-r--r--  jstests/sharding/safe_secondary_reads_drop_recreate.js | 2
-rw-r--r--  jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js | 55
-rw-r--r--  jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js | 2
-rw-r--r--  jstests/sharding/track_unsharded_collections_check_shard_version.js | 2
52 files changed, 453 insertions, 817 deletions
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_inline.js b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
index 7deea63ccd8..96b71c191f4 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_inline.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
@@ -10,10 +10,7 @@
* Used as the base workload for the other map-reduce workloads.
* @tags: [
* # mapReduce does not support afterClusterTime.
- * does_not_support_causal_consistency,
- * # TODO SERVER-42511: Remove this requires_fcv tag once the internalQueryUseAggMapReduce knob
- * # is removed from the fsm workloads.
- * requires_fcv_44
+ * does_not_support_causal_consistency
* ]
*/
var $config = (function() {
@@ -78,9 +75,6 @@ var $config = (function() {
var res = bulk.execute();
assertAlways.commandWorked(res);
assertAlways.eq(this.numDocs, res.nInserted);
-
- assert.commandWorked(
- db.adminCommand({setParameter: 1, internalQueryUseAggMapReduce: true}));
}
return {
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_interrupt.js b/jstests/concurrency/fsm_workloads/map_reduce_interrupt.js
index 25cdf96410d..38383283c47 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_interrupt.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_interrupt.js
@@ -10,10 +10,7 @@
* @tags: [
* # mapReduce does not support afterClusterTime.
* does_not_support_causal_consistency,
- * uses_curop_agg_stage,
- * # TODO SERVER-42511: Remove this requires_fcv tag once the internalQueryUseAggMapReduce knob
- * # is removed in the fsm workloads.
- * requires_fcv_44
+ * uses_curop_agg_stage
* ]
*/
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_merge.js b/jstests/concurrency/fsm_workloads/map_reduce_merge.js
index 50a63afd71c..05fd0d54910 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_merge.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_merge.js
@@ -14,10 +14,7 @@
* Writes the results of each thread to the same collection.
* @tags: [
* # mapReduce does not support afterClusterTime.
- * does_not_support_causal_consistency,
- * # TODO SERVER-42511: Remove this requires_fcv tag once the internalQueryUseAggMapReduce knob
- * # is removed in the fsm workloads.
- * requires_fcv_44
+ * does_not_support_causal_consistency
* ]
*/
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_reduce.js b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
index 173bca4d5d3..20328d6da0b 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
@@ -13,9 +13,6 @@
* @tags: [
* # mapReduce does not support afterClusterTime.
* does_not_support_causal_consistency,
- * # TODO SERVER-42511: Remove this requires_fcv tag once the internalQueryUseAggMapReduce knob
- * # is removed in the fsm workloads.
- * requires_fcv_44,
* ]
*/
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace.js b/jstests/concurrency/fsm_workloads/map_reduce_replace.js
index 5ef7a402141..074dad1732a 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace.js
@@ -12,10 +12,7 @@
* collection.
* @tags: [
* # mapReduce does not support afterClusterTime.
- * does_not_support_causal_consistency,
- * # TODO SERVER-42511: Remove this requires_fcv tag once the internalQueryUseAggMapReduce knob
- * # is removed in the fsm workloads.
- * requires_fcv_44,
+ * does_not_support_causal_consistency
* ]
*/
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
index 1a705a85181..2b9d5a7f756 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
@@ -11,10 +11,7 @@
* output collection.
* @tags: [
* # mapReduce does not support afterClusterTime.
- * does_not_support_causal_consistency,
- * # TODO SERVER-42511: Remove this requires_fcv tag once the internalQueryUseAggMapReduce knob
- * # is removed in the fsm workloads.
- * requires_fcv_44,
+ * does_not_support_causal_consistency
* ]
*/
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js b/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js
index ef447858a32..60e29554027 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js
@@ -12,10 +12,7 @@
* This workload was designed to reproduce SERVER-15539.
* @tags: [
* # mapReduce does not support afterClusterTime.
- * does_not_support_causal_consistency,
- * # TODO SERVER-42511: Remove this requires_fcv tag once the internalQueryUseAggMapReduce knob
- * # is removed in the fsm workloads.
- * requires_fcv_44,
+ * does_not_support_causal_consistency
* ]
*/
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_with_chunk_migrations.js b/jstests/concurrency/fsm_workloads/map_reduce_with_chunk_migrations.js
index 93a9eefedaf..1fa5acf8fe5 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_with_chunk_migrations.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_with_chunk_migrations.js
@@ -98,9 +98,6 @@ var $config = extendWorkload($config, function($config, $super) {
cluster.shardCollection(db[this.collWithMigrations], this.shardKey, false);
$super.setup.apply(this, [db, this.collWithMigrations, cluster]);
}
-
- assert.commandWorked(
- db.adminCommand({setParameter: 1, internalQueryUseAggMapReduce: true}));
};
return $config;
diff --git a/jstests/core/collation.js b/jstests/core/collation.js
index 51a0f5346af..f7bada2625f 100644
--- a/jstests/core/collation.js
+++ b/jstests/core/collation.js
@@ -957,20 +957,6 @@ assert.eq(null,
// Collation tests for mapReduce.
//
-// mapReduce should return "collection doesn't exist" error when collation specified and
-// collection does not exist.
-coll.drop();
-assert.throws(function() {
- coll.mapReduce(
- function() {
- emit(this.str, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, collation: {locale: "fr"}});
-});
-
// mapReduce should return correct results when collation specified and no indexes exist.
coll.drop();
assert.commandWorked(coll.insert({_id: 1, str: "foo"}));
diff --git a/jstests/core/constructors.js b/jstests/core/constructors.js
index 22d3b277cc9..308baf405fd 100644
--- a/jstests/core/constructors.js
+++ b/jstests/core/constructors.js
@@ -8,6 +8,9 @@
// Takes a list of constructors and returns a new list with an extra entry for each constructor with
// "new" prepended
+(function() {
+"use strict";
+const out = db.map_reduce_constructors_out;
function addConstructorsWithNew(constructorList) {
function prependNew(constructor) {
return "new " + constructor;
@@ -39,7 +42,7 @@ function clientEvalConstructorTest(constructorList) {
function mapReduceConstructorTest(constructorList) {
constructorList = addConstructorsWithNew(constructorList);
- t = db.mr_constructors;
+ const t = db.mr_constructors;
t.drop();
t.save({"partner": 1, "visits": 9});
@@ -49,34 +52,38 @@ function mapReduceConstructorTest(constructorList) {
t.save({"partner": 2, "visits": 41});
t.save({"partner": 2, "visits": 41});
+ let dummy;
constructorList.valid.forEach(function(constructor) {
try {
- m = eval("dummy = function(){ emit( \"test\" , " + constructor + " ) }");
+ const m = eval("dummy = function(){ emit( \"test\" , " + constructor + " ) }");
- r = eval("dummy = function( k , v ){ return { test : " + constructor + " } }");
+ const r = eval("dummy = function( k , v ){ return { test : " + constructor + " } }");
- res = t.mapReduce(m, r, {out: "mr_constructors_out", scope: {xx: 1}});
+ out.drop();
+ assert.commandWorked(
+ t.mapReduce(m, r, {out: {merge: "map_reduce_constructors_out"}, scope: {xx: 1}}));
} catch (e) {
throw ("valid constructor: " + constructor + " failed in mapReduce context: " + e);
}
});
constructorList.invalid.forEach(function(constructor) {
- m = eval("dummy = function(){ emit( \"test\" , " + constructor + " ) }");
+ const m = eval("dummy = function(){ emit( \"test\" , " + constructor + " ) }");
- r = eval("dummy = function( k , v ){ return { test : " + constructor + " } }");
+ const r = eval("dummy = function( k , v ){ return { test : " + constructor + " } }");
assert.throws(function() {
- res = t.mapReduce(m, r, {out: "mr_constructors_out", scope: {xx: 1}});
+ out.drop();
+ t.mapReduce(m, r, {out: {merge: "map_reduce_constructors_out"}, scope: {xx: 1}});
}, [], "invalid constructor did not throw error in mapReduce context: " + constructor);
});
- db.mr_constructors_out.drop();
+ out.drop();
t.drop();
}
function whereConstructorTest(constructorList) {
constructorList = addConstructorsWithNew(constructorList);
- t = db.where_constructors;
+ const t = db.where_constructors;
t.drop();
assert.commandWorked(t.insert({x: 1}));
@@ -291,3 +298,4 @@ whereConstructorTest(uuidConstructors);
whereConstructorTest(md5Constructors);
whereConstructorTest(hexdataConstructors);
whereConstructorTest(dateConstructors);
+})();
diff --git a/jstests/core/find_and_modify2.js b/jstests/core/find_and_modify2.js
index a73dcb5fda5..d34fe5f4281 100644
--- a/jstests/core/find_and_modify2.js
+++ b/jstests/core/find_and_modify2.js
@@ -3,19 +3,23 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.find_and_modify2;
-t.drop();
+(function() {
+"use strict";
-t.insert({_id: 1, i: 0, j: 0});
+const coll = db.find_and_modify2;
+coll.drop();
-out = t.findAndModify({update: {$inc: {i: 1}}, 'new': true, fields: {i: 1}});
+coll.insert({_id: 1, i: 0, j: 0});
+
+let out = coll.findAndModify({update: {$inc: {i: 1}}, 'new': true, fields: {i: 1}});
assert.eq(out, {_id: 1, i: 1});
-out = t.findAndModify({update: {$inc: {i: 1}}, fields: {i: 0}});
+out = coll.findAndModify({update: {$inc: {i: 1}}, fields: {i: 0}});
assert.eq(out, {_id: 1, j: 0});
-out = t.findAndModify({update: {$inc: {i: 1}}, fields: {_id: 0, j: 1}});
+out = coll.findAndModify({update: {$inc: {i: 1}}, fields: {_id: 0, j: 1}});
assert.eq(out, {j: 0});
-out = t.findAndModify({update: {$inc: {i: 1}}, fields: {_id: 0, j: 1}, 'new': true});
+out = coll.findAndModify({update: {$inc: {i: 1}}, fields: {_id: 0, j: 1}, 'new': true});
assert.eq(out, {j: 0});
+})();
diff --git a/jstests/core/function_string_representations.js b/jstests/core/function_string_representations.js
index e724a680c37..9d79bb9c8d4 100644
--- a/jstests/core/function_string_representations.js
+++ b/jstests/core/function_string_representations.js
@@ -12,7 +12,8 @@
(function() {
"use strict";
-var col = db.function_string_representations;
+const col = db.function_string_representations;
+const out = db.map_reduce_example;
col.drop();
assert.commandWorked(col.insert({
_id: "abc123",
@@ -24,12 +25,16 @@ assert.commandWorked(col.insert({
var mapFunction = "function() {emit(this._id, this.price);}";
var reduceFunction = "function(keyCustId, valuesPrices) {return Array.sum(valuesPrices);}";
-assert.commandWorked(col.mapReduce(mapFunction, reduceFunction, {out: "map_reduce_example"}));
+out.drop();
+assert.commandWorked(
+ col.mapReduce(mapFunction, reduceFunction, {out: {merge: "map_reduce_example"}}));
// Provided strings may end with semicolons and/or whitespace
mapFunction += " ; ";
reduceFunction += " ; ";
-assert.commandWorked(col.mapReduce(mapFunction, reduceFunction, {out: "map_reduce_example"}));
+out.drop();
+assert.commandWorked(
+ col.mapReduce(mapFunction, reduceFunction, {out: {merge: "map_reduce_example"}}));
// $where exhibits the same behavior
var whereFunction = "function() {return this.price === 25;}";
diff --git a/jstests/core/index_stats.js b/jstests/core/index_stats.js
index 9d1ee063812..6bf31a56ecb 100644
--- a/jstests/core/index_stats.js
+++ b/jstests/core/index_stats.js
@@ -158,7 +158,7 @@ res = db.runCommand({
return val;
},
query: {b: 2},
- out: {inline: true}
+ out: {inline: 1}
});
assert.commandWorked(res);
countB++;
diff --git a/jstests/core/map_reduce_validation.js b/jstests/core/map_reduce_validation.js
index abdb951d816..9b0a28e3ce9 100644
--- a/jstests/core/map_reduce_validation.js
+++ b/jstests/core/map_reduce_validation.js
@@ -2,10 +2,7 @@
// @tags: [
// assumes_no_implicit_collection_creation_after_drop,
// uses_map_reduce_with_temp_collections,
-// does_not_support_stepdowns,
-// # TODO SERVER-42511: Remove this requires_fcv tag once the internalQueryUseAggMapReduce knob
-// # is removed.
-// requires_fcv_44,
+// does_not_support_stepdowns
// ]
(function() {
"use strict";
@@ -50,6 +47,9 @@ assert.commandFailedWithCode(db.runCommand({
ErrorCodes.CommandNotSupported);
// Test that you can output to a different database.
+// Create the other database.
+db.getSiblingDB("mr_validation_other").foo.drop();
+assert.commandWorked(db.getSiblingDB("mr_validation_other").createCollection("foo"));
assert.commandWorked(db.runCommand({
mapReduce: source.getName(),
map: mapFunc,
@@ -126,11 +126,9 @@ if (!FixtureHelpers.isMongos(db)) {
db.runCommand({mapReduce: "sourceView", map: mapFunc, reduce: reduceFunc, out: "foo"}),
ErrorCodes.CommandNotSupportedOnView);
- assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryUseAggMapReduce: true}));
assert.commandFailedWithCode(
db.runCommand({mapReduce: "sourceView", map: mapFunc, reduce: reduceFunc, out: "foo"}),
ErrorCodes.CommandNotSupportedOnView);
- assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryUseAggMapReduce: false}));
}
// Test that mapReduce fails gracefully if the query parameter is the wrong type.
diff --git a/jstests/core/mr_agg_explain.js b/jstests/core/mr_agg_explain.js
index d2fe0053e7a..e2d4e92c955 100644
--- a/jstests/core/mr_agg_explain.js
+++ b/jstests/core/mr_agg_explain.js
@@ -1,8 +1,6 @@
/**
* Tests that running mapReduce with explain behaves as expected.
- *
- * TODO SERVER-42511: Remove 'does_not_support_stepdowns' tag once query knob is removed.
- * @tags: [does_not_support_stepdowns, requires_fcv_44]
+ * @tags: [incompatible_with_embedded]
*/
(function() {
"use strict";
@@ -28,33 +26,28 @@ const mr = {
reduce: reduceFunc,
out: "inline"
};
-try {
- // Succeeds for all modes when using agg map reduce.
- assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryUseAggMapReduce: true}));
- for (let verbosity of ["queryPlanner", "executionStats", "allPlansExecution"]) {
- const results = coll.explain(verbosity).mapReduce(mr);
-
- // Check server info
- assert(results.hasOwnProperty('serverInfo'), results);
- assert.hasFields(results.serverInfo, ['host', 'port', 'version', 'gitVersion']);
-
- const stages = getAggPlanStages(results, "$cursor");
- assert(stages !== null);
-
- // Verify that explain's output contains the fields that we expect.
- // We loop through in the case that explain is run against a sharded cluster.
- for (var i = 0; i < stages.length; i++) {
- const stage = stages[i]["$cursor"];
- if (verbosity != "allPlansExecution") {
- assert(stage.hasOwnProperty(verbosity));
- } else {
- assert(stage.hasOwnProperty("executionStats"));
- const execStats = stage["executionStats"];
- assert(execStats.hasOwnProperty(verbosity));
- }
+// Succeeds for all modes when using agg map reduce.
+for (let verbosity of ["queryPlanner", "executionStats", "allPlansExecution"]) {
+ const results = coll.explain(verbosity).mapReduce(mr);
+
+ // Check server info
+ assert(results.hasOwnProperty('serverInfo'), results);
+ assert.hasFields(results.serverInfo, ['host', 'port', 'version', 'gitVersion']);
+
+ const stages = getAggPlanStages(results, "$cursor");
+ assert(stages !== null);
+
+ // Verify that explain's output contains the fields that we expect.
+ // We loop through in the case that explain is run against a sharded cluster.
+ for (var i = 0; i < stages.length; i++) {
+ const stage = stages[i]["$cursor"];
+ if (verbosity != "allPlansExecution") {
+ assert(stage.hasOwnProperty(verbosity));
+ } else {
+ assert(stage.hasOwnProperty("executionStats"));
+ const execStats = stage["executionStats"];
+ assert(execStats.hasOwnProperty(verbosity));
}
}
-} finally {
- assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryUseAggMapReduce: false}));
}
}());
diff --git a/jstests/core/mr_comments.js b/jstests/core/mr_comments.js
index f919be1e86f..48441078ebb 100644
--- a/jstests/core/mr_comments.js
+++ b/jstests/core/mr_comments.js
@@ -16,18 +16,19 @@ outColl.drop();
assert.commandWorked(coll.insert([{foo: 1}, {foo: 1}, {foo: 2}]));
// Test using comments within the function.
-let res = db.runCommand({
+assert.commandWorked(db.runCommand({
mapreduce: coll.getName(),
map: "// This is a comment\n\n // Emit some stuff\n emit(this.foo, 1)\n",
reduce: function(key, values) {
return Array.sum(values);
},
- out: outColl.getName()
-});
-assert.eq(3, res.counts.emit);
+ out: {merge: outColl.getName()}
+}));
+assert.eq(2, outColl.find().toArray().length);
// Test using a multi-line string literal.
-res = db.runCommand({
+outColl.drop();
+assert.commandWorked(db.runCommand({
mapreduce: coll.getName(),
map: `
// This is a comment
@@ -38,18 +39,19 @@ res = db.runCommand({
reduce: function(key, values) {
return Array.sum(values);
},
- out: outColl.getName()
-});
-assert.eq(3, res.counts.emit);
+ out: {merge: outColl.getName()}
+}));
+assert.eq(2, outColl.find().toArray().length);
// Test that a function passed with a comment in front of it is still recognized.
-res = db.runCommand({
+outColl.drop();
+assert.commandWorked(db.runCommand({
mapreduce: coll.getName(),
map: "// This is a comment\nfunction(){\n // Emit some stuff\n emit(this.foo, 1)\n}\n",
reduce: function(key, values) {
return Array.sum(values);
},
- out: outColl.getName()
-});
-assert.eq(3, res.counts.emit);
+ out: {merge: outColl.getName()}
+}));
+assert.eq(2, outColl.find().toArray().length);
}());
diff --git a/jstests/core/mr_compute_avg.js b/jstests/core/mr_compute_avg.js
index f99c4223d13..a0320679ea0 100644
--- a/jstests/core/mr_compute_avg.js
+++ b/jstests/core/mr_compute_avg.js
@@ -74,12 +74,7 @@ assert.eq(16, resultAsSingleObj.b.avg, () => tojson(resultAsSingleObj));
assert.eq(18, resultAsSingleObj.c.avg, () => tojson(resultAsSingleObj));
outputColl.drop();
-// inline just need to exist - so set it to false to make sure the code is just checking for
-// existence
-// For now, mapReduce is expected to return inline results when an out.inline argument is specified,
-// regardless of integer value specified. TODO SERVER-42511 convert this test into one that verifies
-// specifying anything but 1 fails.
-res = coll.mapReduce(mapFn, reduceFn, {finalize: finalizeFn, out: {inline: 0}});
+res = coll.mapReduce(mapFn, reduceFn, {finalize: finalizeFn, out: {inline: 1}});
assert.commandWorked(res);
resultAsSingleObj = reformat(res);
assert.eq(9, resultAsSingleObj.a.avg, () => tojson(resultAsSingleObj));
@@ -88,7 +83,7 @@ assert.eq(18, resultAsSingleObj.c.avg, () => tojson(resultAsSingleObj));
outputColl.drop();
assert(!("result" in res), () => `Expected inline output with 'results': ${tojson(res)}`);
-res = coll.mapReduce(mapFn, reduceFn, {finalize: finalizeFn, out: {inline: 5}});
+res = coll.mapReduce(mapFn, reduceFn, {finalize: finalizeFn, out: {inline: 1}});
assert.commandWorked(res);
resultAsSingleObj = reformat(res);
assert.eq(9, resultAsSingleObj.a.avg, () => tojson(resultAsSingleObj));
diff --git a/jstests/core/mr_correctness.js b/jstests/core/mr_correctness.js
index 466e5bc1b32..9dc6aaf1c4c 100644
--- a/jstests/core/mr_correctness.js
+++ b/jstests/core/mr_correctness.js
@@ -36,10 +36,13 @@ function reduceObjs(key, values) {
const outColl = db[coll.getName() + "_out"];
outColl.drop();
(function testBasicMapReduce() {
- const res = db.runCommand(
- {mapReduce: coll.getName(), map: mapToObj, reduce: reduceObjs, out: outColl.getName()});
+ const res = db.runCommand({
+ mapReduce: coll.getName(),
+ map: mapToObj,
+ reduce: reduceObjs,
+ out: {merge: outColl.getName()}
+ });
assert.commandWorked(res);
- assert.eq(4, res.counts.input);
assert.eq(res.result, outColl.getName());
assert.eq(
@@ -64,10 +67,9 @@ outColl.drop();
map: mapToObj,
reduce: reduceObjs,
query: {x: {$gt: 2}},
- out: outColl.getName()
+ out: {merge: outColl.getName()}
});
assert.commandWorked(res);
- assert.eq(2, res.counts.input, () => tojson(res));
assert.eq(res.result, outColl.getName());
const keys = {};
for (let result of outColl.find().toArray()) {
@@ -102,10 +104,9 @@ function reduceNumbers(key, values) {
map: mapToNumber,
reduce: reduceNumbers,
query: {x: {$gt: 2}},
- out: outColl.getName()
+ out: {merge: outColl.getName()}
});
assert.commandWorked(res);
- assert.eq(2, res.counts.input, () => tojson(res));
assert.eq(res.result, outColl.getName());
const keys = {};
for (let result of outColl.find().toArray()) {
@@ -125,10 +126,13 @@ function reduceNumbers(key, values) {
}
assert.commandWorked(bulk.execute());
- const res = db.runCommand(
- {mapReduce: coll.getName(), map: mapToObj, reduce: reduceObjs, out: outColl.getName()});
+ const res = db.runCommand({
+ mapReduce: coll.getName(),
+ map: mapToObj,
+ reduce: reduceObjs,
+ out: {merge: outColl.getName()}
+ });
assert.commandWorked(res);
- assert.eq(999, res.counts.input, () => tojson(res));
assert.eq(res.result, outColl.getName());
assert.eq(4,
outColl.find().count(),
@@ -143,26 +147,6 @@ function reduceNumbers(key, values) {
outColl.drop();
}());
-(function testThatVerboseOptionIncludesTimingInformation() {
- const cmd =
- {mapReduce: coll.getName(), map: mapToObj, reduce: reduceObjs, out: outColl.getName()};
- const withoutVerbose = assert.commandWorked(db.runCommand(cmd));
- // TODO SERVER-43290 The verbose option should have the same effect on mongos.
- assert(FixtureHelpers.isMongos(db) || !withoutVerbose.hasOwnProperty("timing"));
- const withVerbose = assert.commandWorked(db.runCommand(Object.merge(cmd, {verbose: true})));
- assert(withVerbose.hasOwnProperty("timing"));
-}());
-
-(function testMapReduceAgainstNonExistentCollection() {
- assert.commandFailedWithCode(db.runCommand({
- mapReduce: "lasjdlasjdlasjdjasldjalsdj12e",
- map: mapToObj,
- reduce: reduceObjs,
- out: outColl.getName()
- }),
- ErrorCodes.NamespaceNotFound);
-}());
-
(function testHighCardinalityKeySet() {
let correctValues = {};
@@ -178,8 +162,12 @@ function reduceNumbers(key, values) {
}
assert.commandWorked(bulk.execute());
- const res = db.runCommand(
- {mapReduce: coll.getName(), out: outColl.getName(), map: mapToObj, reduce: reduceObjs});
+ const res = db.runCommand({
+ mapReduce: coll.getName(),
+ out: {merge: outColl.getName()},
+ map: mapToObj,
+ reduce: reduceObjs
+ });
assert.commandWorked(res);
assert.eq(res.result, outColl.getName());
let actualValues = {};
diff --git a/jstests/core/mr_fail_invalid_js.js b/jstests/core/mr_fail_invalid_js.js
index 454e7b4ade6..01b422c1fe3 100644
--- a/jstests/core/mr_fail_invalid_js.js
+++ b/jstests/core/mr_fail_invalid_js.js
@@ -33,20 +33,17 @@ const outputColl = db.mr_fail_invalid_js_out;
emit(tag, 1);
}
};
- assert.commandWorked(coll.mapReduce(goodMapFn, reduceFn, {out: outputColl.getName()}));
+ assert.commandWorked(coll.mapReduce(goodMapFn, reduceFn, {out: {merge: outputColl.getName()}}));
outputColl.drop();
- // First test that a single missing path will just produce null keys.
+ // mapReduce fails when attempting to merge a missing key.
const singleInvalidPathMapFn = function() {
emit(this.missing_field, this.x);
};
- assert.commandWorked(
- coll.mapReduce(singleInvalidPathMapFn, reduceFn, {out: outputColl.getName()}));
- const undefinedTypeCode = 6;
- const nullTypeCode = 10;
- assert.eq(0, outputColl.find({_id: {$type: undefinedTypeCode}}).itcount());
- assert.eq(1, outputColl.find({_id: {$type: nullTypeCode}}).itcount());
+ assert.throws(() => coll.mapReduce(
+ singleInvalidPathMapFn, reduceFn, {out: {merge: outputColl.getName()}}),
+ []);
// Now test that a traversal through a missing path will cause an error.
const badMapFn = function() {
@@ -54,7 +51,7 @@ const outputColl = db.mr_fail_invalid_js_out;
};
assert.throws(
- () => coll.mapReduce(newMapFn, reduceFn, {out: outputColl.getName()}),
+ () => coll.mapReduce(newMapFn, reduceFn, {out: {merge: outputColl.getName()}}),
[],
"expected mapReduce to throw because map function references path that does not exist");
@@ -88,7 +85,7 @@ const outputColl = db.mr_fail_invalid_js_out;
};
// First test that a valid command succeeds.
- let res = coll.mapReduce(goodMapFn, goodReduceFn, outputColl.getName());
+ let res = coll.mapReduce(goodMapFn, goodReduceFn, {out: {merge: outputColl.getName()}});
assert.eq([{_id: 1, value: 1}, {_id: 2, value: 2}, {_id: 3, value: 2}, {_id: 4, value: 1}],
outputColl.find().sort({_id: 1}).toArray());
@@ -110,7 +107,8 @@ const outputColl = db.mr_fail_invalid_js_out;
// Test that things are still in an ok state and the next mapReduce can succeed.
outputColl.drop();
- assert.commandWorked(coll.mapReduce(goodMapFn, goodReduceFn, outputColl.getName()));
+ assert.commandWorked(
+ coll.mapReduce(goodMapFn, goodReduceFn, {out: {merge: outputColl.getName()}}));
assert.eq([{_id: 1, value: 1}, {_id: 2, value: 2}, {_id: 3, value: 2}, {_id: 4, value: 1}],
outputColl.find().sort({_id: 1}).toArray());
assert(outputColl.drop());
diff --git a/jstests/core/mr_killop.js b/jstests/core/mr_killop.js
index 07a92e289eb..82d72e35545 100644
--- a/jstests/core/mr_killop.js
+++ b/jstests/core/mr_killop.js
@@ -30,7 +30,8 @@ function getOpCode() {
const cmdBody = op.command;
if (cmdBody.$truncated) {
const stringifiedCmd = cmdBody.$truncated;
- return stringifiedCmd.search('mapreduce') >= 0 &&
+ return (stringifiedCmd.search('mapreduce') >= 0 ||
+ stringifiedCmd.search('aggregate') >= 0) &&
stringifiedCmd.search(source.getName()) >= 0;
}
diff --git a/jstests/core/mr_multikey_deduping.js b/jstests/core/mr_multikey_deduping.js
index af87cf11d01..5f113916eba 100644
--- a/jstests/core/mr_multikey_deduping.js
+++ b/jstests/core/mr_multikey_deduping.js
@@ -5,6 +5,8 @@
// does_not_support_stepdowns,
// uses_map_reduce_with_temp_collections,
// ]
+
+load("jstests/aggregation/extras/utils.js"); // For resultsEq
(function() {
"use strict";
@@ -23,21 +25,18 @@ outColl.drop();
return Array.sum(vals);
};
- let res =
- assert.commandWorked(coll.mapReduce(mapFn, reduceFn, {out: outColl.getName(), query: {}}));
- assert.eq(1, res.counts.input);
+ let res = assert.commandWorked(
+ coll.mapReduce(mapFn, reduceFn, {out: {merge: outColl.getName()}, query: {}}));
assert(outColl.drop());
- res = assert.commandWorked(
- coll.mapReduce(mapFn, reduceFn, {out: outColl.getName(), query: {arr: {$gte: 0}}}));
- assert.eq(1, res.counts.input);
+ res = assert.commandWorked(coll.mapReduce(
+ mapFn, reduceFn, {out: {merge: outColl.getName()}, query: {arr: {$gte: 0}}}));
assert(outColl.drop());
// Now test that we get the same results when there's an index present.
assert.commandWorked(coll.ensureIndex({arr: 1}));
- res = assert.commandWorked(
- coll.mapReduce(mapFn, reduceFn, {out: outColl.getName(), query: {arr: {$gte: 0}}}));
- assert.eq(1, res.counts.input);
+ res = assert.commandWorked(coll.mapReduce(
+ mapFn, reduceFn, {out: {merge: outColl.getName()}, query: {arr: {$gte: 0}}}));
assert(outColl.drop());
}());
@@ -62,7 +61,7 @@ outColl.drop();
const resultsNoIndexNoQuery =
assert
.commandWorked(db.runCommand(
- {mapreduce: coll.getName(), map: mapFn, reduce: reduceFn, out: {inline: true}}))
+ {mapreduce: coll.getName(), map: mapFn, reduce: reduceFn, out: {inline: 1}}))
.results;
const resultsNoIndexEqualityOnName = assert
.commandWorked(db.runCommand({
@@ -70,7 +69,7 @@ outColl.drop();
map: mapFn,
reduce: reduceFn,
query: {name: 'name1'},
- out: {inline: true}
+ out: {inline: 1}
}))
.results;
const resultsNoIndexRangeOnName = assert
@@ -79,21 +78,22 @@ outColl.drop();
map: mapFn,
reduce: reduceFn,
query: {name: {$gt: 'name'}},
- out: {inline: true}
+ out: {inline: 1}
}))
.results;
- assert.eq([{_id: "cat", value: 3}, {_id: "dog", value: 2}, {_id: "mouse", value: 1}],
- resultsNoIndexNoQuery);
- assert.eq([{_id: "cat", value: 1}, {_id: "dog", value: 1}], resultsNoIndexEqualityOnName);
- assert.eq(resultsNoIndexNoQuery, resultsNoIndexRangeOnName);
+ assert(resultsEq([{_id: "cat", value: 3}, {_id: "dog", value: 2}, {_id: "mouse", value: 1}],
+ resultsNoIndexNoQuery));
+ assert(
+ resultsEq([{_id: "cat", value: 1}, {_id: "dog", value: 1}], resultsNoIndexEqualityOnName));
+ assert(resultsEq(resultsNoIndexNoQuery, resultsNoIndexRangeOnName));
assert.commandWorked(coll.ensureIndex({name: 1, tags: 1}));
const resultsIndexedNoQuery =
assert
.commandWorked(db.runCommand(
- {mapreduce: coll.getName(), map: mapFn, reduce: reduceFn, out: {inline: true}}))
+ {mapreduce: coll.getName(), map: mapFn, reduce: reduceFn, out: {inline: 1}}))
.results;
const resultsIndexedEqualityOnName = assert
.commandWorked(db.runCommand({
@@ -101,7 +101,7 @@ outColl.drop();
map: mapFn,
reduce: reduceFn,
query: {name: 'name1'},
- out: {inline: true}
+ out: {inline: 1}
}))
.results;
const resultsIndexedRangeOnName = assert
@@ -110,12 +110,12 @@ outColl.drop();
map: mapFn,
reduce: reduceFn,
query: {name: {$gt: 'name'}},
- out: {inline: true}
+ out: {inline: 1}
}))
.results;
- assert.eq(resultsNoIndexNoQuery, resultsIndexedNoQuery);
- assert.eq(resultsNoIndexEqualityOnName, resultsIndexedEqualityOnName);
- assert.eq(resultsNoIndexRangeOnName, resultsIndexedRangeOnName);
+ assert(resultsEq(resultsNoIndexNoQuery, resultsIndexedNoQuery));
+ assert(resultsEq(resultsNoIndexEqualityOnName, resultsIndexedEqualityOnName));
+ assert(resultsEq(resultsNoIndexRangeOnName, resultsIndexedRangeOnName));
}());
}());
diff --git a/jstests/core/mr_output_other_db.js b/jstests/core/mr_output_other_db.js
index 4d0d7e83a7e..0ecd28ed0cb 100644
--- a/jstests/core/mr_output_other_db.js
+++ b/jstests/core/mr_output_other_db.js
@@ -25,7 +25,9 @@ const reduceFn = function(k, vs) {
return Array.sum(vs);
};
-(function testReplace() {
+// TODO SERVER-42511 we should fix up and call the functions in this file once we flip on the new
+// implementation.
+function testReplace() {
let res = assert.commandWorked(
coll.mapReduce(mapFn, reduceFn, {out: {replace: outCollStr, db: outDbStr}}));
const expected =
@@ -42,11 +44,8 @@ const reduceFn = function(k, vs) {
coll.mapReduce(mapFn, reduceFn, {out: {replace: outCollStr, db: outDbStr}}));
actual = outColl.find().sort({_id: 1}).toArray();
assert.eq(expected, actual);
-}());
+}
-// TODO SERVER-42511 we should call 'testMerge()' and 'testReduce()' once we flip on the new
-// implementation. Today these would fail in certain sharded passthroughs, as described in
-// SERVER-44238.
function testMerge() {
outColl.drop();
assert.commandWorked(
diff --git a/jstests/core/mr_scope.js b/jstests/core/mr_scope.js
index 6038da167ad..017d39b9268 100644
--- a/jstests/core/mr_scope.js
+++ b/jstests/core/mr_scope.js
@@ -34,7 +34,7 @@ const reduceFn = function(key, values) {
};
assert.commandWorked(
- coll.mapReduce(mapFn, reduceFn, {out: outputColl.getName(), scope: {xx: {val: 1}}}));
+ coll.mapReduce(mapFn, reduceFn, {out: {merge: outputColl.getName()}, scope: {xx: {val: 1}}}));
assert.eq(3, outputColl.find().itcount());
assert.eq(1, outputColl.count({_id: "a", "value.count": 2}));
@@ -43,7 +43,7 @@ assert.eq(1, outputColl.count({_id: "c", "value.count": 3}));
outputColl.drop();
assert.commandWorked(
- coll.mapReduce(mapFn, reduceFn, {scope: {xx: {val: 2}}, out: outputColl.getName()}));
+ coll.mapReduce(mapFn, reduceFn, {scope: {xx: {val: 2}}, out: {merge: outputColl.getName()}}));
assert.eq(3, outputColl.find().itcount());
assert.eq(1, outputColl.count({_id: "a", "value.count": 4}));
diff --git a/jstests/core/mr_use_this_object.js b/jstests/core/mr_use_this_object.js
index 15ccc263c0d..8c6061d0fa8 100644
--- a/jstests/core/mr_use_this_object.js
+++ b/jstests/core/mr_use_this_object.js
@@ -41,7 +41,7 @@ const reducer = function(k, v) {
return {stats: stats, total: total};
};
-assert.commandWorked(coll.mapReduce(mapper, reducer, {out: outputColl.getName()}));
+assert.commandWorked(coll.mapReduce(mapper, reducer, {out: {merge: outputColl.getName()}}));
let resultAsObj = outputColl.convertToSingleObject("value");
assert.eq(2,
@@ -59,7 +59,7 @@ mapper = function() {
emit(this[x], {stats: [this[y]]});
};
-assert.commandWorked(coll.mapReduce(mapper, reducer, {out: outputColl.getName()}));
+assert.commandWorked(coll.mapReduce(mapper, reducer, {out: {merge: outputColl.getName()}}));
resultAsObj = outputColl.convertToSingleObject("value");
assert.eq(2,
diff --git a/jstests/core/or4.js b/jstests/core/or4.js
index c682a2e34c9..8cb5163544a 100644
--- a/jstests/core/or4.js
+++ b/jstests/core/or4.js
@@ -5,11 +5,13 @@
// requires_non_retryable_writes,
// ]
+load("jstests/aggregation/extras/utils.js"); // For resultsEq
(function() {
"use strict";
const coll = db.or4;
coll.drop();
+db.getCollection("mrOutput").drop();
coll.ensureIndex({a: 1});
coll.ensureIndex({b: 1});
@@ -64,16 +66,20 @@ assert.eq(4, coll.find({$or: [{a: 2}, {b: 3}]}).limit(4).toArray().length);
assert.eq([1, 2], Array.sort(coll.distinct('a', {$or: [{a: 2}, {b: 3}]})));
-assert.eq(5,
- coll.mapReduce(
- function() {
- emit('a', this.a);
- },
- function(key, vals) {
- return vals.length;
- },
- {out: {inline: 1}, query: {$or: [{a: 2}, {b: 3}]}})
- .counts.input);
+assert.commandWorked(coll.mapReduce(
+ function() {
+ if (!this.hasOwnProperty('a')) {
+ emit('a', 0);
+ } else {
+ emit('a', this.a);
+ }
+ },
+ function(key, vals) {
+ return vals.reduce((a, b) => a + b, 0);
+ },
+ {out: {merge: "mrOutput"}, query: {$or: [{a: 2}, {b: 3}]}}));
+assert(resultsEq([{"_id": "a", "value": 7}], db.getCollection("mrOutput").find().toArray()),
+ db.getCollection("mrOutput").find().toArray());
coll.remove({});
diff --git a/jstests/core/recursion.js b/jstests/core/recursion.js
index 2577aef79ae..32e4547fb75 100644
--- a/jstests/core/recursion.js
+++ b/jstests/core/recursion.js
@@ -27,7 +27,7 @@ function mapReduceRecursion() {
})();
},
function() {},
- {out: 'inline'});
+ {out: {merge: 'out_coll'}});
}
assert.commandWorked(db.recursion.insert({}));
diff --git a/jstests/core/slice1.js b/jstests/core/slice1.js
index 4fbcf81de5f..cf454689203 100644
--- a/jstests/core/slice1.js
+++ b/jstests/core/slice1.js
@@ -1,10 +1,13 @@
-t = db.slice1;
+(function() {
+"use strict";
+
+let t = db.slice1;
t.drop();
t.insert({_id: 1, a: [0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1], b: 1, c: 1});
// first three
-out = t.findOne({}, {a: {$slice: 3}});
+let out = t.findOne({}, {a: {$slice: 3}});
assert.eq(out.a, [0, 1, 2], '1');
// last three
@@ -73,3 +76,4 @@ assert.eq(out.a, [[3, 3, 3]], 'n 2');
out = t.findOne({}, {a: {$slice: [0, 2]}});
assert.eq(out.a, [[1, 1, 1], [2, 2, 2]], 'n 2');
+})();
diff --git a/jstests/libs/override_methods/implicitly_shard_accessed_collections.js b/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
index fc83df394cb..3e7e8dc3c55 100644
--- a/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
+++ b/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
@@ -181,7 +181,7 @@ Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
let outputSpec = cmdObj.out;
if (typeof (outputSpec) === "string") {
this.getDB(dbName)[outputSpec].drop(); // This will implicitly shard it.
- outputSpec = {replace: outputSpec, sharded: true};
+ outputSpec = {replace: outputSpec};
} else if (typeof (outputSpec) !== "object") {
// This is a malformed command, just send it along.
return originalRunCommand.apply(this, arguments);
diff --git a/jstests/libs/profiler.js b/jstests/libs/profiler.js
index 63af457299b..45b64d33d30 100644
--- a/jstests/libs/profiler.js
+++ b/jstests/libs/profiler.js
@@ -4,13 +4,9 @@
function buildCommandProfile(command, sharded) {
let commandProfile = {};
- if (sharded && command.mapReduce) {
- // Unlike other read commands, mapReduce is rewritten to a different format when sent to
- // shards if the input collection is sharded, because it is executed in two phases.
- // We do not check for the 'map' and 'reduce' fields, because they are functions, and
- // we cannot compaare functions for equality.
- commandProfile["command.out"] = {$regex: "^tmp.mrs"};
- commandProfile["command.shardedFirstPass"] = true;
+ if (command.mapReduce) {
+ // MapReduce is rewritten to an aggregate pipeline.
+ commandProfile["command.aggregate"] = command.mapReduce;
} else if (command.update) {
// Updates are batched, but only allow using buildCommandProfile() for an update batch that
// contains a single update, since the profiler generates separate entries for each update
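With this helper change, profiler assertions for mapReduce key off the translated aggregation
rather than the old two-phase "tmp.mrs" output. A hedged sketch of how the updated helper
behaves ("testColl" and the map/reduce bodies are placeholders, not from this patch):

    // buildCommandProfile() is the helper defined above in jstests/libs/profiler.js.
    const mrCommand = {
        mapReduce: "testColl",
        map: function() { emit(this.a, 1); },
        reduce: function(key, values) { return Array.sum(values); },
        out: {inline: 1}
    };
    const filter = buildCommandProfile(mrCommand, true /* sharded */);
    // The mapReduce branch now contributes {"command.aggregate": "testColl"} to the filter,
    // instead of matching {"command.out": /^tmp.mrs/, "command.shardedFirstPass": true}.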
diff --git a/jstests/multiVersion/map_reduce_multiversion_cluster.js b/jstests/multiVersion/map_reduce_multiversion_cluster.js
index e2020744610..64e6947f76c 100644
--- a/jstests/multiVersion/map_reduce_multiversion_cluster.js
+++ b/jstests/multiVersion/map_reduce_multiversion_cluster.js
@@ -169,13 +169,6 @@ runValidMrTests(sourceColl);
//
st.upgradeCluster("latest", {upgradeShards: true, upgradeConfigs: true, upgradeMongos: false});
-// Now that we've upgraded the shards to the latest binary version, switch on the query knob to
-// enable MR in agg.
-assert.commandWorked(st.rs0.getPrimary().getDB(dbName).adminCommand(
- {setParameter: 1, internalQueryUseAggMapReduce: true}));
-assert.commandWorked(st.rs1.getPrimary().getDB(dbName).adminCommand(
- {setParameter: 1, internalQueryUseAggMapReduce: true}));
-
//
// Test against a mixed version cluster where the shards are upgraded to the latest binary but still
// in FCV 4.2. Mongos is still on the 4.2 binary version.
@@ -188,8 +181,6 @@ runValidMrTests(sourceColl);
st.upgradeCluster("latest", {upgradeShards: false, upgradeConfigs: false, upgradeMongos: true});
mongosConn = st.s;
sourceColl = mongosConn.getDB(dbName)[collName];
-assert.commandWorked(
- mongosConn.getDB(dbName).adminCommand({setParameter: 1, internalQueryUseAggMapReduce: true}));
//
// Test against a cluster where both mongos and the shards are upgraded to the latest binary
diff --git a/jstests/multiVersion/map_reduce_multiversion_repl_set.js b/jstests/multiVersion/map_reduce_multiversion_repl_set.js
index 8853b9b5e9e..2ef7f71a6e3 100644
--- a/jstests/multiVersion/map_reduce_multiversion_repl_set.js
+++ b/jstests/multiVersion/map_reduce_multiversion_repl_set.js
@@ -102,11 +102,6 @@ rst.upgradeSet({binVersion: "latest"});
sourceDB = rst.getPrimary().getDB(dbName);
sourceColl = sourceDB[collName];
-// Now that we've upgraded the replica set to the latest binary version, switch on the query knob to
-// enable MR in agg.
-assert.commandWorked(rst.getPrimary().getDB(dbName).adminCommand(
- {setParameter: 1, internalQueryUseAggMapReduce: true}));
-
//
// Binary version 4.4 and FCV 4.2.
//
diff --git a/jstests/noPassthrough/currentop_query.js b/jstests/noPassthrough/currentop_query.js
index 56f39449cce..e4df7249966 100644
--- a/jstests/noPassthrough/currentop_query.js
+++ b/jstests/noPassthrough/currentop_query.js
@@ -261,16 +261,22 @@ function runTests({conn, readMode, currentOp, truncatedOps, localOps}) {
{
test: function(db) {
assert.commandWorked(db.currentop_query.mapReduce(() => {}, (a, b) => {}, {
- query: {$comment: "currentop_query"},
+ query: {$comment: "currentop_query_mr"},
out: {inline: 1},
}));
},
- command: "mapreduce",
planSummary: "COLLSCAN",
- currentOpFilter: {
- "command.query.$comment": "currentop_query",
- "ns": /^currentop_query.*currentop_query/
- }
+ // A mapReduce which gets sent to the shards is internally translated to an
+ // aggregation.
+ currentOpFilter:
+ (isRemoteShardCurOp ? {
+ "cursor.originatingCommand.aggregate": "currentop_query",
+ "cursor.originatingCommand.pipeline.0.$match.$comment": "currentop_query_mr"
+ }
+ : {
+ "command.query.$comment": "currentop_query_mr",
+ "ns": /^currentop_query.*currentop_query/
+ }),
},
{
test: function(db) {
diff --git a/jstests/noPassthrough/index_partial_no_explain_cmds.js b/jstests/noPassthrough/index_partial_no_explain_cmds.js
index 9d0dc8eb246..2ad426d9d23 100644
--- a/jstests/noPassthrough/index_partial_no_explain_cmds.js
+++ b/jstests/noPassthrough/index_partial_no_explain_cmds.js
@@ -1,12 +1,15 @@
// Test partial indexes with commands that don't use explain. These commands are tested against
// mongod with the --notablescan flag set, so that they fail if the index is not used.
+load("jstests/aggregation/extras/utils.js"); // For resultsEq
(function() {
"use strict";
var runner = MongoRunner.runMongod({setParameter: "notablescan=1"});
-var coll = runner.getDB("test").index_partial_no_explain_cmds;
+const db = runner.getDB("test");
+var coll = db.index_partial_no_explain_cmds;
var ret;
coll.drop();
+db.getCollection("mrOutput").drop();
assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}}));
@@ -29,8 +32,9 @@ var reduceFunc = function(keyId, countArray) {
return Array.sum(countArray);
};
-ret = coll.mapReduce(mapFunc, reduceFunc, {out: "inline", query: {x: {$gt: 1}, a: 1}});
-assert.eq(1, ret.counts.input);
+assert.commandWorked(
+ coll.mapReduce(mapFunc, reduceFunc, {out: "mrOutput", query: {x: {$gt: 1}, a: 1}}));
+assert(resultsEq([{"_id": 2, "value": 1}], db.getCollection("mrOutput").find().toArray()));
//
// Test distinct.
diff --git a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
index 52bd8b22ceb..e2bd18146e4 100644
--- a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
+++ b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
@@ -1,47 +1,41 @@
-// This test validates that map/reduce runs intermediate reduce steps in order to keep the
-// in-memory state small. See SERVER-12949 for more details.
-//
-function assertGLEOK(status) {
- assert(status.ok && status.err === null, "Expected OK status object; found " + tojson(status));
-}
-
-var db = db.getSisterDB("MapReduceTestDB");
-db.dropDatabase();
-
-var coll = db.getCollection("mrInput");
-
-// Insert 10 x 49 elements (10 i-s, 49 j-s)
-//
-var expectedOutColl = [];
-
-var bulk = coll.initializeUnorderedBulkOp();
-for (var i = 0; i < 10; i++) {
- for (var j = 1; j < 50; j++) {
- bulk.insert({idx: i, j: j});
- }
- expectedOutColl.push({_id: i, value: j - 1});
-}
-assert.commandWorked(bulk.execute());
-
-function mapFn() {
- emit(this.idx, 1);
-}
-function reduceFn(key, values) {
- return Array.sum(values);
-}
-
-var out = coll.mapReduce(mapFn, reduceFn, {out: {replace: "mrOutput"}});
-
-// Check the output is as expected
-//
-var outColl = db.getCollection("mrOutput").find().toArray();
-assert.eq(outColl, expectedOutColl, "The output collection is incorrect.");
-
-assert.eq(out.counts.input, 490, "input count is wrong");
-assert.eq(out.counts.emit, 490, "emit count is wrong");
-
-// If this fails, most probably some of the configuration settings under mongo::mr::Config have
-// changed, such as reduceTriggerRatio or maxInMemSize. If not the case, then something else
-// must have changed with when intermediate reduces occur (see mongo::mr::State::checkSize).
-//
-assert.eq(out.counts.reduce, 14, "reduce count is wrong");
+// This test validates that map/reduce runs intermediate reduce steps in order to keep the
+// in-memory state small. See SERVER-12949 for more details.
+
+load("jstests/aggregation/extras/utils.js"); // For resultsEq
+(function() {
+"use strict";
+
+function assertGLEOK(status) {
+ assert(status.ok && status.err === null, "Expected OK status object; found " + tojson(status));
+}
+
+db = db.getSisterDB("MapReduceTestDB");
+db.dropDatabase();
+
+var coll = db.getCollection("mrInput");
+
+// Insert 10 x 49 elements (10 i-s, 49 j-s)
+var expectedOutColl = [];
+
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < 10; i++) {
+ for (var j = 1; j < 50; j++) {
+ bulk.insert({idx: i, j: j});
+ }
+ expectedOutColl.push({_id: i, value: j - 1});
+}
+assert.commandWorked(bulk.execute());
+
+function mapFn() {
+ emit(this.idx, 1);
+}
+function reduceFn(key, values) {
+ return Array.sum(values);
+}
+
+var out = coll.mapReduce(mapFn, reduceFn, {out: {replace: "mrOutput"}});
+
+// Check the output is as expected
+var outColl = db.getCollection("mrOutput").find().toArray();
+assert(resultsEq(outColl, expectedOutColl));
+})();
diff --git a/jstests/noPassthroughWithMongod/mr_noscripting.js b/jstests/noPassthroughWithMongod/mr_noscripting.js
index 715a0204e2c..59d16e37627 100644
--- a/jstests/noPassthroughWithMongod/mr_noscripting.js
+++ b/jstests/noPassthroughWithMongod/mr_noscripting.js
@@ -2,9 +2,6 @@ var conn = MongoRunner.runMongod({noscripting: ''});
var testDB = conn.getDB('foo');
var coll = testDB.bar;
-// TODO SERVER-42511 Remove the usage of internalQueryUseAggMapReduce.
-assert.commandWorked(testDB.adminCommand({setParameter: 1, internalQueryUseAggMapReduce: true}));
-
coll.insert({x: 1});
var map = function() {
@@ -22,4 +19,4 @@ assert.eq(0, mrResult.ok, 'mr result: ' + tojson(mrResult));
// Confirm that mongod did not crash
var cmdResult = testDB.adminCommand({serverStatus: 1});
assert(cmdResult.ok, 'serverStatus failed, result: ' + tojson(cmdResult));
-MongoRunner.stopMongod(conn);
\ No newline at end of file
+MongoRunner.stopMongod(conn);
diff --git a/jstests/replsets/prepare_conflict_read_concern_behavior.js b/jstests/replsets/prepare_conflict_read_concern_behavior.js
index 86f33973006..c580eca69bd 100644
--- a/jstests/replsets/prepare_conflict_read_concern_behavior.js
+++ b/jstests/replsets/prepare_conflict_read_concern_behavior.js
@@ -3,7 +3,7 @@
* and afterClusterTime reads are the only reads that should block on a prepared transaction. Reads
* that happen as part of a write should also block on a prepared transaction.
*
- * Also test that dbHash and mapReduce, which acquire collection S locks for reads, do not block on
+ * Also test that dbHash, which acquires a collection S lock for reads, does not block on
* a prepared transaction on secondaries. Otherwise, it would cause deadlocks when the prepared
* transaction reacquires locks (since locks were yielded on secondaries) at commit time. This test
* makes sure dbHash and mapReduce do not accept a non local read concern or afterClusterTime and so
@@ -203,10 +203,9 @@ function runTest() {
mapReduce({level: 'local', afterClusterTime: clusterTimeAfterPrepare}, secondaryTestDB),
ErrorCodes.InvalidOptions);
- jsTestLog("Test mapReduce doesn't support read concern other than local.");
+ jsTestLog("Test mapReduce doesn't support read concern other than local or available.");
assert.commandWorked(mapReduce({level: 'local'}, secondaryTestDB));
- assert.commandFailedWithCode(mapReduce({level: 'available'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
+ assert.commandWorked(mapReduce({level: 'available'}, secondaryTestDB));
assert.commandFailedWithCode(mapReduce({level: 'majority'}, secondaryTestDB),
ErrorCodes.InvalidOptions);
assert.commandFailedWithCode(mapReduce({level: 'snapshot'}, secondaryTestDB),
@@ -215,17 +214,13 @@ function runTest() {
ErrorCodes.InvalidOptions);
jsTestLog("Test mapReduce that writes is not allowed to run on secondaries.");
- // It currently returns ErrorCodes.PrimarySteppedDown in this case.
- assert.commandFailedWithCode(mapReduce({}, secondaryTestDB, "outColl"),
- [ErrorCodes.InvalidOptions, ErrorCodes.PrimarySteppedDown]);
+ assert.commandFailedWithCode(mapReduce({}, secondaryTestDB, "outColl"), [ErrorCodes.NotMaster]);
jsTestLog("Test mapReduce on secondary doesn't block on a prepared transaction.");
assert.commandWorked(mapReduce({}, secondaryTestDB));
- jsTestLog("Test mapReduce on primary blocks on collection S lock which conflicts with " +
- "a prepared transaction.");
- assert.commandFailedWithCode(mapReduce({}, testDB, {inline: 1}, failureTimeout),
- ErrorCodes.MaxTimeMSExpired);
+ jsTestLog("Test mapReduce on a primary doesn't block on a prepared transaction.");
+ assert.commandWorked(mapReduce({}, testDB));
// validate does not accept a non local read concern or afterClusterTime and it also sets
// ignore_prepare=true during its execution. Therefore, validate should never get prepare
diff --git a/jstests/sharding/collation_targeting.js b/jstests/sharding/collation_targeting.js
index 0e1f06de5b3..5485a9cc428 100644
--- a/jstests/sharding/collation_targeting.js
+++ b/jstests/sharding/collation_targeting.js
@@ -1,4 +1,5 @@
// Test shard targeting for queries with collation.
+// @tags: [requires_fcv_44]
(function() {
"use strict";
@@ -214,62 +215,59 @@ assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
// MapReduce.
-// The following set of tests assume that we're running against the new version of MR.
-if (TestData.setParameters.internalQueryUseAggMapReduce == 1) {
- // Test that the filter on mapReduce respects the non-simple collation from the user.
- assert.eq(2,
- assert
- .commandWorked(coll.mapReduce(
- function() {
- emit(this._id, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {a: "foo"}, collation: caseInsensitive}))
- .results.length);
-
- // Test that mapReduce respects the non-simple collation for the emitted keys. In this case, the
- // emitted keys "foo" and "FOO" should be considered equal.
- assert.eq(1,
- assert
- .commandWorked(coll.mapReduce(
- function() {
- emit(this.a, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {a: "foo"}, collation: caseInsensitive}))
- .results.length);
-
- // Test that the filter on mapReduce respects the simple collation if none is specified.
- assert.eq(1,
- assert
- .commandWorked(coll.mapReduce(
- function() {
- emit(this._id, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {a: "foo"}}))
- .results.length);
-
- // Test that mapReduce respects the simple collation for the emitted keys. In this case, the
- // emitted keys "foo" and "FOO" should *not* be considered equal.
- assert.eq(2,
- assert
- .commandWorked(coll.mapReduce(
- function() {
- emit(this.a, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {a: {$type: "string"}}}))
- .results.length);
-}
+// Test that the filter on mapReduce respects the non-simple collation from the user.
+assert.eq(2,
+ assert
+ .commandWorked(coll.mapReduce(
+ function() {
+ emit(this._id, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {a: "foo"}, collation: caseInsensitive}))
+ .results.length);
+
+// Test that mapReduce respects the non-simple collation for the emitted keys. In this case, the
+// emitted keys "foo" and "FOO" should be considered equal.
+assert.eq(1,
+ assert
+ .commandWorked(coll.mapReduce(
+ function() {
+ emit(this.a, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {a: "foo"}, collation: caseInsensitive}))
+ .results.length);
+
+// Test that the filter on mapReduce respects the simple collation if none is specified.
+assert.eq(1,
+ assert
+ .commandWorked(coll.mapReduce(
+ function() {
+ emit(this._id, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {a: "foo"}}))
+ .results.length);
+
+// Test that mapReduce respects the simple collation for the emitted keys. In this case, the
+// emitted keys "foo" and "FOO" should *not* be considered equal.
+assert.eq(2,
+ assert
+ .commandWorked(coll.mapReduce(
+ function() {
+ emit(this.a, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {a: {$type: "string"}}}))
+ .results.length);
// Remove.
diff --git a/jstests/sharding/collation_targeting_inherited.js b/jstests/sharding/collation_targeting_inherited.js
index 230727e106f..ab3a94ef6d5 100644
--- a/jstests/sharding/collation_targeting_inherited.js
+++ b/jstests/sharding/collation_targeting_inherited.js
@@ -1,4 +1,5 @@
// Test shard targeting for queries on a collection with a default collation.
+// @tags: [requires_fcv_44]
(function() {
"use strict";
@@ -233,63 +234,59 @@ assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
// MapReduce.
-// The following set of tests assume that we're running against the new version of MR.
-if (TestData.setParameters.internalQueryUseAggMapReduce == 1) {
- // Test that the filter on mapReduce respects the non-simple collation inherited from the
- // collection default.
- assert.eq(2,
- assert
- .commandWorked(collCaseInsensitive.mapReduce(
- function() {
- emit(this._id, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {a: "foo"}}))
- .results.length);
-
- // Test that mapReduce respects the non-simple default collation for the emitted keys. In this
- // case, the emitted keys "foo" and "FOO" should be considered equal.
- assert.eq(1,
- assert
- .commandWorked(collCaseInsensitive.mapReduce(
- function() {
- emit(this.a, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {a: "foo"}}))
- .results.length);
-
- // Test that the filter on mapReduce respects the simple collation if specified.
- assert.eq(1,
- assert
- .commandWorked(collCaseInsensitive.mapReduce(
- function() {
- emit(this.a, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {a: "foo"}, collation: {locale: "simple"}}))
- .results.length);
-
- // Test that mapReduce respects the user-specified simple collation for the emitted keys.
- assert.eq(
- 2,
- assert
- .commandWorked(collCaseInsensitive.mapReduce(
- function() {
- emit(this.a, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {a: {$type: "string"}}, collation: {locale: "simple"}}))
- .results.length);
-}
+// Test that the filter on mapReduce respects the non-simple collation inherited from the
+// collection default.
+assert.eq(2,
+ assert
+ .commandWorked(collCaseInsensitive.mapReduce(
+ function() {
+ emit(this._id, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {a: "foo"}}))
+ .results.length);
+
+// Test that mapReduce respects the non-simple default collation for the emitted keys. In this
+// case, the emitted keys "foo" and "FOO" should be considered equal.
+assert.eq(1,
+ assert
+ .commandWorked(collCaseInsensitive.mapReduce(
+ function() {
+ emit(this.a, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {a: "foo"}}))
+ .results.length);
+
+// Test that the filter on mapReduce respects the simple collation if specified.
+assert.eq(1,
+ assert
+ .commandWorked(collCaseInsensitive.mapReduce(
+ function() {
+ emit(this.a, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {a: "foo"}, collation: {locale: "simple"}}))
+ .results.length);
+
+// Test that mapReduce respects the user-specified simple collation for the emitted keys.
+assert.eq(2,
+ assert
+ .commandWorked(collCaseInsensitive.mapReduce(
+ function() {
+ emit(this.a, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {a: {$type: "string"}}, collation: {locale: "simple"}}))
+ .results.length);
// Remove.
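The inherited-collation case can also be reproduced outside of a sharded cluster: a collection created with a default collation applies it to mapReduce unless the command overrides it with {locale: "simple"}. The following is a hedged standalone sketch (database, collection name, and data are invented), not part of the patch.

const demoDB = db.getSiblingDB("collation_mr_inherited_demo");  // hypothetical database
demoDB.dropDatabase();
assert.commandWorked(demoDB.createCollection("c", {collation: {locale: "en_US", strength: 2}}));
assert.commandWorked(demoDB.c.insert([{a: "foo"}, {a: "FOO"}]));

// The collection's default (case-insensitive) collation applies: one merged key.
let res = assert.commandWorked(demoDB.c.mapReduce(
    function() {
        emit(this.a, 1);
    },
    function(key, values) {
        return Array.sum(values);
    },
    {out: {inline: 1}}));
assert.eq(1, res.results.length);

// An explicit simple collation overrides the default: the keys stay distinct.
res = assert.commandWorked(demoDB.c.mapReduce(
    function() {
        emit(this.a, 1);
    },
    function(key, values) {
        return Array.sum(values);
    },
    {out: {inline: 1}, collation: {locale: "simple"}}));
assert.eq(2, res.results.length);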
diff --git a/jstests/sharding/database_versioning_all_commands.js b/jstests/sharding/database_versioning_all_commands.js
index ef65810722f..bd69dc68d12 100644
--- a/jstests/sharding/database_versioning_all_commands.js
+++ b/jstests/sharding/database_versioning_all_commands.js
@@ -551,10 +551,6 @@ let testCases = {
mapReduce: {
run: {
sendsDbVersion: true,
- setUp: function(mongosConn, dbName, collName) {
- assert.commandWorked(
- mongosConn.adminCommand({setParameter: 1, internalQueryUseAggMapReduce: true}));
- },
command: function(dbName, collName) {
return {
mapReduce: collName,
@@ -570,10 +566,6 @@ let testCases = {
},
explain: {
sendsDbVersion: true,
- setUp: function(mongosConn, dbName, collName) {
- assert.commandWorked(
- mongosConn.adminCommand({setParameter: 1, internalQueryUseAggMapReduce: true}));
- },
command: function(dbName, collName) {
return {
explain: {
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index 81418228093..94c2f461cca 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -98,7 +98,6 @@ let doMR = function(n) {
var res = db.mr.mapReduce(m, r, "smr1_out");
printjson(res);
- assert.eq(4, res.counts.input, "MR T0 " + n);
var x = db[res.result];
assert.eq(3, x.find().count(), "MR T1 " + n);
@@ -118,7 +117,6 @@ let doMR = function(n) {
var res = db.mr.mapReduce(m, r, {out: {inline: 1}});
printjson(res);
- assert.eq(4, res.counts.input, "MR T6 " + n);
var z = {};
res.results.forEach(function(a) {
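The two assertions deleted from features2.js checked res.counts.input; the surviving assertions validate the results (or the output collection) instead, which does not depend on how the reply reports counts. A small hedged sketch of that validation style, with invented names and data:

const srcColl = db.mr_counts_demo;  // hypothetical collection
srcColl.drop();
assert.commandWorked(srcColl.insert([{x: "a"}, {x: "a"}, {x: "b"}, {x: "c"}]));

const res = assert.commandWorked(srcColl.mapReduce(
    function() {
        emit(this.x, 1);
    },
    function(key, values) {
        return Array.sum(values);
    },
    {out: {inline: 1}}));

// Four input documents produce three distinct keys, so three result documents are expected.
assert.eq(3, res.results.length);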
diff --git a/jstests/sharding/mapReduce_outSharded_checkUUID.js b/jstests/sharding/mapReduce_outSharded_checkUUID.js
deleted file mode 100644
index 03d3fcf59a2..00000000000
--- a/jstests/sharding/mapReduce_outSharded_checkUUID.js
+++ /dev/null
@@ -1,168 +0,0 @@
-(function() {
-"use strict";
-load("jstests/libs/uuid_util.js");
-
-function assertCollectionNotOnShard(db, coll) {
- const listCollsRes = db.runCommand({listCollections: 1, filter: {name: coll}});
- assert.commandWorked(listCollsRes);
- assert.neq(undefined, listCollsRes.cursor);
- assert.neq(undefined, listCollsRes.cursor.firstBatch);
- assert.eq(0, listCollsRes.cursor.firstBatch.length);
-}
-
-const st = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1}});
-const testDB = st.s0.getDB("mrShard");
-const inputColl = testDB.srcSharded;
-
-st.adminCommand({enableSharding: testDB.getName()});
-st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName);
-st.adminCommand({shardCollection: inputColl.getFullName(), key: {_id: 1}});
-
-const nDistinctKeys = 512;
-const nValuesPerKey = 100;
-const nTotalDocs = nDistinctKeys * nValuesPerKey;
-
-const bulk = inputColl.initializeUnorderedBulkOp();
-for (let key = 0; key < nDistinctKeys; key++) {
- for (let value = 0; value < nValuesPerKey; value++) {
- bulk.insert({key: key, value: value});
- }
-}
-assert.commandWorked(bulk.execute());
-
-function verifyOutput(mrOutput, expectedNOutputDocs) {
- assert.commandWorked(mrOutput);
- assert.eq(mrOutput.counts.input, nTotalDocs, `input count is wrong: ${tojson(mrOutput)}`);
- assert.eq(mrOutput.counts.emit, nTotalDocs, `emit count is wrong: ${tojson(mrOutput)}`);
- assert.gt(
- mrOutput.counts.reduce, nValuesPerKey - 1, `reduce count is wrong: ${tojson(mrOutput)}`);
- assert.eq(
- mrOutput.counts.output, expectedNOutputDocs, `output count is wrong: ${tojson(mrOutput)}`);
-}
-
-function mapFn() {
- emit(this.key, 1);
-}
-function reduceFn(key, values) {
- return Array.sum(values);
-}
-
-(function testShardedOutput() {
- // Check that merge to an existing empty sharded collection works and creates a new UUID after
- // M/R
- const outputColl = testDB[inputColl.getName() + "Out"];
- st.adminCommand({shardCollection: outputColl.getFullName(), key: {_id: 1}});
- let origUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
- let out = testDB.srcSharded.mapReduce(
- mapFn, reduceFn, {out: {merge: outputColl.getName(), sharded: true}});
- verifyOutput(out, nDistinctKeys);
- let newUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
- assert.neq(origUUID, newUUID);
-
- // Shard1 is the primary shard and only one chunk should have been written, so the chunk with
- // the new UUID should have been written to it.
- assert.eq(newUUID,
- getUUIDFromListCollections(st.shard1.getDB(testDB.getName()), outputColl.getName()));
-
- // Shard0 should not have any chunks from the output collection because all shards should have
- // returned an empty split point list in the first phase of the mapReduce, since the reduced
- // data size is far less than the chunk size setting of 1MB.
- assertCollectionNotOnShard(st.shard0.getDB(testDB.getName()), outputColl.getName());
-
- // Check that merge to an existing sharded collection that has data on all shards works and that
- // the collection uses the same UUID after M/R
- st.adminCommand({split: outputColl.getFullName(), middle: {"_id": 2000}});
- st.adminCommand(
- {moveChunk: outputColl.getFullName(), find: {"_id": 2000}, to: st.shard0.shardName});
- assert.commandWorked(outputColl.insert([{_id: 1000}, {_id: 2001}]));
- origUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
-
- out = testDB.srcSharded.mapReduce(
- mapFn, reduceFn, {out: {merge: outputColl.getName(), sharded: true}});
- verifyOutput(out, nDistinctKeys + 2);
-
- newUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
- assert.eq(origUUID, newUUID);
- assert.eq(newUUID,
- getUUIDFromListCollections(st.shard0.getDB(testDB.getName()), outputColl.getName()));
- assert.eq(newUUID,
- getUUIDFromListCollections(st.shard1.getDB(testDB.getName()), outputColl.getName()));
-
- // Check that replace to an existing sharded collection that has data on all shards works and that
- // the collection creates a new UUID after M/R.
- origUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
- out = testDB.srcSharded.mapReduce(
- mapFn, reduceFn, {out: {replace: outputColl.getName(), sharded: true}});
- verifyOutput(out, nDistinctKeys);
-
- newUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
- assert.neq(origUUID, newUUID);
-
- // Shard1 is the primary shard and only one chunk should have been written, so the chunk with
- // the new UUID should have been written to it.
- assert.eq(newUUID,
- getUUIDFromListCollections(st.shard1.getDB(testDB.getName()), outputColl.getName()));
-
- // Shard0 should not have any chunks from the output collection because all shards should have
- // returned an empty split point list in the first phase of the mapReduce, since the reduced
- // data size is far less than the chunk size setting of 1MB.
- assertCollectionNotOnShard(st.shard0.getDB(testDB.getName()), outputColl.getName());
-}());
-
-(function testUnshardedOutputColl() {
- // Check that reduce to an existing unsharded collection fails when `sharded: true`.
- const reduceOutput = testDB.reduceUnsharded;
- assert.commandWorked(testDB.runCommand({create: reduceOutput.getName()}));
- assert.commandFailed(testDB.runCommand({
- mapReduce: inputColl.getName(),
- map: mapFn,
- reduce: reduceFn,
- out: {reduce: reduceOutput.getName(), sharded: true}
- }));
-
- assert.commandWorked(testDB.reduceUnsharded.insert({x: 1}));
- assert.commandFailed(testDB.runCommand({
- mapReduce: inputColl.getName(),
- map: mapFn,
- reduce: reduceFn,
- out: {reduce: reduceOutput.getName(), sharded: true}
- }));
-
- // Check that replace to an existing unsharded collection works when `sharded: true`.
- const replaceOutput = testDB.replaceUnsharded;
- assert.commandWorked(testDB.runCommand({create: replaceOutput.getName()}));
- let origUUID =
- getUUIDFromListCollections(st.s.getDB(testDB.getName()), replaceOutput.getName());
-
- assert.commandWorked(testDB.runCommand({
- mapReduce: inputColl.getName(),
- map: mapFn,
- reduce: reduceFn,
- out: {replace: replaceOutput.getName(), sharded: true}
- }));
-
- let newUUID = getUUIDFromConfigCollections(st.s, replaceOutput.getFullName());
- assert.neq(origUUID, newUUID);
- assert.eq(
- newUUID,
- getUUIDFromListCollections(st.shard1.getDB(testDB.getName()), replaceOutput.getName()));
-
- assert.commandWorked(testDB.replaceUnsharded.insert({x: 1}));
- origUUID = getUUIDFromListCollections(st.s.getDB(testDB.getName()), replaceOutput.getName());
-
- assert.commandWorked(testDB.runCommand({
- mapReduce: inputColl.getName(),
- map: mapFn,
- reduce: reduceFn,
- out: {replace: replaceOutput.getName(), sharded: true}
- }));
-
- newUUID = getUUIDFromConfigCollections(st.s, replaceOutput.getFullName());
- assert.neq(origUUID, newUUID);
- assert.eq(
- newUUID,
- getUUIDFromListCollections(st.shard1.getDB(testDB.getName()), replaceOutput.getName()));
-}());
-
-st.stop();
-})();
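The deleted mapReduce_outSharded_checkUUID.js compared collection UUIDs before and after the mapReduce via helpers from jstests/libs/uuid_util.js. Below is a rough sketch of the underlying technique, with a hypothetical local helper standing in for those library functions; the collection names in the usage comments are illustrative.

// Read a collection's UUID from listCollections.
function getCollectionUUID(database, collName) {
    const res = assert.commandWorked(
        database.runCommand({listCollections: 1, filter: {name: collName}}));
    const entries = res.cursor.firstBatch;
    assert.eq(1, entries.length, tojson(res));
    return entries[0].info.uuid;
}

// Illustrative usage: a replace-mode output recreates the target collection,
// so its UUID is expected to change.
// const before = getCollectionUUID(db, "targetColl");
// ... run mapReduce with {out: {replace: "targetColl"}} ...
// const after = getCollectionUUID(db, "targetColl");
// assert.neq(before, after);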
diff --git a/jstests/sharding/map_reduce_invalid_output_collection.js b/jstests/sharding/map_reduce_invalid_output_collection.js
index b635bd29165..6f688389d13 100644
--- a/jstests/sharding/map_reduce_invalid_output_collection.js
+++ b/jstests/sharding/map_reduce_invalid_output_collection.js
@@ -49,48 +49,30 @@ assert.commandWorked(
// legacy mapReduce.
assert.commandWorked(st.s.getDB(dbName).getCollection(outColl).insert({_id: -1, not_id: 0}));
-// TODO SERVER-42511 remove this once the switch to MR in agg is complete.
-const usingAgg = st.getDB(dbName)
- .adminCommand({getParameter: 1, internalQueryUseAggMapReduce: 1})
- .internalQueryUseAggMapReduce;
-const expectedError = usingAgg ? 31313 : 31311;
-
-// TODO SERVER-44461: Allow running this test when using non-agg MR.
-if (!usingAgg) {
- jsTestLog("Skipping test case. See SERVER-44461.");
- st.stop();
- return;
-}
-
// Through the same mongos, verify that mapReduce fails since the output collection is not sharded
// by _id.
assert.commandFailedWithCode(
st.s.getDB(dbName).runCommand(
{mapReduce: "coll", map: map, reduce: reduce, out: {merge: outColl, sharded: true}}),
- expectedError);
+ 31313);
assert.commandFailedWithCode(
st.s.getDB(dbName).runCommand(
{mapReduce: "coll", map: map, reduce: reduce, out: {reduce: outColl, sharded: true}}),
- expectedError);
+ 31313);
// Expect a similar failure through a stale mongos.
assert.commandFailedWithCode(
staleMongos1.getDB(dbName).runCommand(
{mapReduce: "coll", map: map, reduce: reduce, out: {merge: outColl, sharded: true}}),
- expectedError);
+ 31313);
// Mode replace is unique, since the legacy mapReduce will unconditionally drop and reshard the
// target collection on _id.
-if (usingAgg) {
- assert.commandFailedWithCode(
- st.s.getDB(dbName).runCommand(
- {mapReduce: "coll", map: map, reduce: reduce, out: {replace: outColl, sharded: true}}),
- expectedError);
-} else {
- assert.commandWorked(st.s.getDB(dbName).runCommand(
- {mapReduce: "coll", map: map, reduce: reduce, out: {replace: outColl, sharded: true}}));
-}
+assert.commandFailedWithCode(
+ st.s.getDB(dbName).runCommand(
+ {mapReduce: "coll", map: map, reduce: reduce, out: {replace: outColl, sharded: true}}),
+ 31313);
function testAgainstValidShardedOutput(shardKey) {
// Drop and reshard the target collection.
@@ -133,7 +115,7 @@ testAgainstValidShardedOutput({_id: "hashed"});
assert.commandFailedWithCode(
st.s.getDB(dbName).runCommand(
{mapReduce: "coll", map: map, reduce: reduce, out: {merge: outColl, sharded: true}}),
- expectedError);
+ 31313);
// Run the same mapReduce through a stale mongos and expect it to fail as well. Make sure to
// leave at least one document in the target collection for the same reason as above.
@@ -141,7 +123,7 @@ testAgainstValidShardedOutput({_id: "hashed"});
assert.commandFailedWithCode(
staleMongos1.getDB(dbName).runCommand(
{mapReduce: "coll", map: map, reduce: reduce, out: {merge: outColl, sharded: true}}),
- expectedError);
+ 31313);
})();
st.stop();
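The 31313 failures above all stem from the output collection being sharded on a key other than _id. The test's testAgainstValidShardedOutput helper exercises the working configuration; as a hedged sketch of that happy path (it reuses the test's names and presumably mirrors what the helper does):

// Once the target collection is sharded on _id, sharded merge output is accepted.
st.s.getDB(dbName).getCollection(outColl).drop();
assert.commandWorked(
    st.s.adminCommand({shardCollection: dbName + "." + outColl, key: {_id: 1}}));
assert.commandWorked(st.s.getDB(dbName).runCommand(
    {mapReduce: "coll", map: map, reduce: reduce, out: {merge: outColl, sharded: true}}));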
diff --git a/jstests/sharding/map_reduce_invalid_result_set.js b/jstests/sharding/map_reduce_invalid_result_set.js
index 40fc2dfa392..b2959be3548 100644
--- a/jstests/sharding/map_reduce_invalid_result_set.js
+++ b/jstests/sharding/map_reduce_invalid_result_set.js
@@ -1,5 +1,6 @@
// Tests that mapReduce commands fail if the result set does not fit into a single batch.
// @tags: [
+// requires_fcv_44,
// uses_map_reduce_with_temp_collections,
// does_not_support_stepdowns,
// ]
@@ -7,11 +8,6 @@
"use strict";
-// This test assumes we are running against the new version of mapReduce.
-if (TestData.setParameters.internalQueryUseAggMapReduce != 1) {
- return;
-}
-
const st = new ShardingTest({shards: 2});
const testDB = st.getDB("test");
const coll = "map_reduce_invalid_result_set";
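For readers who want the failure mode in isolation: an inline mapReduce must return its entire result set in a single reply, so results larger than the 16MB BSON limit are rejected. The following is a rough, hypothetical sketch of such a setup; the sizes and names are illustrative and no specific error code is asserted, since none is given by this patch.

const bigColl = db.mr_big_results_demo;  // hypothetical collection
bigColl.drop();
const padding = "x".repeat(1024 * 1024);  // roughly 1MB per emitted value

const bulk = bigColl.initializeUnorderedBulkOp();
for (let i = 0; i < 20; i++) {
    bulk.insert({_id: i, payload: padding});
}
assert.commandWorked(bulk.execute());

// ~20MB of distinct keys and values cannot fit into a single inline reply.
assert.commandFailed(db.runCommand({
    mapReduce: bigColl.getName(),
    map: function() {
        emit(this._id, this.payload);
    },
    reduce: function(key, values) {
        return values[0];
    },
    out: {inline: 1}
}));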
diff --git a/jstests/sharding/mrShardedOutput.js b/jstests/sharding/mrShardedOutput.js
deleted file mode 100644
index 53332b3c13d..00000000000
--- a/jstests/sharding/mrShardedOutput.js
+++ /dev/null
@@ -1,135 +0,0 @@
-// This test runs map reduce from a sharded input collection and outputs it to a sharded collection.
-// The test is done in 2 passes - the first pass runs the map reduce and outputs it to a
-// non-existing collection. The second pass runs map reduce with the collection input twice the size
-// of the first and outputs it to the new sharded collection created in the first pass.
-(function() {
-"use strict";
-
-const st = new ShardingTest({shards: 2, other: {chunkSize: 1}});
-
-const config = st.getDB("config");
-st.adminCommand({enablesharding: "test"});
-st.ensurePrimaryShard("test", st.shard1.shardName);
-st.adminCommand({shardcollection: "test.foo", key: {"a": 1}});
-
-const testDB = st.getDB("test");
-
-function map2() {
- emit(this.i, {count: 1, y: this.y});
-}
-function reduce2(key, values) {
- return values[0];
-}
-
-let numDocs = 0;
-const numBatch = 5000;
-const str = new Array(1024).join('a');
-
-// Pre split now so we don't have to balance the chunks later.
-// M/R is strange in that it chooses the output shards based on currently sharded
-// collections in the database. The upshot is that we need a sharded collection on
-// both shards in order to ensure M/R will output to two shards.
-st.adminCommand({split: 'test.foo', middle: {a: numDocs + numBatch / 2}});
-st.adminCommand({moveChunk: 'test.foo', find: {a: numDocs}, to: st.shard0.shardName});
-
-// Add some more data for input so that chunks will get split further
-for (let splitPoint = 0; splitPoint < numBatch; splitPoint += 400) {
- testDB.adminCommand({split: 'test.foo', middle: {a: splitPoint}});
-}
-
-let bulk = testDB.foo.initializeUnorderedBulkOp();
-for (let i = 0; i < numBatch; ++i) {
- bulk.insert({a: numDocs + i, y: str, i: numDocs + i});
-}
-assert.commandWorked(bulk.execute());
-
-numDocs += numBatch;
-
-// Do the MapReduce step
-jsTest.log("Setup OK: count matches (" + numDocs + ") -- Starting MapReduce");
-let res = testDB.foo.mapReduce(map2, reduce2, {out: {replace: "mrShardedOut", sharded: true}});
-jsTest.log("MapReduce results:" + tojson(res));
-
-let reduceOutputCount = res.counts.output;
-assert.eq(numDocs,
- reduceOutputCount,
- "MapReduce FAILED: res.counts.output = " + reduceOutputCount + ", should be " + numDocs);
-
-jsTest.log("Checking that all MapReduce output documents are in output collection");
-let outColl = testDB["mrShardedOut"];
-let outCollCount = outColl.find().itcount();
-assert.eq(numDocs,
- outCollCount,
- "MapReduce FAILED: outColl.find().itcount() = " + outCollCount + ", should be " +
- numDocs + ": this may happen intermittently until resolution of SERVER-3627");
-
-// Make sure it's sharded and split
-let newNumChunks = config.chunks.count({ns: testDB.mrShardedOut._fullName});
-assert.gt(
- newNumChunks, 1, "Sharding FAILURE: " + testDB.mrShardedOut._fullName + " has only 1 chunk");
-
-// Check that there are no "jumbo" chunks.
-const objSize = Object.bsonsize(testDB.mrShardedOut.findOne());
-const docsPerChunk = 1024 * 1024 / objSize * 1.1; // 1MB chunk size + allowance
-
-config.chunks.find({ns: testDB.mrShardedOut.getFullName()}).forEach(function(chunkDoc) {
- const count =
- testDB.mrShardedOut.find({_id: {$gte: chunkDoc.min._id, $lt: chunkDoc.max._id}}).itcount();
- assert.lte(count, docsPerChunk, 'Chunk has too many docs: ' + tojson(chunkDoc));
-});
-
-// Check that chunks for the newly created sharded output collection are well distributed.
-const shard0Chunks =
- config.chunks.find({ns: testDB.mrShardedOut._fullName, shard: st.shard0.shardName}).count();
-const shard1Chunks =
- config.chunks.find({ns: testDB.mrShardedOut._fullName, shard: st.shard1.shardName}).count();
-assert.lte(Math.abs(shard0Chunks - shard1Chunks), 1);
-
-jsTest.log('Starting second pass');
-
-st.adminCommand({split: 'test.foo', middle: {a: numDocs + numBatch / 2}});
-st.adminCommand({moveChunk: 'test.foo', find: {a: numDocs}, to: st.shard0.shardName});
-
-// Add some more data for input so that chunks will get split further
-for (let splitPoint = 0; splitPoint < numBatch; splitPoint += 400) {
- testDB.adminCommand({split: 'test.foo', middle: {a: numDocs + splitPoint}});
-}
-
-bulk = testDB.foo.initializeUnorderedBulkOp();
-for (let i = 0; i < numBatch; ++i) {
- bulk.insert({a: numDocs + i, y: str, i: numDocs + i});
-}
-assert.commandWorked(bulk.execute());
-numDocs += numBatch;
-
-// Do the MapReduce step
-jsTest.log("Setup OK: count matches (" + numDocs + ") -- Starting MapReduce");
-res = testDB.foo.mapReduce(map2, reduce2, {out: {replace: "mrShardedOut", sharded: true}});
-jsTest.log("MapReduce results:" + tojson(res));
-
-reduceOutputCount = res.counts.output;
-assert.eq(numDocs,
- reduceOutputCount,
- "MapReduce FAILED: res.counts.output = " + reduceOutputCount + ", should be " + numDocs);
-
-jsTest.log("Checking that all MapReduce output documents are in output collection");
-outColl = testDB.mrShardedOut;
-outCollCount = outColl.find().itcount();
-assert.eq(numDocs,
- outCollCount,
- "MapReduce FAILED: outColl.find().itcount() = " + outCollCount + ", should be " +
- numDocs + ": this may happen intermittently until resolution of SERVER-3627");
-
-// Make sure it's sharded and split
-newNumChunks = config.chunks.count({ns: testDB.mrShardedOut._fullName});
-assert.gt(
- newNumChunks, 1, "Sharding FAILURE: " + testDB.mrShardedOut._fullName + " has only 1 chunk");
-
-config.chunks.find({ns: testDB.mrShardedOut.getFullName()}).forEach(function(chunkDoc) {
- const count =
- testDB.mrShardedOut.find({_id: {$gte: chunkDoc.min._id, $lt: chunkDoc.max._id}}).itcount();
- assert.lte(count, docsPerChunk, 'Chunk has too many docs: ' + tojson(chunkDoc));
-});
-
-st.stop();
-}());
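The deleted mrShardedOutput.js was largely concerned with how the legacy implementation split and balanced the sharded output collection; the chunk inspection it performed against the config database is still a useful pattern on its own. A condensed, hedged sketch of that pattern follows (the namespace is invented; it assumes a two-shard ShardingTest named `st`):

const configDB = st.s.getDB("config");
const outputNs = "test.mrShardedOut";  // illustrative namespace

// The output collection should have been split into more than one chunk...
const totalChunks = configDB.chunks.count({ns: outputNs});
assert.gt(totalChunks, 1, "expected the sharded output collection to be split");

// ...and those chunks should be spread roughly evenly across the two shards.
const onShard0 = configDB.chunks.count({ns: outputNs, shard: st.shard0.shardName});
const onShard1 = configDB.chunks.count({ns: outputNs, shard: st.shard1.shardName});
assert.lte(Math.abs(onShard0 - onShard1), 1);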
diff --git a/jstests/sharding/mrShardedOutputAuth.js b/jstests/sharding/mrShardedOutputAuth.js
index abec3aa607f..f406794aceb 100644
--- a/jstests/sharding/mrShardedOutputAuth.js
+++ b/jstests/sharding/mrShardedOutputAuth.js
@@ -19,6 +19,7 @@ adminDb.createUser({user: "user", pwd: "pass", roles: jsTest.adminUserRoles});
const authenticatedConn = new Mongo(mongos.host);
authenticatedConn.getDB('admin').auth("user", "pass");
adminDb = authenticatedConn.getDB("admin");
+assert.commandWorked(adminDb.adminCommand({enablesharding: "output"}));
const configDb = authenticatedConn.getDB("config");
diff --git a/jstests/sharding/mr_noscripting.js b/jstests/sharding/mr_noscripting.js
index c48055aaba7..bfca188f0b0 100644
--- a/jstests/sharding/mr_noscripting.js
+++ b/jstests/sharding/mr_noscripting.js
@@ -1,4 +1,5 @@
// Tests that running mapReduce does not crash anything if the shards have scripting disabled.
+// @tags: [requires_fcv_44]
(function() {
"use strict";
const shardOpts = [
@@ -27,8 +28,7 @@ const reduceFn = function(key, values) {
// TODO SERVER-42511 Remove the usage of internalQueryUseAggMapReduce.
assert.commandFailedWithCode(
- testDB.runCommand({mapreduce: 'bar', map: mapFn, reduce: reduceFn, out: {inline: 1}}),
- TestData.setParameters.internalQueryUseAggMapReduce ? 31264 : 16149);
+ testDB.runCommand({mapreduce: 'bar', map: mapFn, reduce: reduceFn, out: {inline: 1}}), 31264);
st.stop();
}());
diff --git a/jstests/sharding/mr_output_options.js b/jstests/sharding/mr_output_options.js
index fb992f219cb..46f86578550 100644
--- a/jstests/sharding/mr_output_options.js
+++ b/jstests/sharding/mr_output_options.js
@@ -9,6 +9,7 @@ const testDB = st.getDB("mrShard");
const inputColl = testDB.srcSharded;
st.adminCommand({enableSharding: testDB.getName()});
+st.adminCommand({enableSharding: "mrShardOtherDB"});
st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName);
const nDistinctKeys = 512;
@@ -24,16 +25,6 @@ function seedCollection() {
assert.commandWorked(bulk.execute());
}
-function verifyOutput(mrOutput) {
- assert.commandWorked(mrOutput);
- const nTotalDocs = nDistinctKeys * nValuesPerKey;
- assert.eq(mrOutput.counts.input, nTotalDocs, `input count is wrong: ${tojson(mrOutput)}`);
- assert.eq(mrOutput.counts.emit, nTotalDocs, `emit count is wrong: ${tojson(mrOutput)}`);
- assert.gt(
- mrOutput.counts.reduce, nValuesPerKey - 1, `reduce count is wrong: ${tojson(mrOutput)}`);
- assert.eq(mrOutput.counts.output, nDistinctKeys, `output count is wrong: ${tojson(mrOutput)}`);
-}
-
function mapFn() {
emit(this.key, 1);
}
@@ -54,36 +45,37 @@ function testMrOutput({inputSharded, outputSharded}) {
}
function runMRTestWithOutput(outOptions) {
- verifyOutput(inputColl.mapReduce(mapFn, reduceFn, outOptions));
+ assert.commandWorked(inputColl.mapReduce(mapFn, reduceFn, outOptions));
}
- runMRTestWithOutput({out: {merge: outputColl.getName(), sharded: outputSharded}});
+ runMRTestWithOutput(
+ {out: Object.assign({merge: outputColl.getName()}, outputSharded ? {sharded: true} : {})});
assert.commandWorked(outputColl.remove({}));
- runMRTestWithOutput({out: {reduce: outputColl.getName(), sharded: outputSharded}});
+ runMRTestWithOutput(
+ {out: Object.assign({reduce: outputColl.getName()}, outputSharded ? {sharded: true} : {})});
// Test the same thing using runCommand directly.
- verifyOutput(testDB.runCommand({
+ assert.commandWorked(testDB.runCommand({
mapReduce: inputColl.getName(),
map: mapFn,
reduce: reduceFn,
- out: {reduce: outputColl.getName(), sharded: outputSharded}
+ out: Object.assign({reduce: outputColl.getName()}, outputSharded ? {sharded: true} : {})
}));
- const out = inputColl.mapReduce(mapFn, reduceFn, {out: {inline: 1}});
- verifyOutput(out);
- assert(out.results != 'undefined', "no results for inline");
+ const output = inputColl.mapReduce(mapFn, reduceFn, {out: {inline: 1}});
+ assert.commandWorked(output);
+    assert(output.results !== undefined, "no results for inline");
if (!outputSharded) {
// We don't support replacing an existing sharded collection.
runMRTestWithOutput(outputColl.getName());
- runMRTestWithOutput({out: {replace: outputColl.getName(), sharded: outputSharded}});
- runMRTestWithOutput(
- {out: {replace: outputColl.getName(), sharded: outputSharded, db: "mrShardOtherDB"}});
- verifyOutput(testDB.runCommand({
+ runMRTestWithOutput({out: {replace: outputColl.getName()}});
+ runMRTestWithOutput({out: {replace: outputColl.getName(), db: "mrShardOtherDB"}});
+ assert.commandWorked(testDB.runCommand({
mapReduce: inputColl.getName(),
map: mapFn,
reduce: reduceFn,
- out: {replace: outputColl.getName(), sharded: outputSharded}
+ out: {replace: outputColl.getName()}
}));
}
}
@@ -94,9 +86,9 @@ testMrOutput({inputSharded: true, outputSharded: false});
testMrOutput({inputSharded: true, outputSharded: true});
// Ensure that mapReduce with a sharded input collection can accept the collation option.
-let out = inputColl.mapReduce(mapFn, reduceFn, {out: {inline: 1}, collation: {locale: "en_US"}});
-verifyOutput(out);
-assert(out.results != 'undefined', "no results for inline with collation");
+let output = inputColl.mapReduce(mapFn, reduceFn, {out: {inline: 1}, collation: {locale: "en_US"}});
+assert.commandWorked(output);
+assert(output.results !== undefined, "no results for inline with collation");
assert.commandWorked(inputColl.remove({}));
@@ -105,13 +97,12 @@ assert.commandWorked(inputColl.remove({}));
// collation is passed along to the shards.
assert.eq(inputColl.find().itcount(), 0);
assert.commandWorked(inputColl.insert({key: 0, value: 0, str: "FOO"}));
-out = inputColl.mapReduce(
+output = inputColl.mapReduce(
mapFn,
reduceFn,
{out: {inline: 1}, query: {str: "foo"}, collation: {locale: "en_US", strength: 2}});
-assert.commandWorked(out);
-assert.eq(out.counts.input, 1);
-assert.eq(out.results, [{_id: 0, value: 1}]);
+assert.commandWorked(output);
+assert.eq(output.results, [{_id: 0, value: 1}]);
st.stop();
})();
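The Object.assign calls above build the `out` document so that `sharded: true` only appears when the output collection really is sharded; otherwise no `sharded` field is sent at all. A tiny sketch of what the pattern produces, with illustrative values:

const outName = "targetColl";  // illustrative name
let outputSharded = true;
printjson(Object.assign({merge: outName}, outputSharded ? {sharded: true} : {}));
// => { "merge" : "targetColl", "sharded" : true }

outputSharded = false;
printjson(Object.assign({merge: outName}, outputSharded ? {sharded: true} : {}));
// => { "merge" : "targetColl" }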
diff --git a/jstests/sharding/read_pref_cmd.js b/jstests/sharding/read_pref_cmd.js
index 35f76d1d567..47b88405dab 100644
--- a/jstests/sharding/read_pref_cmd.js
+++ b/jstests/sharding/read_pref_cmd.js
@@ -1,7 +1,10 @@
/**
* This test is labeled resource intensive because its total io_write is 47MB compared to a median
* of 5MB across all sharding tests in wiredTiger.
- * @tags: [resource_intensive]
+ * @tags: [
+ * resource_intensive,
+ * requires_fcv_44,
+ * ]
*/
load("jstests/replsets/rslib.js");
@@ -130,30 +133,40 @@ var testReadPreference = function(conn, hostList, isMongos, mode, tagSets, expec
};
// Test inline mapReduce on sharded collection.
- // Note that in sharded map reduce, it will output the result in a temp collection
- // even if out is inline.
if (isMongos) {
cmdTest({mapreduce: 'user', map: mapFunc, reduce: reduceFunc, out: {inline: 1}},
false,
- formatProfileQuery({mapreduce: 'user', shardedFirstPass: true}));
+ formatProfileQuery({aggregate: 'user'}));
}
// Test inline mapReduce on unsharded collection.
- cmdTest({mapreduce: 'mrIn', map: mapFunc, reduce: reduceFunc, out: {inline: 1}},
- true,
- formatProfileQuery({mapreduce: 'mrIn', 'out.inline': 1}));
+ if (isMongos) {
+ cmdTest({mapreduce: 'mrIn', map: mapFunc, reduce: reduceFunc, out: {inline: 1}},
+ true,
+ formatProfileQuery({aggregate: 'mrIn'}));
+ } else {
+ cmdTest({mapreduce: 'mrIn', map: mapFunc, reduce: reduceFunc, out: {inline: 1}},
+ true,
+ formatProfileQuery({mapreduce: 'mrIn', 'out.inline': 1}));
+ }
// Test non-inline mapReduce on sharded collection.
if (isMongos) {
cmdTest({mapreduce: 'user', map: mapFunc, reduce: reduceFunc, out: {replace: 'mrOut'}},
false,
- formatProfileQuery({mapreduce: 'user', shardedFirstPass: true}));
+ formatProfileQuery({aggregate: 'user'}));
}
// Test non-inline mapReduce on unsharded collection.
- cmdTest({mapreduce: 'mrIn', map: mapFunc, reduce: reduceFunc, out: {replace: 'mrOut'}},
- false,
- formatProfileQuery({mapreduce: 'mrIn', 'out.replace': 'mrOut'}));
+ if (isMongos) {
+ cmdTest({mapreduce: 'mrIn', map: mapFunc, reduce: reduceFunc, out: {replace: 'mrOut'}},
+ false,
+                formatProfileQuery({aggregate: 'mrIn'}));
+ } else {
+ cmdTest({mapreduce: 'mrIn', map: mapFunc, reduce: reduceFunc, out: {replace: 'mrOut'}},
+ false,
+ formatProfileQuery({mapreduce: 'mrIn', 'out.replace': 'mrOut'}));
+ }
// Test other commands that can be sent to secondary.
cmdTest({count: 'user'}, true, formatProfileQuery({count: 'user'}));
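The switch from formatProfileQuery({mapreduce: ...}) to formatProfileQuery({aggregate: ...}) in the mongos branches reflects that the shards now receive and profile the operation as an aggregate command rather than a mapreduce command. As a hedged sketch of what that looks like when querying the profiler directly (the connection variable is a stand-in and profiling level 2 is assumed on the node being inspected; the test's own helper wraps something along these lines):

// shardPrimaryConn is a stand-in for a connection to a profiled shard node.
const profiledDB = shardPrimaryConn.getDB("test");
const entry = profiledDB.system.profile.findOne({op: "command", "command.aggregate": "user"});
assert.neq(null, entry, "expected the translated mapReduce to be profiled as an aggregate");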
diff --git a/jstests/sharding/safe_secondary_reads_drop_recreate.js b/jstests/sharding/safe_secondary_reads_drop_recreate.js
index 0e8f28e12d6..dc7f395e6b9 100644
--- a/jstests/sharding/safe_secondary_reads_drop_recreate.js
+++ b/jstests/sharding/safe_secondary_reads_drop_recreate.js
@@ -238,7 +238,7 @@ let testCases = {
assert.commandWorked(res);
assert.eq(0, res.results.length, tojson(res));
},
- behavior: "targetsPrimaryUsesConnectionVersioning"
+ behavior: "versioned"
},
mergeChunks: {skip: "primary only"},
moveChunk: {skip: "primary only"},
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
index 0c28fd3875c..3eb37a11fe8 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
@@ -13,6 +13,8 @@
* - setUp: A function that does any set up (inserts, etc.) needed to check the command's results.
* - command: The command to run, with all required options. Note, this field is also used to
* identify the operation in the system profiler.
+ * - filter: [OPTIONAL] When specified, used instead of 'command' to identify the operation in the
+ * system profiler.
* - checkResults: A function that asserts whether the command should succeed or fail. If the
* command is expected to succeed, the function should assert the expected results.
* *when the range has not been deleted from the donor.*
@@ -264,6 +266,36 @@ let testCases = {
},
out: {inline: 1}
},
+ filter: {
+ aggregate: coll,
+ "pipeline": [
+ {
+ "$project": {
+ "emits": {
+ "$_internalJsEmit": {
+ "eval":
+ "function() {\n emit(this.x, 1);\n }",
+ "this": "$$ROOT"
+ }
+ },
+ "_id": false
+ }
+ },
+ {"$unwind": {"path": "$emits"}},
+ {
+ "$group": {
+ "_id": "$emits.k",
+ "value": {
+ "$_internalJsReduce": {
+ "data": "$emits",
+ "eval":
+ "function(key, values) {\n return Array.sum(values);\n }"
+ }
+ }
+ }
+ }
+ ],
+ },
checkResults: function(res) {
assert.commandWorked(res);
assert.eq(1, res.results.length, tojson(res));
@@ -271,9 +303,12 @@ let testCases = {
assert.eq(2, res.results[0].value, tojson(res));
},
checkAvailableReadConcernResults: function(res) {
- assert.commandFailed(res);
+ assert.commandWorked(res);
+ assert.eq(1, res.results.length, tojson(res));
+ assert.eq(1, res.results[0]._id, tojson(res));
+ assert.eq(2, res.results[0].value, tojson(res));
},
- behavior: "targetsPrimaryUsesConnectionVersioning"
+ behavior: "versioned"
},
mergeChunks: {skip: "primary only"},
moveChunk: {skip: "primary only"},
@@ -442,19 +477,19 @@ for (let command of commands) {
test.checkAvailableReadConcernResults(availableReadConcernRes);
let defaultReadConcernRes = staleMongos.getDB(db).runCommand(cmdReadPrefSecondary);
- if (command === 'mapReduce') {
- // mapReduce is always sent to a primary, which defaults to 'local' readConcern
- test.checkResults(defaultReadConcernRes);
- } else {
- // Secondaries default to the 'available' readConcern
- test.checkAvailableReadConcernResults(defaultReadConcernRes);
- }
+ // Secondaries default to the 'available' readConcern
+ test.checkAvailableReadConcernResults(defaultReadConcernRes);
let localReadConcernRes = staleMongos.getDB(db).runCommand(cmdPrefSecondaryConcernLocal);
test.checkResults(localReadConcernRes);
// Build the query to identify the operation in the system profiler.
- let commandProfile = buildCommandProfile(test.command, true /* sharded */);
+ let filter = test.command;
+ if (test.filter != undefined) {
+ filter = test.filter;
+ }
+
+ let commandProfile = buildCommandProfile(filter, true /* sharded */);
if (test.behavior === "unshardedOnly") {
// Check that neither the donor nor recipient shard secondaries received either request.
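The new `filter` field added above is the aggregation that this mapReduce is rewritten into: a $project that evaluates the map function through $_internalJsEmit, a $unwind over the emitted key/value pairs, and a $group that folds values together with the $_internalJsReduce accumulator. Purely as a sketch (collection name and data invented, pipeline copied from the filter above; depending on the server, these internal expressions may only be accepted from internal clients), running the same shape by hand should mirror the inline mapReduce result:

const demoColl = db.mr_translation_demo;
demoColl.drop();
assert.commandWorked(demoColl.insert([{x: 1}, {x: 1}]));

const results = demoColl.aggregate([
    {
        $project: {
            emits: {
                $_internalJsEmit: {
                    eval: "function() { emit(this.x, 1); }",
                    this: "$$ROOT"
                }
            },
            _id: false
        }
    },
    {$unwind: {path: "$emits"}},
    {
        $group: {
            _id: "$emits.k",
            value: {
                $_internalJsReduce: {
                    data: "$emits",
                    eval: "function(key, values) { return Array.sum(values); }"
                }
            }
        }
    }
]).toArray();

// Both source documents emit the key 1, so a single reduced group is expected.
assert.eq([{_id: 1, value: 2}], results);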
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
index 7a2e4278a04..7f7efb353e4 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
@@ -243,7 +243,7 @@ let testCases = {
assert.eq(1, res.results[0]._id, tojson(res));
assert.eq(2, res.results[0].value, tojson(res));
},
- behavior: "targetsPrimaryUsesConnectionVersioning"
+ behavior: "versioned"
},
mergeChunks: {skip: "primary only"},
moveChunk: {skip: "primary only"},
diff --git a/jstests/sharding/track_unsharded_collections_check_shard_version.js b/jstests/sharding/track_unsharded_collections_check_shard_version.js
index 9b37756978c..bc684522b10 100644
--- a/jstests/sharding/track_unsharded_collections_check_shard_version.js
+++ b/jstests/sharding/track_unsharded_collections_check_shard_version.js
@@ -211,8 +211,6 @@ let testCases = {
logRotate: {skip: "executes locally on mongos (not sent to any remote node)"},
logout: {skip: "not on a user database"},
mapReduce: {
- // Uses connection versioning.
- whenNamespaceDoesNotExistFailsWith: ErrorCodes.NamespaceNotFound,
whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
command: collName => {
return {