author    David Storch <david.storch@10gen.com>  2017-02-10 11:52:18 -0500
committer David Storch <david.storch@10gen.com>  2017-03-13 09:46:14 -0400
commit    82b16740f8a66093b453a73a04b3b9bd00e5d7a0 (patch)
tree      62d156fc9676526ecbea19cd03ef7a293579c4df
parent    73f9e8b8a8422becf8694fe3d82c0e647dc71189 (diff)
SERVER-19758 add support for "executionStats" and "allPlansExecution" to agg explain
Like other explainable commands, aggregate can now be explained via the explain command, e.g. db.runCommand({explain: {aggregate: ...}, verbosity: "executionStats"}). The existing explain:true flag corresponds to "queryPlanner" verbosity and is still supported, but it cannot be specified when explaining aggregate via the explain command. Additional execution information is provided only in the $cursor section of the aggregation explain output; having aggregation stages themselves track and report execution info is left as future work.
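
As a rough illustration of the resulting interface, a minimal mongo shell sketch (the "orders" collection and the $match predicate are hypothetical, not taken from this patch):

    // New form: wrap aggregate in the explain command and choose a verbosity
    // ("queryPlanner", "executionStats", or "allPlansExecution").
    db.runCommand({
        explain: {aggregate: "orders", pipeline: [{$match: {status: "A"}}]},
        verbosity: "executionStats"
    });

    // Equivalent shell helper; execution info appears under the $cursor stage,
    // e.g. explain.stages[0].$cursor.executionStats.
    db.orders.explain("executionStats").aggregate([{$match: {status: "A"}}]);

    // Legacy form: the explain flag on aggregate itself, which behaves like
    // "queryPlanner" verbosity and remains supported.
    db.runCommand({aggregate: "orders", pipeline: [{$match: {status: "A"}}], explain: true});

    // Invalid: the explain:true flag cannot be combined with the explain command form.
    db.runCommand({
        explain: {aggregate: "orders", pipeline: [], explain: true},
        verbosity: "queryPlanner"
    });  // expected to fail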
-rw-r--r-- buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml | 2
-rw-r--r-- jstests/core/explain_agg_write_concern.js | 64
-rw-r--r-- jstests/core/views/views_aggregation.js | 42
-rw-r--r-- jstests/core/views/views_count.js | 20
-rw-r--r-- jstests/core/views/views_distinct.js | 17
-rw-r--r-- jstests/core/views/views_find.js | 17
-rw-r--r-- jstests/libs/override_methods/set_read_and_write_concerns.js | 11
-rw-r--r-- jstests/sharding/views.js | 67
-rw-r--r-- src/mongo/db/commands.cpp | 2
-rw-r--r-- src/mongo/db/commands.h | 2
-rw-r--r-- src/mongo/db/commands/SConscript | 7
-rw-r--r-- src/mongo/db/commands/count_cmd.cpp | 19
-rw-r--r-- src/mongo/db/commands/distinct.cpp | 16
-rw-r--r-- src/mongo/db/commands/explain_cmd.cpp | 9
-rw-r--r-- src/mongo/db/commands/find_and_modify.cpp | 2
-rw-r--r-- src/mongo/db/commands/find_cmd.cpp | 16
-rw-r--r-- src/mongo/db/commands/group_cmd.cpp | 2
-rw-r--r-- src/mongo/db/commands/pipeline_command.cpp | 539
-rw-r--r-- src/mongo/db/commands/run_aggregate.cpp | 511
-rw-r--r-- src/mongo/db/commands/run_aggregate.h | 53
-rw-r--r-- src/mongo/db/commands/write_commands/write_commands.cpp | 4
-rw-r--r-- src/mongo/db/pipeline/SConscript | 6
-rw-r--r-- src/mongo/db/pipeline/aggregation_request.cpp | 51
-rw-r--r-- src/mongo/db/pipeline/aggregation_request.h | 30
-rw-r--r-- src/mongo/db/pipeline/aggregation_request_test.cpp | 105
-rw-r--r-- src/mongo/db/pipeline/document_source.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/document_source.h | 19
-rw-r--r-- src/mongo/db/pipeline/document_source_bucket_auto.cpp | 9
-rw-r--r-- src/mongo/db/pipeline/document_source_bucket_auto.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_bucket_auto_test.cpp | 4
-rw-r--r-- src/mongo/db/pipeline/document_source_bucket_test.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_coll_stats.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_coll_stats.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_count_test.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_cursor.cpp | 9
-rw-r--r-- src/mongo/db/pipeline/document_source_cursor.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_facet.cpp | 6
-rw-r--r-- src/mongo/db/pipeline/document_source_facet.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_geo_near.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_geo_near.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_graph_lookup.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/document_source_graph_lookup.h | 6
-rw-r--r-- src/mongo/db/pipeline/document_source_group.cpp | 10
-rw-r--r-- src/mongo/db/pipeline/document_source_group.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_index_stats.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/document_source_index_stats.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_limit.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_limit.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_lookup.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/document_source_lookup.h | 6
-rw-r--r-- src/mongo/db/pipeline/document_source_match.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_match.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_merge_cursors.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/document_source_merge_cursors.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_mock.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_mock.h | 3
-rw-r--r-- src/mongo/db/pipeline/document_source_out.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_out.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_redact.cpp | 4
-rw-r--r-- src/mongo/db/pipeline/document_source_redact.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_replace_root.cpp | 4
-rw-r--r-- src/mongo/db/pipeline/document_source_sample.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_sample.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/document_source_sample_from_random_cursor.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_sample_test.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_single_document_transformation.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/document_source_single_document_transformation.h | 4
-rw-r--r-- src/mongo/db/pipeline/document_source_skip.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_skip.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_sort.cpp | 17
-rw-r--r-- src/mongo/db/pipeline/document_source_sort.h | 6
-rw-r--r-- src/mongo/db/pipeline/document_source_sort_by_count_test.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_tee_consumer.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/document_source_tee_consumer.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_unwind.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_unwind.h | 2
-rw-r--r-- src/mongo/db/pipeline/expression_context.cpp | 4
-rw-r--r-- src/mongo/db/pipeline/expression_context.h | 6
-rw-r--r-- src/mongo/db/pipeline/parsed_add_fields.h | 2
-rw-r--r-- src/mongo/db/pipeline/parsed_add_fields_test.cpp | 45
-rw-r--r-- src/mongo/db/pipeline/parsed_exclusion_projection.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/parsed_exclusion_projection.h | 2
-rw-r--r-- src/mongo/db/pipeline/parsed_exclusion_projection_test.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/parsed_inclusion_projection.cpp | 5
-rw-r--r-- src/mongo/db/pipeline/parsed_inclusion_projection.h | 5
-rw-r--r-- src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp | 36
-rw-r--r-- src/mongo/db/pipeline/pipeline.cpp | 4
-rw-r--r-- src/mongo/db/pipeline/pipeline.h | 7
-rw-r--r-- src/mongo/db/pipeline/pipeline_test.cpp | 11
-rw-r--r-- src/mongo/db/query/SConscript | 22
-rw-r--r-- src/mongo/db/query/count_request.cpp | 4
-rw-r--r-- src/mongo/db/query/count_request_test.cpp | 6
-rw-r--r-- src/mongo/db/query/explain.cpp | 68
-rw-r--r-- src/mongo/db/query/explain.h | 20
-rw-r--r-- src/mongo/db/query/explain_common.cpp | 75
-rw-r--r-- src/mongo/db/query/explain_common.h | 76
-rw-r--r-- src/mongo/db/query/explain_options.cpp | 93
-rw-r--r-- src/mongo/db/query/explain_options.h | 89
-rw-r--r-- src/mongo/db/query/explain_options_test.cpp | 95
-rw-r--r-- src/mongo/db/query/find.cpp | 3
-rw-r--r-- src/mongo/db/query/parsed_distinct.cpp | 3
-rw-r--r-- src/mongo/db/query/parsed_distinct_test.cpp | 10
-rw-r--r-- src/mongo/db/query/query_request.cpp | 3
-rw-r--r-- src/mongo/db/query/query_request_test.cpp | 14
-rw-r--r-- src/mongo/db/views/resolved_view.cpp | 62
-rw-r--r-- src/mongo/db/views/resolved_view.h | 8
-rw-r--r-- src/mongo/db/views/resolved_view_test.cpp | 111
-rw-r--r-- src/mongo/db/views/view_sharding_check.cpp | 8
-rw-r--r-- src/mongo/db/views/view_sharding_check.h | 7
-rw-r--r-- src/mongo/dbtests/documentsourcetests.cpp | 35
-rw-r--r-- src/mongo/dbtests/query_stage_multiplan.cpp | 2
-rw-r--r-- src/mongo/s/commands/cluster_aggregate.cpp | 142
-rw-r--r-- src/mongo/s/commands/cluster_aggregate.h | 15
-rw-r--r-- src/mongo/s/commands/cluster_count_cmd.cpp | 41
-rw-r--r-- src/mongo/s/commands/cluster_explain.cpp | 8
-rw-r--r-- src/mongo/s/commands/cluster_explain.h | 6
-rw-r--r-- src/mongo/s/commands/cluster_explain_cmd.cpp | 11
-rw-r--r-- src/mongo/s/commands/cluster_find_and_modify_cmd.cpp | 2
-rw-r--r-- src/mongo/s/commands/cluster_find_cmd.cpp | 32
-rw-r--r-- src/mongo/s/commands/cluster_pipeline_cmd.cpp | 45
-rw-r--r-- src/mongo/s/commands/cluster_write_cmd.cpp | 2
-rw-r--r-- src/mongo/s/commands/commands_public.cpp | 40
-rw-r--r-- src/mongo/s/commands/strategy.cpp | 4
-rw-r--r-- src/mongo/s/commands/strategy.h | 4
-rw-r--r-- src/mongo/s/query/cluster_find.h | 2
-rw-r--r-- src/mongo/shell/explainable.js | 15
127 files changed, 1963 insertions, 1157 deletions
diff --git a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
index 149ea021dc3..f624c266b0c 100644
--- a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
@@ -14,6 +14,8 @@ selector:
- jstests/sharding/covered_shard_key_indexes.js
# Can un-blacklist once SERVER-27701 backport (BACKPORT-483) is finished and in 3.4 release
- jstests/sharding/linearizable_read_concern.js
+ # Enable when 3.6 becomes last-stable.
+ - jstests/sharding/views.js
executor:
js_test:
diff --git a/jstests/core/explain_agg_write_concern.js b/jstests/core/explain_agg_write_concern.js
new file mode 100644
index 00000000000..3a70040a525
--- /dev/null
+++ b/jstests/core/explain_agg_write_concern.js
@@ -0,0 +1,64 @@
+/**
+ * Tests related to the aggregate command's behavior with writeConcern, and with writeConcern combined with explain.
+ */
+(function() {
+ "use strict";
+
+ let coll = db[jsTest.name()];
+ let outColl = db[jsTest.name() + "_out"];
+ coll.drop();
+ outColl.drop();
+
+ assert.writeOK(coll.insert({_id: 1}));
+
+ // Agg should accept writeConcern if the last stage is an $out.
+ assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$out: outColl.getName()}],
+ cursor: {},
+ writeConcern: {w: 1}
+ }));
+ assert.eq(1, outColl.find().itcount());
+ outColl.drop();
+
+ // Agg should reject writeConcern if the last stage is not an $out.
+ assert.commandFailed(
+ db.runCommand({aggregate: coll.getName(), pipeline: [], cursor: {}, writeConcern: {w: 1}}));
+
+ // Agg should succeed if the last stage is an $out and the explain flag is set.
+ assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$out: outColl.getName()}],
+ explain: true,
+ }));
+ assert.eq(0, outColl.find().itcount());
+ outColl.drop();
+
+ // Agg should fail if the last stage is an $out and both the explain flag and writeConcern are
+ // set.
+ assert.commandFailed(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$out: outColl.getName()}],
+ explain: true,
+ writeConcern: {w: 1}
+ }));
+
+ // Agg explain helpers with all verbosities (or verbosity omitted) should fail if the last stage
+ // is an $out and writeConcern is set.
+ assert.throws(function() {
+ coll.explain().aggregate([{$out: outColl.getName()}], {writeConcern: {w: 1}});
+ });
+ assert.throws(function() {
+ coll.explain("queryPlanner").aggregate([{$out: outColl.getName()}], {writeConcern: {w: 1}});
+ });
+ assert.throws(function() {
+ coll.explain("executionStats").aggregate([{$out: outColl.getName()}], {
+ writeConcern: {w: 1}
+ });
+ });
+ assert.throws(function() {
+ coll.explain("allPlansExecution").aggregate([{$out: outColl.getName()}], {
+ writeConcern: {w: 1}
+ });
+ });
+}());
diff --git a/jstests/core/views/views_aggregation.js b/jstests/core/views/views_aggregation.js
index 4a7e141a49f..a0c5392deb4 100644
--- a/jstests/core/views/views_aggregation.js
+++ b/jstests/core/views/views_aggregation.js
@@ -119,6 +119,48 @@
{aggregate: "largeView", pipeline: [{$sort: {x: -1}}], cursor: {}, allowDiskUse: true}),
"Expected aggregate to succeed since 'allowDiskUse' was specified");
+ // Test explain modes on a view.
+ let explainPlan = assert.commandWorked(
+ viewsDB.popSortedView.explain("queryPlanner").aggregate([{$limit: 1}, {$match: {pop: 3}}]));
+ assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll");
+ assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+
+ explainPlan = assert.commandWorked(viewsDB.popSortedView.explain("executionStats")
+ .aggregate([{$limit: 1}, {$match: {pop: 3}}]));
+ assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll");
+ assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+ assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 1);
+ assert(!explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
+
+ explainPlan = assert.commandWorked(viewsDB.popSortedView.explain("allPlansExecution")
+ .aggregate([{$limit: 1}, {$match: {pop: 3}}]));
+ assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll");
+ assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+ assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 1);
+ assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
+
+ // Passing a value of true for the explain option to the aggregation command, without using the
+ // shell explain helper, should continue to work.
+ explainPlan = assert.commandWorked(
+ viewsDB.popSortedView.aggregate([{$limit: 1}, {$match: {pop: 3}}], {explain: true}));
+ assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll");
+ assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+
+ // Test allPlansExecution explain mode on the base collection.
+ explainPlan = assert.commandWorked(
+ viewsDB.coll.explain("allPlansExecution").aggregate([{$limit: 1}, {$match: {pop: 3}}]));
+ assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll");
+ assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+ printjson(explainPlan.stages[0].$cursor.executionStats);
+ assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 5);
+ assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
+
+ // The explain:true option should not work when paired with the explain shell helper.
+ assert.throws(function() {
+ viewsDB.popSortedView.explain("executionStats")
+ .aggregate([{$limit: 1}, {$match: {pop: 3}}], {explain: true});
+ });
+
// The remaining tests involve $lookup and $graphLookup. We cannot lookup into sharded
// collections, so skip these tests if running in a sharded configuration.
let isMasterResponse = assert.commandWorked(viewsDB.runCommand("isMaster"));
diff --git a/jstests/core/views/views_count.js b/jstests/core/views/views_count.js
index ddf97ce0bb2..932021e899f 100644
--- a/jstests/core/views/views_count.js
+++ b/jstests/core/views/views_count.js
@@ -53,6 +53,26 @@
assert.commandWorked(explainPlan);
assert.eq(explainPlan["stages"][0]["$cursor"]["queryPlanner"]["namespace"], "views_count.coll");
+ // Count with explicit explain modes works on a view.
+ explainPlan =
+ assert.commandWorked(lessThanSevenView.explain("queryPlanner").count({x: {$gte: 5}}));
+ assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_count.coll");
+ assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+
+ explainPlan =
+ assert.commandWorked(lessThanSevenView.explain("executionStats").count({x: {$gte: 5}}));
+ assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_count.coll");
+ assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+ assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2);
+ assert(!explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
+
+ explainPlan =
+ assert.commandWorked(lessThanSevenView.explain("allPlansExecution").count({x: {$gte: 5}}));
+ assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_count.coll");
+ assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+ assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2);
+ assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
+
// Count with hint works on a view.
assert.commandWorked(viewsDB.runCommand({count: "identityView", hint: "_id_"}));
diff --git a/jstests/core/views/views_distinct.js b/jstests/core/views/views_distinct.js
index b53a8e9e871..0a0b6002b15 100644
--- a/jstests/core/views/views_distinct.js
+++ b/jstests/core/views/views_distinct.js
@@ -68,6 +68,23 @@
assert.eq(explainPlan["stages"][0]["$cursor"]["queryPlanner"]["namespace"],
"views_distinct.coll");
+ // Distinct with explicit explain modes works on a view.
+ explainPlan = assert.commandWorked(largePopView.explain("queryPlanner").distinct("pop"));
+ assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_distinct.coll");
+ assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+
+ explainPlan = assert.commandWorked(largePopView.explain("executionStats").distinct("pop"));
+ assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_distinct.coll");
+ assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+ assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2);
+ assert(!explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
+
+ explainPlan = assert.commandWorked(largePopView.explain("allPlansExecution").distinct("pop"));
+ assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_distinct.coll");
+ assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+ assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2);
+ assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
+
// Distinct commands fail when they try to change the collation of a view.
assert.commandFailedWithCode(
viewsDB.runCommand({distinct: "identityView", key: "state", collation: {locale: "en_US"}}),
diff --git a/jstests/core/views/views_find.js b/jstests/core/views/views_find.js
index c196e980ce5..da555e454b7 100644
--- a/jstests/core/views/views_find.js
+++ b/jstests/core/views/views_find.js
@@ -72,6 +72,23 @@
// Views support find with explain.
assert.commandWorked(viewsDB.identityView.find().explain());
+ // Find with explicit explain modes works on a view.
+ let explainPlan = assert.commandWorked(viewsDB.identityView.find().explain("queryPlanner"));
+ assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_find.coll");
+ assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+
+ explainPlan = assert.commandWorked(viewsDB.identityView.find().explain("executionStats"));
+ assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_find.coll");
+ assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+ assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 5);
+ assert(!explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
+
+ explainPlan = assert.commandWorked(viewsDB.identityView.find().explain("allPlansExecution"));
+ assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_find.coll");
+ assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+ assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 5);
+ assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
+
// Only simple 0 or 1 projections are allowed on views.
assert.writeOK(viewsDB.coll.insert({arr: [{x: 1}]}));
assert.commandFailedWithCode(
diff --git a/jstests/libs/override_methods/set_read_and_write_concerns.js b/jstests/libs/override_methods/set_read_and_write_concerns.js
index b0a94c27326..468e52522d2 100644
--- a/jstests/libs/override_methods/set_read_and_write_concerns.js
+++ b/jstests/libs/override_methods/set_read_and_write_concerns.js
@@ -126,10 +126,13 @@
: undefined;
const hasOut =
lastStage && (typeof lastStage === 'object') && lastStage.hasOwnProperty('$out');
- if (hasOut) {
- forceWriteConcern = true;
- } else {
- forceReadConcern = true;
+ const hasExplain = obj.hasOwnProperty("explain");
+ if (!hasExplain) {
+ if (hasOut) {
+ forceWriteConcern = true;
+ } else {
+ forceReadConcern = true;
+ }
}
}
diff --git a/jstests/sharding/views.js b/jstests/sharding/views.js
index 75a8eeda258..a120eb32157 100644
--- a/jstests/sharding/views.js
+++ b/jstests/sharding/views.js
@@ -5,6 +5,33 @@
(function() {
"use strict";
+ // Given sharded explain output in 'shardedExplain', verifies that the explain mode 'verbosity'
+ // affected the output verbosity appropriately, and that the response has the expected format.
+ function verifyExplainResult(shardedExplain, verbosity) {
+ assert.commandWorked(shardedExplain);
+ assert(shardedExplain.hasOwnProperty("shards"), tojson(shardedExplain));
+ for (let elem in shardedExplain.shards) {
+ let shard = shardedExplain.shards[elem];
+ assert(shard.stages[0].hasOwnProperty("$cursor"), tojson(shardedExplain));
+ assert(shard.stages[0].$cursor.hasOwnProperty("queryPlanner"), tojson(shardedExplain));
+ if (verbosity === "queryPlanner") {
+ assert(!shard.stages[0].$cursor.hasOwnProperty("executionStats"),
+ tojson(shardedExplain));
+ } else if (verbosity === "executionStats") {
+ assert(shard.stages[0].$cursor.hasOwnProperty("executionStats"),
+ tojson(shardedExplain));
+ assert(!shard.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"),
+ tojson("shardedExplain"));
+ } else {
+ assert.eq(verbosity, "allPlansExecution", tojson(shardedExplain));
+ assert(shard.stages[0].$cursor.hasOwnProperty("executionStats"),
+ tojson(shardedExplain));
+ assert(shard.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"),
+ tojson(shardedExplain));
+ }
+ }
+ }
+
let st = new ShardingTest({name: "views_sharded", shards: 2, other: {enableBalancer: false}});
let mongos = st.s;
@@ -29,24 +56,40 @@
assert.commandWorked(db.createView("view", coll.getName(), [{$match: {a: {$gte: 4}}}]));
let view = db.getCollection("view");
+ const explainVerbosities = ["queryPlanner", "executionStats", "allPlansExecution"];
+
//
// find
//
assert.eq(5, view.find({a: {$lte: 8}}).itcount());
let result = db.runCommand({explain: {find: "view", filter: {a: {$lte: 7}}}});
- assert.commandWorked(result);
- assert(result.hasOwnProperty("shards"), tojson(result));
+ verifyExplainResult(result, "allPlansExecution");
+ for (let verbosity of explainVerbosities) {
+ result =
+ db.runCommand({explain: {find: "view", filter: {a: {$lte: 7}}}, verbosity: verbosity});
+ verifyExplainResult(result, verbosity);
+ }
//
// aggregate
//
assert.eq(5, view.aggregate([{$match: {a: {$lte: 8}}}]).itcount());
+ // Test that the explain:true flag for the aggregate command results in queryPlanner verbosity.
result =
db.runCommand({aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}], explain: true});
- assert.commandWorked(result);
- assert(result.hasOwnProperty("shards"), tojson(result));
+ verifyExplainResult(result, "queryPlanner");
+
+ result = db.runCommand({explain: {aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}]}});
+ verifyExplainResult(result, "allPlansExecution");
+ for (let verbosity of explainVerbosities) {
+ result = db.runCommand({
+ explain: {aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}]},
+ verbosity: verbosity
+ });
+ verifyExplainResult(result, verbosity);
+ }
//
// count
@@ -54,8 +97,12 @@
assert.eq(5, view.count({a: {$lte: 8}}));
result = db.runCommand({explain: {count: "view", query: {a: {$lte: 8}}}});
- assert.commandWorked(result);
- assert(result.hasOwnProperty("shards"), tojson(result));
+ verifyExplainResult(result, "allPlansExecution");
+ for (let verbosity of explainVerbosities) {
+ result =
+ db.runCommand({explain: {count: "view", query: {a: {$lte: 8}}}, verbosity: verbosity});
+ verifyExplainResult(result, verbosity);
+ }
//
// distinct
@@ -65,8 +112,12 @@
assert.eq([4, 5, 6, 7, 8], result.values.sort());
result = db.runCommand({explain: {distinct: "view", key: "a", query: {a: {$lte: 8}}}});
- assert.commandWorked(result);
- assert(result.hasOwnProperty("shards"), tojson(result));
+ verifyExplainResult(result, "allPlansExecution");
+ for (let verbosity of explainVerbosities) {
+ result = db.runCommand(
+ {explain: {distinct: "view", key: "a", query: {a: {$lte: 8}}}, verbosity: verbosity});
+ verifyExplainResult(result, verbosity);
+ }
//
// Confirm cleanupOrphaned command fails.
diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp
index e726bb3918a..12e60527080 100644
--- a/src/mongo/db/commands.cpp
+++ b/src/mongo/db/commands.cpp
@@ -146,7 +146,7 @@ void Command::help(stringstream& help) const {
Status Command::explain(OperationContext* opCtx,
const string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata& serverSelectionMetadata,
BSONObjBuilder* out) const {
return {ErrorCodes::IllegalOperation, str::stream() << "Cannot explain cmd: " << getName()};
diff --git a/src/mongo/db/commands.h b/src/mongo/db/commands.h
index 832a8e7c4b8..d185c0cf6d6 100644
--- a/src/mongo/db/commands.h
+++ b/src/mongo/db/commands.h
@@ -217,7 +217,7 @@ public:
virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata& serverSelectionMetadata,
BSONObjBuilder* out) const;
diff --git a/src/mongo/db/commands/SConscript b/src/mongo/db/commands/SConscript
index 48638bcd726..0a3b1442caf 100644
--- a/src/mongo/db/commands/SConscript
+++ b/src/mongo/db/commands/SConscript
@@ -35,15 +35,15 @@ env.Library(
"fail_point_cmd.cpp",
"feature_compatibility_version_command_parser.cpp",
"find_and_modify_common.cpp",
+ "generic.cpp",
"hashcmd.cpp",
"isself.cpp",
"mr_common.cpp",
+ "parameters.cpp",
"rename_collection_common.cpp",
"server_status.cpp",
- "parameters.cpp",
"user_management_commands_common.cpp",
"write_commands/write_commands_common.cpp",
- "generic.cpp",
],
LIBDEPS=[
'$BUILD_DIR/mongo/client/clientdriver',
@@ -92,13 +92,13 @@ env.Library(
"clone.cpp",
"clone_collection.cpp",
"collection_to_capped.cpp",
- "dbcommands.cpp",
"compact.cpp",
"copydb.cpp",
"copydb_start_commands.cpp",
"count_cmd.cpp",
"create_indexes.cpp",
"current_op.cpp",
+ "dbcommands.cpp",
"dbhash.cpp",
"distinct.cpp",
"driverHelpers.cpp",
@@ -128,6 +128,7 @@ env.Library(
"plan_cache_commands.cpp",
"rename_collection_cmd.cpp",
"repair_cursor.cpp",
+ "run_aggregate.cpp",
"set_feature_compatibility_version_command.cpp",
"snapshot_management.cpp",
"test_commands.cpp",
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 7ecbe2f08ea..b8418b0bff3 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -30,9 +30,9 @@
#include "mongo/platform/basic.h"
-
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
+#include "mongo/db/commands/run_aggregate.h"
#include "mongo/db/curop.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/count.h"
@@ -104,7 +104,7 @@ public:
virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata&,
BSONObjBuilder* out) const {
const bool isExplain = true;
@@ -133,10 +133,17 @@ public:
return viewAggregation.getStatus();
}
- std::string errmsg;
- (void)Command::findCommand("aggregate")
- ->run(opCtx, dbname, viewAggregation.getValue(), 0, errmsg, *out);
- return Status::OK();
+ auto viewAggRequest = AggregationRequest::parseFromBSON(
+ request.getValue().getNs(), viewAggregation.getValue(), verbosity);
+ if (!viewAggRequest.isOK()) {
+ return viewAggRequest.getStatus();
+ }
+
+ return runAggregate(opCtx,
+ viewAggRequest.getValue().getNamespaceString(),
+ viewAggRequest.getValue(),
+ viewAggregation.getValue(),
+ *out);
}
// Prevent chunks from being cleaned up during yields - this allows us to only check the
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index febed50512f..cec31d5f9a5 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -45,6 +45,7 @@
#include "mongo/db/client.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/commands.h"
+#include "mongo/db/commands/run_aggregate.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/jsobj.h"
@@ -114,7 +115,7 @@ public:
virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata&,
BSONObjBuilder* out) const {
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
@@ -143,10 +144,15 @@ public:
if (!viewAggregation.isOK()) {
return viewAggregation.getStatus();
}
- std::string errmsg;
- (void)Command::findCommand("aggregate")
- ->run(opCtx, dbname, viewAggregation.getValue(), 0, errmsg, *out);
- return Status::OK();
+
+ auto viewAggRequest =
+ AggregationRequest::parseFromBSON(nss, viewAggregation.getValue(), verbosity);
+ if (!viewAggRequest.isOK()) {
+ return viewAggRequest.getStatus();
+ }
+
+ return runAggregate(
+ opCtx, nss, viewAggRequest.getValue(), viewAggregation.getValue(), *out);
}
auto executor = getExecutorDistinct(
diff --git a/src/mongo/db/commands/explain_cmd.cpp b/src/mongo/db/commands/explain_cmd.cpp
index 7c72c26977f..1f431ec6f6e 100644
--- a/src/mongo/db/commands/explain_cmd.cpp
+++ b/src/mongo/db/commands/explain_cmd.cpp
@@ -114,10 +114,9 @@ public:
int options,
std::string& errmsg,
BSONObjBuilder& result) {
- ExplainCommon::Verbosity verbosity;
- Status parseStatus = ExplainCommon::parseCmdBSON(cmdObj, &verbosity);
- if (!parseStatus.isOK()) {
- return appendCommandStatus(result, parseStatus);
+ auto verbosity = ExplainOptions::parseCmdBSON(cmdObj);
+ if (!verbosity.isOK()) {
+ return appendCommandStatus(result, verbosity.getStatus());
}
// This is the nested command which we are explaining.
@@ -157,7 +156,7 @@ public:
Status explainStatus = commToExplain->explain(opCtx,
dbname,
explainObj,
- verbosity,
+ verbosity.getValue(),
rpc::ServerSelectionMetadata::get(opCtx),
&result);
if (!explainStatus.isOK()) {
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 3aa7eb2018c..a2deb9db62a 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -252,7 +252,7 @@ public:
Status explain(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata&,
BSONObjBuilder* out) const override {
const NamespaceString fullNs = parseNsCollectionRequired(dbName, cmdObj);
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index c3d8e88b227..5bb043d438a 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/client.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/commands.h"
+#include "mongo/db/commands/run_aggregate.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/matcher/extensions_callback_real.h"
@@ -132,7 +133,7 @@ public:
Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata&,
BSONObjBuilder* out) const override {
const NamespaceString nss(parseNs(dbname, cmdObj));
@@ -180,11 +181,17 @@ public:
if (!viewAggregationCommand.isOK())
return viewAggregationCommand.getStatus();
- Command* agg = Command::findCommand("aggregate");
- std::string errmsg;
+ // Create the agg request equivalent of the find operation, with the explain verbosity
+ // included.
+ auto aggRequest = AggregationRequest::parseFromBSON(
+ nss, viewAggregationCommand.getValue(), verbosity);
+ if (!aggRequest.isOK()) {
+ return aggRequest.getStatus();
+ }
try {
- agg->run(opCtx, dbname, viewAggregationCommand.getValue(), 0, errmsg, *out);
+ return runAggregate(
+ opCtx, nss, aggRequest.getValue(), viewAggregationCommand.getValue(), *out);
} catch (DBException& error) {
if (error.getCode() == ErrorCodes::InvalidPipelineOperator) {
return {ErrorCodes::InvalidPipelineOperator,
@@ -192,7 +199,6 @@ public:
}
return error.toStatus();
}
- return Status::OK();
}
// The collection may be NULL. If so, getExecutor() should handle it by returning an
diff --git a/src/mongo/db/commands/group_cmd.cpp b/src/mongo/db/commands/group_cmd.cpp
index cf42c368e75..a9e6eeca6ca 100644
--- a/src/mongo/db/commands/group_cmd.cpp
+++ b/src/mongo/db/commands/group_cmd.cpp
@@ -123,7 +123,7 @@ private:
virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata&,
BSONObjBuilder* out) const {
GroupRequest groupRequest;
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index 4fcfb97f574..47db0e450c6 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -26,270 +26,21 @@
* it in the license file.
*/
-#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
-
#include "mongo/platform/basic.h"
-#include <boost/optional.hpp>
-#include <deque>
-#include <vector>
-
#include "mongo/base/init.h"
-#include "mongo/db/auth/action_set.h"
-#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/privilege.h"
-#include "mongo/db/catalog/database.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
-#include "mongo/db/curop.h"
-#include "mongo/db/db_raii.h"
-#include "mongo/db/exec/pipeline_proxy.h"
-#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/pipeline/accumulator.h"
-#include "mongo/db/pipeline/aggregation_request.h"
-#include "mongo/db/pipeline/document.h"
-#include "mongo/db/pipeline/document_source.h"
-#include "mongo/db/pipeline/expression.h"
-#include "mongo/db/pipeline/expression_context.h"
-#include "mongo/db/pipeline/lite_parsed_pipeline.h"
+#include "mongo/db/commands/run_aggregate.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/db/pipeline/pipeline.h"
-#include "mongo/db/pipeline/pipeline_d.h"
-#include "mongo/db/query/collation/collator_factory_interface.h"
-#include "mongo/db/query/cursor_response.h"
-#include "mongo/db/query/find_common.h"
-#include "mongo/db/query/get_executor.h"
-#include "mongo/db/query/plan_summary_stats.h"
#include "mongo/db/server_options.h"
-#include "mongo/db/service_context.h"
-#include "mongo/db/storage/storage_options.h"
-#include "mongo/db/views/view.h"
-#include "mongo/db/views/view_catalog.h"
-#include "mongo/db/views/view_sharding_check.h"
-#include "mongo/stdx/memory.h"
-#include "mongo/util/log.h"
-#include "mongo/util/string_map.h"
namespace mongo {
-
-using boost::intrusive_ptr;
-using std::endl;
-using std::shared_ptr;
-using std::string;
-using std::stringstream;
-using std::unique_ptr;
-using stdx::make_unique;
-
namespace {
-/**
- * Returns true if we need to keep a ClientCursor saved for this pipeline (for future getMore
- * requests). Otherwise, returns false. The passed 'nsForCursor' is only used to determine the
- * namespace used in the returned cursor. In the case of views, this can be different from that
- * in 'request'.
- */
-bool handleCursorCommand(OperationContext* opCtx,
- const string& nsForCursor,
- ClientCursor* cursor,
- PlanExecutor* exec,
- const AggregationRequest& request,
- BSONObjBuilder& result) {
- if (cursor) {
- invariant(cursor->getExecutor() == exec);
- invariant(cursor->isAggCursor());
- }
-
- long long batchSize = request.getBatchSize();
-
- // can't use result BSONObjBuilder directly since it won't handle exceptions correctly.
- BSONArrayBuilder resultsArray;
- BSONObj next;
- for (int objCount = 0; objCount < batchSize; objCount++) {
- // The initial getNext() on a PipelineProxyStage may be very expensive so we don't
- // do it when batchSize is 0 since that indicates a desire for a fast return.
- PlanExecutor::ExecState state;
- if ((state = exec->getNext(&next, NULL)) == PlanExecutor::IS_EOF) {
- // make it an obvious error to use cursor or executor after this point
- cursor = NULL;
- exec = NULL;
- break;
- }
-
- uassert(34426,
- "Plan executor error during aggregation: " + WorkingSetCommon::toStatusString(next),
- PlanExecutor::ADVANCED == state);
-
- // If adding this object will cause us to exceed the message size limit, then we stash it
- // for later.
- if (!FindCommon::haveSpaceForNext(next, objCount, resultsArray.len())) {
- exec->enqueue(next);
- break;
- }
-
- resultsArray.append(next);
- }
-
- // NOTE: exec->isEOF() can have side effects such as writing by $out. However, it should
- // be relatively quick since if there was no cursor then the input is empty. Also, this
- // violates the contract for batchSize==0. Sharding requires a cursor to be returned in that
- // case. This is ok for now however, since you can't have a sharded collection that doesn't
- // exist.
- const bool canReturnMoreBatches = cursor;
- if (!canReturnMoreBatches && exec && !exec->isEOF()) {
- // msgasserting since this shouldn't be possible to trigger from today's aggregation
- // language. The wording assumes that the only reason cursor would be null is if the
- // collection doesn't exist.
- msgasserted(
- 17391,
- str::stream() << "Aggregation has more results than fit in initial batch, but can't "
- << "create cursor since collection "
- << nsForCursor
- << " doesn't exist");
- }
-
- if (cursor) {
- // If a time limit was set on the pipeline, remaining time is "rolled over" to the
- // cursor (for use by future getmore ops).
- cursor->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
-
- CurOp::get(opCtx)->debug().cursorid = cursor->cursorid();
-
- // Cursor needs to be in a saved state while we yield locks for getmore. State
- // will be restored in getMore().
- exec->saveState();
- exec->detachFromOperationContext();
- } else {
- CurOp::get(opCtx)->debug().cursorExhausted = true;
- }
-
- const long long cursorId = cursor ? cursor->cursorid() : 0LL;
- appendCursorResponseObject(cursorId, nsForCursor, resultsArray.arr(), &result);
-
- return static_cast<bool>(cursor);
-}
-
-StatusWith<StringMap<ExpressionContext::ResolvedNamespace>> resolveInvolvedNamespaces(
- OperationContext* opCtx, const AggregationRequest& request) {
- // We intentionally do not drop and reacquire our DB lock after resolving the view definition in
- // order to prevent the definition for any view namespaces we've already resolved from changing.
- // This is necessary to prevent a cycle from being formed among the view definitions cached in
- // 'resolvedNamespaces' because we won't re-resolve a view namespace we've already encountered.
- AutoGetDb autoDb(opCtx, request.getNamespaceString().db(), MODE_IS);
- Database* const db = autoDb.getDb();
- ViewCatalog* viewCatalog = db ? db->getViewCatalog() : nullptr;
-
- const LiteParsedPipeline liteParsedPipeline(request);
- const auto& pipelineInvolvedNamespaces = liteParsedPipeline.getInvolvedNamespaces();
- std::deque<NamespaceString> involvedNamespacesQueue(pipelineInvolvedNamespaces.begin(),
- pipelineInvolvedNamespaces.end());
- StringMap<ExpressionContext::ResolvedNamespace> resolvedNamespaces;
-
- while (!involvedNamespacesQueue.empty()) {
- auto involvedNs = std::move(involvedNamespacesQueue.front());
- involvedNamespacesQueue.pop_front();
-
- if (resolvedNamespaces.find(involvedNs.coll()) != resolvedNamespaces.end()) {
- continue;
- }
-
- if (!db || db->getCollection(involvedNs.ns())) {
- // If the database exists and 'involvedNs' refers to a collection namespace, then we
- // resolve it as an empty pipeline in order to read directly from the underlying
- // collection. If the database doesn't exist, then we still resolve it as an empty
- // pipeline because 'involvedNs' doesn't refer to a view namespace in our consistent
- // snapshot of the view catalog.
- resolvedNamespaces[involvedNs.coll()] = {involvedNs, std::vector<BSONObj>{}};
- } else if (viewCatalog->lookup(opCtx, involvedNs.ns())) {
- // If 'involvedNs' refers to a view namespace, then we resolve its definition.
- auto resolvedView = viewCatalog->resolveView(opCtx, involvedNs);
- if (!resolvedView.isOK()) {
- return {ErrorCodes::FailedToParse,
- str::stream() << "Failed to resolve view '" << involvedNs.ns() << "': "
- << resolvedView.getStatus().toString()};
- }
-
- resolvedNamespaces[involvedNs.coll()] = {resolvedView.getValue().getNamespace(),
- resolvedView.getValue().getPipeline()};
-
- // We parse the pipeline corresponding to the resolved view in case we must resolve
- // other view namespaces that are also involved.
- LiteParsedPipeline resolvedViewLitePipeline(
- {resolvedView.getValue().getNamespace(), resolvedView.getValue().getPipeline()});
-
- const auto& resolvedViewInvolvedNamespaces =
- resolvedViewLitePipeline.getInvolvedNamespaces();
- involvedNamespacesQueue.insert(involvedNamespacesQueue.end(),
- resolvedViewInvolvedNamespaces.begin(),
- resolvedViewInvolvedNamespaces.end());
- } else {
- // 'involvedNs' is neither a view nor a collection, so resolve it as an empty pipeline
- // to treat it as reading from a non-existent collection.
- resolvedNamespaces[involvedNs.coll()] = {involvedNs, std::vector<BSONObj>{}};
- }
- }
-
- return resolvedNamespaces;
-}
-
-/**
- * Round trips the pipeline through serialization by calling serialize(), then Pipeline::parse().
- * fasserts if it fails to parse after being serialized.
- */
-boost::intrusive_ptr<Pipeline> reparsePipeline(
- const boost::intrusive_ptr<Pipeline>& pipeline,
- const AggregationRequest& request,
- const boost::intrusive_ptr<ExpressionContext>& expCtx) {
- auto serialized = pipeline->serialize();
-
- // Convert vector<Value> to vector<BSONObj>.
- std::vector<BSONObj> parseableSerialization;
- parseableSerialization.reserve(serialized.size());
- for (auto&& serializedStage : serialized) {
- invariant(serializedStage.getType() == BSONType::Object);
- parseableSerialization.push_back(serializedStage.getDocument().toBson());
- }
-
- auto reparsedPipeline = Pipeline::parse(parseableSerialization, expCtx);
- if (!reparsedPipeline.isOK()) {
- error() << "Aggregation command did not round trip through parsing and serialization "
- "correctly. Input pipeline: "
- << Value(request.getPipeline()) << ", serialized pipeline: " << Value(serialized);
- fassertFailedWithStatusNoTrace(40175, reparsedPipeline.getStatus());
- }
-
- reparsedPipeline.getValue()->optimizePipeline();
- return reparsedPipeline.getValue();
-}
-
-/**
- * Returns Status::OK if each view namespace in 'pipeline' has a default collator equivalent to
- * 'collator'. Otherwise, returns ErrorCodes::OptionNotSupportedOnView.
- */
-Status collatorCompatibleWithPipeline(OperationContext* opCtx,
- Database* db,
- const CollatorInterface* collator,
- const intrusive_ptr<Pipeline> pipeline) {
- if (!db || !pipeline) {
- return Status::OK();
- }
- for (auto&& potentialViewNs : pipeline->getInvolvedCollections()) {
- if (db->getCollection(potentialViewNs.ns())) {
- continue;
- }
-
- auto view = db->getViewCatalog()->lookup(opCtx, potentialViewNs.ns());
- if (!view) {
- continue;
- }
- if (!CollatorInterface::collatorsMatch(view->defaultCollator(), collator)) {
- return {ErrorCodes::OptionNotSupportedOnView,
- str::stream() << "Cannot override default collation of view "
- << potentialViewNs.ns()};
- }
- }
- return Status::OK();
-}
-
bool isMergePipeline(const std::vector<BSONObj>& pipeline) {
if (pipeline.empty()) {
return false;
@@ -323,12 +74,12 @@ public:
return ReadWriteType::kRead;
}
- virtual void help(stringstream& help) const {
- help << "{ pipeline: [ { $operator: {...}}, ... ]"
- << ", explain: <bool>"
+ virtual void help(std::stringstream& help) const {
+ help << "{ pipeline: [ { $stage: {...} }, ... ]"
<< ", allowDiskUse: <bool>"
<< ", cursor: {batchSize: <number>}"
- << " }" << endl
+ << ", ..."
+ << " }" << std::endl
<< "See http://dochub.mongodb.org/core/aggregation for more details.";
}
@@ -339,252 +90,11 @@ public:
return AuthorizationSession::get(client)->checkAuthForAggregate(nss, cmdObj);
}
- bool runParsed(OperationContext* opCtx,
- const NamespaceString& origNss,
- const AggregationRequest& request,
- BSONObj& cmdObj,
- string& errmsg,
- BSONObjBuilder& result) {
- // For operations on views, this will be the underlying namespace.
- const NamespaceString& nss = request.getNamespaceString();
-
- // Parse the user-specified collation, if any.
- std::unique_ptr<CollatorInterface> userSpecifiedCollator = request.getCollation().isEmpty()
- ? nullptr
- : uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext())
- ->makeFromBSON(request.getCollation()));
-
- boost::optional<ClientCursorPin> pin; // either this OR the exec will be non-null
- unique_ptr<PlanExecutor> exec;
- boost::intrusive_ptr<ExpressionContext> expCtx;
- boost::intrusive_ptr<Pipeline> pipeline;
- auto curOp = CurOp::get(opCtx);
- {
- // This will throw if the sharding version for this connection is out of date. If the
- // namespace is a view, the lock will be released before re-running the aggregation.
- // Otherwise, the lock must be held continuously from now until we have created both
- // the output ClientCursor and the input executor. This ensures that both are using the
- // same sharding version that we synchronize on here. This is also why we always need to
- // create a ClientCursor even when we aren't outputting to a cursor. See the comment on
- // ShardFilterStage for more details.
- AutoGetCollectionOrViewForRead ctx(opCtx, nss);
- Collection* collection = ctx.getCollection();
-
- // If this is a view, resolve it by finding the underlying collection and stitching view
- // pipelines and this request's pipeline together. We then release our locks before
- // recursively calling run, which will re-acquire locks on the underlying collection.
- // (The lock must be released because recursively acquiring locks on the database will
- // prohibit yielding.)
- const LiteParsedPipeline liteParsedPipeline(request);
- if (ctx.getView() && !liteParsedPipeline.startsWithCollStats()) {
- // Check that the default collation of 'view' is compatible with the operation's
- // collation. The check is skipped if the 'request' has the empty collation, which
- // means that no collation was specified.
- if (!request.getCollation().isEmpty()) {
- if (!CollatorInterface::collatorsMatch(ctx.getView()->defaultCollator(),
- userSpecifiedCollator.get())) {
- return appendCommandStatus(result,
- {ErrorCodes::OptionNotSupportedOnView,
- "Cannot override a view's default collation"});
- }
- }
-
- auto viewDefinition =
- ViewShardingCheck::getResolvedViewIfSharded(opCtx, ctx.getDb(), ctx.getView());
- if (!viewDefinition.isOK()) {
- return appendCommandStatus(result, viewDefinition.getStatus());
- }
-
- if (!viewDefinition.getValue().isEmpty()) {
- ViewShardingCheck::appendShardedViewStatus(viewDefinition.getValue(), &result);
- return false;
- }
-
- auto resolvedView = ctx.getDb()->getViewCatalog()->resolveView(opCtx, nss);
- if (!resolvedView.isOK()) {
- return appendCommandStatus(result, resolvedView.getStatus());
- }
-
- auto collationSpec = ctx.getView()->defaultCollator()
- ? ctx.getView()->defaultCollator()->getSpec().toBSON().getOwned()
- : CollationSpec::kSimpleSpec;
-
- // With the view & collation resolved, we can relinquish locks.
- ctx.releaseLocksForView();
-
- // Parse the resolved view into a new aggregation request.
- auto newCmd = resolvedView.getValue().asExpandedViewAggregation(request);
- if (!newCmd.isOK()) {
- return appendCommandStatus(result, newCmd.getStatus());
- }
- auto newNss = resolvedView.getValue().getNamespace();
- auto newRequest = AggregationRequest::parseFromBSON(newNss, newCmd.getValue());
- if (!newRequest.isOK()) {
- return appendCommandStatus(result, newRequest.getStatus());
- }
- newRequest.getValue().setCollation(collationSpec);
-
- bool status = runParsed(
- opCtx, origNss, newRequest.getValue(), newCmd.getValue(), errmsg, result);
- {
- // Set the namespace of the curop back to the view namespace so ctx records
- // stats on this view namespace on destruction.
- stdx::lock_guard<Client> lk(*opCtx->getClient());
- curOp->setNS_inlock(nss.ns());
- }
- return status;
- }
-
- // Determine the appropriate collation to make the ExpressionContext.
-
- // If the pipeline does not have a user-specified collation, set it from the collection
- // default. Be careful to consult the original request BSON to check if a collation was
- // specified, since a specification of {locale: "simple"} will result in a null
- // collator.
- auto collatorToUse = std::move(userSpecifiedCollator);
- if (request.getCollation().isEmpty() && collection &&
- collection->getDefaultCollator()) {
- invariant(!collatorToUse);
- collatorToUse = collection->getDefaultCollator()->clone();
- }
-
- expCtx.reset(
- new ExpressionContext(opCtx,
- request,
- std::move(collatorToUse),
- uassertStatusOK(resolveInvolvedNamespaces(opCtx, request))));
- expCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";
-
- // Parse the pipeline.
- auto statusWithPipeline = Pipeline::parse(request.getPipeline(), expCtx);
- if (!statusWithPipeline.isOK()) {
- return appendCommandStatus(result, statusWithPipeline.getStatus());
- }
- pipeline = std::move(statusWithPipeline.getValue());
-
- // Check that the view's collation matches the collation of any views involved
- // in the pipeline.
- auto pipelineCollationStatus =
- collatorCompatibleWithPipeline(opCtx, ctx.getDb(), expCtx->getCollator(), pipeline);
- if (!pipelineCollationStatus.isOK()) {
- return appendCommandStatus(result, pipelineCollationStatus);
- }
-
- pipeline->optimizePipeline();
-
- if (kDebugBuild && !expCtx->isExplain && !expCtx->inShard) {
- // Make sure all operations round-trip through Pipeline::serialize() correctly by
- // re-parsing every command in debug builds. This is important because sharded
- // aggregations rely on this ability. Skipping when inShard because this has
- // already been through the transformation (and this un-sets expCtx->inShard).
- pipeline = reparsePipeline(pipeline, request, expCtx);
- }
-
- // This does mongod-specific stuff like creating the input PlanExecutor and adding
- // it to the front of the pipeline if needed.
- PipelineD::prepareCursorSource(collection, &request, pipeline);
-
- // Create the PlanExecutor which returns results from the pipeline. The WorkingSet
- // ('ws') and the PipelineProxyStage ('proxy') will be owned by the created
- // PlanExecutor.
- auto ws = make_unique<WorkingSet>();
- auto proxy = make_unique<PipelineProxyStage>(opCtx, pipeline, ws.get());
-
- auto statusWithPlanExecutor = (NULL == collection)
- ? PlanExecutor::make(
- opCtx, std::move(ws), std::move(proxy), nss.ns(), PlanExecutor::YIELD_MANUAL)
- : PlanExecutor::make(opCtx,
- std::move(ws),
- std::move(proxy),
- collection,
- PlanExecutor::YIELD_MANUAL);
- invariant(statusWithPlanExecutor.isOK());
- exec = std::move(statusWithPlanExecutor.getValue());
-
- {
- auto planSummary = Explain::getPlanSummary(exec.get());
- stdx::lock_guard<Client> lk(*opCtx->getClient());
- curOp->setPlanSummary_inlock(std::move(planSummary));
- }
-
- if (collection) {
- const bool isAggCursor = true; // enable special locking behavior
- pin.emplace(collection->getCursorManager()->registerCursor(
- {exec.release(),
- nss.ns(),
- opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
- 0,
- cmdObj.getOwned(),
- isAggCursor}));
- // Don't add any code between here and the start of the try block.
- }
-
- // At this point, it is safe to release the collection lock.
- // - In the case where we have a collection: we will need to reacquire the
- // collection lock later when cleaning up our ClientCursorPin.
- // - In the case where we don't have a collection: our PlanExecutor won't be
- // registered, so it will be safe to clean it up outside the lock.
- invariant(!exec || !collection);
- }
-
- try {
- // Unless set to true, the ClientCursor created above will be deleted on block exit.
- bool keepCursor = false;
-
- // If both explain and cursor are specified, explain wins.
- if (expCtx->isExplain) {
- result << "stages" << Value(pipeline->writeExplainOps());
- } else {
- // Cursor must be specified, if explain is not.
- keepCursor = handleCursorCommand(opCtx,
- origNss.ns(),
- pin ? pin->getCursor() : nullptr,
- pin ? pin->getCursor()->getExecutor() : exec.get(),
- request,
- result);
- }
-
- if (!expCtx->isExplain) {
- PlanSummaryStats stats;
- Explain::getSummaryStats(pin ? *pin->getCursor()->getExecutor() : *exec.get(),
- &stats);
- curOp->debug().setPlanSummaryMetrics(stats);
- curOp->debug().nreturned = stats.nReturned;
- }
-
- // Clean up our ClientCursorPin, if needed. We must reacquire the collection lock
- // in order to do so.
- if (pin) {
- // We acquire locks here with DBLock and CollectionLock instead of using
- // AutoGetCollectionForRead. AutoGetCollectionForRead will throw if the
- // sharding version is out of date, and we don't care if the sharding version
- // has changed.
- Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_IS);
- Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);
- if (keepCursor) {
- pin->release();
- } else {
- pin->deleteUnderlying();
- }
- }
- } catch (...) {
- // On our way out of scope, we clean up our ClientCursorPin if needed.
- if (pin) {
- Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_IS);
- Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);
- pin->deleteUnderlying();
- }
- throw;
- }
- // Any code that needs the cursor pinned must be inside the try block, above.
- return appendCommandStatus(result, Status::OK());
- }
-
virtual bool run(OperationContext* opCtx,
- const string& db,
+ const std::string& db,
BSONObj& cmdObj,
int options,
- string& errmsg,
+ std::string& errmsg,
BSONObjBuilder& result) {
const NamespaceString nss(parseNsCollectionRequired(db, cmdObj));
@@ -610,7 +120,38 @@ public:
"http://dochub.mongodb.org/core/3.4-feature-compatibility."));
}
- return runParsed(opCtx, nss, request.getValue(), cmdObj, errmsg, result);
+ return appendCommandStatus(result,
+ runAggregate(opCtx, nss, request.getValue(), cmdObj, result));
+ }
+
+ Status explain(OperationContext* opCtx,
+ const std::string& dbName,
+ const BSONObj& cmdObj,
+ ExplainOptions::Verbosity verbosity,
+ const rpc::ServerSelectionMetadata& serverSelectionMetadata,
+ BSONObjBuilder* out) const override {
+ const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
+
+ // Parse the options for this request, supplying the explain verbosity separately.
+ auto request = AggregationRequest::parseFromBSON(nss, cmdObj, verbosity);
+ if (!request.isOK()) {
+ return request.getStatus();
+ }
+ // If the featureCompatibilityVersion is 3.2, we disallow collation from the user. However,
+ // operations should still respect the collection default collation. The mongos attaches the
+ // collection default collation to the merger pipeline, since the merger may not have the
+ // collection metadata. So the merger needs to accept a collation, and we rely on the shards
+ // to reject collations from the user.
+ if (!request.getValue().getCollation().isEmpty() &&
+ serverGlobalParams.featureCompatibility.version.load() ==
+ ServerGlobalParams::FeatureCompatibility::Version::k32 &&
+ !isMergePipeline(request.getValue().getPipeline())) {
+ return Status(ErrorCodes::InvalidOptions,
+ "The featureCompatibilityVersion must be 3.4 to use collation. See "
+ "http://dochub.mongodb.org/core/3.4-feature-compatibility.");
+ }
+
+ return runAggregate(opCtx, nss, request.getValue(), cmdObj, *out);
}
};
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
new file mode 100644
index 00000000000..684b954b0c5
--- /dev/null
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -0,0 +1,511 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/commands/run_aggregate.h"
+
+#include <boost/optional.hpp>
+#include <vector>
+
+#include "mongo/db/catalog/database.h"
+#include "mongo/db/curop.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/exec/pipeline_proxy.h"
+#include "mongo/db/exec/working_set_common.h"
+#include "mongo/db/pipeline/accumulator.h"
+#include "mongo/db/pipeline/document.h"
+#include "mongo/db/pipeline/document_source.h"
+#include "mongo/db/pipeline/expression.h"
+#include "mongo/db/pipeline/expression_context.h"
+#include "mongo/db/pipeline/lite_parsed_pipeline.h"
+#include "mongo/db/pipeline/pipeline.h"
+#include "mongo/db/pipeline/pipeline_d.h"
+#include "mongo/db/query/collation/collator_factory_interface.h"
+#include "mongo/db/query/cursor_response.h"
+#include "mongo/db/query/find_common.h"
+#include "mongo/db/query/get_executor.h"
+#include "mongo/db/query/plan_summary_stats.h"
+#include "mongo/db/service_context.h"
+#include "mongo/db/storage/storage_options.h"
+#include "mongo/db/views/view.h"
+#include "mongo/db/views/view_catalog.h"
+#include "mongo/db/views/view_sharding_check.h"
+#include "mongo/stdx/memory.h"
+#include "mongo/util/log.h"
+#include "mongo/util/string_map.h"
+
+namespace mongo {
+
+using boost::intrusive_ptr;
+using std::endl;
+using std::shared_ptr;
+using std::string;
+using std::stringstream;
+using std::unique_ptr;
+using stdx::make_unique;
+
+namespace {
+/**
+ * Returns true if we need to keep a ClientCursor saved for this pipeline (for future getMore
+ * requests). Otherwise, returns false. The passed 'nsForCursor' is used only to determine the
+ * namespace of the returned cursor. In the case of views, this can differ from the namespace
+ * in 'request'.
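+ *
+ * On success a cursor response is appended to 'result'; as a rough sketch of the shape
+ * written by appendCursorResponseObject(), it is
+ * {cursor: {id: <cursorId>, ns: <nsForCursor>, firstBatch: [...]}}.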
+ */
+bool handleCursorCommand(OperationContext* opCtx,
+ const string& nsForCursor,
+ ClientCursor* cursor,
+ PlanExecutor* exec,
+ const AggregationRequest& request,
+ BSONObjBuilder& result) {
+ if (cursor) {
+ invariant(cursor->getExecutor() == exec);
+ invariant(cursor->isAggCursor());
+ }
+
+ long long batchSize = request.getBatchSize();
+
+ // Can't use the 'result' BSONObjBuilder directly since it won't handle exceptions correctly.
+ BSONArrayBuilder resultsArray;
+ BSONObj next;
+ for (int objCount = 0; objCount < batchSize; objCount++) {
+ // The initial getNext() on a PipelineProxyStage may be very expensive, so we don't
+ // do it when batchSize is 0, since that indicates a desire for a fast return.
+ PlanExecutor::ExecState state;
+ if ((state = exec->getNext(&next, NULL)) == PlanExecutor::IS_EOF) {
+ // Make it an obvious error to use 'cursor' or 'exec' after this point.
+ cursor = NULL;
+ exec = NULL;
+ break;
+ }
+
+ uassert(34426,
+ "Plan executor error during aggregation: " + WorkingSetCommon::toStatusString(next),
+ PlanExecutor::ADVANCED == state);
+
+ // If adding this object will cause us to exceed the message size limit, then we stash it
+ // for later.
+ if (!FindCommon::haveSpaceForNext(next, objCount, resultsArray.len())) {
+ exec->enqueue(next);
+ break;
+ }
+
+ resultsArray.append(next);
+ }
+
+ // NOTE: exec->isEOF() can have side effects, such as writes performed by $out. However, it
+ // should be relatively quick since, if there was no cursor, the input is empty. Also, this
+ // violates the contract for batchSize==0: sharding requires a cursor to be returned in that
+ // case. This is OK for now, however, since you can't have a sharded collection that doesn't
+ // exist.
+ const bool canReturnMoreBatches = cursor;
+ if (!canReturnMoreBatches && exec && !exec->isEOF()) {
+ // msgasserting since this shouldn't be possible to trigger from today's aggregation
+ // language. The wording assumes that the only reason cursor would be null is if the
+ // collection doesn't exist.
+ msgasserted(
+ 17391,
+ str::stream() << "Aggregation has more results than fit in initial batch, but can't "
+ << "create cursor since collection "
+ << nsForCursor
+ << " doesn't exist");
+ }
+
+ if (cursor) {
+ // If a time limit was set on the pipeline, remaining time is "rolled over" to the
+ // cursor (for use by future getmore ops).
+ cursor->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
+
+ CurOp::get(opCtx)->debug().cursorid = cursor->cursorid();
+
+ // Cursor needs to be in a saved state while we yield locks for getmore. State
+ // will be restored in getMore().
+ exec->saveState();
+ exec->detachFromOperationContext();
+ } else {
+ CurOp::get(opCtx)->debug().cursorExhausted = true;
+ }
+
+ const long long cursorId = cursor ? cursor->cursorid() : 0LL;
+ appendCursorResponseObject(cursorId, nsForCursor, resultsArray.arr(), &result);
+
+ return static_cast<bool>(cursor);
+}
+
+StatusWith<StringMap<ExpressionContext::ResolvedNamespace>> resolveInvolvedNamespaces(
+ OperationContext* opCtx, const AggregationRequest& request) {
+ // We intentionally do not drop and reacquire our DB lock after resolving the view definition in
+ // order to prevent the definition for any view namespaces we've already resolved from changing.
+ // This is necessary to prevent a cycle from being formed among the view definitions cached in
+ // 'resolvedNamespaces' because we won't re-resolve a view namespace we've already encountered.
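+ //
+ // For example (illustrative only): if the pipeline involves view A defined over view B, we
+ // resolve A against this snapshot of the view catalog, enqueue B, and then resolve B against
+ // the same snapshot, so neither definition can change mid-resolution.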
+ AutoGetDb autoDb(opCtx, request.getNamespaceString().db(), MODE_IS);
+ Database* const db = autoDb.getDb();
+ ViewCatalog* viewCatalog = db ? db->getViewCatalog() : nullptr;
+
+ const LiteParsedPipeline liteParsedPipeline(request);
+ const auto& pipelineInvolvedNamespaces = liteParsedPipeline.getInvolvedNamespaces();
+ std::deque<NamespaceString> involvedNamespacesQueue(pipelineInvolvedNamespaces.begin(),
+ pipelineInvolvedNamespaces.end());
+ StringMap<ExpressionContext::ResolvedNamespace> resolvedNamespaces;
+
+ while (!involvedNamespacesQueue.empty()) {
+ auto involvedNs = std::move(involvedNamespacesQueue.front());
+ involvedNamespacesQueue.pop_front();
+
+ if (resolvedNamespaces.find(involvedNs.coll()) != resolvedNamespaces.end()) {
+ continue;
+ }
+
+ if (!db || db->getCollection(involvedNs.ns())) {
+ // If the database exists and 'involvedNs' refers to a collection namespace, then we
+ // resolve it as an empty pipeline in order to read directly from the underlying
+ // collection. If the database doesn't exist, then we still resolve it as an empty
+ // pipeline because 'involvedNs' doesn't refer to a view namespace in our consistent
+ // snapshot of the view catalog.
+ resolvedNamespaces[involvedNs.coll()] = {involvedNs, std::vector<BSONObj>{}};
+ } else if (viewCatalog->lookup(opCtx, involvedNs.ns())) {
+ // If 'involvedNs' refers to a view namespace, then we resolve its definition.
+ auto resolvedView = viewCatalog->resolveView(opCtx, involvedNs);
+ if (!resolvedView.isOK()) {
+ return {ErrorCodes::FailedToParse,
+ str::stream() << "Failed to resolve view '" << involvedNs.ns() << "': "
+ << resolvedView.getStatus().toString()};
+ }
+
+ resolvedNamespaces[involvedNs.coll()] = {resolvedView.getValue().getNamespace(),
+ resolvedView.getValue().getPipeline()};
+
+ // We parse the pipeline corresponding to the resolved view in case we must resolve
+ // other view namespaces that are also involved.
+ LiteParsedPipeline resolvedViewLitePipeline(
+ {resolvedView.getValue().getNamespace(), resolvedView.getValue().getPipeline()});
+
+ const auto& resolvedViewInvolvedNamespaces =
+ resolvedViewLitePipeline.getInvolvedNamespaces();
+ involvedNamespacesQueue.insert(involvedNamespacesQueue.end(),
+ resolvedViewInvolvedNamespaces.begin(),
+ resolvedViewInvolvedNamespaces.end());
+ } else {
+ // 'involvedNs' is neither a view nor a collection, so resolve it as an empty pipeline
+ // to treat it as reading from a non-existent collection.
+ resolvedNamespaces[involvedNs.coll()] = {involvedNs, std::vector<BSONObj>{}};
+ }
+ }
+
+ return resolvedNamespaces;
+}
+
+/**
+ * Round trips the pipeline through serialization by calling serialize(), then Pipeline::parse().
+ * fasserts if it fails to parse after being serialized.
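+ *
+ * (runAggregate() below calls this only in debug builds, and only for non-explain requests
+ * that are not already running on a shard, as a sanity check of the round-trip property that
+ * sharded aggregation depends on.)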
+ */
+boost::intrusive_ptr<Pipeline> reparsePipeline(
+ const boost::intrusive_ptr<Pipeline>& pipeline,
+ const AggregationRequest& request,
+ const boost::intrusive_ptr<ExpressionContext>& expCtx) {
+ auto serialized = pipeline->serialize();
+
+ // Convert vector<Value> to vector<BSONObj>.
+ std::vector<BSONObj> parseableSerialization;
+ parseableSerialization.reserve(serialized.size());
+ for (auto&& serializedStage : serialized) {
+ invariant(serializedStage.getType() == BSONType::Object);
+ parseableSerialization.push_back(serializedStage.getDocument().toBson());
+ }
+
+ auto reparsedPipeline = Pipeline::parse(parseableSerialization, expCtx);
+ if (!reparsedPipeline.isOK()) {
+ error() << "Aggregation command did not round trip through parsing and serialization "
+ "correctly. Input pipeline: "
+ << Value(request.getPipeline()) << ", serialized pipeline: " << Value(serialized);
+ fassertFailedWithStatusNoTrace(40175, reparsedPipeline.getStatus());
+ }
+
+ reparsedPipeline.getValue()->optimizePipeline();
+ return reparsedPipeline.getValue();
+}
+
+/**
+ * Returns Status::OK if each view namespace in 'pipeline' has a default collator equivalent to
+ * 'collator'. Otherwise, returns ErrorCodes::OptionNotSupportedOnView.
+ */
+Status collatorCompatibleWithPipeline(OperationContext* opCtx,
+ Database* db,
+ const CollatorInterface* collator,
+ const intrusive_ptr<Pipeline> pipeline) {
+ if (!db || !pipeline) {
+ return Status::OK();
+ }
+ for (auto&& potentialViewNs : pipeline->getInvolvedCollections()) {
+ if (db->getCollection(potentialViewNs.ns())) {
+ continue;
+ }
+
+ auto view = db->getViewCatalog()->lookup(opCtx, potentialViewNs.ns());
+ if (!view) {
+ continue;
+ }
+ if (!CollatorInterface::collatorsMatch(view->defaultCollator(), collator)) {
+ return {ErrorCodes::OptionNotSupportedOnView,
+ str::stream() << "Cannot override default collation of view "
+ << potentialViewNs.ns()};
+ }
+ }
+ return Status::OK();
+}
+} // namespace
+
+Status runAggregate(OperationContext* opCtx,
+ const NamespaceString& origNss,
+ const AggregationRequest& request,
+ const BSONObj& cmdObj,
+ BSONObjBuilder& result) {
+ // For operations on views, this will be the underlying namespace.
+ const NamespaceString& nss = request.getNamespaceString();
+
+ // Parse the user-specified collation, if any.
+ std::unique_ptr<CollatorInterface> userSpecifiedCollator = request.getCollation().isEmpty()
+ ? nullptr
+ : uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext())
+ ->makeFromBSON(request.getCollation()));
+
+ boost::optional<ClientCursorPin> pin; // either this OR the exec will be non-null
+ unique_ptr<PlanExecutor> exec;
+ boost::intrusive_ptr<ExpressionContext> expCtx;
+ boost::intrusive_ptr<Pipeline> pipeline;
+ auto curOp = CurOp::get(opCtx);
+ {
+ // This will throw if the sharding version for this connection is out of date. If the
+ // namespace is a view, the lock will be released before re-running the aggregation.
+ // Otherwise, the lock must be held continuously from now until we have created both
+ // the output ClientCursor and the input executor. This ensures that both are using the
+ // same sharding version that we synchronize on here. This is also why we always need to
+ // create a ClientCursor even when we aren't outputting to a cursor. See the comment on
+ // ShardFilterStage for more details.
+ AutoGetCollectionOrViewForRead ctx(opCtx, nss);
+ Collection* collection = ctx.getCollection();
+
+ // If this is a view, resolve it by finding the underlying collection and stitching view
+ // pipelines and this request's pipeline together. We then release our locks before
+ // recursively calling runAggregate(), which will re-acquire locks on the underlying
+ // collection. (The lock must be released because recursively acquiring locks on the
+ // database will prohibit yielding.)
+ const LiteParsedPipeline liteParsedPipeline(request);
+ if (ctx.getView() && !liteParsedPipeline.startsWithCollStats()) {
+ // Check that the default collation of 'view' is compatible with the operation's
+ // collation. The check is skipped if the 'request' has the empty collation, which
+ // means that no collation was specified.
+ if (!request.getCollation().isEmpty()) {
+ if (!CollatorInterface::collatorsMatch(ctx.getView()->defaultCollator(),
+ userSpecifiedCollator.get())) {
+ return {ErrorCodes::OptionNotSupportedOnView,
+ "Cannot override a view's default collation"};
+ }
+ }
+
+ auto viewDefinition =
+ ViewShardingCheck::getResolvedViewIfSharded(opCtx, ctx.getDb(), ctx.getView());
+ if (!viewDefinition.isOK()) {
+ return viewDefinition.getStatus();
+ }
+
+ if (!viewDefinition.getValue().isEmpty()) {
+ return ViewShardingCheck::appendShardedViewResponse(viewDefinition.getValue(),
+ &result);
+ }
+
+ auto resolvedView = ctx.getDb()->getViewCatalog()->resolveView(opCtx, nss);
+ if (!resolvedView.isOK()) {
+ return resolvedView.getStatus();
+ }
+
+ auto collationSpec = ctx.getView()->defaultCollator()
+ ? ctx.getView()->defaultCollator()->getSpec().toBSON().getOwned()
+ : CollationSpec::kSimpleSpec;
+
+ // With the view & collation resolved, we can relinquish locks.
+ ctx.releaseLocksForView();
+
+ // Parse the resolved view into a new aggregation request.
+ auto newRequest = resolvedView.getValue().asExpandedViewAggregation(request);
+ newRequest.setCollation(collationSpec);
+ auto newCmd = newRequest.serializeToCommandObj().toBson();
+
+ auto status = runAggregate(opCtx, origNss, newRequest, newCmd, result);
+ {
+ // Set the namespace of the curop back to the view namespace so ctx records
+ // stats on this view namespace on destruction.
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ curOp->setNS_inlock(nss.ns());
+ }
+ return status;
+ }
+
+ // Determine the appropriate collation to use when creating the ExpressionContext.
+
+ // If the pipeline does not have a user-specified collation, set it from the collection
+ // default. Be careful to consult the original request BSON to check if a collation was
+ // specified, since a specification of {locale: "simple"} will result in a null
+ // collator.
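+ //
+ // (Illustrative sketch: with no user collation and a collection default of, say,
+ // {locale: "fr"}, 'collatorToUse' becomes a clone of the collection's collator; with a
+ // user-specified {locale: "simple"}, 'userSpecifiedCollator' is null and nothing is cloned,
+ // so the ExpressionContext ends up with no collator.)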
+ auto collatorToUse = std::move(userSpecifiedCollator);
+ if (request.getCollation().isEmpty() && collection && collection->getDefaultCollator()) {
+ invariant(!collatorToUse);
+ collatorToUse = collection->getDefaultCollator()->clone();
+ }
+
+ expCtx.reset(
+ new ExpressionContext(opCtx,
+ request,
+ std::move(collatorToUse),
+ uassertStatusOK(resolveInvolvedNamespaces(opCtx, request))));
+ expCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";
+
+ // Parse the pipeline.
+ auto statusWithPipeline = Pipeline::parse(request.getPipeline(), expCtx);
+ if (!statusWithPipeline.isOK()) {
+ return statusWithPipeline.getStatus();
+ }
+ pipeline = std::move(statusWithPipeline.getValue());
+
+ // Check that the operation's collation is compatible with the default collation of any
+ // views involved in the pipeline.
+ auto pipelineCollationStatus =
+ collatorCompatibleWithPipeline(opCtx, ctx.getDb(), expCtx->getCollator(), pipeline);
+ if (!pipelineCollationStatus.isOK()) {
+ return pipelineCollationStatus;
+ }
+
+ pipeline->optimizePipeline();
+
+ if (kDebugBuild && !expCtx->explain && !expCtx->inShard) {
+ // Make sure all operations round-trip through Pipeline::serialize() correctly by
+ // re-parsing every command in debug builds. This is important because sharded
+ // aggregations rely on this ability. Skipping when inShard because this has
+ // already been through the transformation (and this un-sets expCtx->inShard).
+ pipeline = reparsePipeline(pipeline, request, expCtx);
+ }
+
+ // This does mongod-specific stuff like creating the input PlanExecutor and adding
+ // it to the front of the pipeline if needed.
+ PipelineD::prepareCursorSource(collection, &request, pipeline);
+
+ // Create the PlanExecutor which returns results from the pipeline. The WorkingSet
+ // ('ws') and the PipelineProxyStage ('proxy') will be owned by the created
+ // PlanExecutor.
+ auto ws = make_unique<WorkingSet>();
+ auto proxy = make_unique<PipelineProxyStage>(opCtx, pipeline, ws.get());
+
+ auto statusWithPlanExecutor = (NULL == collection)
+ ? PlanExecutor::make(
+ opCtx, std::move(ws), std::move(proxy), nss.ns(), PlanExecutor::YIELD_MANUAL)
+ : PlanExecutor::make(
+ opCtx, std::move(ws), std::move(proxy), collection, PlanExecutor::YIELD_MANUAL);
+ invariant(statusWithPlanExecutor.isOK());
+ exec = std::move(statusWithPlanExecutor.getValue());
+
+ {
+ auto planSummary = Explain::getPlanSummary(exec.get());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ curOp->setPlanSummary_inlock(std::move(planSummary));
+ }
+
+ if (collection) {
+ const bool isAggCursor = true; // enable special locking behavior
+ pin.emplace(collection->getCursorManager()->registerCursor(
+ {exec.release(),
+ nss.ns(),
+ opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
+ 0,
+ cmdObj.getOwned(),
+ isAggCursor}));
+ // Don't add any code between here and the start of the try block.
+ }
+
+ // At this point, it is safe to release the collection lock.
+ // - In the case where we have a collection: we will need to reacquire the
+ // collection lock later when cleaning up our ClientCursorPin.
+ // - In the case where we don't have a collection: our PlanExecutor won't be
+ // registered, so it will be safe to clean it up outside the lock.
+ invariant(!exec || !collection);
+ }
+
+ try {
+ // Unless set to true, the ClientCursor created above will be deleted on block exit.
+ bool keepCursor = false;
+
+ // If both explain and cursor are specified, explain wins.
+ if (expCtx->explain) {
+ result << "stages" << Value(pipeline->writeExplainOps(*expCtx->explain));
+ } else {
+ // A cursor must be specified if explain is not.
+ keepCursor = handleCursorCommand(opCtx,
+ origNss.ns(),
+ pin ? pin->getCursor() : nullptr,
+ pin ? pin->getCursor()->getExecutor() : exec.get(),
+ request,
+ result);
+ }
+
+ if (!expCtx->explain) {
+ PlanSummaryStats stats;
+ Explain::getSummaryStats(pin ? *pin->getCursor()->getExecutor() : *exec.get(), &stats);
+ curOp->debug().setPlanSummaryMetrics(stats);
+ curOp->debug().nreturned = stats.nReturned;
+ }
+
+ // Clean up our ClientCursorPin, if needed. We must reacquire the collection lock
+ // in order to do so.
+ if (pin) {
+ // We acquire locks here with DBLock and CollectionLock instead of using
+ // AutoGetCollectionForRead. AutoGetCollectionForRead will throw if the
+ // sharding version is out of date, and we don't care if the sharding version
+ // has changed.
+ Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_IS);
+ Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);
+ if (keepCursor) {
+ pin->release();
+ } else {
+ pin->deleteUnderlying();
+ }
+ }
+ } catch (...) {
+ // On our way out of scope, we clean up our ClientCursorPin if needed.
+ if (pin) {
+ Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_IS);
+ Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);
+ pin->deleteUnderlying();
+ }
+ throw;
+ }
+ // Any code that needs the cursor pinned must be inside the try block, above.
+ return Status::OK();
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/commands/run_aggregate.h b/src/mongo/db/commands/run_aggregate.h
new file mode 100644
index 00000000000..1412768ddcf
--- /dev/null
+++ b/src/mongo/db/commands/run_aggregate.h
@@ -0,0 +1,53 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/bson/bsonobj.h"
+#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/db/pipeline/aggregation_request.h"
+
+namespace mongo {
+
+/**
+ * Executes the aggregation 'request' over the specified namespace 'nss' using context 'opCtx'.
+ *
+ * The raw aggregate command parameters should be passed in 'cmdObj', and will be reported as the
+ * originatingCommand in subsequent getMores on the resulting agg cursor.
+ *
+ * On success, fills out 'result' with the command response.
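+ * For a normal aggregation this is a cursor response; for an aggregation explain it is the
+ * explain output (e.g. the serialized "stages" array).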
+ */
+Status runAggregate(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const AggregationRequest& request,
+ const BSONObj& cmdObj,
+ BSONObjBuilder& result);
+
+} // namespace mongo
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index 4093bcf083e..3c30ddc9e66 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -304,7 +304,7 @@ public:
Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata&,
BSONObjBuilder* out) const final {
const auto batch = parseUpdateCommand(dbname, cmdObj);
@@ -376,7 +376,7 @@ public:
Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata&,
BSONObjBuilder* out) const final {
const auto batch = parseDeleteCommand(dbname, cmdObj);
diff --git a/src/mongo/db/pipeline/SConscript b/src/mongo/db/pipeline/SConscript
index 49783bc5def..e243adda24a 100644
--- a/src/mongo/db/pipeline/SConscript
+++ b/src/mongo/db/pipeline/SConscript
@@ -85,13 +85,15 @@ env.Library(
'aggregation_request.cpp',
],
LIBDEPS=[
- 'document_value',
'$BUILD_DIR/mongo/base',
- '$BUILD_DIR/mongo/db/query/command_request_response',
'$BUILD_DIR/mongo/db/namespace_string',
+ '$BUILD_DIR/mongo/db/query/command_request_response',
+ '$BUILD_DIR/mongo/db/query/explain_options',
'$BUILD_DIR/mongo/db/query/query_request',
'$BUILD_DIR/mongo/db/repl/read_concern_args',
'$BUILD_DIR/mongo/db/storage/storage_options',
+ '$BUILD_DIR/mongo/db/write_concern_options',
+ 'document_value',
]
)
diff --git a/src/mongo/db/pipeline/aggregation_request.cpp b/src/mongo/db/pipeline/aggregation_request.cpp
index 947cfbb3f81..920073c6558 100644
--- a/src/mongo/db/pipeline/aggregation_request.cpp
+++ b/src/mongo/db/pipeline/aggregation_request.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/query/query_request.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/storage/storage_options.h"
+#include "mongo/db/write_concern_options.h"
namespace mongo {
@@ -61,8 +62,10 @@ const long long AggregationRequest::kDefaultBatchSize = 101;
AggregationRequest::AggregationRequest(NamespaceString nss, std::vector<BSONObj> pipeline)
: _nss(std::move(nss)), _pipeline(std::move(pipeline)), _batchSize(kDefaultBatchSize) {}
-StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(NamespaceString nss,
- const BSONObj& cmdObj) {
+StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(
+ NamespaceString nss,
+ const BSONObj& cmdObj,
+ boost::optional<ExplainOptions::Verbosity> explainVerbosity) {
// Parse required parameters.
auto pipelineElem = cmdObj[kPipelineName];
if (pipelineElem.eoo() || pipelineElem.type() != BSONType::Array) {
@@ -81,12 +84,13 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(NamespaceString
const std::initializer_list<StringData> optionsParsedElseWhere = {
QueryRequest::cmdOptionMaxTimeMS,
- "writeConcern"_sd,
+ WriteConcernOptions::kWriteConcernField,
kPipelineName,
kCommandName,
repl::ReadConcernArgs::kReadConcernFieldName};
bool hasCursorElem = false;
+ bool hasExplainElem = false;
// Parse optional parameters.
for (auto&& elem : cmdObj) {
@@ -138,7 +142,11 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(NamespaceString
str::stream() << kExplainName << " must be a boolean, not a "
<< typeName(elem.type())};
}
- request.setExplain(elem.Bool());
+
+ hasExplainElem = true;
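+ // A bare explain:true maps to the "queryPlanner" verbosity; explain:false leaves the
+ // request as a non-explain aggregate.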
+ if (elem.Bool()) {
+ request.setExplain(ExplainOptions::Verbosity::kQueryPlanner);
+ }
} else if (kFromRouterName == fieldName) {
if (elem.type() != BSONType::Bool) {
return {ErrorCodes::TypeMismatch,
@@ -165,11 +173,35 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(NamespaceString
}
}
- if (!hasCursorElem && !request.isExplain()) {
+ if (explainVerbosity) {
+ if (hasExplainElem) {
+ return {
+ ErrorCodes::FailedToParse,
+ str::stream() << "The '" << kExplainName
+ << "' option is illegal when a explain verbosity is also provided"};
+ }
+
+ request.setExplain(explainVerbosity);
+ }
+
+ if (!hasCursorElem && !request.getExplain()) {
+ return {ErrorCodes::FailedToParse,
+ str::stream() << "The '" << kCursorName
+ << "' option is required, except for aggregation explain"};
+ }
+
+ if (request.getExplain() && cmdObj[repl::ReadConcernArgs::kReadConcernFieldName]) {
+ return {ErrorCodes::FailedToParse,
+ str::stream() << "Aggregation explain does not support the '"
+ << repl::ReadConcernArgs::kReadConcernFieldName
+ << "' option"};
+ }
+
+ if (request.getExplain() && cmdObj[WriteConcernOptions::kWriteConcernField]) {
return {ErrorCodes::FailedToParse,
- str::stream() << "The '" << kCursorName << "' option is required, unless '"
- << kExplainName
- << "' is true"};
+ str::stream() << "Aggregation explain does not support the'"
+ << WriteConcernOptions::kWriteConcernField
+ << "' option"};
}
return request;
@@ -181,7 +213,6 @@ Document AggregationRequest::serializeToCommandObj() const {
{kCommandName, _nss.coll()},
{kPipelineName, _pipeline},
// Only serialize booleans if different than their default.
- {kExplainName, _explain ? Value(true) : Value()},
{kAllowDiskUseName, _allowDiskUse ? Value(true) : Value()},
{kFromRouterName, _fromRouter ? Value(true) : Value()},
{bypassDocumentValidationCommandOption(),
@@ -189,7 +220,7 @@ Document AggregationRequest::serializeToCommandObj() const {
// Only serialize a collation if one was specified.
{kCollationName, _collation.isEmpty() ? Value() : Value(_collation)},
// Only serialize batchSize when explain is false.
- {kCursorName, _explain ? Value() : Value(Document{{kBatchSizeName, _batchSize}})},
+ {kCursorName, _explainMode ? Value() : Value(Document{{kBatchSizeName, _batchSize}})},
// Only serialize a hint if one was specified.
{kHintName, _hint.isEmpty() ? Value() : Value(_hint)}};
}
diff --git a/src/mongo/db/pipeline/aggregation_request.h b/src/mongo/db/pipeline/aggregation_request.h
index b25eaa020dc..156d397cbb0 100644
--- a/src/mongo/db/pipeline/aggregation_request.h
+++ b/src/mongo/db/pipeline/aggregation_request.h
@@ -34,6 +34,7 @@
#include "mongo/bson/bsonelement.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/query/explain_options.h"
namespace mongo {
@@ -62,8 +63,15 @@ public:
* Create a new instance of AggregationRequest by parsing the raw command object. Returns a
* non-OK status if a required field was missing, if there was an unrecognized field name or if
* there was a bad value for one of the fields.
+ *
+ * If we are parsing a request for an explained aggregation with an explain verbosity provided,
+ * then 'explainVerbosity' contains this information. In this case, 'cmdObj' must not itself
+ * contain the explain specifier. Otherwise, 'explainVerbosity' should be boost::none.
*/
- static StatusWith<AggregationRequest> parseFromBSON(NamespaceString nss, const BSONObj& cmdObj);
+ static StatusWith<AggregationRequest> parseFromBSON(
+ NamespaceString nss,
+ const BSONObj& cmdObj,
+ boost::optional<ExplainOptions::Verbosity> explainVerbosity = boost::none);
/**
* Constructs an AggregationRequest over the given namespace with the given pipeline. All
@@ -75,6 +83,9 @@ public:
* Serializes the options to a Document. Note that this serialization includes the original
* pipeline object, as specified. Callers will likely want to override this field with a
* serialization of a parsed and optimized Pipeline object.
+ *
+ * The explain option is not serialized. Since the explain command format is {explain:
+ * {aggregate: ...}, ...}, explain options are not part of the aggregate command object.
*/
Document serializeToCommandObj() const;
@@ -97,10 +108,6 @@ public:
return _pipeline;
}
- bool isExplain() const {
- return _explain;
- }
-
bool isFromRouter() const {
return _fromRouter;
}
@@ -124,6 +131,10 @@ public:
return _hint;
}
+ boost::optional<ExplainOptions::Verbosity> getExplain() const {
+ return _explainMode;
+ }
+
//
// Setters for optional fields.
//
@@ -144,8 +155,8 @@ public:
_hint = hint.getOwned();
}
- void setExplain(bool isExplain) {
- _explain = isExplain;
+ void setExplain(boost::optional<ExplainOptions::Verbosity> verbosity) {
+ _explainMode = verbosity;
}
void setAllowDiskUse(bool allowDiskUse) {
@@ -162,7 +173,6 @@ public:
private:
// Required fields.
-
const NamespaceString _nss;
// An unparsed version of the pipeline.
@@ -181,7 +191,9 @@ private:
// {$hint: <String>}, where <String> is the index name hinted.
BSONObj _hint;
- bool _explain = false;
+ // The explain mode to use, or boost::none if this is not a request for an aggregation explain.
+ boost::optional<ExplainOptions::Verbosity> _explainMode;
+
bool _allowDiskUse = false;
bool _fromRouter = false;
bool _bypassDocumentValidation = false;
diff --git a/src/mongo/db/pipeline/aggregation_request_test.cpp b/src/mongo/db/pipeline/aggregation_request_test.cpp
index 45313fc8a1e..a40bca8d8d1 100644
--- a/src/mongo/db/pipeline/aggregation_request_test.cpp
+++ b/src/mongo/db/pipeline/aggregation_request_test.cpp
@@ -58,7 +58,8 @@ TEST(AggregationRequestTest, ShouldParseAllKnownOptions) {
"bypassDocumentValidation: true, collation: {locale: 'en_US'}, cursor: {batchSize: 10}, "
"hint: {a: 1}}");
auto request = unittest::assertGet(AggregationRequest::parseFromBSON(nss, inputBson));
- ASSERT_TRUE(request.isExplain());
+ ASSERT_TRUE(request.getExplain());
+ ASSERT(*request.getExplain() == ExplainOptions::Verbosity::kQueryPlanner);
ASSERT_TRUE(request.shouldAllowDiskUse());
ASSERT_TRUE(request.isFromRouter());
ASSERT_TRUE(request.shouldBypassDocumentValidation());
@@ -69,6 +70,33 @@ TEST(AggregationRequestTest, ShouldParseAllKnownOptions) {
<< "en_US"));
}
+TEST(AggregationRequestTest, ShouldParseExplicitExplainFalseWithCursorOption) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson = fromjson("{pipeline: [], explain: false, cursor: {batchSize: 10}}");
+ auto request = unittest::assertGet(AggregationRequest::parseFromBSON(nss, inputBson));
+ ASSERT_FALSE(request.getExplain());
+ ASSERT_EQ(request.getBatchSize(), 10);
+}
+
+TEST(AggregationRequestTest, ShouldParseWithSeparateQueryPlannerExplainModeArg) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson = fromjson("{pipeline: []}");
+ auto request = unittest::assertGet(AggregationRequest::parseFromBSON(
+ nss, inputBson, ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_TRUE(request.getExplain());
+ ASSERT(*request.getExplain() == ExplainOptions::Verbosity::kQueryPlanner);
+}
+
+TEST(AggregationRequestTest, ShouldParseWithSeparateQueryPlannerExplainModeArgAndCursorOption) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson = fromjson("{pipeline: [], cursor: {batchSize: 10}}");
+ auto request = unittest::assertGet(
+ AggregationRequest::parseFromBSON(nss, inputBson, ExplainOptions::Verbosity::kExecStats));
+ ASSERT_TRUE(request.getExplain());
+ ASSERT(*request.getExplain() == ExplainOptions::Verbosity::kExecStats);
+ ASSERT_EQ(request.getBatchSize(), 10);
+}
+
//
// Serialization
//
@@ -87,7 +115,7 @@ TEST(AggregationRequestTest, ShouldOnlySerializeRequiredFieldsIfNoOptionalFields
TEST(AggregationRequestTest, ShouldNotSerializeOptionalValuesIfEquivalentToDefault) {
NamespaceString nss("a.collection");
AggregationRequest request(nss, {});
- request.setExplain(false);
+ request.setExplain(boost::none);
request.setAllowDiskUse(false);
request.setFromRouter(false);
request.setBypassDocumentValidation(false);
@@ -104,11 +132,10 @@ TEST(AggregationRequestTest, ShouldNotSerializeOptionalValuesIfEquivalentToDefau
TEST(AggregationRequestTest, ShouldSerializeOptionalValuesIfSet) {
NamespaceString nss("a.collection");
AggregationRequest request(nss, {});
- request.setExplain(true);
request.setAllowDiskUse(true);
request.setFromRouter(true);
request.setBypassDocumentValidation(true);
- request.setBatchSize(10); // batchSize not serialzed when explain is true.
+ request.setBatchSize(10);
const auto hintObj = BSON("a" << 1);
request.setHint(hintObj);
const auto collationObj = BSON("locale"
@@ -118,11 +145,12 @@ TEST(AggregationRequestTest, ShouldSerializeOptionalValuesIfSet) {
auto expectedSerialization =
Document{{AggregationRequest::kCommandName, nss.coll()},
{AggregationRequest::kPipelineName, Value(std::vector<Value>{})},
- {AggregationRequest::kExplainName, true},
{AggregationRequest::kAllowDiskUseName, true},
{AggregationRequest::kFromRouterName, true},
{bypassDocumentValidationCommandOption(), true},
{AggregationRequest::kCollationName, collationObj},
+ {AggregationRequest::kCursorName,
+ Value(Document({{AggregationRequest::kBatchSizeName, 10}}))},
{AggregationRequest::kHintName, hintObj}};
ASSERT_DOCUMENT_EQ(request.serializeToCommandObj(), expectedSerialization);
}
@@ -159,6 +187,18 @@ TEST(AggregationRequestTest, ShouldAcceptHintAsString) {
<< "a_1"));
}
+TEST(AggregationRequestTest, ShouldNotSerializeBatchSizeOrExplainWhenExplainSet) {
+ NamespaceString nss("a.collection");
+ AggregationRequest request(nss, {});
+ request.setBatchSize(10);
+ request.setExplain(ExplainOptions::Verbosity::kQueryPlanner);
+
+ auto expectedSerialization =
+ Document{{AggregationRequest::kCommandName, nss.coll()},
+ {AggregationRequest::kPipelineName, Value(std::vector<Value>{})}};
+ ASSERT_DOCUMENT_EQ(request.serializeToCommandObj(), expectedSerialization);
+}
+
//
// Error cases.
//
@@ -201,13 +241,20 @@ TEST(AggregationRequestTest, ShouldRejectHintAsArray) {
AggregationRequest::parseFromBSON(NamespaceString("a.collection"), inputBson).getStatus());
}
-TEST(AggregationRequestTest, ShouldRejectNonBoolExplain) {
+TEST(AggregationRequestTest, ShouldRejectExplainIfNumber) {
NamespaceString nss("a.collection");
const BSONObj inputBson =
fromjson("{pipeline: [{$match: {a: 'abc'}}], cursor: {}, explain: 1}");
ASSERT_NOT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
}
+TEST(AggregationRequestTest, ShouldRejectExplainIfObject) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson =
+ fromjson("{pipeline: [{$match: {a: 'abc'}}], cursor: {}, explain: {}}");
+ ASSERT_NOT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
+}
+
TEST(AggregationRequestTest, ShouldRejectNonBoolFromRouter) {
NamespaceString nss("a.collection");
const BSONObj inputBson =
@@ -228,6 +275,52 @@ TEST(AggregationRequestTest, ShouldRejectNoCursorNoExplain) {
ASSERT_NOT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
}
+TEST(AggregationRequestTest, ShouldRejectExplainTrueWithSeparateExplainArg) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson = fromjson("{pipeline: [], explain: true}");
+ ASSERT_NOT_OK(
+ AggregationRequest::parseFromBSON(nss, inputBson, ExplainOptions::Verbosity::kExecStats)
+ .getStatus());
+}
+
+TEST(AggregationRequestTest, ShouldRejectExplainFalseWithSeparateExplainArg) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson = fromjson("{pipeline: [], explain: false}");
+ ASSERT_NOT_OK(
+ AggregationRequest::parseFromBSON(nss, inputBson, ExplainOptions::Verbosity::kExecStats)
+ .getStatus());
+}
+
+TEST(AggregationRequestTest, ShouldRejectExplainWithReadConcernMajority) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson =
+ fromjson("{pipeline: [], explain: true, readConcern: {level: 'majority'}}");
+ ASSERT_NOT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
+}
+
+TEST(AggregationRequestTest, ShouldRejectExplainExecStatsVerbosityWithReadConcernMajority) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson = fromjson("{pipeline: [], readConcern: {level: 'majority'}}");
+ ASSERT_NOT_OK(
+ AggregationRequest::parseFromBSON(nss, inputBson, ExplainOptions::Verbosity::kExecStats)
+ .getStatus());
+}
+
+TEST(AggregationRequestTest, ShouldRejectExplainWithWriteConcernMajority) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson =
+ fromjson("{pipeline: [], explain: true, writeConcern: {w: 'majority'}}");
+ ASSERT_NOT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
+}
+
+TEST(AggregationRequestTest, ShouldRejectExplainExecStatsVerbosityWithWriteConcernMajority) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson = fromjson("{pipeline: [], writeConcern: {w: 'majority'}}");
+ ASSERT_NOT_OK(
+ AggregationRequest::parseFromBSON(nss, inputBson, ExplainOptions::Verbosity::kExecStats)
+ .getStatus());
+}
+
//
// Ignore fields parsed elsewhere.
//
diff --git a/src/mongo/db/pipeline/document_source.cpp b/src/mongo/db/pipeline/document_source.cpp
index 440dc762175..ec1d8edfcb3 100644
--- a/src/mongo/db/pipeline/document_source.cpp
+++ b/src/mongo/db/pipeline/document_source.cpp
@@ -198,7 +198,8 @@ void DocumentSource::dispose() {
}
}
-void DocumentSource::serializeToArray(vector<Value>& array, bool explain) const {
+void DocumentSource::serializeToArray(vector<Value>& array,
+ boost::optional<ExplainOptions::Verbosity> explain) const {
Value entry = serialize(explain);
if (!entry.missing()) {
array.push_back(entry);
diff --git a/src/mongo/db/pipeline/document_source.h b/src/mongo/db/pipeline/document_source.h
index 0dd3b7b3154..b6ef71d1480 100644
--- a/src/mongo/db/pipeline/document_source.h
+++ b/src/mongo/db/pipeline/document_source.h
@@ -49,6 +49,7 @@
#include "mongo/db/pipeline/lite_parsed_document_source.h"
#include "mongo/db/pipeline/pipeline.h"
#include "mongo/db/pipeline/value.h"
+#include "mongo/db/query/explain_options.h"
#include "mongo/stdx/functional.h"
#include "mongo/util/intrusive_counter.h"
@@ -244,11 +245,15 @@ public:
/**
* In the default case, serializes the DocumentSource and adds it to the std::vector<Value>.
*
- * A subclass may choose to overwrite this, rather than serialize,
- * if it should output multiple stages (eg, $sort sometimes also outputs a $limit).
+ * A subclass may choose to override this, rather than serialize(), if it should output multiple
+ * stages (e.g., $sort sometimes also outputs a $limit).
+ *
+ * The 'explain' parameter indicates the explain verbosity mode, or is equal to boost::none if no
+ * explain is requested.
*/
-
- virtual void serializeToArray(std::vector<Value>& array, bool explain = false) const;
+ virtual void serializeToArray(
+ std::vector<Value>& array,
+ boost::optional<ExplainOptions::Verbosity> explain = boost::none) const;
/**
* Returns true if doesn't require an input source (most DocumentSources do).
@@ -473,8 +478,12 @@ private:
* This is used by the default implementation of serializeToArray() to add this object
* to a pipeline being serialized. Returning a missing() Value results in no entry
* being added to the array for this stage (DocumentSource).
+ *
+ * The 'explain' parameter indicates the explain verbosity mode, or is equal to boost::none if no
+ * explain is requested.
*/
- virtual Value serialize(bool explain = false) const = 0;
+ virtual Value serialize(
+ boost::optional<ExplainOptions::Verbosity> explain = boost::none) const = 0;
};
/** This class marks DocumentSources that should be split between the merger and the shards.
diff --git a/src/mongo/db/pipeline/document_source_bucket_auto.cpp b/src/mongo/db/pipeline/document_source_bucket_auto.cpp
index b02e8d0226f..f2015df5f64 100644
--- a/src/mongo/db/pipeline/document_source_bucket_auto.cpp
+++ b/src/mongo/db/pipeline/document_source_bucket_auto.cpp
@@ -333,10 +333,11 @@ void DocumentSourceBucketAuto::dispose() {
pSource->dispose();
}
-Value DocumentSourceBucketAuto::serialize(bool explain) const {
+Value DocumentSourceBucketAuto::serialize(
+ boost::optional<ExplainOptions::Verbosity> explain) const {
MutableDocument insides;
- insides["groupBy"] = _groupByExpression->serialize(explain);
+ insides["groupBy"] = _groupByExpression->serialize(static_cast<bool>(explain));
insides["buckets"] = Value(_nBuckets);
if (_granularityRounder) {
@@ -347,8 +348,8 @@ Value DocumentSourceBucketAuto::serialize(bool explain) const {
MutableDocument outputSpec(nOutputFields);
for (size_t i = 0; i < nOutputFields; i++) {
intrusive_ptr<Accumulator> accum = _accumulatorFactories[i](pExpCtx);
- outputSpec[_fieldNames[i]] =
- Value{Document{{accum->getOpName(), _expressions[i]->serialize(explain)}}};
+ outputSpec[_fieldNames[i]] = Value{
+ Document{{accum->getOpName(), _expressions[i]->serialize(static_cast<bool>(explain))}}};
}
insides["output"] = outputSpec.freezeToValue();
diff --git a/src/mongo/db/pipeline/document_source_bucket_auto.h b/src/mongo/db/pipeline/document_source_bucket_auto.h
index 61f7da30c71..75c3f07b683 100644
--- a/src/mongo/db/pipeline/document_source_bucket_auto.h
+++ b/src/mongo/db/pipeline/document_source_bucket_auto.h
@@ -43,7 +43,7 @@ namespace mongo {
*/
class DocumentSourceBucketAuto final : public DocumentSource, public SplittableDocumentSource {
public:
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
GetDepsReturn getDependencies(DepsTracker* deps) const final;
GetNextResult getNext() final;
void dispose() final;
diff --git a/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp b/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp
index 2d71b676024..debd2eede82 100644
--- a/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp
+++ b/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp
@@ -87,9 +87,9 @@ public:
auto bucketAutoStage = createBucketAuto(bucketAutoSpec);
assertBucketAutoType(bucketAutoStage);
- const bool explain = true;
vector<Value> explainedStages;
- bucketAutoStage->serializeToArray(explainedStages, explain);
+ bucketAutoStage->serializeToArray(explainedStages,
+ ExplainOptions::Verbosity::kQueryPlanner);
ASSERT_EQUALS(explainedStages.size(), 1UL);
Value expectedExplain = Value(expectedObj);
diff --git a/src/mongo/db/pipeline/document_source_bucket_test.cpp b/src/mongo/db/pipeline/document_source_bucket_test.cpp
index bdfb3635bb7..1a61934dcb5 100644
--- a/src/mongo/db/pipeline/document_source_bucket_test.cpp
+++ b/src/mongo/db/pipeline/document_source_bucket_test.cpp
@@ -66,7 +66,7 @@ public:
// Serialize the DocumentSourceGroup and DocumentSourceSort from $bucket so that we can
// check the explain output to make sure $group and $sort have the correct fields.
- const bool explain = true;
+ auto explain = ExplainOptions::Verbosity::kQueryPlanner;
vector<Value> explainedStages;
groupStage->serializeToArray(explainedStages, explain);
sortStage->serializeToArray(explainedStages, explain);
diff --git a/src/mongo/db/pipeline/document_source_coll_stats.cpp b/src/mongo/db/pipeline/document_source_coll_stats.cpp
index 30ebb4916fc..56ebaca69e3 100644
--- a/src/mongo/db/pipeline/document_source_coll_stats.cpp
+++ b/src/mongo/db/pipeline/document_source_coll_stats.cpp
@@ -126,7 +126,7 @@ bool DocumentSourceCollStats::isValidInitialSource() const {
return true;
}
-Value DocumentSourceCollStats::serialize(bool explain) const {
+Value DocumentSourceCollStats::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(Document{{getSourceName(), _collStatsSpec}});
}
diff --git a/src/mongo/db/pipeline/document_source_coll_stats.h b/src/mongo/db/pipeline/document_source_coll_stats.h
index d8cf2eb6718..9e5aa2a4f3f 100644
--- a/src/mongo/db/pipeline/document_source_coll_stats.h
+++ b/src/mongo/db/pipeline/document_source_coll_stats.h
@@ -63,7 +63,7 @@ public:
bool isValidInitialSource() const final;
- Value serialize(bool explain = false) const;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
static boost::intrusive_ptr<DocumentSource> createFromBson(
BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
diff --git a/src/mongo/db/pipeline/document_source_count_test.cpp b/src/mongo/db/pipeline/document_source_count_test.cpp
index 5535ee5ea83..e083399b0fa 100644
--- a/src/mongo/db/pipeline/document_source_count_test.cpp
+++ b/src/mongo/db/pipeline/document_source_count_test.cpp
@@ -63,7 +63,7 @@ public:
dynamic_cast<DocumentSourceSingleDocumentTransformation*>(result[1].get());
ASSERT(projectStage);
- const bool explain = true;
+ auto explain = ExplainOptions::Verbosity::kQueryPlanner;
vector<Value> explainedStages;
groupStage->serializeToArray(explainedStages, explain);
projectStage->serializeToArray(explainedStages, explain);
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index 276a16196bc..ae62da0a978 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -174,7 +174,7 @@ void DocumentSourceCursor::recordPlanSummaryStats() {
_planSummaryStats.hasSortStage = hasSortStage;
}
-Value DocumentSourceCursor::serialize(bool explain) const {
+Value DocumentSourceCursor::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
+ // We never parse a DocumentSourceCursor, so we only serialize for explain.
if (!explain)
return Value();
@@ -187,8 +187,7 @@ Value DocumentSourceCursor::serialize(bool explain) const {
massert(17392, "No _exec. Were we disposed before explained?", _exec);
_exec->restoreState();
- Explain::explainStages(
- _exec.get(), autoColl.getCollection(), ExplainCommon::QUERY_PLANNER, &explainBuilder);
+ Explain::explainStages(_exec.get(), autoColl.getCollection(), *explain, &explainBuilder);
_exec->saveState();
}
@@ -209,6 +208,10 @@ Value DocumentSourceCursor::serialize(bool explain) const {
BSONObj explainObj = explainBuilder.obj();
invariant(explainObj.hasField("queryPlanner"));
out["queryPlanner"] = Value(explainObj["queryPlanner"]);
+ if (*explain >= ExplainOptions::Verbosity::kExecStats) {
+ invariant(explainObj.hasField("executionStats"));
+ out["executionStats"] = Value(explainObj["executionStats"]);
+ }
return Value(DOC(getSourceName() << out.freezeToValue()));
}
diff --git a/src/mongo/db/pipeline/document_source_cursor.h b/src/mongo/db/pipeline/document_source_cursor.h
index 360b855ad39..3faeea86de6 100644
--- a/src/mongo/db/pipeline/document_source_cursor.h
+++ b/src/mongo/db/pipeline/document_source_cursor.h
@@ -58,7 +58,7 @@ public:
*/
Pipeline::SourceContainer::iterator doOptimizeAt(Pipeline::SourceContainer::iterator itr,
Pipeline::SourceContainer* container) final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
bool isValidInitialSource() const final {
return true;
}
diff --git a/src/mongo/db/pipeline/document_source_facet.cpp b/src/mongo/db/pipeline/document_source_facet.cpp
index 9b03b233051..0842f7f68bf 100644
--- a/src/mongo/db/pipeline/document_source_facet.cpp
+++ b/src/mongo/db/pipeline/document_source_facet.cpp
@@ -179,11 +179,11 @@ DocumentSource::GetNextResult DocumentSourceFacet::getNext() {
return resultDoc.freeze();
}
-Value DocumentSourceFacet::serialize(bool explain) const {
+Value DocumentSourceFacet::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
MutableDocument serialized;
for (auto&& facet : _facets) {
- serialized[facet.name] =
- Value(explain ? facet.pipeline->writeExplainOps() : facet.pipeline->serialize());
+ serialized[facet.name] = Value(explain ? facet.pipeline->writeExplainOps(*explain)
+ : facet.pipeline->serialize());
}
return Value(Document{{"$facet", serialized.freezeToValue()}});
}
diff --git a/src/mongo/db/pipeline/document_source_facet.h b/src/mongo/db/pipeline/document_source_facet.h
index 921b2e87609..4da558e37a1 100644
--- a/src/mongo/db/pipeline/document_source_facet.h
+++ b/src/mongo/db/pipeline/document_source_facet.h
@@ -135,7 +135,7 @@ private:
DocumentSourceFacet(std::vector<FacetPipeline> facetPipelines,
const boost::intrusive_ptr<ExpressionContext>& expCtx);
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
boost::intrusive_ptr<TeeBuffer> _teeBuffer;
std::vector<FacetPipeline> _facets;
diff --git a/src/mongo/db/pipeline/document_source_geo_near.cpp b/src/mongo/db/pipeline/document_source_geo_near.cpp
index 17e9997bbf5..c971509e373 100644
--- a/src/mongo/db/pipeline/document_source_geo_near.cpp
+++ b/src/mongo/db/pipeline/document_source_geo_near.cpp
@@ -97,7 +97,7 @@ intrusive_ptr<DocumentSource> DocumentSourceGeoNear::getMergeSource() {
return DocumentSourceSort::create(pExpCtx, BSON(distanceField->fullPath() << 1), limit);
}
-Value DocumentSourceGeoNear::serialize(bool explain) const {
+Value DocumentSourceGeoNear::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
MutableDocument result;
if (coordsIsArray) {
diff --git a/src/mongo/db/pipeline/document_source_geo_near.h b/src/mongo/db/pipeline/document_source_geo_near.h
index c38c311a4e5..03bcf304eee 100644
--- a/src/mongo/db/pipeline/document_source_geo_near.h
+++ b/src/mongo/db/pipeline/document_source_geo_near.h
@@ -49,7 +49,7 @@ public:
bool isValidInitialSource() const final {
return true;
}
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
BSONObjSet getOutputSorts() final {
return SimpleBSONObjComparator::kInstance.makeBSONObjSet(
{BSON(distanceField->fullPath() << -1)});
diff --git a/src/mongo/db/pipeline/document_source_graph_lookup.cpp b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
index a11af0a44c3..e8e2cacc108 100644
--- a/src/mongo/db/pipeline/document_source_graph_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
@@ -390,7 +390,8 @@ void DocumentSourceGraphLookUp::checkMemoryUsage() {
_cache.evictDownTo(_maxMemoryUsageBytes - _frontierUsageBytes - _visitedUsageBytes);
}
-void DocumentSourceGraphLookUp::serializeToArray(std::vector<Value>& array, bool explain) const {
+void DocumentSourceGraphLookUp::serializeToArray(
+ std::vector<Value>& array, boost::optional<ExplainOptions::Verbosity> explain) const {
// Serialize default options.
MutableDocument spec(DOC("from" << _from.coll() << "as" << _as.fullPath() << "connectToField"
<< _connectToField.fullPath()
diff --git a/src/mongo/db/pipeline/document_source_graph_lookup.h b/src/mongo/db/pipeline/document_source_graph_lookup.h
index 6e94e98b4af..8c14a2ab8fa 100644
--- a/src/mongo/db/pipeline/document_source_graph_lookup.h
+++ b/src/mongo/db/pipeline/document_source_graph_lookup.h
@@ -45,7 +45,9 @@ public:
const char* getSourceName() const final;
void dispose() final;
BSONObjSet getOutputSorts() final;
- void serializeToArray(std::vector<Value>& array, bool explain = false) const final;
+ void serializeToArray(
+ std::vector<Value>& array,
+ boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
/**
* Returns the 'as' path, and possibly the fields modified by an absorbed $unwind.
@@ -107,7 +109,7 @@ private:
boost::optional<long long> maxDepth,
boost::optional<boost::intrusive_ptr<DocumentSourceUnwind>> unwindSrc);
- Value serialize(bool explain = false) const final {
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final {
// Should not be called; use serializeToArray instead.
MONGO_UNREACHABLE;
}
diff --git a/src/mongo/db/pipeline/document_source_group.cpp b/src/mongo/db/pipeline/document_source_group.cpp
index a70a65c06b9..d4f44850a8f 100644
--- a/src/mongo/db/pipeline/document_source_group.cpp
+++ b/src/mongo/db/pipeline/document_source_group.cpp
@@ -198,19 +198,19 @@ intrusive_ptr<DocumentSource> DocumentSourceGroup::optimize() {
return this;
}
-Value DocumentSourceGroup::serialize(bool explain) const {
+Value DocumentSourceGroup::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
MutableDocument insides;
// Add the _id.
if (_idFieldNames.empty()) {
invariant(_idExpressions.size() == 1);
- insides["_id"] = _idExpressions[0]->serialize(explain);
+ insides["_id"] = _idExpressions[0]->serialize(static_cast<bool>(explain));
} else {
// Decomposed document case.
invariant(_idExpressions.size() == _idFieldNames.size());
MutableDocument md;
for (size_t i = 0; i < _idExpressions.size(); i++) {
- md[_idFieldNames[i]] = _idExpressions[i]->serialize(explain);
+ md[_idFieldNames[i]] = _idExpressions[i]->serialize(static_cast<bool>(explain));
}
insides["_id"] = md.freezeToValue();
}
@@ -219,8 +219,8 @@ Value DocumentSourceGroup::serialize(bool explain) const {
const size_t n = vFieldName.size();
for (size_t i = 0; i < n; ++i) {
intrusive_ptr<Accumulator> accum = vpAccumulatorFactory[i](pExpCtx);
- insides[vFieldName[i]] =
- Value(DOC(accum->getOpName() << vpExpression[i]->serialize(explain)));
+ insides[vFieldName[i]] = Value(
+ DOC(accum->getOpName() << vpExpression[i]->serialize(static_cast<bool>(explain))));
}
if (_doingMerge) {
diff --git a/src/mongo/db/pipeline/document_source_group.h b/src/mongo/db/pipeline/document_source_group.h
index e79ce631817..86b973b9e04 100644
--- a/src/mongo/db/pipeline/document_source_group.h
+++ b/src/mongo/db/pipeline/document_source_group.h
@@ -48,7 +48,7 @@ public:
// Virtuals from DocumentSource.
boost::intrusive_ptr<DocumentSource> optimize() final;
GetDepsReturn getDependencies(DepsTracker* deps) const final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
GetNextResult getNext() final;
void dispose() final;
const char* getSourceName() const final;
diff --git a/src/mongo/db/pipeline/document_source_index_stats.cpp b/src/mongo/db/pipeline/document_source_index_stats.cpp
index 3256f8e5211..863c37119c3 100644
--- a/src/mongo/db/pipeline/document_source_index_stats.cpp
+++ b/src/mongo/db/pipeline/document_source_index_stats.cpp
@@ -81,7 +81,8 @@ intrusive_ptr<DocumentSource> DocumentSourceIndexStats::createFromBson(
return new DocumentSourceIndexStats(pExpCtx);
}
-Value DocumentSourceIndexStats::serialize(bool explain) const {
+Value DocumentSourceIndexStats::serialize(
+ boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(DOC(getSourceName() << Document()));
}
}
diff --git a/src/mongo/db/pipeline/document_source_index_stats.h b/src/mongo/db/pipeline/document_source_index_stats.h
index 1c1bc521aeb..26beb7edb8b 100644
--- a/src/mongo/db/pipeline/document_source_index_stats.h
+++ b/src/mongo/db/pipeline/document_source_index_stats.h
@@ -42,7 +42,7 @@ public:
// virtuals from DocumentSource
GetNextResult getNext() final;
const char* getSourceName() const final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
virtual bool isValidInitialSource() const final {
return true;
diff --git a/src/mongo/db/pipeline/document_source_limit.cpp b/src/mongo/db/pipeline/document_source_limit.cpp
index 797c6ed6652..704ba907165 100644
--- a/src/mongo/db/pipeline/document_source_limit.cpp
+++ b/src/mongo/db/pipeline/document_source_limit.cpp
@@ -85,7 +85,7 @@ DocumentSource::GetNextResult DocumentSourceLimit::getNext() {
return nextInput;
}
-Value DocumentSourceLimit::serialize(bool explain) const {
+Value DocumentSourceLimit::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(Document{{getSourceName(), _limit}});
}
diff --git a/src/mongo/db/pipeline/document_source_limit.h b/src/mongo/db/pipeline/document_source_limit.h
index 9c04213045d..c6660152d66 100644
--- a/src/mongo/db/pipeline/document_source_limit.h
+++ b/src/mongo/db/pipeline/document_source_limit.h
@@ -47,7 +47,7 @@ public:
*/
Pipeline::SourceContainer::iterator doOptimizeAt(Pipeline::SourceContainer::iterator itr,
Pipeline::SourceContainer* container) final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
GetDepsReturn getDependencies(DepsTracker* deps) const final {
return SEE_NEXT; // This doesn't affect needed fields
diff --git a/src/mongo/db/pipeline/document_source_lookup.cpp b/src/mongo/db/pipeline/document_source_lookup.cpp
index f173224f202..a474baefe6a 100644
--- a/src/mongo/db/pipeline/document_source_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup.cpp
@@ -414,7 +414,8 @@ DocumentSource::GetNextResult DocumentSourceLookUp::unwindResult() {
return output.freeze();
}
-void DocumentSourceLookUp::serializeToArray(std::vector<Value>& array, bool explain) const {
+void DocumentSourceLookUp::serializeToArray(
+ std::vector<Value>& array, boost::optional<ExplainOptions::Verbosity> explain) const {
MutableDocument output(DOC(
getSourceName() << DOC("from" << _fromNs.coll() << "as" << _as.fullPath() << "localField"
<< _localField.fullPath()
diff --git a/src/mongo/db/pipeline/document_source_lookup.h b/src/mongo/db/pipeline/document_source_lookup.h
index e179abfe73e..febdedc0472 100644
--- a/src/mongo/db/pipeline/document_source_lookup.h
+++ b/src/mongo/db/pipeline/document_source_lookup.h
@@ -49,7 +49,9 @@ public:
GetNextResult getNext() final;
const char* getSourceName() const final;
- void serializeToArray(std::vector<Value>& array, bool explain = false) const final;
+ void serializeToArray(
+ std::vector<Value>& array,
+ boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
/**
* Returns the 'as' path, and possibly fields modified by an absorbed $unwind.
@@ -120,7 +122,7 @@ private:
std::string foreignField,
const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- Value serialize(bool explain = false) const final {
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final {
// Should not be called; use serializeToArray instead.
MONGO_UNREACHABLE;
}
diff --git a/src/mongo/db/pipeline/document_source_match.cpp b/src/mongo/db/pipeline/document_source_match.cpp
index 3107e8b7677..e72377ca04e 100644
--- a/src/mongo/db/pipeline/document_source_match.cpp
+++ b/src/mongo/db/pipeline/document_source_match.cpp
@@ -57,7 +57,7 @@ const char* DocumentSourceMatch::getSourceName() const {
return "$match";
}
-Value DocumentSourceMatch::serialize(bool explain) const {
+Value DocumentSourceMatch::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(DOC(getSourceName() << Document(getQuery())));
}
diff --git a/src/mongo/db/pipeline/document_source_match.h b/src/mongo/db/pipeline/document_source_match.h
index badfe1ed8b9..faeda447cb3 100644
--- a/src/mongo/db/pipeline/document_source_match.h
+++ b/src/mongo/db/pipeline/document_source_match.h
@@ -42,7 +42,7 @@ public:
// virtuals from DocumentSource
GetNextResult getNext() final;
const char* getSourceName() const final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
boost::intrusive_ptr<DocumentSource> optimize() final;
BSONObjSet getOutputSorts() final {
return pSource ? pSource->getOutputSorts()
diff --git a/src/mongo/db/pipeline/document_source_merge_cursors.cpp b/src/mongo/db/pipeline/document_source_merge_cursors.cpp
index b03db6560df..a9f06ef6aea 100644
--- a/src/mongo/db/pipeline/document_source_merge_cursors.cpp
+++ b/src/mongo/db/pipeline/document_source_merge_cursors.cpp
@@ -86,7 +86,8 @@ intrusive_ptr<DocumentSource> DocumentSourceMergeCursors::createFromBson(
return new DocumentSourceMergeCursors(std::move(cursorDescriptors), pExpCtx);
}
-Value DocumentSourceMergeCursors::serialize(bool explain) const {
+Value DocumentSourceMergeCursors::serialize(
+ boost::optional<ExplainOptions::Verbosity> explain) const {
vector<Value> cursors;
for (size_t i = 0; i < _cursorDescriptors.size(); i++) {
cursors.push_back(
diff --git a/src/mongo/db/pipeline/document_source_merge_cursors.h b/src/mongo/db/pipeline/document_source_merge_cursors.h
index ad853a9f755..0842ca89bdd 100644
--- a/src/mongo/db/pipeline/document_source_merge_cursors.h
+++ b/src/mongo/db/pipeline/document_source_merge_cursors.h
@@ -52,7 +52,7 @@ public:
GetNextResult getNext() final;
const char* getSourceName() const final;
void dispose() final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
bool isValidInitialSource() const final {
return true;
}
diff --git a/src/mongo/db/pipeline/document_source_mock.cpp b/src/mongo/db/pipeline/document_source_mock.cpp
index 3918bbd5ad7..9f1aab2977b 100644
--- a/src/mongo/db/pipeline/document_source_mock.cpp
+++ b/src/mongo/db/pipeline/document_source_mock.cpp
@@ -54,7 +54,7 @@ const char* DocumentSourceMock::getSourceName() const {
return "mock";
}
-Value DocumentSourceMock::serialize(bool explain) const {
+Value DocumentSourceMock::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(Document{{getSourceName(), Document()}});
}
diff --git a/src/mongo/db/pipeline/document_source_mock.h b/src/mongo/db/pipeline/document_source_mock.h
index 7b966f5c418..74973f3fd27 100644
--- a/src/mongo/db/pipeline/document_source_mock.h
+++ b/src/mongo/db/pipeline/document_source_mock.h
@@ -46,7 +46,8 @@ public:
GetNextResult getNext() override;
const char* getSourceName() const override;
- Value serialize(bool explain = false) const override;
+ Value serialize(
+ boost::optional<ExplainOptions::Verbosity> explain = boost::none) const override;
void dispose() override;
bool isValidInitialSource() const override {
return true;
diff --git a/src/mongo/db/pipeline/document_source_out.cpp b/src/mongo/db/pipeline/document_source_out.cpp
index 7dab6365405..0bccc03643d 100644
--- a/src/mongo/db/pipeline/document_source_out.cpp
+++ b/src/mongo/db/pipeline/document_source_out.cpp
@@ -220,7 +220,7 @@ intrusive_ptr<DocumentSource> DocumentSourceOut::createFromBson(
return new DocumentSourceOut(outputNs, pExpCtx);
}
-Value DocumentSourceOut::serialize(bool explain) const {
+Value DocumentSourceOut::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
massert(
17000, "$out shouldn't have different db than input", _outputNs.db() == pExpCtx->ns.db());
diff --git a/src/mongo/db/pipeline/document_source_out.h b/src/mongo/db/pipeline/document_source_out.h
index 4e8a678404a..5fca80dfba1 100644
--- a/src/mongo/db/pipeline/document_source_out.h
+++ b/src/mongo/db/pipeline/document_source_out.h
@@ -41,7 +41,7 @@ public:
~DocumentSourceOut() final;
GetNextResult getNext() final;
const char* getSourceName() const final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
GetDepsReturn getDependencies(DepsTracker* deps) const final;
bool needsPrimaryShard() const final {
return true;
diff --git a/src/mongo/db/pipeline/document_source_redact.cpp b/src/mongo/db/pipeline/document_source_redact.cpp
index 0096017a2f8..ae79729f2e0 100644
--- a/src/mongo/db/pipeline/document_source_redact.cpp
+++ b/src/mongo/db/pipeline/document_source_redact.cpp
@@ -163,8 +163,8 @@ intrusive_ptr<DocumentSource> DocumentSourceRedact::optimize() {
return this;
}
-Value DocumentSourceRedact::serialize(bool explain) const {
- return Value(DOC(getSourceName() << _expression.get()->serialize(explain)));
+Value DocumentSourceRedact::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
+ return Value(DOC(getSourceName() << _expression.get()->serialize(static_cast<bool>(explain))));
}
intrusive_ptr<DocumentSource> DocumentSourceRedact::createFromBson(
diff --git a/src/mongo/db/pipeline/document_source_redact.h b/src/mongo/db/pipeline/document_source_redact.h
index b9cf6242d42..8177de12eeb 100644
--- a/src/mongo/db/pipeline/document_source_redact.h
+++ b/src/mongo/db/pipeline/document_source_redact.h
@@ -50,7 +50,7 @@ public:
static boost::intrusive_ptr<DocumentSource> createFromBson(
BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& expCtx);
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
private:
DocumentSourceRedact(const boost::intrusive_ptr<ExpressionContext>& expCtx,
diff --git a/src/mongo/db/pipeline/document_source_replace_root.cpp b/src/mongo/db/pipeline/document_source_replace_root.cpp
index 1ae82d5cf3a..521cf748784 100644
--- a/src/mongo/db/pipeline/document_source_replace_root.cpp
+++ b/src/mongo/db/pipeline/document_source_replace_root.cpp
@@ -76,8 +76,8 @@ public:
_newRoot->optimize();
}
- Document serialize(bool explain) const final {
- return Document{{"newRoot", _newRoot->serialize(explain)}};
+ Document serialize(boost::optional<ExplainOptions::Verbosity> explain) const final {
+ return Document{{"newRoot", _newRoot->serialize(static_cast<bool>(explain))}};
}
DocumentSource::GetDepsReturn addDependencies(DepsTracker* deps) const final {
diff --git a/src/mongo/db/pipeline/document_source_sample.cpp b/src/mongo/db/pipeline/document_source_sample.cpp
index c7d98db7260..5117e5c516a 100644
--- a/src/mongo/db/pipeline/document_source_sample.cpp
+++ b/src/mongo/db/pipeline/document_source_sample.cpp
@@ -83,7 +83,7 @@ DocumentSource::GetNextResult DocumentSourceSample::getNext() {
return _sortStage->getNext();
}
-Value DocumentSourceSample::serialize(bool explain) const {
+Value DocumentSourceSample::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(DOC(getSourceName() << DOC("size" << _size)));
}
diff --git a/src/mongo/db/pipeline/document_source_sample.h b/src/mongo/db/pipeline/document_source_sample.h
index 28f1dd97b05..ec88c0737a8 100644
--- a/src/mongo/db/pipeline/document_source_sample.h
+++ b/src/mongo/db/pipeline/document_source_sample.h
@@ -37,7 +37,7 @@ class DocumentSourceSample final : public DocumentSource, public SplittableDocum
public:
GetNextResult getNext() final;
const char* getSourceName() const final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
GetDepsReturn getDependencies(DepsTracker* deps) const final {
return SEE_NEXT;
diff --git a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
index 9fc3b5bf105..ff6c4e6ec16 100644
--- a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
@@ -138,7 +138,8 @@ DocumentSource::GetNextResult DocumentSourceSampleFromRandomCursor::getNextNonDu
"sporadic failure, please try again.");
}
-Value DocumentSourceSampleFromRandomCursor::serialize(bool explain) const {
+Value DocumentSourceSampleFromRandomCursor::serialize(
+ boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(DOC(getSourceName() << DOC("size" << _size)));
}
diff --git a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.h b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.h
index 0d0ac39ca49..19b7106b03d 100644
--- a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.h
+++ b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.h
@@ -41,7 +41,7 @@ class DocumentSourceSampleFromRandomCursor final : public DocumentSource {
public:
GetNextResult getNext() final;
const char* getSourceName() const final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
GetDepsReturn getDependencies(DepsTracker* deps) const final;
static boost::intrusive_ptr<DocumentSourceSampleFromRandomCursor> create(
diff --git a/src/mongo/db/pipeline/document_source_sample_test.cpp b/src/mongo/db/pipeline/document_source_sample_test.cpp
index 30a59baf22b..528676cb294 100644
--- a/src/mongo/db/pipeline/document_source_sample_test.cpp
+++ b/src/mongo/db/pipeline/document_source_sample_test.cpp
@@ -140,7 +140,7 @@ private:
* created with.
*/
void checkBsonRepresentation(const BSONObj& spec) {
- Value serialized = static_cast<DocumentSourceSample*>(sample())->serialize(false);
+ Value serialized = static_cast<DocumentSourceSample*>(sample())->serialize();
auto generatedSpec = serialized.getDocument().toBson();
ASSERT_BSONOBJ_EQ(spec, generatedSpec);
}
diff --git a/src/mongo/db/pipeline/document_source_single_document_transformation.cpp b/src/mongo/db/pipeline/document_source_single_document_transformation.cpp
index 35204d272a4..5fcd0ba5400 100644
--- a/src/mongo/db/pipeline/document_source_single_document_transformation.cpp
+++ b/src/mongo/db/pipeline/document_source_single_document_transformation.cpp
@@ -74,7 +74,8 @@ void DocumentSourceSingleDocumentTransformation::dispose() {
_parsedTransform.reset();
}
-Value DocumentSourceSingleDocumentTransformation::serialize(bool explain) const {
+Value DocumentSourceSingleDocumentTransformation::serialize(
+ boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(Document{{getSourceName(), _parsedTransform->serialize(explain)}});
}
diff --git a/src/mongo/db/pipeline/document_source_single_document_transformation.h b/src/mongo/db/pipeline/document_source_single_document_transformation.h
index 1251cb454a4..86f4607513c 100644
--- a/src/mongo/db/pipeline/document_source_single_document_transformation.h
+++ b/src/mongo/db/pipeline/document_source_single_document_transformation.h
@@ -55,7 +55,7 @@ public:
virtual ~TransformerInterface() = default;
virtual Document applyTransformation(Document input) = 0;
virtual void optimize() = 0;
- virtual Document serialize(bool explain) const = 0;
+ virtual Document serialize(boost::optional<ExplainOptions::Verbosity> explain) const = 0;
virtual DocumentSource::GetDepsReturn addDependencies(DepsTracker* deps) const = 0;
virtual GetModPathsReturn getModifiedPaths() const = 0;
};
@@ -70,7 +70,7 @@ public:
GetNextResult getNext() final;
boost::intrusive_ptr<DocumentSource> optimize() final;
void dispose() final;
- Value serialize(bool explain) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
Pipeline::SourceContainer::iterator doOptimizeAt(Pipeline::SourceContainer::iterator itr,
Pipeline::SourceContainer* container) final;
DocumentSource::GetDepsReturn getDependencies(DepsTracker* deps) const final;
diff --git a/src/mongo/db/pipeline/document_source_skip.cpp b/src/mongo/db/pipeline/document_source_skip.cpp
index 114a2f7d1b4..61125a879f4 100644
--- a/src/mongo/db/pipeline/document_source_skip.cpp
+++ b/src/mongo/db/pipeline/document_source_skip.cpp
@@ -73,7 +73,7 @@ DocumentSource::GetNextResult DocumentSourceSkip::getNext() {
return pSource->getNext();
}
-Value DocumentSourceSkip::serialize(bool explain) const {
+Value DocumentSourceSkip::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(DOC(getSourceName() << _nToSkip));
}
diff --git a/src/mongo/db/pipeline/document_source_skip.h b/src/mongo/db/pipeline/document_source_skip.h
index 92d087e3c75..46cb6a6f3ab 100644
--- a/src/mongo/db/pipeline/document_source_skip.h
+++ b/src/mongo/db/pipeline/document_source_skip.h
@@ -43,7 +43,7 @@ public:
*/
Pipeline::SourceContainer::iterator doOptimizeAt(Pipeline::SourceContainer::iterator itr,
Pipeline::SourceContainer* container) final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
boost::intrusive_ptr<DocumentSource> optimize() final;
BSONObjSet getOutputSorts() final {
return pSource ? pSource->getOutputSorts()
diff --git a/src/mongo/db/pipeline/document_source_sort.cpp b/src/mongo/db/pipeline/document_source_sort.cpp
index 6554a892016..b32b94760d9 100644
--- a/src/mongo/db/pipeline/document_source_sort.cpp
+++ b/src/mongo/db/pipeline/document_source_sort.cpp
@@ -79,16 +79,17 @@ DocumentSource::GetNextResult DocumentSourceSort::getNext() {
return _output->next().second;
}
-void DocumentSourceSort::serializeToArray(vector<Value>& array, bool explain) const {
+void DocumentSourceSort::serializeToArray(
+ std::vector<Value>& array, boost::optional<ExplainOptions::Verbosity> explain) const {
if (explain) { // always one Value for combined $sort + $limit
- array.push_back(
- Value(DOC(getSourceName()
- << DOC("sortKey" << serializeSortKey(explain) << "mergePresorted"
- << (_mergingPresorted ? Value(true) : Value())
- << "limit"
- << (limitSrc ? Value(limitSrc->getLimit()) : Value())))));
+ array.push_back(Value(
+ DOC(getSourceName() << DOC(
+ "sortKey" << serializeSortKey(static_cast<bool>(explain)) << "mergePresorted"
+ << (_mergingPresorted ? Value(true) : Value())
+ << "limit"
+ << (limitSrc ? Value(limitSrc->getLimit()) : Value())))));
} else { // one Value for $sort and maybe a Value for $limit
- MutableDocument inner(serializeSortKey(explain));
+ MutableDocument inner(serializeSortKey(static_cast<bool>(explain)));
if (_mergingPresorted)
inner["$mergePresorted"] = Value(true);
array.push_back(Value(DOC(getSourceName() << inner.freeze())));
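In explain mode the stage reports its absorbed $limit inside one combined value, while normal serialization keeps $sort and $limit as separate, round-trippable pipeline entries. A simplified sketch of that branching, using stand-in string values rather than the real Value/Document machinery:

    #include <boost/optional.hpp>
    #include <string>
    #include <vector>

    enum class Verbosity { kQueryPlanner, kExecStats, kExecAllPlans };

    void serializeSortToArray(std::vector<std::string>& array,
                              bool hasLimit,
                              boost::optional<Verbosity> explain = boost::none) {
        if (explain) {
            // One combined value carrying the sort key and the absorbed limit.
            array.push_back("{$sort: {sortKey: {...}, limit: <n>}}");
        } else {
            array.push_back("{$sort: {...}}");
            if (hasLimit)
                array.push_back("{$limit: <n>}");  // separate stage, reparseable
        }
    }

    int main() {
        std::vector<std::string> ops;
        serializeSortToArray(ops, true, Verbosity::kQueryPlanner);  // one entry
        serializeSortToArray(ops, true);                            // two entries
        return 0;
    }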
diff --git a/src/mongo/db/pipeline/document_source_sort.h b/src/mongo/db/pipeline/document_source_sort.h
index d9266011a55..52d2ff2b687 100644
--- a/src/mongo/db/pipeline/document_source_sort.h
+++ b/src/mongo/db/pipeline/document_source_sort.h
@@ -43,7 +43,9 @@ public:
// virtuals from DocumentSource
GetNextResult getNext() final;
const char* getSourceName() const final;
- void serializeToArray(std::vector<Value>& array, bool explain = false) const final;
+ void serializeToArray(
+ std::vector<Value>& array,
+ boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
GetModPathsReturn getModifiedPaths() const final {
// A $sort does not modify any paths.
@@ -123,7 +125,7 @@ public:
private:
explicit DocumentSourceSort(const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- Value serialize(bool explain = false) const final {
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final {
MONGO_UNREACHABLE; // Should call serializeToArray instead.
}
diff --git a/src/mongo/db/pipeline/document_source_sort_by_count_test.cpp b/src/mongo/db/pipeline/document_source_sort_by_count_test.cpp
index 82e4aa6fb56..3e0a007c59b 100644
--- a/src/mongo/db/pipeline/document_source_sort_by_count_test.cpp
+++ b/src/mongo/db/pipeline/document_source_sort_by_count_test.cpp
@@ -68,7 +68,7 @@ public:
// Serialize the DocumentSourceGroup and DocumentSourceSort from $sortByCount so that we can
// check the explain output to make sure $group and $sort have the correct fields.
- const bool explain = true;
+ const auto explain = ExplainOptions::Verbosity::kQueryPlanner;
vector<Value> explainedStages;
groupStage->serializeToArray(explainedStages, explain);
sortStage->serializeToArray(explainedStages, explain);
diff --git a/src/mongo/db/pipeline/document_source_tee_consumer.cpp b/src/mongo/db/pipeline/document_source_tee_consumer.cpp
index 052bf7bad7b..c620fba222a 100644
--- a/src/mongo/db/pipeline/document_source_tee_consumer.cpp
+++ b/src/mongo/db/pipeline/document_source_tee_consumer.cpp
@@ -62,7 +62,8 @@ void DocumentSourceTeeConsumer::dispose() {
_bufferSource->dispose(_facetId);
}
-Value DocumentSourceTeeConsumer::serialize(bool explain) const {
+Value DocumentSourceTeeConsumer::serialize(
+ boost::optional<ExplainOptions::Verbosity> explain) const {
// This stage will be inserted into the beginning of a pipeline, but should not show up in the
// explain output.
return Value();
diff --git a/src/mongo/db/pipeline/document_source_tee_consumer.h b/src/mongo/db/pipeline/document_source_tee_consumer.h
index bfdbfbda1cb..cebeaee9a2e 100644
--- a/src/mongo/db/pipeline/document_source_tee_consumer.h
+++ b/src/mongo/db/pipeline/document_source_tee_consumer.h
@@ -63,7 +63,7 @@ public:
return GetDepsReturn::SEE_NEXT;
}
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
private:
DocumentSourceTeeConsumer(const boost::intrusive_ptr<ExpressionContext>& expCtx,
diff --git a/src/mongo/db/pipeline/document_source_unwind.cpp b/src/mongo/db/pipeline/document_source_unwind.cpp
index 683069597e0..079439bd842 100644
--- a/src/mongo/db/pipeline/document_source_unwind.cpp
+++ b/src/mongo/db/pipeline/document_source_unwind.cpp
@@ -239,7 +239,7 @@ DocumentSource::GetModPathsReturn DocumentSourceUnwind::getModifiedPaths() const
return {GetModPathsReturn::Type::kFiniteSet, std::move(modifiedFields)};
}
-Value DocumentSourceUnwind::serialize(bool explain) const {
+Value DocumentSourceUnwind::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(DOC(getSourceName() << DOC(
"path" << _unwindPath.fullPathWithPrefix() << "preserveNullAndEmptyArrays"
<< (_preserveNullAndEmptyArrays ? Value(true) : Value())
diff --git a/src/mongo/db/pipeline/document_source_unwind.h b/src/mongo/db/pipeline/document_source_unwind.h
index 9a9c466d037..53fbd33d083 100644
--- a/src/mongo/db/pipeline/document_source_unwind.h
+++ b/src/mongo/db/pipeline/document_source_unwind.h
@@ -38,7 +38,7 @@ public:
// virtuals from DocumentSource
GetNextResult getNext() final;
const char* getSourceName() const final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
BSONObjSet getOutputSorts() final;
/**
diff --git a/src/mongo/db/pipeline/expression_context.cpp b/src/mongo/db/pipeline/expression_context.cpp
index 5bccef201ae..2b149b1af9c 100644
--- a/src/mongo/db/pipeline/expression_context.cpp
+++ b/src/mongo/db/pipeline/expression_context.cpp
@@ -43,7 +43,7 @@ ExpressionContext::ExpressionContext(OperationContext* opCtx,
const AggregationRequest& request,
std::unique_ptr<CollatorInterface> collator,
StringMap<ResolvedNamespace> resolvedNamespaces)
- : isExplain(request.isExplain()),
+ : explain(request.getExplain()),
inShard(request.isFromRouter()),
extSortAllowed(request.shouldAllowDiskUse()),
bypassDocumentValidation(request.shouldBypassDocumentValidation()),
@@ -74,7 +74,7 @@ void ExpressionContext::setCollator(std::unique_ptr<CollatorInterface> coll) {
intrusive_ptr<ExpressionContext> ExpressionContext::copyWith(NamespaceString ns) const {
intrusive_ptr<ExpressionContext> expCtx = new ExpressionContext();
- expCtx->isExplain = isExplain;
+ expCtx->explain = explain;
expCtx->inShard = inShard;
expCtx->inRouter = inRouter;
expCtx->extSortAllowed = extSortAllowed;
diff --git a/src/mongo/db/pipeline/expression_context.h b/src/mongo/db/pipeline/expression_context.h
index af6d7d199c9..4d4c0b75f3a 100644
--- a/src/mongo/db/pipeline/expression_context.h
+++ b/src/mongo/db/pipeline/expression_context.h
@@ -29,6 +29,7 @@
#pragma once
#include <boost/intrusive_ptr.hpp>
+#include <boost/optional.hpp>
#include <memory>
#include <string>
#include <vector>
@@ -40,6 +41,7 @@
#include "mongo/db/pipeline/document_comparator.h"
#include "mongo/db/pipeline/value_comparator.h"
#include "mongo/db/query/collation/collator_interface.h"
+#include "mongo/db/query/explain_options.h"
#include "mongo/util/intrusive_counter.h"
#include "mongo/util/string_map.h"
@@ -98,7 +100,9 @@ public:
return it->second;
};
- bool isExplain = false;
+ // The explain verbosity requested by the user, or boost::none if no explain was requested.
+ boost::optional<ExplainOptions::Verbosity> explain;
+
bool inShard = false;
bool inRouter = false;
bool extSortAllowed = false;
diff --git a/src/mongo/db/pipeline/parsed_add_fields.h b/src/mongo/db/pipeline/parsed_add_fields.h
index 2213191f6c8..8cc3d24f913 100644
--- a/src/mongo/db/pipeline/parsed_add_fields.h
+++ b/src/mongo/db/pipeline/parsed_add_fields.h
@@ -75,7 +75,7 @@ public:
_variables = stdx::make_unique<Variables>(idGenerator.getIdCount());
}
- Document serialize(bool explain = false) const final {
+ Document serialize(boost::optional<ExplainOptions::Verbosity> explain) const final {
MutableDocument output;
_root->serialize(&output, explain);
return output.freeze();
diff --git a/src/mongo/db/pipeline/parsed_add_fields_test.cpp b/src/mongo/db/pipeline/parsed_add_fields_test.cpp
index d7b68afca1d..c9adc2b1a36 100644
--- a/src/mongo/db/pipeline/parsed_add_fields_test.cpp
+++ b/src/mongo/db/pipeline/parsed_add_fields_test.cpp
@@ -176,8 +176,13 @@ TEST(ParsedAddFieldsSerialize, SerializesToCorrectForm) {
fromjson("{a: {$add: [\"$a\", {$const: 2}]}, b: {d: {$const: 3}}, x: {y: {$const: 4}}}"));
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
// Verify that serialize treats the _id field as any other field: included when explicitly included.
@@ -190,8 +195,13 @@ TEST(ParsedAddFieldsSerialize, AddsIdToSerializeWhenExplicitlyIncluded) {
auto expectedSerialization = Document(fromjson("{_id: {$const: false}}"));
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
// Verify that serialize treats the _id field as any other field: excluded when not explicitly
@@ -207,8 +217,13 @@ TEST(ParsedAddFieldsSerialize, OmitsIdFromSerializeWhenNotIncluded) {
auto expectedSerialization = Document(fromjson("{a: {$const: true}}"));
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
// Verify that the $addFields stage optimizes expressions into simpler forms when possible.
@@ -220,8 +235,13 @@ TEST(ParsedAddFieldsOptimize, OptimizesTopLevelExpressions) {
auto expectedSerialization = Document{{"a", Document{{"$const", 3}}}};
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
// Verify that the $addFields stage optimizes expressions even when they are nested.
@@ -233,8 +253,13 @@ TEST(ParsedAddFieldsOptimize, ShouldOptimizeNestedExpressions) {
auto expectedSerialization = Document{{"a", Document{{"b", Document{{"$const", 3}}}}}};
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
//
diff --git a/src/mongo/db/pipeline/parsed_exclusion_projection.cpp b/src/mongo/db/pipeline/parsed_exclusion_projection.cpp
index 0f800f9a112..ae3fa4ba2dc 100644
--- a/src/mongo/db/pipeline/parsed_exclusion_projection.cpp
+++ b/src/mongo/db/pipeline/parsed_exclusion_projection.cpp
@@ -135,7 +135,8 @@ void ExclusionNode::addModifiedPaths(std::set<std::string>* modifiedPaths) const
// ParsedExclusionProjection.
//
-Document ParsedExclusionProjection::serialize(bool explain) const {
+Document ParsedExclusionProjection::serialize(
+ boost::optional<ExplainOptions::Verbosity> explain) const {
return _root->serialize();
}
diff --git a/src/mongo/db/pipeline/parsed_exclusion_projection.h b/src/mongo/db/pipeline/parsed_exclusion_projection.h
index d0988d2d2cb..74314d97439 100644
--- a/src/mongo/db/pipeline/parsed_exclusion_projection.h
+++ b/src/mongo/db/pipeline/parsed_exclusion_projection.h
@@ -103,7 +103,7 @@ public:
return ProjectionType::kExclusion;
}
- Document serialize(bool explain = false) const final;
+ Document serialize(boost::optional<ExplainOptions::Verbosity> explain) const final;
/**
* Parses the projection specification given by 'spec', populating internal data structures.
diff --git a/src/mongo/db/pipeline/parsed_exclusion_projection_test.cpp b/src/mongo/db/pipeline/parsed_exclusion_projection_test.cpp
index 76b458260d0..f0e860067f7 100644
--- a/src/mongo/db/pipeline/parsed_exclusion_projection_test.cpp
+++ b/src/mongo/db/pipeline/parsed_exclusion_projection_test.cpp
@@ -87,7 +87,7 @@ TEST(ExclusionProjection, ShouldSerializeToEquivalentProjection) {
// Converts numbers to bools, converts dotted paths to nested documents. Note order of excluded
// fields is subject to change.
- auto serialization = exclusion.serialize();
+ auto serialization = exclusion.serialize(boost::none);
ASSERT_EQ(serialization.size(), 4UL);
ASSERT_VALUE_EQ(serialization["a"], Value(false));
ASSERT_VALUE_EQ(serialization["_id"], Value(false));
diff --git a/src/mongo/db/pipeline/parsed_inclusion_projection.cpp b/src/mongo/db/pipeline/parsed_inclusion_projection.cpp
index d294761852f..da6b9e72ce9 100644
--- a/src/mongo/db/pipeline/parsed_inclusion_projection.cpp
+++ b/src/mongo/db/pipeline/parsed_inclusion_projection.cpp
@@ -54,7 +54,8 @@ void InclusionNode::optimize() {
}
}
-void InclusionNode::serialize(MutableDocument* output, bool explain) const {
+void InclusionNode::serialize(MutableDocument* output,
+ boost::optional<ExplainOptions::Verbosity> explain) const {
// Always put "_id" first if it was included (implicitly or explicitly).
if (_inclusions.find("_id") != _inclusions.end()) {
output->addField("_id", Value(true));
@@ -77,7 +78,7 @@ void InclusionNode::serialize(MutableDocument* output, bool explain) const {
} else {
auto expressionIt = _expressions.find(field);
invariant(expressionIt != _expressions.end());
- output->addField(field, expressionIt->second->serialize(explain));
+ output->addField(field, expressionIt->second->serialize(static_cast<bool>(explain)));
}
}
}
diff --git a/src/mongo/db/pipeline/parsed_inclusion_projection.h b/src/mongo/db/pipeline/parsed_inclusion_projection.h
index ec4f8d0466b..1c0a667b025 100644
--- a/src/mongo/db/pipeline/parsed_inclusion_projection.h
+++ b/src/mongo/db/pipeline/parsed_inclusion_projection.h
@@ -62,7 +62,8 @@ public:
/**
* Serialize this projection.
*/
- void serialize(MutableDocument* output, bool explain) const;
+ void serialize(MutableDocument* output,
+ boost::optional<ExplainOptions::Verbosity> explain) const;
/**
* Adds dependencies of any fields that need to be included, or that are used by any
@@ -197,7 +198,7 @@ public:
/**
* Serialize the projection.
*/
- Document serialize(bool explain = false) const final {
+ Document serialize(boost::optional<ExplainOptions::Verbosity> explain) const final {
MutableDocument output;
if (_idExcluded) {
output.addField("_id", Value(false));
diff --git a/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp b/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp
index c2610418a40..a0e7398ef6f 100644
--- a/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp
+++ b/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp
@@ -141,8 +141,13 @@ TEST(InclusionProjection, ShouldSerializeToEquivalentProjection) {
"{_id: true, a: {$add: [\"$a\", {$const: 2}]}, b: {d: true}, x: {y: {$const: 4}}}"));
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
TEST(InclusionProjection, ShouldSerializeExplicitExclusionOfId) {
@@ -154,8 +159,13 @@ TEST(InclusionProjection, ShouldSerializeExplicitExclusionOfId) {
auto expectedSerialization = Document{{"_id", false}, {"a", true}};
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
@@ -169,8 +179,13 @@ TEST(InclusionProjection, ShouldOptimizeTopLevelExpressions) {
auto expectedSerialization = Document{{"_id", true}, {"a", Document{{"$const", 3}}}};
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
TEST(InclusionProjection, ShouldOptimizeNestedExpressions) {
@@ -184,8 +199,13 @@ TEST(InclusionProjection, ShouldOptimizeNestedExpressions) {
Document{{"_id", true}, {"a", Document{{"b", Document{{"$const", 3}}}}}};
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
TEST(InclusionProjection, ShouldReportThatAllExceptIncludedFieldsAreModified) {
diff --git a/src/mongo/db/pipeline/pipeline.cpp b/src/mongo/db/pipeline/pipeline.cpp
index 5c13f710b40..027f4527aa0 100644
--- a/src/mongo/db/pipeline/pipeline.cpp
+++ b/src/mongo/db/pipeline/pipeline.cpp
@@ -320,10 +320,10 @@ boost::optional<Document> Pipeline::getNext() {
: boost::optional<Document>{nextResult.releaseDocument()};
}
-vector<Value> Pipeline::writeExplainOps() const {
+vector<Value> Pipeline::writeExplainOps(ExplainOptions::Verbosity verbosity) const {
vector<Value> array;
for (SourceContainer::const_iterator it = _sources.begin(); it != _sources.end(); ++it) {
- (*it)->serializeToArray(array, /*explain=*/true);
+ (*it)->serializeToArray(array, verbosity);
}
return array;
}
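writeExplainOps now threads the caller's verbosity to every stage instead of hard-coding explain=true. A mock of the changed call shape, with stand-in stage and value types rather than MongoDB's:

    #include <string>
    #include <vector>

    enum class Verbosity { kQueryPlanner, kExecStats, kExecAllPlans };

    struct Stage {
        void serializeToArray(std::vector<std::string>& array, Verbosity v) const {
            array.push_back(v >= Verbosity::kExecStats ? "{<stage>, nReturned: 0}"
                                                       : "{<stage>}");
        }
    };

    std::vector<std::string> writeExplainOps(const std::vector<Stage>& sources,
                                             Verbosity v) {
        std::vector<std::string> array;
        for (const auto& s : sources)
            s.serializeToArray(array, v);  // verbosity chosen by the caller
        return array;
    }

    int main() {
        std::vector<Stage> pipeline(2);
        auto ops = writeExplainOps(pipeline, Verbosity::kQueryPlanner);
        return 0;
    }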
diff --git a/src/mongo/db/pipeline/pipeline.h b/src/mongo/db/pipeline/pipeline.h
index 3aceb6c28f0..4f15289efef 100644
--- a/src/mongo/db/pipeline/pipeline.h
+++ b/src/mongo/db/pipeline/pipeline.h
@@ -36,6 +36,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/pipeline/dependencies.h"
#include "mongo/db/pipeline/value.h"
+#include "mongo/db/query/explain_options.h"
#include "mongo/util/intrusive_counter.h"
#include "mongo/util/timer.h"
@@ -148,10 +149,10 @@ public:
boost::optional<Document> getNext();
/**
- * Write the pipeline's operators to a std::vector<Value>, with the
- * explain flag true (for DocumentSource::serializeToArray()).
+ * Write the pipeline's operators to a std::vector<Value>, providing the level of detail
+ * specified by 'verbosity'.
*/
- std::vector<Value> writeExplainOps() const;
+ std::vector<Value> writeExplainOps(ExplainOptions::Verbosity verbosity) const;
/**
* Returns the dependencies needed by this pipeline. 'metadataAvailable' should reflect what
diff --git a/src/mongo/db/pipeline/pipeline_test.cpp b/src/mongo/db/pipeline/pipeline_test.cpp
index 098371f5cd7..aed8f8d6889 100644
--- a/src/mongo/db/pipeline/pipeline_test.cpp
+++ b/src/mongo/db/pipeline/pipeline_test.cpp
@@ -97,8 +97,9 @@ public:
auto outputPipe = uassertStatusOK(Pipeline::parse(request.getPipeline(), ctx));
outputPipe->optimizePipeline();
- ASSERT_VALUE_EQ(Value(outputPipe->writeExplainOps()),
- Value(outputPipeExpected["pipeline"]));
+ ASSERT_VALUE_EQ(
+ Value(outputPipe->writeExplainOps(ExplainOptions::Verbosity::kQueryPlanner)),
+ Value(outputPipeExpected["pipeline"]));
ASSERT_VALUE_EQ(Value(outputPipe->serialize()), Value(serializePipeExpected["pipeline"]));
}
@@ -963,8 +964,10 @@ public:
shardPipe = mergePipe->splitForSharded();
ASSERT(shardPipe != nullptr);
- ASSERT_VALUE_EQ(Value(shardPipe->writeExplainOps()), Value(shardPipeExpected["pipeline"]));
- ASSERT_VALUE_EQ(Value(mergePipe->writeExplainOps()), Value(mergePipeExpected["pipeline"]));
+ ASSERT_VALUE_EQ(Value(shardPipe->writeExplainOps(ExplainOptions::Verbosity::kQueryPlanner)),
+ Value(shardPipeExpected["pipeline"]));
+ ASSERT_VALUE_EQ(Value(mergePipe->writeExplainOps(ExplainOptions::Verbosity::kQueryPlanner)),
+ Value(mergePipeExpected["pipeline"]));
}
virtual ~Base() {}
diff --git a/src/mongo/db/query/SConscript b/src/mongo/db/query/SConscript
index 09253fa31e1..0a97fe2d7e9 100644
--- a/src/mongo/db/query/SConscript
+++ b/src/mongo/db/query/SConscript
@@ -96,18 +96,38 @@ env.Library(
source=[
"collation/collator_factory_icu_decoration.cpp",
"find_common.cpp",
- "explain_common.cpp",
"parsed_distinct.cpp",
],
LIBDEPS=[
"$BUILD_DIR/mongo/base",
"$BUILD_DIR/mongo/util/fail_point",
"collation/collator_icu",
+ "explain_options",
"query_planner",
"query_request",
],
)
+env.Library(
+ target="explain_options",
+ source=[
+ "explain_options.cpp",
+ ],
+ LIBDEPS=[
+ "$BUILD_DIR/mongo/base",
+ ],
+)
+
+env.CppUnitTest(
+ target="explain_options_test",
+ source=[
+ "explain_options_test.cpp",
+ ],
+ LIBDEPS=[
+ "explain_options",
+ ],
+)
+
env.CppUnitTest(
target="parsed_distinct_test",
source=[
diff --git a/src/mongo/db/query/count_request.cpp b/src/mongo/db/query/count_request.cpp
index 6abddd36ec1..3451cd7989d 100644
--- a/src/mongo/db/query/count_request.cpp
+++ b/src/mongo/db/query/count_request.cpp
@@ -171,10 +171,6 @@ StatusWith<BSONObj> CountRequest::asAggregationCommand() const {
pipelineBuilder.doneFast();
// Complete the command by appending the other options to count.
- if (_explain) {
- aggregationBuilder.append(kExplainField, _explain);
- }
-
if (_collation) {
aggregationBuilder.append(kCollationField, *_collation);
}
diff --git a/src/mongo/db/query/count_request_test.cpp b/src/mongo/db/query/count_request_test.cpp
index e2a40f87ccf..8508c75ead6 100644
--- a/src/mongo/db/query/count_request_test.cpp
+++ b/src/mongo/db/query/count_request_test.cpp
@@ -271,15 +271,17 @@ TEST(CountRequest, ConvertToAggregationWithQueryAndFilterAndLimit) {
SimpleBSONObjComparator::kInstance.makeEqualTo()));
}
-TEST(CountRequest, ConvertToAggregationWithExplain) {
+TEST(CountRequest, ConvertToAggregationOmitsExplain) {
CountRequest countRequest(testns, BSONObj());
countRequest.setExplain(true);
auto agg = countRequest.asAggregationCommand();
ASSERT_OK(agg);
+ ASSERT_FALSE(agg.getValue().hasField("explain"));
+
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(ar.getValue().isExplain());
+ ASSERT_FALSE(ar.getValue().getExplain());
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 7d83021dbd5..eb43caf5047 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -246,7 +246,7 @@ using mongoutils::str::stream;
// static
void Explain::statsToBSON(const PlanStageStats& stats,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
BSONObjBuilder* bob,
BSONObjBuilder* topLevelBob) {
invariant(bob);
@@ -268,7 +268,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
}
// Some top-level exec stats get pulled out of the root stage.
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("nReturned", stats.common.advanced);
bob->appendNumber("executionTimeMillisEstimate", stats.common.executionTimeMillis);
bob->appendNumber("works", stats.common.works);
@@ -285,7 +285,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
if (STAGE_AND_HASH == stats.stageType) {
AndHashStats* spec = static_cast<AndHashStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("memUsage", spec->memUsage);
bob->appendNumber("memLimit", spec->memLimit);
@@ -299,7 +299,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
} else if (STAGE_AND_SORTED == stats.stageType) {
AndSortedStats* spec = static_cast<AndSortedStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("flagged", spec->flagged);
for (size_t i = 0; i < spec->failedAnd.size(); ++i) {
bob->appendNumber(string(stream() << "failedAnd_" << i), spec->failedAnd[i]);
@@ -308,20 +308,20 @@ void Explain::statsToBSON(const PlanStageStats& stats,
} else if (STAGE_COLLSCAN == stats.stageType) {
CollectionScanStats* spec = static_cast<CollectionScanStats*>(stats.specific.get());
bob->append("direction", spec->direction > 0 ? "forward" : "backward");
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("docsExamined", spec->docsTested);
}
} else if (STAGE_COUNT == stats.stageType) {
CountStats* spec = static_cast<CountStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("nCounted", spec->nCounted);
bob->appendNumber("nSkipped", spec->nSkipped);
}
} else if (STAGE_COUNT_SCAN == stats.stageType) {
CountScanStats* spec = static_cast<CountScanStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("keysExamined", spec->keysExamined);
}
@@ -348,7 +348,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
} else if (STAGE_DELETE == stats.stageType) {
DeleteStats* spec = static_cast<DeleteStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("nWouldDelete", spec->docsDeleted);
bob->appendNumber("nInvalidateSkips", spec->nInvalidateSkips);
}
@@ -376,18 +376,18 @@ void Explain::statsToBSON(const PlanStageStats& stats,
bob->append("indexBounds", spec->indexBounds);
}
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("keysExamined", spec->keysExamined);
}
} else if (STAGE_ENSURE_SORTED == stats.stageType) {
EnsureSortedStats* spec = static_cast<EnsureSortedStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("nDropped", spec->nDropped);
}
} else if (STAGE_FETCH == stats.stageType) {
FetchStats* spec = static_cast<FetchStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("docsExamined", spec->docsExamined);
bob->appendNumber("alreadyHasObj", spec->alreadyHasObj);
}
@@ -398,7 +398,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
bob->append("indexName", spec->indexName);
bob->append("indexVersion", spec->indexVersion);
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
BSONArrayBuilder intervalsBob(bob->subarrayStart("searchIntervals"));
for (vector<IntervalStats>::const_iterator it = spec->intervalStats.begin();
it != spec->intervalStats.end();
@@ -414,12 +414,12 @@ void Explain::statsToBSON(const PlanStageStats& stats,
}
} else if (STAGE_GROUP == stats.stageType) {
GroupStats* spec = static_cast<GroupStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("nGroups", spec->nGroups);
}
} else if (STAGE_IDHACK == stats.stageType) {
IDHackStats* spec = static_cast<IDHackStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("keysExamined", spec->keysExamined);
bob->appendNumber("docsExamined", spec->docsExamined);
}
@@ -447,7 +447,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
bob->append("indexBounds", spec->indexBounds);
}
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("keysExamined", spec->keysExamined);
bob->appendNumber("seeks", spec->seeks);
bob->appendNumber("dupsTested", spec->dupsTested);
@@ -457,7 +457,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
} else if (STAGE_OR == stats.stageType) {
OrStats* spec = static_cast<OrStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("dupsTested", spec->dupsTested);
bob->appendNumber("dupsDropped", spec->dupsDropped);
bob->appendNumber("recordIdsForgotten", spec->recordIdsForgotten);
@@ -471,7 +471,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
} else if (STAGE_SHARDING_FILTER == stats.stageType) {
ShardingFilterStats* spec = static_cast<ShardingFilterStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("chunkSkips", spec->chunkSkips);
}
} else if (STAGE_SKIP == stats.stageType) {
@@ -481,7 +481,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
SortStats* spec = static_cast<SortStats*>(stats.specific.get());
bob->append("sortPattern", spec->sortPattern);
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("memUsage", spec->memUsage);
bob->appendNumber("memLimit", spec->memLimit);
}
@@ -493,7 +493,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
MergeSortStats* spec = static_cast<MergeSortStats*>(stats.specific.get());
bob->append("sortPattern", spec->sortPattern);
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("dupsTested", spec->dupsTested);
bob->appendNumber("dupsDropped", spec->dupsDropped);
}
@@ -507,19 +507,19 @@ void Explain::statsToBSON(const PlanStageStats& stats,
} else if (STAGE_TEXT_MATCH == stats.stageType) {
TextMatchStats* spec = static_cast<TextMatchStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("docsRejected", spec->docsRejected);
}
} else if (STAGE_TEXT_OR == stats.stageType) {
TextOrStats* spec = static_cast<TextOrStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("docsExamined", spec->fetches);
}
} else if (STAGE_UPDATE == stats.stageType) {
UpdateStats* spec = static_cast<UpdateStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("nMatched", spec->nMatched);
bob->appendNumber("nWouldModify", spec->nModified);
bob->appendNumber("nInvalidateSkips", spec->nInvalidateSkips);
@@ -555,7 +555,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
}
// static
-BSONObj Explain::statsToBSON(const PlanStageStats& stats, ExplainCommon::Verbosity verbosity) {
+BSONObj Explain::statsToBSON(const PlanStageStats& stats, ExplainOptions::Verbosity verbosity) {
BSONObjBuilder bob;
statsToBSON(stats, &bob, verbosity);
return bob.obj();
@@ -564,7 +564,7 @@ BSONObj Explain::statsToBSON(const PlanStageStats& stats, ExplainCommon::Verbosi
// static
void Explain::statsToBSON(const PlanStageStats& stats,
BSONObjBuilder* bob,
- ExplainCommon::Verbosity verbosity) {
+ ExplainOptions::Verbosity verbosity) {
statsToBSON(stats, verbosity, bob, bob);
}
@@ -582,7 +582,7 @@ void Explain::getWinningPlanStats(const PlanExecutor* exec, BSONObjBuilder* bob)
mps ? std::move(mps->getStats()->children[mps->bestPlanIdx()])
: std::move(exec->getRootStage()->getStats()));
- statsToBSON(*winningStats, ExplainCommon::EXEC_STATS, bob, bob);
+ statsToBSON(*winningStats, ExplainOptions::Verbosity::kExecStats, bob, bob);
}
// static
@@ -627,14 +627,14 @@ void Explain::generatePlannerInfo(PlanExecutor* exec,
}
BSONObjBuilder winningPlanBob(plannerBob.subobjStart("winningPlan"));
- statsToBSON(*winnerStats, &winningPlanBob, ExplainCommon::QUERY_PLANNER);
+ statsToBSON(*winnerStats, &winningPlanBob, ExplainOptions::Verbosity::kQueryPlanner);
winningPlanBob.doneFast();
// Generate array of rejected plans.
BSONArrayBuilder allPlansBob(plannerBob.subarrayStart("rejectedPlans"));
for (size_t i = 0; i < rejectedStats.size(); i++) {
BSONObjBuilder childBob(allPlansBob.subobjStart());
- statsToBSON(*rejectedStats[i], &childBob, ExplainCommon::QUERY_PLANNER);
+ statsToBSON(*rejectedStats[i], &childBob, ExplainOptions::Verbosity::kQueryPlanner);
}
allPlansBob.doneFast();
@@ -643,7 +643,7 @@ void Explain::generatePlannerInfo(PlanExecutor* exec,
// static
void Explain::generateExecStats(PlanStageStats* stats,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
BSONObjBuilder* out,
boost::optional<long long> totalTimeMillis) {
out->appendNumber("nReturned", stats->common.advanced);
@@ -693,7 +693,7 @@ void Explain::generateServerInfo(BSONObjBuilder* out) {
// static
void Explain::explainStages(PlanExecutor* exec,
const Collection* collection,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
BSONObjBuilder* out) {
//
// Collect plan stats, running the plan if necessary. The stats also give the structure of the
@@ -707,7 +707,7 @@ void Explain::explainStages(PlanExecutor* exec,
// Get stats of the winning plan from the trial period, if the verbosity level is high enough
// and there was a runoff between multiple plans.
unique_ptr<PlanStageStats> winningStatsTrial;
- if (verbosity >= ExplainCommon::EXEC_ALL_PLANS && mps) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecAllPlans && mps) {
winningStatsTrial = std::move(mps->getStats()->children[mps->bestPlanIdx()]);
invariant(winningStatsTrial.get());
}
@@ -726,7 +726,7 @@ void Explain::explainStages(PlanExecutor* exec,
// If we need execution stats, then run the plan in order to gather the stats.
Status executePlanStatus = Status::OK();
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
executePlanStatus = exec->executePlan();
}
@@ -746,11 +746,11 @@ void Explain::explainStages(PlanExecutor* exec,
// Use the stats trees to produce explain BSON.
//
- if (verbosity >= ExplainCommon::QUERY_PLANNER) {
+ if (verbosity >= ExplainOptions::Verbosity::kQueryPlanner) {
generatePlannerInfo(exec, collection, winningStats.get(), allPlansStats, out);
}
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
BSONObjBuilder execBob(out->subobjStart("executionStats"));
// If there is an execution error while running the query, the error is reported under
@@ -768,7 +768,7 @@ void Explain::explainStages(PlanExecutor* exec,
// Also generate exec stats for all plans, if the verbosity level is high enough.
// These stats reflect what happened during the trial period that ranked the plans.
- if (verbosity >= ExplainCommon::EXEC_ALL_PLANS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecAllPlans) {
// If we ranked multiple plans against each other, then add stats collected
// from the trial period of the winning plan. The "allPlansExecution" section
// will contain an apples-to-apples comparison of the winning plan's stats against
diff --git a/src/mongo/db/query/explain.h b/src/mongo/db/query/explain.h
index fb9b8d56c2a..8202cbfbeec 100644
--- a/src/mongo/db/query/explain.h
+++ b/src/mongo/db/query/explain.h
@@ -31,7 +31,7 @@
#include "mongo/db/exec/plan_stage.h"
#include "mongo/db/exec/plan_stats.h"
#include "mongo/db/query/canonical_query.h"
-#include "mongo/db/query/explain_common.h"
+#include "mongo/db/query/explain_options.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/query/query_planner_params.h"
#include "mongo/db/query/query_solution.h"
@@ -64,7 +64,7 @@ public:
*/
static void explainStages(PlanExecutor* exec,
const Collection* collection,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
BSONObjBuilder* out);
/**
@@ -85,8 +85,9 @@ public:
* Generates the BSON stats at a verbosity specified by 'verbosity'. Defaults
* to execution stats verbosity.
*/
- static BSONObj statsToBSON(const PlanStageStats& stats,
- ExplainCommon::Verbosity verbosity = ExplainCommon::EXEC_STATS);
+ static BSONObj statsToBSON(
+ const PlanStageStats& stats,
+ ExplainOptions::Verbosity verbosity = ExplainOptions::Verbosity::kExecStats);
/**
* This version of stats tree to BSON conversion returns the result through the
@@ -95,9 +96,10 @@ public:
* Generates the BSON stats at a verbosity specified by 'verbosity'. Defaults
* to execution stats verbosity.
*/
- static void statsToBSON(const PlanStageStats& stats,
- BSONObjBuilder* bob,
- ExplainCommon::Verbosity verbosity = ExplainCommon::EXEC_STATS);
+ static void statsToBSON(
+ const PlanStageStats& stats,
+ BSONObjBuilder* bob,
+ ExplainOptions::Verbosity verbosity = ExplainOptions::Verbosity::kExecStats);
/**
* Returns a short plan summary std::string describing the leaves of the query plan.
@@ -128,7 +130,7 @@ private:
* Not used except as a helper to the public statsToBSON(...) functions.
*/
static void statsToBSON(const PlanStageStats& stats,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
BSONObjBuilder* bob,
BSONObjBuilder* topLevelBob);
@@ -164,7 +166,7 @@ private:
* This is a helper for generating explain BSON. It is used by explainStages(...).
*/
static void generateExecStats(PlanStageStats* stats,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
BSONObjBuilder* out,
boost::optional<long long> totalTimeMillis);
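
For illustration, a caller of the retyped explain API might look like the following sketch; 'exec' and 'collection' are hypothetical stand-ins for a PlanExecutor and Collection already in scope:

    // A minimal sketch, not part of the patch: explain a plan at
    // "executionStats" verbosity using the new scoped enum.
    BSONObjBuilder bob;
    Explain::explainStages(
        exec.get(), collection, ExplainOptions::Verbosity::kExecStats, &bob);
    BSONObj explained = bob.obj();  // contains "queryPlanner" and "executionStats"
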
diff --git a/src/mongo/db/query/explain_common.cpp b/src/mongo/db/query/explain_common.cpp
deleted file mode 100644
index 1f049de6cec..00000000000
--- a/src/mongo/db/query/explain_common.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Copyright (C) 2013-2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/query/explain_common.h"
-
-#include "mongo/util/mongoutils/str.h"
-
-namespace mongo {
-
-// static
-const char* ExplainCommon::verbosityString(ExplainCommon::Verbosity verbosity) {
- switch (verbosity) {
- case QUERY_PLANNER:
- return "queryPlanner";
- case EXEC_STATS:
- return "executionStats";
- case EXEC_ALL_PLANS:
- return "allPlansExecution";
- default:
- invariant(0);
- return "unknown";
- }
-}
-
-// static
-Status ExplainCommon::parseCmdBSON(const BSONObj& cmdObj, ExplainCommon::Verbosity* verbosity) {
- if (Object != cmdObj.firstElement().type()) {
- return Status(ErrorCodes::BadValue, "explain command requires a nested object");
- }
-
- *verbosity = ExplainCommon::EXEC_ALL_PLANS;
- if (!cmdObj["verbosity"].eoo()) {
- const char* verbStr = cmdObj["verbosity"].valuestrsafe();
- if (mongoutils::str::equals(verbStr, "queryPlanner")) {
- *verbosity = ExplainCommon::QUERY_PLANNER;
- } else if (mongoutils::str::equals(verbStr, "executionStats")) {
- *verbosity = ExplainCommon::EXEC_STATS;
- } else if (!mongoutils::str::equals(verbStr, "allPlansExecution")) {
- return Status(ErrorCodes::BadValue,
- "verbosity string must be one of "
- "{'queryPlanner', 'executionStats', 'allPlansExecution'}");
- }
- }
-
- return Status::OK();
-}
-
-} // namespace mongo
diff --git a/src/mongo/db/query/explain_common.h b/src/mongo/db/query/explain_common.h
deleted file mode 100644
index 750f207a2a5..00000000000
--- a/src/mongo/db/query/explain_common.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Copyright (C) 2013-2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include "mongo/base/status.h"
-#include "mongo/bson/bsonobj.h"
-
-namespace mongo {
-
-/**
- * Utilities used for explain implementations on both mongod and mongos.
- */
-class ExplainCommon {
-public:
- /**
- * The various supported verbosity levels for explain. The order is
- * significant: the enum values are assigned in order of increasing verbosity.
- */
- enum Verbosity {
- // At all verbosities greater than or equal to QUERY_PLANNER, we display information
- // about the plan selected and alternate rejected plans. Does not include any execution-
- // related info. String alias is "queryPlanner".
- QUERY_PLANNER = 0,
-
- // At all verbosities greater than or equal to EXEC_STATS, we display a section of
- // output containing both overall execution stats, and stats per stage in the
- // execution tree. String alias is "execStats".
- EXEC_STATS = 1,
-
- // At this verbosity level, we generate the execution stats for each rejected plan as
- // well as the winning plan. String alias is "allPlansExecution".
- EXEC_ALL_PLANS = 2,
- };
-
- /**
- * Converts an explain verbosity to its string representation.
- */
- static const char* verbosityString(ExplainCommon::Verbosity verbosity);
-
- /**
- * Does some basic validation of the command BSON, and retrieves the explain verbosity.
- *
- * Returns a non-OK status if parsing fails.
- *
- * On success, populates "verbosity".
- */
- static Status parseCmdBSON(const BSONObj& cmdObj, ExplainCommon::Verbosity* verbosity);
-};
-
-} // namespace
diff --git a/src/mongo/db/query/explain_options.cpp b/src/mongo/db/query/explain_options.cpp
new file mode 100644
index 00000000000..e81df690250
--- /dev/null
+++ b/src/mongo/db/query/explain_options.cpp
@@ -0,0 +1,93 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/query/explain_options.h"
+
+#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/util/assert_util.h"
+
+namespace mongo {
+
+constexpr StringData ExplainOptions::kVerbosityName;
+constexpr StringData ExplainOptions::kQueryPlannerVerbosityStr;
+constexpr StringData ExplainOptions::kExecStatsVerbosityStr;
+constexpr StringData ExplainOptions::kAllPlansExecutionVerbosityStr;
+
+StringData ExplainOptions::verbosityString(ExplainOptions::Verbosity verbosity) {
+ switch (verbosity) {
+ case Verbosity::kQueryPlanner:
+ return kQueryPlannerVerbosityStr;
+ case Verbosity::kExecStats:
+ return kExecStatsVerbosityStr;
+ case Verbosity::kExecAllPlans:
+ return kAllPlansExecutionVerbosityStr;
+ default:
+ MONGO_UNREACHABLE;
+ }
+}
+
+StatusWith<ExplainOptions::Verbosity> ExplainOptions::parseCmdBSON(const BSONObj& cmdObj) {
+ if (BSONType::Object != cmdObj.firstElement().type()) {
+ return Status(ErrorCodes::FailedToParse, "explain command requires a nested object");
+ }
+
+ auto verbosity = Verbosity::kExecAllPlans;
+ if (auto verbosityElt = cmdObj[kVerbosityName]) {
+ if (verbosityElt.type() != BSONType::String) {
+ return Status(ErrorCodes::FailedToParse, "explain verbosity must be a string");
+ }
+
+ auto verbStr = verbosityElt.valueStringData();
+ if (verbStr == kQueryPlannerVerbosityStr) {
+ verbosity = Verbosity::kQueryPlanner;
+ } else if (verbStr == kExecStatsVerbosityStr) {
+ verbosity = Verbosity::kExecStats;
+ } else if (verbStr != kAllPlansExecutionVerbosityStr) {
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "verbosity string must be one of {'"
+ << kQueryPlannerVerbosityStr
+ << "', '"
+ << kExecStatsVerbosityStr
+ << "', '"
+ << kAllPlansExecutionVerbosityStr
+ << "'}");
+ }
+ }
+
+ return verbosity;
+}
+
+BSONObj ExplainOptions::toBSON(ExplainOptions::Verbosity verbosity) {
+ BSONObjBuilder explainOptionsBuilder;
+ explainOptionsBuilder.append(kVerbosityName, StringData(verbosityString(verbosity)));
+ return explainOptionsBuilder.obj();
+}
+
+} // namespace mongo
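
A round trip through the new helpers might look like the following sketch (assumes 'fromjson' from mongo/bson/json.h); note that an omitted "verbosity" field parses to kExecAllPlans:

    // A minimal sketch: parse the verbosity out of an explain command, then
    // serialize it back to BSON.
    BSONObj cmd = fromjson(
        "{explain: {aggregate: 'coll', pipeline: []}, verbosity: 'executionStats'}");
    auto swVerbosity = ExplainOptions::parseCmdBSON(cmd);
    invariant(swVerbosity.getValue() == ExplainOptions::Verbosity::kExecStats);
    BSONObj roundTripped = ExplainOptions::toBSON(swVerbosity.getValue());
    // roundTripped == {verbosity: "executionStats"}
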
diff --git a/src/mongo/db/query/explain_options.h b/src/mongo/db/query/explain_options.h
new file mode 100644
index 00000000000..97a3c85ea65
--- /dev/null
+++ b/src/mongo/db/query/explain_options.h
@@ -0,0 +1,89 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/base/status.h"
+#include "mongo/bson/bsonobj.h"
+
+namespace mongo {
+
+/**
+ * Represents options passed to the explain command (aside from the command which is being explained
+ * and its parameters).
+ */
+class ExplainOptions {
+public:
+ /**
+ * The various supported verbosity levels for explain. The order is significant: the enum values
+ * are assigned in order of increasing verbosity.
+ */
+ enum class Verbosity {
+        // At all verbosities greater than or equal to kQueryPlanner, we display information about
+        // the plan selected and alternate rejected plans. Does not include any execution-related
+        // info. String alias is "queryPlanner".
+ kQueryPlanner = 0,
+
+        // At all verbosities greater than or equal to kExecStats, we display a section of output
+        // containing both overall execution stats, and stats per stage in the execution tree.
+        // String alias is "executionStats".
+ kExecStats = 1,
+
+ // At this verbosity level, we generate the execution stats for each rejected plan as well
+ // as the winning plan. String alias is "allPlansExecution".
+ kExecAllPlans = 2,
+ };
+
+ static constexpr StringData kVerbosityName = "verbosity"_sd;
+
+ // String representations for verbosity levels.
+ static constexpr StringData kQueryPlannerVerbosityStr = "queryPlanner"_sd;
+ static constexpr StringData kExecStatsVerbosityStr = "executionStats"_sd;
+ static constexpr StringData kAllPlansExecutionVerbosityStr = "allPlansExecution"_sd;
+
+ /**
+ * Converts an explain verbosity to its string representation.
+ */
+ static StringData verbosityString(ExplainOptions::Verbosity verbosity);
+
+ /**
+ * Does some basic validation of the command BSON, then extracts and returns the explain
+ * verbosity.
+ *
+ * Returns a non-OK status if parsing fails.
+ */
+ static StatusWith<ExplainOptions::Verbosity> parseCmdBSON(const BSONObj& cmdObj);
+
+ /**
+ * Converts 'verbosity' to its corresponding representation as a BSONObj containing explain
+ * command parameters.
+ */
+ static BSONObj toBSON(ExplainOptions::Verbosity verbosity);
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/query/explain_options_test.cpp b/src/mongo/db/query/explain_options_test.cpp
new file mode 100644
index 00000000000..c1a2a606263
--- /dev/null
+++ b/src/mongo/db/query/explain_options_test.cpp
@@ -0,0 +1,95 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/query/explain_options.h"
+
+#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/bson/json.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+namespace {
+
+TEST(ExplainOptionsTest, VerbosityEnumToStringReturnsCorrectValues) {
+ ASSERT_EQ(ExplainOptions::verbosityString(ExplainOptions::Verbosity::kQueryPlanner),
+ "queryPlanner"_sd);
+ ASSERT_EQ(ExplainOptions::verbosityString(ExplainOptions::Verbosity::kExecStats),
+ "executionStats"_sd);
+ ASSERT_EQ(ExplainOptions::verbosityString(ExplainOptions::Verbosity::kExecAllPlans),
+ "allPlansExecution"_sd);
+}
+
+TEST(ExplainOptionsTest, ExplainOptionsSerializeToBSONCorrectly) {
+ ASSERT_BSONOBJ_EQ(BSON("verbosity"
+ << "queryPlanner"),
+ ExplainOptions::toBSON(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_BSONOBJ_EQ(BSON("verbosity"
+ << "executionStats"),
+ ExplainOptions::toBSON(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_BSONOBJ_EQ(BSON("verbosity"
+ << "allPlansExecution"),
+ ExplainOptions::toBSON(ExplainOptions::Verbosity::kExecAllPlans));
+}
+
+TEST(ExplainOptionsTest, CanParseExplainVerbosity) {
+ auto verbosity = unittest::assertGet(
+ ExplainOptions::parseCmdBSON(fromjson("{explain: {}, verbosity: 'queryPlanner'}")));
+ ASSERT(verbosity == ExplainOptions::Verbosity::kQueryPlanner);
+ verbosity = unittest::assertGet(
+ ExplainOptions::parseCmdBSON(fromjson("{explain: {}, verbosity: 'executionStats'}")));
+ ASSERT(verbosity == ExplainOptions::Verbosity::kExecStats);
+ verbosity = unittest::assertGet(
+ ExplainOptions::parseCmdBSON(fromjson("{explain: {}, verbosity: 'allPlansExecution'}")));
+ ASSERT(verbosity == ExplainOptions::Verbosity::kExecAllPlans);
+}
+
+TEST(ExplainOptionsTest, ParsingFailsIfVerbosityIsNotAString) {
+ ASSERT_EQ(ExplainOptions::parseCmdBSON(fromjson("{explain: {}, verbosity: 1}")).getStatus(),
+ ErrorCodes::FailedToParse);
+ ASSERT_EQ(ExplainOptions::parseCmdBSON(fromjson("{explain: {}, verbosity: {foo: 'bar'}}"))
+ .getStatus(),
+ ErrorCodes::FailedToParse);
+}
+
+TEST(ExplainOptionsTest, ParsingFailsIfVerbosityStringIsNotRecognized) {
+ ASSERT_EQ(ExplainOptions::parseCmdBSON(fromjson("{explain: {}, verbosity: 'badVerbosity'}"))
+ .getStatus(),
+ ErrorCodes::FailedToParse);
+}
+
+TEST(ExplainOptionsTest, ParsingFailsIfFirstElementIsNotAnObject) {
+ ASSERT_EQ(ExplainOptions::parseCmdBSON(fromjson("{explain: 1, verbosity: 'queryPlanner'}"))
+ .getStatus(),
+ ErrorCodes::FailedToParse);
+}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index d8c9feb8fde..9f709bd8485 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -556,7 +556,8 @@ std::string runQuery(OperationContext* opCtx,
bb.skip(sizeof(QueryResult::Value));
BSONObjBuilder explainBob;
- Explain::explainStages(exec.get(), collection, ExplainCommon::EXEC_ALL_PLANS, &explainBob);
+ Explain::explainStages(
+ exec.get(), collection, ExplainOptions::Verbosity::kExecAllPlans, &explainBob);
// Add the resulting object to the return buffer.
BSONObj explainObj = explainBob.obj();
diff --git a/src/mongo/db/query/parsed_distinct.cpp b/src/mongo/db/query/parsed_distinct.cpp
index 8bd80ff405e..3c3a75c62cd 100644
--- a/src/mongo/db/query/parsed_distinct.cpp
+++ b/src/mongo/db/query/parsed_distinct.cpp
@@ -86,9 +86,6 @@ StatusWith<BSONObj> ParsedDistinct::asAggregationCommand() const {
groupStageBuilder.doneFast();
pipelineBuilder.doneFast();
- if (_query->getQueryRequest().isExplain()) {
- aggregationBuilder.append("explain", true);
- }
aggregationBuilder.append(kCollationField, qr.getCollation());
// Specify the 'cursor' option so that aggregation uses the cursor interface.
diff --git a/src/mongo/db/query/parsed_distinct_test.cpp b/src/mongo/db/query/parsed_distinct_test.cpp
index d53457e500e..3a4f6ccb42e 100644
--- a/src/mongo/db/query/parsed_distinct_test.cpp
+++ b/src/mongo/db/query/parsed_distinct_test.cpp
@@ -60,7 +60,7 @@ TEST(ParsedDistinctTest, ConvertToAggregationNoQuery) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(!ar.getValue().isExplain());
+ ASSERT(!ar.getValue().getExplain());
ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
@@ -96,7 +96,7 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithQuery) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(!ar.getValue().isExplain());
+ ASSERT(!ar.getValue().getExplain());
ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
@@ -116,7 +116,7 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithQuery) {
SimpleBSONObjComparator::kInstance.makeEqualTo()));
}
-TEST(ParsedDistinctTest, ConvertToAggregationWithExplain) {
+TEST(ParsedDistinctTest, ExplainNotIncludedWhenConvertingToAggregationCommand) {
QueryTestServiceContext serviceContext;
auto uniqueTxn = serviceContext.makeOperationContext();
OperationContext* opCtx = uniqueTxn.get();
@@ -131,9 +131,11 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithExplain) {
auto agg = pd.getValue().asAggregationCommand();
ASSERT_OK(agg);
+ ASSERT_FALSE(agg.getValue().hasField("explain"));
+
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(ar.getValue().isExplain());
+ ASSERT(!ar.getValue().getExplain());
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
diff --git a/src/mongo/db/query/query_request.cpp b/src/mongo/db/query/query_request.cpp
index b58a0665868..dc8188ab96a 100644
--- a/src/mongo/db/query/query_request.cpp
+++ b/src/mongo/db/query/query_request.cpp
@@ -1058,9 +1058,6 @@ StatusWith<BSONObj> QueryRequest::asAggregationCommand() const {
// Other options.
aggregationBuilder.append("collation", _collation);
- if (_explain) {
- aggregationBuilder.append("explain", _explain);
- }
if (_maxTimeMS > 0) {
aggregationBuilder.append(cmdOptionMaxTimeMS, _maxTimeMS);
}
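
With the explain flag stripped from the converted aggregate body, explain now travels outside the command. A hedged sketch of what a caller would build instead (in this patch the wrapping is done by the explain command path and, on mongos, by wrapAggAsExplain):

    // Convert a find to an aggregate, then wrap it in an explain command
    // rather than embedding {explain: true} in the aggregate body.
    QueryRequest qr(testns);
    qr.setExplain(true);
    BSONObj agg = uassertStatusOK(qr.asAggregationCommand());  // no "explain" field
    BSONObjBuilder explainBob;
    explainBob.append("explain", agg);
    explainBob.append("verbosity",
                      ExplainOptions::verbosityString(ExplainOptions::Verbosity::kQueryPlanner));
    BSONObj explainCmd = explainBob.obj();
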
diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp
index 66a03bc8285..8906d5c2a7d 100644
--- a/src/mongo/db/query/query_request_test.cpp
+++ b/src/mongo/db/query/query_request_test.cpp
@@ -1102,14 +1102,14 @@ TEST(QueryRequestTest, ConvertToAggregationSucceeds) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(!ar.getValue().isExplain());
+ ASSERT(!ar.getValue().getExplain());
ASSERT(ar.getValue().getPipeline().empty());
ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
}
-TEST(QueryRequestTest, ConvertToAggregationWithExplainSucceeds) {
+TEST(QueryRequestTest, ConvertToAggregationOmitsExplain) {
QueryRequest qr(testns);
qr.setExplain(true);
auto agg = qr.asAggregationCommand();
@@ -1117,7 +1117,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithExplainSucceeds) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(ar.getValue().isExplain());
+ ASSERT_FALSE(ar.getValue().getExplain());
ASSERT(ar.getValue().getPipeline().empty());
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
@@ -1245,7 +1245,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithPipeline) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(!ar.getValue().isExplain());
+ ASSERT(!ar.getValue().getExplain());
ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
@@ -1270,7 +1270,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithBatchSize) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(!ar.getValue().isExplain());
+ ASSERT(!ar.getValue().getExplain());
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_EQ(ar.getValue().getBatchSize(), 4LL);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
@@ -1288,7 +1288,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithMaxTimeMS) {
auto ar = AggregationRequest::parseFromBSON(testns, cmdObj);
ASSERT_OK(ar.getStatus());
- ASSERT(!ar.getValue().isExplain());
+ ASSERT(!ar.getValue().getExplain());
ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
@@ -1302,7 +1302,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithCollationSucceeds) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(!ar.getValue().isExplain());
+ ASSERT(!ar.getValue().getExplain());
ASSERT(ar.getValue().getPipeline().empty());
ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
diff --git a/src/mongo/db/views/resolved_view.cpp b/src/mongo/db/views/resolved_view.cpp
index 5fcdecc857e..9012e5489e4 100644
--- a/src/mongo/db/views/resolved_view.cpp
+++ b/src/mongo/db/views/resolved_view.cpp
@@ -65,54 +65,30 @@ ResolvedView ResolvedView::fromBSON(BSONObj commandResponseObj) {
return {ResolvedView(NamespaceString(viewDef["ns"].valueStringData()), pipeline)};
}
-StatusWith<BSONObj> ResolvedView::asExpandedViewAggregation(
+AggregationRequest ResolvedView::asExpandedViewAggregation(
const AggregationRequest& request) const {
- BSONObjBuilder aggregationBuilder;
- // Perform the aggregation on the resolved namespace.
- aggregationBuilder.append("aggregate", _namespace.coll());
-
- // The new pipeline consists of two parts: first, 'pipeline' in this ResolvedView;
- // then, the pipeline in 'request'.
- BSONArrayBuilder pipelineBuilder(aggregationBuilder.subarrayStart("pipeline"));
- for (auto&& item : _pipeline) {
- pipelineBuilder.append(item);
- }
-
- for (auto&& item : request.getPipeline()) {
- pipelineBuilder.append(item);
- }
- pipelineBuilder.doneFast();
-
- if (request.isExplain()) {
- aggregationBuilder.append("explain", true);
+ // Perform the aggregation on the resolved namespace. The new pipeline consists of two parts:
+ // first, 'pipeline' in this ResolvedView; then, the pipeline in 'request'.
+ std::vector<BSONObj> resolvedPipeline;
+ resolvedPipeline.reserve(_pipeline.size() + request.getPipeline().size());
+ resolvedPipeline.insert(resolvedPipeline.end(), _pipeline.begin(), _pipeline.end());
+ resolvedPipeline.insert(
+ resolvedPipeline.end(), request.getPipeline().begin(), request.getPipeline().end());
+
+ AggregationRequest expandedRequest{_namespace, resolvedPipeline};
+
+ if (request.getExplain()) {
+ expandedRequest.setExplain(request.getExplain());
} else {
- BSONObjBuilder batchSizeBuilder(aggregationBuilder.subobjStart("cursor"));
- batchSizeBuilder.append(AggregationRequest::kBatchSizeName, request.getBatchSize());
- batchSizeBuilder.doneFast();
+ expandedRequest.setBatchSize(request.getBatchSize());
}
- if (!request.getHint().isEmpty()) {
- aggregationBuilder.append(AggregationRequest::kHintName, request.getHint());
- }
-
- if (request.shouldBypassDocumentValidation()) {
- aggregationBuilder.append("bypassDocumentValidation", true);
- }
-
- if (request.shouldAllowDiskUse()) {
- aggregationBuilder.append("allowDiskUse", true);
- }
-
- return aggregationBuilder.obj();
-}
-
-StatusWith<BSONObj> ResolvedView::asExpandedViewAggregation(const BSONObj& aggCommand) const {
- auto aggRequest = AggregationRequest::parseFromBSON(_namespace, aggCommand);
- if (!aggRequest.isOK()) {
- return aggRequest.getStatus();
- }
+ expandedRequest.setHint(request.getHint());
+ expandedRequest.setBypassDocumentValidation(request.shouldBypassDocumentValidation());
+ expandedRequest.setAllowDiskUse(request.shouldAllowDiskUse());
+ expandedRequest.setCollation(request.getCollation());
- return asExpandedViewAggregation(aggRequest.getValue());
+ return expandedRequest;
}
} // namespace mongo
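
The reworked expansion operates entirely on AggregationRequest objects. A minimal sketch, assuming hypothetical namespaces 'viewNss' and 'backingNss':

    // Expanding a request on a view prepends the view pipeline and carries
    // options such as explain over to the backing collection.
    ResolvedView resolvedView{backingNss, std::vector<BSONObj>{BSON("$match" << BSON("x" << 1))}};
    AggregationRequest requestOnView{viewNss, std::vector<BSONObj>{BSON("$count"
                                                                        << "n")}};
    requestOnView.setExplain(ExplainOptions::Verbosity::kQueryPlanner);
    auto expanded = resolvedView.asExpandedViewAggregation(requestOnView);
    // 'expanded' targets backingNss with pipeline [{$match: {x: 1}}, {$count: 'n'}]
    // and preserves the explain verbosity.
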
diff --git a/src/mongo/db/views/resolved_view.h b/src/mongo/db/views/resolved_view.h
index a5016ffc4b3..ec85b7b3254 100644
--- a/src/mongo/db/views/resolved_view.h
+++ b/src/mongo/db/views/resolved_view.h
@@ -33,11 +33,10 @@
#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/pipeline/aggregation_request.h"
namespace mongo {
-class AggregationRequest;
-
/**
* Represents a resolved definition, composed of a base collection namespace and a pipeline
* built from one or more views.
@@ -56,11 +55,10 @@ public:
static ResolvedView fromBSON(BSONObj commandResponseObj);
/**
- * Convert an aggregation command on a view to the equivalent command against the views
+ * Convert an aggregation command on a view to the equivalent command against the view's
* underlying collection.
*/
- StatusWith<BSONObj> asExpandedViewAggregation(const BSONObj& aggCommand) const;
- StatusWith<BSONObj> asExpandedViewAggregation(const AggregationRequest& aggRequest) const;
+ AggregationRequest asExpandedViewAggregation(const AggregationRequest& aggRequest) const;
const NamespaceString& getNamespace() const {
return _namespace;
diff --git a/src/mongo/db/views/resolved_view_test.cpp b/src/mongo/db/views/resolved_view_test.cpp
index 3f991868ccb..e25c5b263b7 100644
--- a/src/mongo/db/views/resolved_view_test.cpp
+++ b/src/mongo/db/views/resolved_view_test.cpp
@@ -35,6 +35,7 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/pipeline/aggregation_request.h"
+#include "mongo/db/pipeline/document.h"
#include "mongo/db/views/resolved_view.h"
#include "mongo/unittest/unittest.h"
@@ -47,99 +48,51 @@ const std::vector<BSONObj> emptyPipeline;
const BSONObj kDefaultCursorOptionDocument =
BSON(AggregationRequest::kBatchSizeName << AggregationRequest::kDefaultBatchSize);
-TEST(ResolvedViewTest, ExpandingCmdObjWithEmptyPipelineOnNoOpViewYieldsEmptyPipeline) {
- const ResolvedView resolvedView{backingNss, emptyPipeline};
- BSONObj cmdObj =
- BSON("aggregate" << viewNss.coll() << "pipeline" << BSONArray() << "cursor" << BSONObj());
-
- auto result = resolvedView.asExpandedViewAggregation(cmdObj);
- ASSERT_OK(result.getStatus());
-
- BSONObj expected =
- BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor"
- << kDefaultCursorOptionDocument);
- ASSERT_BSONOBJ_EQ(result.getValue(), expected);
-}
-
-TEST(ResolvedViewTest, ExpandingCmdObjWithNonemptyPipelineAppendsToViewPipeline) {
- std::vector<BSONObj> viewPipeline{BSON("skip" << 7)};
- const ResolvedView resolvedView{backingNss, viewPipeline};
- BSONObj cmdObj = BSON(
- "aggregate" << viewNss.coll() << "pipeline" << BSON_ARRAY(BSON("limit" << 3)) << "cursor"
- << BSONObj());
-
- auto result = resolvedView.asExpandedViewAggregation(cmdObj);
- ASSERT_OK(result.getStatus());
-
- BSONObj expected = BSON("aggregate" << backingNss.coll() << "pipeline"
- << BSON_ARRAY(BSON("skip" << 7) << BSON("limit" << 3))
- << "cursor"
- << kDefaultCursorOptionDocument);
- ASSERT_BSONOBJ_EQ(result.getValue(), expected);
-}
-
-TEST(ResolvedViewTest, ExpandingCmdObjFailsIfCmdObjIsNotAValidAggregationCommand) {
- const ResolvedView resolvedView{backingNss, emptyPipeline};
- BSONObj badCmdObj = BSON("invalid" << 0);
- ASSERT_NOT_OK(resolvedView.asExpandedViewAggregation(badCmdObj).getStatus());
-}
-
TEST(ResolvedViewTest, ExpandingAggRequestWithEmptyPipelineOnNoOpViewYieldsEmptyPipeline) {
const ResolvedView resolvedView{backingNss, emptyPipeline};
- AggregationRequest aggRequest(viewNss, {});
-
- auto result = resolvedView.asExpandedViewAggregation(aggRequest);
- ASSERT_OK(result.getStatus());
+ AggregationRequest requestOnView{viewNss, emptyPipeline};
+ auto result = resolvedView.asExpandedViewAggregation(requestOnView);
BSONObj expected =
BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor"
<< kDefaultCursorOptionDocument);
- ASSERT_BSONOBJ_EQ(result.getValue(), expected);
+ ASSERT_BSONOBJ_EQ(result.serializeToCommandObj().toBson(), expected);
}
TEST(ResolvedViewTest, ExpandingAggRequestWithNonemptyPipelineAppendsToViewPipeline) {
std::vector<BSONObj> viewPipeline{BSON("skip" << 7)};
const ResolvedView resolvedView{backingNss, viewPipeline};
+ AggregationRequest requestOnView{viewNss, std::vector<BSONObj>{BSON("limit" << 3)}};
- std::vector<BSONObj> userAggregationPipeline = {BSON("limit" << 3)};
- AggregationRequest aggRequest(viewNss, userAggregationPipeline);
-
- auto result = resolvedView.asExpandedViewAggregation(aggRequest);
- ASSERT_OK(result.getStatus());
+ auto result = resolvedView.asExpandedViewAggregation(requestOnView);
BSONObj expected = BSON("aggregate" << backingNss.coll() << "pipeline"
<< BSON_ARRAY(BSON("skip" << 7) << BSON("limit" << 3))
<< "cursor"
<< kDefaultCursorOptionDocument);
- ASSERT_BSONOBJ_EQ(result.getValue(), expected);
+ ASSERT_BSONOBJ_EQ(result.serializeToCommandObj().toBson(), expected);
}
TEST(ResolvedViewTest, ExpandingAggRequestPreservesExplain) {
const ResolvedView resolvedView{backingNss, emptyPipeline};
- AggregationRequest aggRequest(viewNss, {});
- aggRequest.setExplain(true);
+ AggregationRequest aggRequest{viewNss, {}};
+ aggRequest.setExplain(ExplainOptions::Verbosity::kExecStats);
auto result = resolvedView.asExpandedViewAggregation(aggRequest);
- ASSERT_OK(result.getStatus());
-
- BSONObj expected =
- BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "explain" << true);
- ASSERT_BSONOBJ_EQ(result.getValue(), expected);
+ ASSERT(result.getExplain());
+ ASSERT(*result.getExplain() == ExplainOptions::Verbosity::kExecStats);
}
TEST(ResolvedViewTest, ExpandingAggRequestWithCursorAndExplainOnlyPreservesExplain) {
const ResolvedView resolvedView{backingNss, emptyPipeline};
- BSONObj cmdObj =
- BSON("aggregate" << viewNss.coll() << "pipeline" << BSONArray() << "cursor" << BSONObj()
- << "explain"
- << true);
-
- auto result = resolvedView.asExpandedViewAggregation(cmdObj);
- ASSERT_OK(result.getStatus());
+ AggregationRequest aggRequest{viewNss, {}};
+ aggRequest.setBatchSize(10);
+ aggRequest.setExplain(ExplainOptions::Verbosity::kExecStats);
- BSONObj expected =
- BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "explain" << true);
- ASSERT_BSONOBJ_EQ(result.getValue(), expected);
+ auto result = resolvedView.asExpandedViewAggregation(aggRequest);
+ ASSERT(result.getExplain());
+ ASSERT(*result.getExplain() == ExplainOptions::Verbosity::kExecStats);
+ ASSERT_EQ(result.getBatchSize(), AggregationRequest::kDefaultBatchSize);
}
TEST(ResolvedViewTest, ExpandingAggRequestPreservesBypassDocumentValidation) {
@@ -148,14 +101,7 @@ TEST(ResolvedViewTest, ExpandingAggRequestPreservesBypassDocumentValidation) {
aggRequest.setBypassDocumentValidation(true);
auto result = resolvedView.asExpandedViewAggregation(aggRequest);
- ASSERT_OK(result.getStatus());
-
- BSONObj expected =
- BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor"
- << kDefaultCursorOptionDocument
- << "bypassDocumentValidation"
- << true);
- ASSERT_BSONOBJ_EQ(result.getValue(), expected);
+ ASSERT_TRUE(result.shouldBypassDocumentValidation());
}
TEST(ResolvedViewTest, ExpandingAggRequestPreservesAllowDiskUse) {
@@ -164,14 +110,19 @@ TEST(ResolvedViewTest, ExpandingAggRequestPreservesAllowDiskUse) {
aggRequest.setAllowDiskUse(true);
auto result = resolvedView.asExpandedViewAggregation(aggRequest);
- ASSERT_OK(result.getStatus());
+ ASSERT_TRUE(result.shouldAllowDiskUse());
+}
- BSONObj expected =
- BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor"
- << kDefaultCursorOptionDocument
- << "allowDiskUse"
- << true);
- ASSERT_BSONOBJ_EQ(result.getValue(), expected);
+TEST(ResolvedViewTest, ExpandingAggRequestPreservesCollation) {
+ const ResolvedView resolvedView{backingNss, emptyPipeline};
+ AggregationRequest aggRequest(viewNss, {});
+ aggRequest.setCollation(BSON("locale"
+ << "fr_CA"));
+
+ auto result = resolvedView.asExpandedViewAggregation(aggRequest);
+ ASSERT_BSONOBJ_EQ(result.getCollation(),
+ BSON("locale"
+ << "fr_CA"));
}
TEST(ResolvedViewTest, FromBSONFailsIfMissingResolvedView) {
diff --git a/src/mongo/db/views/view_sharding_check.cpp b/src/mongo/db/views/view_sharding_check.cpp
index e2144d56dac..c70cc5f5c55 100644
--- a/src/mongo/db/views/view_sharding_check.cpp
+++ b/src/mongo/db/views/view_sharding_check.cpp
@@ -76,14 +76,14 @@ StatusWith<BSONObj> ViewShardingCheck::getResolvedViewIfSharded(OperationContext
return viewDetailBob.obj();
}
-void ViewShardingCheck::appendShardedViewStatus(const BSONObj& resolvedView, BSONObjBuilder* out) {
+Status ViewShardingCheck::appendShardedViewResponse(const BSONObj& resolvedView,
+ BSONObjBuilder* out) {
invariant(out);
invariant(!resolvedView.isEmpty());
out->append("resolvedView", resolvedView);
- Status status{ErrorCodes::CommandOnShardedViewNotSupportedOnMongod,
- str::stream() << "Command on view must be executed by mongos"};
- Command::appendCommandStatus(*out, status);
+ return {ErrorCodes::CommandOnShardedViewNotSupportedOnMongod,
+ str::stream() << "Command on view must be executed by mongos"};
}
bool ViewShardingCheck::collectionIsSharded(OperationContext* opCtx, const NamespaceString& nss) {
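
Status reporting is now left to the call site. A minimal sketch of the new contract ('resolvedViewObj' is a hypothetical resolved-view document):

    // Append the resolved view, then report the returned error status through
    // the normal command-reply machinery.
    BSONObjBuilder out;
    Status status = ViewShardingCheck::appendShardedViewResponse(resolvedViewObj, &out);
    Command::appendCommandStatus(out, status);  // always a non-OK status here
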
diff --git a/src/mongo/db/views/view_sharding_check.h b/src/mongo/db/views/view_sharding_check.h
index d3437353212..c59455dd5aa 100644
--- a/src/mongo/db/views/view_sharding_check.h
+++ b/src/mongo/db/views/view_sharding_check.h
@@ -65,10 +65,11 @@ public:
const ViewDefinition* view);
/**
- * Appends the resolved view definition and CommandOnShardedViewNotSupportedOnMongod status to
- * 'result'.
+     * Appends the resolved view definition to 'result' and returns a
+     * CommandOnShardedViewNotSupportedOnMongod error status. The caller is responsible for
+     * appending the non-OK status and error code to the command response.
*/
- static void appendShardedViewStatus(const BSONObj& resolvedView, BSONObjBuilder* result);
+ static Status appendShardedViewResponse(const BSONObj& resolvedView, BSONObjBuilder* result);
private:
/**
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index a5add096d27..58df14b77b4 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -310,6 +310,40 @@ public:
}
};
+class SerializationRespectsExplainModes : public Base {
+public:
+ void run() {
+ createSource();
+
+ {
+ // Nothing serialized when no explain mode specified.
+ auto explainResult = source()->serialize();
+ ASSERT_TRUE(explainResult.missing());
+ }
+
+ {
+ auto explainResult = source()->serialize(ExplainOptions::Verbosity::kQueryPlanner);
+ ASSERT_FALSE(explainResult["$cursor"]["queryPlanner"].missing());
+ ASSERT_TRUE(explainResult["$cursor"]["executionStats"].missing());
+ }
+
+ {
+ auto explainResult = source()->serialize(ExplainOptions::Verbosity::kExecStats);
+ ASSERT_FALSE(explainResult["$cursor"]["queryPlanner"].missing());
+ ASSERT_FALSE(explainResult["$cursor"]["executionStats"].missing());
+ ASSERT_TRUE(explainResult["$cursor"]["executionStats"]["allPlansExecution"].missing());
+ }
+
+ {
+ auto explainResult =
+ source()->serialize(ExplainOptions::Verbosity::kExecAllPlans).getDocument();
+ ASSERT_FALSE(explainResult["$cursor"]["queryPlanner"].missing());
+ ASSERT_FALSE(explainResult["$cursor"]["executionStats"].missing());
+ ASSERT_FALSE(explainResult["$cursor"]["executionStats"]["allPlansExecution"].missing());
+ }
+ }
+};
+
} // namespace DocumentSourceCursor
class All : public Suite {
@@ -325,6 +359,7 @@ public:
add<DocumentSourceCursor::IndexScanProvidesSortOnKeys>();
add<DocumentSourceCursor::ReverseIndexScanProvidesSort>();
add<DocumentSourceCursor::CompoundIndexScanProvidesMultipleSorts>();
+ add<DocumentSourceCursor::SerializationRespectsExplainModes>();
}
};
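
The contract this test pins down, sketched under the assumption that serialize() defaults its verbosity argument to boost::none:

    // Outside of explain the cursor source serializes to a missing Value;
    // under explain it produces a "$cursor" document whose detail grows with
    // the requested verbosity.
    Value notExplaining = source->serialize(boost::none);  // missing
    Value planner = source->serialize(ExplainOptions::Verbosity::kQueryPlanner);
    // planner["$cursor"]["queryPlanner"] is present; "executionStats" is not.
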
diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp
index 3df262ce725..90185fd529d 100644
--- a/src/mongo/dbtests/query_stage_multiplan.cpp
+++ b/src/mongo/dbtests/query_stage_multiplan.cpp
@@ -364,7 +364,7 @@ public:
BSONObjBuilder bob;
Explain::explainStages(
- exec.get(), ctx.getCollection(), ExplainCommon::EXEC_ALL_PLANS, &bob);
+ exec.get(), ctx.getCollection(), ExplainOptions::Verbosity::kExecAllPlans, &bob);
BSONObj explained = bob.done();
ASSERT_EQ(explained["executionStats"]["nReturned"].Int(), nDocs);
diff --git a/src/mongo/s/commands/cluster_aggregate.cpp b/src/mongo/s/commands/cluster_aggregate.cpp
index d9a182b2938..010cbc55e63 100644
--- a/src/mongo/s/commands/cluster_aggregate.cpp
+++ b/src/mongo/s/commands/cluster_aggregate.cpp
@@ -37,7 +37,6 @@
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/pipeline/aggregation_request.h"
#include "mongo/db/pipeline/document_source_out.h"
#include "mongo/db/pipeline/expression_context.h"
#include "mongo/db/pipeline/lite_parsed_pipeline.h"
@@ -60,16 +59,50 @@
namespace mongo {
+namespace {
+// These fields are not part of the AggregationRequest since they are not handled by the aggregation
+// subsystem, so we serialize them separately.
+const std::initializer_list<StringData> kFieldsToPropagateToShards = {
+ "$queryOptions", "readConcern", QueryRequest::cmdOptionMaxTimeMS,
+};
+
+// Given a document representing an aggregation command such as
+//
+// {aggregate: "myCollection", pipeline: [], ...},
+//
+// produces the corresponding explain command:
+//
+// {explain: {aggregate: "myCollection", pipeline: [], ...}, verbosity: ...}
+//
+// The 'cmdObj' is the original user request, which may contain fields to propagate to the shards
+// that are not handled by the aggregation subsystem itself (e.g. maxTimeMS).
+Document wrapAggAsExplain(Document aggregateCommand,
+ ExplainOptions::Verbosity verbosity,
+ const BSONObj& cmdObj) {
+ MutableDocument explainCommandBuilder;
+ explainCommandBuilder["explain"] = Value(aggregateCommand);
+
+ // Add explain command options.
+ for (auto&& explainOption : ExplainOptions::toBSON(verbosity)) {
+ explainCommandBuilder[explainOption.fieldNameStringData()] = Value(explainOption);
+ }
+
+ // Propagate options not specific to agg to the shards.
+ for (auto&& field : kFieldsToPropagateToShards) {
+ explainCommandBuilder[field] = Value(cmdObj[field]);
+ }
+
+ return explainCommandBuilder.freeze();
+}
+
+} // namespace
+
Status ClusterAggregate::runAggregate(OperationContext* opCtx,
const Namespaces& namespaces,
+ const AggregationRequest& request,
BSONObj cmdObj,
int options,
BSONObjBuilder* result) {
- auto request = AggregationRequest::parseFromBSON(namespaces.executionNss, cmdObj);
- if (!request.isOK()) {
- return request.getStatus();
- }
-
auto const catalogCache = Grid::get(opCtx)->catalogCache();
auto executionNsRoutingInfoStatus =
@@ -91,7 +124,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
// need to check if any involved collections are sharded before forwarding an aggregation
// command on an unsharded collection.
StringMap<ExpressionContext::ResolvedNamespace> resolvedNamespaces;
- LiteParsedPipeline liteParsedPipeline(request.getValue());
+ LiteParsedPipeline liteParsedPipeline(request);
for (auto&& nss : liteParsedPipeline.getInvolvedNamespaces()) {
const auto resolvedNsRoutingInfo =
uassertStatusOK(catalogCache->getCollectionRoutingInfo(opCtx, nss));
@@ -101,27 +134,32 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
}
if (!executionNsRoutingInfo.cm()) {
- return aggPassthrough(
- opCtx, namespaces, executionNsRoutingInfo.primary()->getId(), cmdObj, result, options);
+ return aggPassthrough(opCtx,
+ namespaces,
+ executionNsRoutingInfo.primary()->getId(),
+ request,
+ cmdObj,
+ result,
+ options);
}
const auto chunkMgr = executionNsRoutingInfo.cm();
std::unique_ptr<CollatorInterface> collation;
- if (!request.getValue().getCollation().isEmpty()) {
+ if (!request.getCollation().isEmpty()) {
collation = uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext())
- ->makeFromBSON(request.getValue().getCollation()));
+ ->makeFromBSON(request.getCollation()));
} else if (chunkMgr->getDefaultCollator()) {
collation = chunkMgr->getDefaultCollator()->clone();
}
- boost::intrusive_ptr<ExpressionContext> mergeCtx = new ExpressionContext(
- opCtx, request.getValue(), std::move(collation), std::move(resolvedNamespaces));
+ boost::intrusive_ptr<ExpressionContext> mergeCtx =
+ new ExpressionContext(opCtx, request, std::move(collation), std::move(resolvedNamespaces));
mergeCtx->inRouter = true;
// explicitly *not* setting mergeCtx->tempDir
// Parse and optimize the pipeline specification.
- auto pipeline = Pipeline::parse(request.getValue().getPipeline(), mergeCtx);
+ auto pipeline = Pipeline::parse(request.getPipeline(), mergeCtx);
if (!pipeline.isOK()) {
return pipeline.getStatus();
}
@@ -140,7 +178,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
}
try {
- chunkMgr->findIntersectingChunk(shardKeyMatches, request.getValue().getCollation());
+ chunkMgr->findIntersectingChunk(shardKeyMatches, request.getCollation());
return true;
} catch (const DBException&) {
return false;
@@ -159,7 +197,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
// Create the command for the shards. The 'fromRouter' field means produce output to be
// merged.
- MutableDocument commandBuilder(request.getValue().serializeToCommandObj());
+ MutableDocument commandBuilder(request.serializeToCommandObj());
commandBuilder[AggregationRequest::kPipelineName] = Value(shardPipeline->serialize());
if (needSplit) {
commandBuilder[AggregationRequest::kFromRouterName] = Value(true);
@@ -167,13 +205,16 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
Value(DOC(AggregationRequest::kBatchSizeName << 0));
}
- // These fields are not part of the AggregationRequest since they are not handled by the
- // aggregation subsystem, so we serialize them separately.
- const std::initializer_list<StringData> fieldsToPropagateToShards = {
- "$queryOptions", "readConcern", QueryRequest::cmdOptionMaxTimeMS,
- };
- for (auto&& field : fieldsToPropagateToShards) {
- commandBuilder[field] = Value(cmdObj[field]);
+ // If this is a request for an aggregation explain, then we must wrap the aggregate inside an
+ // explain command.
+ if (mergeCtx->explain) {
+ commandBuilder.reset(wrapAggAsExplain(commandBuilder.freeze(), *mergeCtx->explain, cmdObj));
+ } else {
+ // Add things like $queryOptions which must be sent to the shards but are not part of the
+ // agg request.
+ for (auto&& field : kFieldsToPropagateToShards) {
+ commandBuilder[field] = Value(cmdObj[field]);
+ }
}
BSONObj shardedCommand = commandBuilder.freeze().toBson();
@@ -188,17 +229,18 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
options,
namespaces.executionNss.ns(),
shardQuery,
- request.getValue().getCollation(),
+ request.getCollation(),
&shardResults);
- if (mergeCtx->isExplain) {
+ if (mergeCtx->explain) {
// This must be checked before we start modifying result.
uassertAllShardsSupportExplain(shardResults);
if (needSplit) {
*result << "needsPrimaryShardMerger" << needPrimaryShardMerger << "splitPipeline"
- << DOC("shardsPart" << shardPipeline->writeExplainOps() << "mergerPart"
- << pipeline.getValue()->writeExplainOps());
+ << Document{{"shardsPart", shardPipeline->writeExplainOps(*mergeCtx->explain)},
+ {"mergerPart",
+ pipeline.getValue()->writeExplainOps(*mergeCtx->explain)}};
} else {
*result << "splitPipeline" << BSONNULL;
}
@@ -231,7 +273,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
pipeline.getValue()->addInitialSource(
DocumentSourceMergeCursors::create(parseCursors(shardResults), mergeCtx));
- MutableDocument mergeCmd(request.getValue().serializeToCommandObj());
+ MutableDocument mergeCmd(request.serializeToCommandObj());
mergeCmd["pipeline"] = Value(pipeline.getValue()->serialize());
mergeCmd["cursor"] = Value(cmdObj["cursor"]);
@@ -275,7 +317,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
ShardConnection conn(mergingShard->getConnString(), outputNsOrEmpty);
BSONObj mergedResults =
- aggRunCommand(opCtx, conn.get(), namespaces, mergeCmd.freeze().toBson(), options);
+ aggRunCommand(opCtx, conn.get(), namespaces, request, mergeCmd.freeze().toBson(), options);
conn.done();
if (auto wcErrorElem = mergedResults["writeConcernError"]) {
@@ -395,14 +437,10 @@ void ClusterAggregate::killAllCursors(const std::vector<Strategy::CommandResult>
BSONObj ClusterAggregate::aggRunCommand(OperationContext* opCtx,
DBClientBase* conn,
const Namespaces& namespaces,
+ const AggregationRequest& aggRequest,
BSONObj cmd,
int queryOptions) {
// Temporary hack. See comment on declaration for details.
-
- massert(17016,
- "should only be running an aggregate command here",
- str::equals(cmd.firstElementFieldName(), "aggregate"));
-
auto cursor = conn->query(namespaces.executionNss.db() + ".$cmd",
cmd,
-1, // nToReturn
@@ -420,6 +458,14 @@ BSONObj ClusterAggregate::aggRunCommand(OperationContext* opCtx,
throw RecvStaleConfigException("command failed because of stale config", result);
}
+ // If this was an explain, then we get back an explain result object rather than a
+ // representation of a cursor. The remaining work here is related to cursor handling, so if
+ // there's no cursor, our work is done.
+ if (aggRequest.getExplain()) {
+ return result;
+ }
+
+ // Transfer ownership of the agg cursor opened on mongod to the mongos cursor manager.
auto executorPool = Grid::get(opCtx)->getExecutorPool();
result = uassertStatusOK(storePossibleCursor(opCtx,
HostAndPort(cursor->originalHost()),
@@ -433,6 +479,7 @@ BSONObj ClusterAggregate::aggRunCommand(OperationContext* opCtx,
Status ClusterAggregate::aggPassthrough(OperationContext* opCtx,
const Namespaces& namespaces,
const ShardId& shardId,
+ const AggregationRequest& aggRequest,
BSONObj cmdObj,
BSONObjBuilder* out,
int queryOptions) {
@@ -442,8 +489,16 @@ Status ClusterAggregate::aggPassthrough(OperationContext* opCtx,
return shardStatus.getStatus();
}
+ // If this is an explain, we need to re-create the explain command which will be forwarded to
+ // the shards.
+ if (aggRequest.getExplain()) {
+ auto explainCmdObj =
+ wrapAggAsExplain(aggRequest.serializeToCommandObj(), *aggRequest.getExplain(), cmdObj);
+ cmdObj = explainCmdObj.toBson();
+ }
+
ShardConnection conn(shardStatus.getValue()->getConnString(), "");
- BSONObj result = aggRunCommand(opCtx, conn.get(), namespaces, cmdObj, queryOptions);
+ BSONObj result = aggRunCommand(opCtx, conn.get(), namespaces, aggRequest, cmdObj, queryOptions);
conn.done();
// First append the properly constructed writeConcernError. It will then be skipped
@@ -458,18 +513,8 @@ Status ClusterAggregate::aggPassthrough(OperationContext* opCtx,
if (ResolvedView::isResolvedViewErrorResponse(responseObj)) {
auto resolvedView = ResolvedView::fromBSON(responseObj);
- auto request = AggregationRequest::parseFromBSON(resolvedView.getNamespace(), cmdObj);
- if (!request.isOK()) {
- out->resetToEmpty();
- return request.getStatus();
- }
-
- auto aggCmd = resolvedView.asExpandedViewAggregation(request.getValue());
- if (!aggCmd.isOK()) {
- out->resetToEmpty();
- return aggCmd.getStatus();
- }
-
+ auto resolvedAggRequest = resolvedView.asExpandedViewAggregation(aggRequest);
+ auto resolvedAggCmd = resolvedAggRequest.serializeToCommandObj().toBson();
out->resetToEmpty();
// We pass both the underlying collection namespace and the view namespace here. The
@@ -479,8 +524,9 @@ Status ClusterAggregate::aggPassthrough(OperationContext* opCtx,
Namespaces nsStruct;
nsStruct.requestedNss = namespaces.requestedNss;
nsStruct.executionNss = resolvedView.getNamespace();
+
return ClusterAggregate::runAggregate(
- opCtx, nsStruct, aggCmd.getValue(), queryOptions, out);
+ opCtx, nsStruct, resolvedAggRequest, resolvedAggCmd, queryOptions, out);
}
return getStatusFromCommandResult(result);
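
The effect of wrapAggAsExplain on a concrete command, as a hedged sketch assuming a user request carrying maxTimeMS:

    // The aggregate body is nested under "explain", the verbosity is
    // appended, and only the whitelisted generic fields are propagated.
    BSONObj userCmd =
        fromjson("{aggregate: 'coll', pipeline: [], cursor: {}, maxTimeMS: 500}");
    Document wrapped = wrapAggAsExplain(Document{fromjson("{aggregate: 'coll', pipeline: []}")},
                                        ExplainOptions::Verbosity::kExecStats,
                                        userCmd);
    // wrapped == {explain: {aggregate: 'coll', pipeline: []},
    //             verbosity: 'executionStats', maxTimeMS: 500}
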
diff --git a/src/mongo/s/commands/cluster_aggregate.h b/src/mongo/s/commands/cluster_aggregate.h
index d8e29744766..591b3f070d2 100644
--- a/src/mongo/s/commands/cluster_aggregate.h
+++ b/src/mongo/s/commands/cluster_aggregate.h
@@ -34,6 +34,7 @@
#include "mongo/base/status.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/pipeline/aggregation_request.h"
#include "mongo/db/pipeline/document_source.h"
#include "mongo/db/pipeline/document_source_merge_cursors.h"
#include "mongo/s/commands/strategy.h"
@@ -60,11 +61,19 @@ public:
};
/**
- * Executes an aggregation command. 'cmdObj' specifies the aggregation to run. Fills in 'result'
- * with the command response.
+ * Executes the aggregation 'request' using context 'opCtx'.
+ *
+ * The 'namespaces' struct should contain both the user-requested namespace and the namespace
+ * over which the aggregation will actually execute. Typically these two namespaces are the
+ * same, but they may differ in the case of a query on a view.
+ *
+ * The raw aggregate command parameters should be passed in 'cmdObj'.
+ *
+ * On success, fills out 'result' with the command response.
*/
static Status runAggregate(OperationContext* opCtx,
const Namespaces& namespaces,
+ const AggregationRequest& request,
BSONObj cmdObj,
int options,
BSONObjBuilder* result);
@@ -85,12 +94,14 @@ private:
static BSONObj aggRunCommand(OperationContext* opCtx,
DBClientBase* conn,
const Namespaces& namespaces,
+ const AggregationRequest& aggRequest,
BSONObj cmd,
int queryOptions);
static Status aggPassthrough(OperationContext* opCtx,
const Namespaces& namespaces,
const ShardId& shardId,
+ const AggregationRequest& aggRequest,
BSONObj cmd,
BSONObjBuilder* result,
int queryOptions);
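
A mongos call site for the new signature might be sketched as follows; 'nss', 'cmdObj', 'options', and 'result' (a BSONObjBuilder) are assumed to be in scope:

    // The caller now parses the AggregationRequest up front and hands both
    // the typed request and the raw command to ClusterAggregate.
    auto request = uassertStatusOK(AggregationRequest::parseFromBSON(nss, cmdObj));
    ClusterAggregate::Namespaces nsStruct;
    nsStruct.requestedNss = nss;
    nsStruct.executionNss = nss;
    Status status = ClusterAggregate::runAggregate(
        opCtx, nsStruct, request, cmdObj, options, &result);
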
diff --git a/src/mongo/s/commands/cluster_count_cmd.cpp b/src/mongo/s/commands/cluster_count_cmd.cpp
index 45c46c26185..2eb02c0e70f 100644
--- a/src/mongo/s/commands/cluster_count_cmd.cpp
+++ b/src/mongo/s/commands/cluster_count_cmd.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/query/view_response_formatter.h"
#include "mongo/db/views/resolved_view.h"
#include "mongo/rpc/get_status_from_command_result.h"
+#include "mongo/s/commands/cluster_aggregate.h"
#include "mongo/s/commands/cluster_commands_common.h"
#include "mongo/s/commands/cluster_explain.h"
#include "mongo/s/commands/strategy.h"
@@ -158,16 +159,19 @@ public:
return appendCommandStatus(result, aggCmdOnView.getStatus());
}
- auto resolvedView = ResolvedView::fromBSON(countResult[0].result);
- auto aggCmd = resolvedView.asExpandedViewAggregation(aggCmdOnView.getValue());
- if (!aggCmd.isOK()) {
- return appendCommandStatus(result, aggCmd.getStatus());
+ auto aggRequestOnView = AggregationRequest::parseFromBSON(nss, aggCmdOnView.getValue());
+ if (!aggRequestOnView.isOK()) {
+ return appendCommandStatus(result, aggRequestOnView.getStatus());
}
+ auto resolvedView = ResolvedView::fromBSON(countResult[0].result);
+ auto resolvedAggRequest =
+ resolvedView.asExpandedViewAggregation(aggRequestOnView.getValue());
+ auto resolvedAggCmd = resolvedAggRequest.serializeToCommandObj().toBson();
BSONObjBuilder aggResult;
Command::findCommand("aggregate")
- ->run(opCtx, dbname, aggCmd.getValue(), options, errmsg, aggResult);
+ ->run(opCtx, dbname, resolvedAggCmd, options, errmsg, aggResult);
result.resetToEmpty();
ViewResponseFormatter formatter(aggResult.obj());
@@ -179,7 +183,6 @@ public:
return true;
}
-
long long total = 0;
BSONObjBuilder shardSubTotal(result.subobjStart("shards"));
@@ -218,7 +221,7 @@ public:
Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata& serverSelectionMetadata,
BSONObjBuilder* out) const override {
const NamespaceString nss(parseNs(dbname, cmdObj));
@@ -275,19 +278,23 @@ public:
return aggCmdOnView.getStatus();
}
- auto resolvedView = ResolvedView::fromBSON(shardResults[0].result);
- auto aggCmd = resolvedView.asExpandedViewAggregation(aggCmdOnView.getValue());
- if (!aggCmd.isOK()) {
- return aggCmd.getStatus();
+ auto aggRequestOnView =
+ AggregationRequest::parseFromBSON(nss, aggCmdOnView.getValue(), verbosity);
+ if (!aggRequestOnView.isOK()) {
+ return aggRequestOnView.getStatus();
}
- std::string errMsg;
- if (Command::findCommand("aggregate")
- ->run(opCtx, dbname, aggCmd.getValue(), 0, errMsg, *out)) {
- return Status::OK();
- }
+ auto resolvedView = ResolvedView::fromBSON(shardResults[0].result);
+ auto resolvedAggRequest =
+ resolvedView.asExpandedViewAggregation(aggRequestOnView.getValue());
+ auto resolvedAggCmd = resolvedAggRequest.serializeToCommandObj().toBson();
+
+ ClusterAggregate::Namespaces nsStruct;
+ nsStruct.requestedNss = nss;
+ nsStruct.executionNss = resolvedAggRequest.getNamespaceString();
- return getStatusFromCommandResult(out->asTempObj());
+ return ClusterAggregate::runAggregate(
+ opCtx, nsStruct, resolvedAggRequest, resolvedAggCmd, 0, out);
}
const char* mongosStageName = ClusterExplain::getStageNameForReadOp(shardResults, cmdObj);
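Since the verbosity is now threaded into AggregationRequest::parseFromBSON, explaining a count against a view can delegate to ClusterAggregate::runAggregate rather than re-running the raw aggregate command text. A hedged shell sketch (hypothetical view "v"):

    db.runCommand({
        explain: {count: "v", query: {y: 1}},
        verbosity: "executionStats"
    });
    // mongos converts the count to an aggregate, expands the view
    // definition, and explains the pipeline at the requested verbosity.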
diff --git a/src/mongo/s/commands/cluster_explain.cpp b/src/mongo/s/commands/cluster_explain.cpp
index 996893fc8be..e15a9939976 100644
--- a/src/mongo/s/commands/cluster_explain.cpp
+++ b/src/mongo/s/commands/cluster_explain.cpp
@@ -101,10 +101,10 @@ bool appendElementsIfRoom(BSONObjBuilder* bob, const BSONObj& toAppend) {
// static
void ClusterExplain::wrapAsExplainForOP_COMMAND(const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
BSONObjBuilder* explainBuilder) {
explainBuilder->append("explain", cmdObj);
- explainBuilder->append("verbosity", ExplainCommon::verbosityString(verbosity));
+ explainBuilder->append("verbosity", ExplainOptions::verbosityString(verbosity));
// Propagate readConcern
if (auto readConcern = cmdObj["readConcern"]) {
@@ -114,13 +114,13 @@ void ClusterExplain::wrapAsExplainForOP_COMMAND(const BSONObj& cmdObj,
// static
void ClusterExplain::wrapAsExplain(const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata& serverSelectionMetadata,
BSONObjBuilder* out,
int* optionsOut) {
BSONObjBuilder explainBuilder;
explainBuilder.append("explain", cmdObj);
- explainBuilder.append("verbosity", ExplainCommon::verbosityString(verbosity));
+ explainBuilder.append("verbosity", ExplainOptions::verbosityString(verbosity));
// Propagate readConcern
if (auto readConcern = cmdObj["readConcern"]) {
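ExplainOptions::verbosityString() produces the same wire-format strings as before ("queryPlanner", "executionStats", "allPlansExecution"), so the explain envelope forwarded to the shards keeps its shape, e.g.:

    {
        explain: {find: "c", filter: {x: 1}},
        verbosity: "allPlansExecution"
        // plus readConcern, when present on the original command
    }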
diff --git a/src/mongo/s/commands/cluster_explain.h b/src/mongo/s/commands/cluster_explain.h
index 38332b15748..e2e3b8cc5ba 100644
--- a/src/mongo/s/commands/cluster_explain.h
+++ b/src/mongo/s/commands/cluster_explain.h
@@ -30,7 +30,7 @@
#include <string>
-#include "mongo/db/query/explain_common.h"
+#include "mongo/db/query/explain_options.h"
#include "mongo/s/commands/strategy.h"
#include "mongo/s/write_ops/batched_command_request.h"
@@ -58,7 +58,7 @@ public:
* that send the command over the NetworkInterfaceASIO rather than DBClient.
*/
static void wrapAsExplainForOP_COMMAND(const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
BSONObjBuilder* explainBuilder);
/**
@@ -72,7 +72,7 @@ public:
* be forwarded to the shards.
*/
static void wrapAsExplain(const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata& serverSelectionMetadata,
BSONObjBuilder* out,
int* optionsOut);
diff --git a/src/mongo/s/commands/cluster_explain_cmd.cpp b/src/mongo/s/commands/cluster_explain_cmd.cpp
index 4031a45c02a..099d6233a60 100644
--- a/src/mongo/s/commands/cluster_explain_cmd.cpp
+++ b/src/mongo/s/commands/cluster_explain_cmd.cpp
@@ -112,10 +112,9 @@ public:
int options,
std::string& errmsg,
BSONObjBuilder& result) {
- ExplainCommon::Verbosity verbosity;
- Status parseStatus = ExplainCommon::parseCmdBSON(cmdObj, &verbosity);
- if (!parseStatus.isOK()) {
- return appendCommandStatus(result, parseStatus);
+ auto verbosity = ExplainOptions::parseCmdBSON(cmdObj);
+ if (!verbosity.isOK()) {
+ return appendCommandStatus(result, verbosity.getStatus());
}
// This is the nested command which we are explaining.
@@ -139,8 +138,8 @@ public:
rpc::ServerSelectionMetadata metadata(secondaryOk, readPref.getValue());
// Actually call the nested command's explain(...) method.
- Status explainStatus =
- commToExplain->explain(opCtx, dbName, explainObj, verbosity, metadata, &result);
+ Status explainStatus = commToExplain->explain(
+ opCtx, dbName, explainObj, verbosity.getValue(), metadata, &result);
if (!explainStatus.isOK()) {
return appendCommandStatus(result, explainStatus);
}
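ExplainOptions::parseCmdBSON() now returns a StatusWith<Verbosity> instead of filling an out-parameter, which is what allows the single error check above. The wire format is unchanged; for an aggregate the nested command would look like:

    db.runCommand({
        explain: {aggregate: "c", pipeline: [{$match: {x: 1}}], cursor: {}},
        verbosity: "executionStats"
    });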
diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
index feae1fab5e2..da5c31572dd 100644
--- a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
@@ -82,7 +82,7 @@ public:
Status explain(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata& serverSelectionMetadata,
BSONObjBuilder* out) const override {
const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
diff --git a/src/mongo/s/commands/cluster_find_cmd.cpp b/src/mongo/s/commands/cluster_find_cmd.cpp
index d308c3a53fd..fdf4d42f7cf 100644
--- a/src/mongo/s/commands/cluster_find_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_cmd.cpp
@@ -104,7 +104,7 @@ public:
Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata& serverSelectionMetadata,
BSONObjBuilder* out) const final {
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
@@ -127,17 +127,23 @@ public:
return aggCmdOnView.getStatus();
}
- auto aggCmd = resolvedView.asExpandedViewAggregation(aggCmdOnView.getValue());
- if (!aggCmd.isOK()) {
- return aggCmd.getStatus();
+ auto aggRequestOnView =
+ AggregationRequest::parseFromBSON(nss, aggCmdOnView.getValue(), verbosity);
+ if (!aggRequestOnView.isOK()) {
+ return aggRequestOnView.getStatus();
}
+ auto resolvedAggRequest =
+ resolvedView.asExpandedViewAggregation(aggRequestOnView.getValue());
+ auto resolvedAggCmd = resolvedAggRequest.serializeToCommandObj().toBson();
+
int queryOptions = 0;
ClusterAggregate::Namespaces nsStruct;
nsStruct.requestedNss = std::move(nss);
nsStruct.executionNss = std::move(resolvedView.getNamespace());
+
auto status = ClusterAggregate::runAggregate(
- opCtx, nsStruct, aggCmd.getValue(), queryOptions, out);
+ opCtx, nsStruct, resolvedAggRequest, resolvedAggCmd, queryOptions, out);
appendCommandStatus(*out, status);
return status;
}
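Explaining a find on a view follows the same shape: the find-derived aggregate is parsed with the explain verbosity attached, expanded over the view definition, and handed to ClusterAggregate::runAggregate. Sketch (hypothetical view "v"):

    db.runCommand({
        explain: {find: "v", filter: {x: {$gt: 0}}, limit: 5},
        verbosity: "allPlansExecution"
    });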
@@ -189,12 +195,17 @@ public:
return appendCommandStatus(result, aggCmdOnView.getStatus());
}
- auto resolvedView = ResolvedView::fromBSON(viewDefinition);
- auto aggCmd = resolvedView.asExpandedViewAggregation(aggCmdOnView.getValue());
- if (!aggCmd.isOK()) {
- return appendCommandStatus(result, aggCmd.getStatus());
+ auto aggRequestOnView =
+ AggregationRequest::parseFromBSON(nss, aggCmdOnView.getValue());
+ if (!aggRequestOnView.isOK()) {
+ return appendCommandStatus(result, aggRequestOnView.getStatus());
}
+ auto resolvedView = ResolvedView::fromBSON(viewDefinition);
+ auto resolvedAggRequest =
+ resolvedView.asExpandedViewAggregation(aggRequestOnView.getValue());
+ auto resolvedAggCmd = resolvedAggRequest.serializeToCommandObj().toBson();
+
// We pass both the underlying collection namespace and the view namespace here. The
// underlying collection namespace is used to execute the aggregation on mongoD. Any
// cursor returned will be registered under the view namespace so that subsequent
@@ -202,8 +213,9 @@ public:
ClusterAggregate::Namespaces nsStruct;
nsStruct.requestedNss = std::move(nss);
nsStruct.executionNss = std::move(resolvedView.getNamespace());
+
auto status = ClusterAggregate::runAggregate(
- opCtx, nsStruct, aggCmd.getValue(), options, &result);
+ opCtx, nsStruct, resolvedAggRequest, resolvedAggCmd, options, &result);
appendCommandStatus(result, status);
return status.isOK();
}
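Because requestedNss is the view namespace, the cursor returned by a view read is registered under the view, and iteration stays transparent to the client:

    // Hypothetical view "v": the first batch is produced by an aggregate
    // over the backing collection, but getMore still addresses "v".
    var res = db.runCommand({find: "v", filter: {}, batchSize: 1});
    db.runCommand({getMore: res.cursor.id, collection: "v"});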
diff --git a/src/mongo/s/commands/cluster_pipeline_cmd.cpp b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
index e533ae05392..21571bd6e4f 100644
--- a/src/mongo/s/commands/cluster_pipeline_cmd.cpp
+++ b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
@@ -35,6 +35,7 @@
#include "mongo/base/status.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/commands.h"
+#include "mongo/rpc/metadata/server_selection_metadata.h"
#include "mongo/s/commands/cluster_aggregate.h"
namespace mongo {
@@ -80,13 +81,55 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ auto request = AggregationRequest::parseFromBSON(nss, cmdObj);
+ if (!request.isOK()) {
+ return appendCommandStatus(result, request.getStatus());
+ }
+
ClusterAggregate::Namespaces nsStruct;
nsStruct.requestedNss = nss;
nsStruct.executionNss = std::move(nss);
- auto status = ClusterAggregate::runAggregate(opCtx, nsStruct, cmdObj, options, &result);
+ auto status = ClusterAggregate::runAggregate(
+ opCtx, nsStruct, request.getValue(), cmdObj, options, &result);
appendCommandStatus(result, status);
return status.isOK();
}
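Parsing the AggregationRequest up front means a malformed aggregate is rejected on mongos before any routing happens. Assuming parseFromBSON enforces the usual field types, something like:

    db.runCommand({aggregate: "c", pipeline: {$match: {}}, cursor: {}});
    // fails at parse time: 'pipeline' must be an array of stages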
+
+ Status explain(OperationContext* opCtx,
+ const std::string& dbName,
+ const BSONObj& cmdObj,
+ ExplainOptions::Verbosity verbosity,
+ const rpc::ServerSelectionMetadata& serverSelectionMetadata,
+ BSONObjBuilder* out) const override {
+ const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
+
+ auto request = AggregationRequest::parseFromBSON(nss, cmdObj, verbosity);
+ if (!request.isOK()) {
+ return request.getStatus();
+ }
+
+ // Add the server selection metadata to the aggregate command in the "unwrapped" format that
+ // runAggregate() expects: {aggregate: ..., $queryOptions: {$readPreference: ...}}.
+ BSONObjBuilder aggCmdBuilder;
+ aggCmdBuilder.appendElements(cmdObj);
+ if (auto readPref = serverSelectionMetadata.getReadPreference()) {
+ auto readPrefObj = readPref->toBSON();
+ aggCmdBuilder.append(QueryRequest::kUnwrappedReadPrefField,
+ BSON("$readPreference" << readPrefObj));
+ }
+
+ int options = 0;
+ if (serverSelectionMetadata.isSecondaryOk()) {
+ options |= QueryOption_SlaveOk;
+ }
+
+ ClusterAggregate::Namespaces nsStruct;
+ nsStruct.requestedNss = nss;
+ nsStruct.executionNss = std::move(nss);
+
+ return ClusterAggregate::runAggregate(
+ opCtx, nsStruct, request.getValue(), cmdObj, options, out);
+ }
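The "unwrapped" shape the comment refers to carries the read preference next to the command fields rather than wrapping the whole command in $query. For a secondary-preferred explain, the command assembled here looks roughly like:

    {
        aggregate: "c",
        pipeline: [{$match: {x: 1}}],
        $queryOptions: {$readPreference: {mode: "secondaryPreferred"}}
    }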
} clusterPipelineCmd;
} // namespace
diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp
index a5d2092a96c..7986195c6f5 100644
--- a/src/mongo/s/commands/cluster_write_cmd.cpp
+++ b/src/mongo/s/commands/cluster_write_cmd.cpp
@@ -95,7 +95,7 @@ public:
virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata& serverSelectionMetadata,
BSONObjBuilder* out) const {
BatchedCommandRequest request(_writeType);
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index e219d9396f9..6ab6d3bc086 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -56,6 +56,7 @@
#include "mongo/s/catalog_cache.h"
#include "mongo/s/client/shard_connection.h"
#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/commands/cluster_aggregate.h"
#include "mongo/s/commands/cluster_commands_common.h"
#include "mongo/s/commands/cluster_explain.h"
#include "mongo/s/commands/run_on_all_shards_cmd.h"
@@ -991,7 +992,7 @@ public:
Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata& serverSelectionMetadata,
BSONObjBuilder* out) const override {
// We will time how long it takes to run the commands on the shards.
@@ -1148,14 +1149,19 @@ public:
return appendCommandStatus(result, aggCmdOnView.getStatus());
}
- auto aggCmd = resolvedView.asExpandedViewAggregation(aggCmdOnView.getValue());
- if (!aggCmd.isOK()) {
- return appendCommandStatus(result, aggCmd.getStatus());
+ auto aggRequestOnView =
+ AggregationRequest::parseFromBSON(nss, aggCmdOnView.getValue());
+ if (!aggRequestOnView.isOK()) {
+ return appendCommandStatus(result, aggRequestOnView.getStatus());
}
+ auto resolvedAggRequest =
+ resolvedView.asExpandedViewAggregation(aggRequestOnView.getValue());
+ auto resolvedAggCmd = resolvedAggRequest.serializeToCommandObj().toBson();
+
BSONObjBuilder aggResult;
Command::findCommand("aggregate")
- ->run(opCtx, dbName, aggCmd.getValue(), options, errmsg, aggResult);
+ ->run(opCtx, dbName, resolvedAggCmd, options, errmsg, aggResult);
ViewResponseFormatter formatter(aggResult.obj());
auto formatStatus = formatter.appendAsDistinctResponse(&result);
@@ -1235,7 +1241,7 @@ public:
Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata& serverSelectionMetadata,
BSONObjBuilder* out) const {
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
@@ -1296,18 +1302,22 @@ public:
return aggCmdOnView.getStatus();
}
- auto aggCmd = resolvedView.asExpandedViewAggregation(aggCmdOnView.getValue());
- if (!aggCmd.isOK()) {
- return aggCmd.getStatus();
+ auto aggRequestOnView =
+ AggregationRequest::parseFromBSON(nss, aggCmdOnView.getValue(), verbosity);
+ if (!aggRequestOnView.isOK()) {
+ return aggRequestOnView.getStatus();
}
- std::string errMsg;
- if (Command::findCommand("aggregate")
- ->run(opCtx, dbname, aggCmd.getValue(), 0, errMsg, *out)) {
- return Status::OK();
- }
+ auto resolvedAggRequest =
+ resolvedView.asExpandedViewAggregation(aggRequestOnView.getValue());
+ auto resolvedAggCmd = resolvedAggRequest.serializeToCommandObj().toBson();
+
+ ClusterAggregate::Namespaces nsStruct;
+ nsStruct.requestedNss = nss;
+ nsStruct.executionNss = resolvedAggRequest.getNamespaceString();
- return getStatusFromCommandResult(out->asTempObj());
+ return ClusterAggregate::runAggregate(
+ opCtx, nsStruct, resolvedAggRequest, resolvedAggCmd, 0, out);
}
const char* mongosStageName = ClusterExplain::getStageNameForReadOp(shardResults, cmdObj);
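As with count, both the plain and the explained distinct-on-view paths now flow through the parsed-request machinery, with the aggregate reply reshaped into distinct's response format:

    db.runCommand({distinct: "v", key: "x"});  // hypothetical view "v"
    // executed as an expanded aggregate on the backing collection;
    // ViewResponseFormatter rewrites the cursor reply as {values: [...], ok: 1}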
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index 647e6601dfa..58d55f5938f 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -190,7 +190,7 @@ void Strategy::queryOp(OperationContext* opCtx, const NamespaceString& nss, DbMe
const BSONObj findCommand = queryRequest.asFindCommand();
// We default to allPlansExecution verbosity.
- const auto verbosity = ExplainCommon::EXEC_ALL_PLANS;
+ const auto verbosity = ExplainOptions::Verbosity::kExecAllPlans;
const bool secondaryOk = (readPreference.pref != ReadPreference::PrimaryOnly);
const rpc::ServerSelectionMetadata metadata(secondaryOk, readPreference);
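The kExecAllPlans default covers explains arriving via the legacy OP_QUERY path, where the message carries no verbosity at all, e.g. a query shaped like:

    // Legacy wrapped-query form; modern drivers send the explain command.
    {$query: {x: 1}, $explain: true}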
@@ -567,7 +567,7 @@ void Strategy::writeOp(OperationContext* opCtx, DbMessage* dbm) {
Status Strategy::explainFind(OperationContext* opCtx,
const BSONObj& findCommand,
const QueryRequest& qr,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata& serverSelectionMetadata,
BSONObjBuilder* out) {
BSONObjBuilder explainCmdBob;
diff --git a/src/mongo/s/commands/strategy.h b/src/mongo/s/commands/strategy.h
index 51a0d1673cc..c13e4354be1 100644
--- a/src/mongo/s/commands/strategy.h
+++ b/src/mongo/s/commands/strategy.h
@@ -31,7 +31,7 @@
#include <atomic>
#include "mongo/client/connection_string.h"
-#include "mongo/db/query/explain_common.h"
+#include "mongo/db/query/explain_options.h"
#include "mongo/s/client/shard.h"
namespace mongo {
@@ -99,7 +99,7 @@ public:
static Status explainFind(OperationContext* opCtx,
const BSONObj& findCommand,
const QueryRequest& qr,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata& serverSelectionMetadata,
BSONObjBuilder* out);
diff --git a/src/mongo/s/query/cluster_find.h b/src/mongo/s/query/cluster_find.h
index 5a011d27958..4d9f2b1dd9a 100644
--- a/src/mongo/s/query/cluster_find.h
+++ b/src/mongo/s/query/cluster_find.h
@@ -33,7 +33,7 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/db/cursor_id.h"
#include "mongo/db/query/cursor_response.h"
-#include "mongo/db/query/explain_common.h"
+#include "mongo/db/query/explain_options.h"
namespace mongo {
diff --git a/src/mongo/shell/explainable.js b/src/mongo/shell/explainable.js
index 536f0b27d32..2d6c9c6b0dd 100644
--- a/src/mongo/shell/explainable.js
+++ b/src/mongo/shell/explainable.js
@@ -105,9 +105,20 @@ var Explainable = (function() {
// Add the explain option.
extraOpts = extraOpts || {};
- extraOpts.explain = true;
- return this._collection.aggregate(pipeline, extraOpts);
+ // For compatibility with 3.4 and older servers, when the verbosity is "queryPlanner",
+ // we pass the explain option on the aggregate command itself. Otherwise we issue an
+ // explain command that wraps the aggregate command, which only newer servers support.
+ if (this._verbosity === "queryPlanner") {
+ extraOpts.explain = true;
+ return this._collection.aggregate(pipeline, extraOpts);
+ } else {
+ let aggCmd = Object.extend(
+ {"aggregate": this._collection.getName(), "pipeline": pipeline}, extraOpts);
+ let explainCmd = {"explain": aggCmd, "verbosity": this._verbosity};
+ let explainResult = this._collection.runReadCommand(explainCmd);
+ return throwOrReturn(explainResult);
+ }
};
this.count = function(query, options) {
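With this shell change, the verbosity chosen on the Explainable decides which protocol is used:

    db.c.explain("queryPlanner").aggregate([{$match: {x: 1}}]);
    // passes explain:true on the aggregate command itself (3.4-compatible)

    db.c.explain("executionStats").aggregate([{$match: {x: 1}}]);
    // issues {explain: {aggregate: "c", pipeline: [...]}, verbosity: "executionStats"}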