Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/commands.cpp | 2
-rw-r--r--  src/mongo/db/commands.h | 2
-rw-r--r--  src/mongo/db/commands/SConscript | 7
-rw-r--r--  src/mongo/db/commands/count_cmd.cpp | 19
-rw-r--r--  src/mongo/db/commands/distinct.cpp | 16
-rw-r--r--  src/mongo/db/commands/explain_cmd.cpp | 9
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp | 2
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp | 16
-rw-r--r--  src/mongo/db/commands/group_cmd.cpp | 2
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp | 539
-rw-r--r--  src/mongo/db/commands/run_aggregate.cpp | 511
-rw-r--r--  src/mongo/db/commands/run_aggregate.h | 53
-rw-r--r--  src/mongo/db/commands/write_commands/write_commands.cpp | 4
-rw-r--r--  src/mongo/db/pipeline/SConscript | 6
-rw-r--r--  src/mongo/db/pipeline/aggregation_request.cpp | 51
-rw-r--r--  src/mongo/db/pipeline/aggregation_request.h | 30
-rw-r--r--  src/mongo/db/pipeline/aggregation_request_test.cpp | 105
-rw-r--r--  src/mongo/db/pipeline/document_source.cpp | 3
-rw-r--r--  src/mongo/db/pipeline/document_source.h | 19
-rw-r--r--  src/mongo/db/pipeline/document_source_bucket_auto.cpp | 9
-rw-r--r--  src/mongo/db/pipeline/document_source_bucket_auto.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_bucket_auto_test.cpp | 4
-rw-r--r--  src/mongo/db/pipeline/document_source_bucket_test.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_coll_stats.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_coll_stats.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_count_test.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_cursor.cpp | 9
-rw-r--r--  src/mongo/db/pipeline/document_source_cursor.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_facet.cpp | 6
-rw-r--r--  src/mongo/db/pipeline/document_source_facet.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_geo_near.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_geo_near.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_graph_lookup.cpp | 3
-rw-r--r--  src/mongo/db/pipeline/document_source_graph_lookup.h | 6
-rw-r--r--  src/mongo/db/pipeline/document_source_group.cpp | 10
-rw-r--r--  src/mongo/db/pipeline/document_source_group.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_index_stats.cpp | 3
-rw-r--r--  src/mongo/db/pipeline/document_source_index_stats.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_limit.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_limit.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_lookup.cpp | 3
-rw-r--r--  src/mongo/db/pipeline/document_source_lookup.h | 6
-rw-r--r--  src/mongo/db/pipeline/document_source_match.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_match.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_merge_cursors.cpp | 3
-rw-r--r--  src/mongo/db/pipeline/document_source_merge_cursors.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_mock.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_mock.h | 3
-rw-r--r--  src/mongo/db/pipeline/document_source_out.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_out.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_redact.cpp | 4
-rw-r--r--  src/mongo/db/pipeline/document_source_redact.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_replace_root.cpp | 4
-rw-r--r--  src/mongo/db/pipeline/document_source_sample.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_sample.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp | 3
-rw-r--r--  src/mongo/db/pipeline/document_source_sample_from_random_cursor.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_sample_test.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_single_document_transformation.cpp | 3
-rw-r--r--  src/mongo/db/pipeline/document_source_single_document_transformation.h | 4
-rw-r--r--  src/mongo/db/pipeline/document_source_skip.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_skip.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_sort.cpp | 17
-rw-r--r--  src/mongo/db/pipeline/document_source_sort.h | 6
-rw-r--r--  src/mongo/db/pipeline/document_source_sort_by_count_test.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_tee_consumer.cpp | 3
-rw-r--r--  src/mongo/db/pipeline/document_source_tee_consumer.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_unwind.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_unwind.h | 2
-rw-r--r--  src/mongo/db/pipeline/expression_context.cpp | 4
-rw-r--r--  src/mongo/db/pipeline/expression_context.h | 6
-rw-r--r--  src/mongo/db/pipeline/parsed_add_fields.h | 2
-rw-r--r--  src/mongo/db/pipeline/parsed_add_fields_test.cpp | 45
-rw-r--r--  src/mongo/db/pipeline/parsed_exclusion_projection.cpp | 3
-rw-r--r--  src/mongo/db/pipeline/parsed_exclusion_projection.h | 2
-rw-r--r--  src/mongo/db/pipeline/parsed_exclusion_projection_test.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/parsed_inclusion_projection.cpp | 5
-rw-r--r--  src/mongo/db/pipeline/parsed_inclusion_projection.h | 5
-rw-r--r--  src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp | 36
-rw-r--r--  src/mongo/db/pipeline/pipeline.cpp | 4
-rw-r--r--  src/mongo/db/pipeline/pipeline.h | 7
-rw-r--r--  src/mongo/db/pipeline/pipeline_test.cpp | 11
-rw-r--r--  src/mongo/db/query/SConscript | 22
-rw-r--r--  src/mongo/db/query/count_request.cpp | 4
-rw-r--r--  src/mongo/db/query/count_request_test.cpp | 6
-rw-r--r--  src/mongo/db/query/explain.cpp | 68
-rw-r--r--  src/mongo/db/query/explain.h | 20
-rw-r--r--  src/mongo/db/query/explain_common.cpp | 75
-rw-r--r--  src/mongo/db/query/explain_common.h | 76
-rw-r--r--  src/mongo/db/query/explain_options.cpp | 93
-rw-r--r--  src/mongo/db/query/explain_options.h | 89
-rw-r--r--  src/mongo/db/query/explain_options_test.cpp | 95
-rw-r--r--  src/mongo/db/query/find.cpp | 3
-rw-r--r--  src/mongo/db/query/parsed_distinct.cpp | 3
-rw-r--r--  src/mongo/db/query/parsed_distinct_test.cpp | 10
-rw-r--r--  src/mongo/db/query/query_request.cpp | 3
-rw-r--r--  src/mongo/db/query/query_request_test.cpp | 14
-rw-r--r--  src/mongo/db/views/resolved_view.cpp | 62
-rw-r--r--  src/mongo/db/views/resolved_view.h | 8
-rw-r--r--  src/mongo/db/views/resolved_view_test.cpp | 111
-rw-r--r--  src/mongo/db/views/view_sharding_check.cpp | 8
-rw-r--r--  src/mongo/db/views/view_sharding_check.h | 7
102 files changed, 1445 insertions(+), 1029 deletions(-)
diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp
index e726bb3918a..12e60527080 100644
--- a/src/mongo/db/commands.cpp
+++ b/src/mongo/db/commands.cpp
@@ -146,7 +146,7 @@ void Command::help(stringstream& help) const {
Status Command::explain(OperationContext* opCtx,
const string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata& serverSelectionMetadata,
BSONObjBuilder* out) const {
return {ErrorCodes::IllegalOperation, str::stream() << "Cannot explain cmd: " << getName()};
diff --git a/src/mongo/db/commands.h b/src/mongo/db/commands.h
index 832a8e7c4b8..d185c0cf6d6 100644
--- a/src/mongo/db/commands.h
+++ b/src/mongo/db/commands.h
@@ -217,7 +217,7 @@ public:
virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata& serverSelectionMetadata,
BSONObjBuilder* out) const;
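
The two hunks above move the explain verbosity type from ExplainCommon to ExplainOptions in the Command interface. A minimal standalone sketch of the resulting shape, using simplified stand-ins for Status, ExplainOptions, and Command (the real definitions live in src/mongo/db/query/explain_options.h and src/mongo/db/commands.h):

    // Hypothetical, simplified stand-ins; not the actual mongo definitions.
    #include <iostream>
    #include <string>

    struct Status {
        int code;             // 0 == OK; nonzero models ErrorCodes::IllegalOperation
        std::string reason;
        bool isOK() const { return code == 0; }
    };

    struct ExplainOptions {
        // Verbosity now lives on ExplainOptions instead of ExplainCommon.
        enum class Verbosity { kQueryPlanner, kExecStats, kExecAllPlans };
    };

    class Command {
    public:
        virtual ~Command() = default;
        virtual std::string getName() const = 0;

        // Default mirrors commands.cpp: commands that do not override
        // explain() report that they cannot be explained.
        virtual Status explain(ExplainOptions::Verbosity /*verbosity*/) const {
            return {1, "Cannot explain cmd: " + getName()};
        }
    };

    class PingCmd : public Command {
    public:
        std::string getName() const override { return "ping"; }
    };

    int main() {
        PingCmd ping;
        Status s = ping.explain(ExplainOptions::Verbosity::kQueryPlanner);
        std::cout << s.reason << "\n";  // "Cannot explain cmd: ping"
    }
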
diff --git a/src/mongo/db/commands/SConscript b/src/mongo/db/commands/SConscript
index 48638bcd726..0a3b1442caf 100644
--- a/src/mongo/db/commands/SConscript
+++ b/src/mongo/db/commands/SConscript
@@ -35,15 +35,15 @@ env.Library(
"fail_point_cmd.cpp",
"feature_compatibility_version_command_parser.cpp",
"find_and_modify_common.cpp",
+ "generic.cpp",
"hashcmd.cpp",
"isself.cpp",
"mr_common.cpp",
+ "parameters.cpp",
"rename_collection_common.cpp",
"server_status.cpp",
- "parameters.cpp",
"user_management_commands_common.cpp",
"write_commands/write_commands_common.cpp",
- "generic.cpp",
],
LIBDEPS=[
'$BUILD_DIR/mongo/client/clientdriver',
@@ -92,13 +92,13 @@ env.Library(
"clone.cpp",
"clone_collection.cpp",
"collection_to_capped.cpp",
- "dbcommands.cpp",
"compact.cpp",
"copydb.cpp",
"copydb_start_commands.cpp",
"count_cmd.cpp",
"create_indexes.cpp",
"current_op.cpp",
+ "dbcommands.cpp",
"dbhash.cpp",
"distinct.cpp",
"driverHelpers.cpp",
@@ -128,6 +128,7 @@ env.Library(
"plan_cache_commands.cpp",
"rename_collection_cmd.cpp",
"repair_cursor.cpp",
+ "run_aggregate.cpp",
"set_feature_compatibility_version_command.cpp",
"snapshot_management.cpp",
"test_commands.cpp",
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 7ecbe2f08ea..b8418b0bff3 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -30,9 +30,9 @@
#include "mongo/platform/basic.h"
-
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
+#include "mongo/db/commands/run_aggregate.h"
#include "mongo/db/curop.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/count.h"
@@ -104,7 +104,7 @@ public:
virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata&,
BSONObjBuilder* out) const {
const bool isExplain = true;
@@ -133,10 +133,17 @@ public:
return viewAggregation.getStatus();
}
- std::string errmsg;
- (void)Command::findCommand("aggregate")
- ->run(opCtx, dbname, viewAggregation.getValue(), 0, errmsg, *out);
- return Status::OK();
+ auto viewAggRequest = AggregationRequest::parseFromBSON(
+ request.getValue().getNs(), viewAggregation.getValue(), verbosity);
+ if (!viewAggRequest.isOK()) {
+ return viewAggRequest.getStatus();
+ }
+
+ return runAggregate(opCtx,
+ viewAggRequest.getValue().getNamespaceString(),
+ viewAggRequest.getValue(),
+ viewAggregation.getValue(),
+ *out);
}
// Prevent chunks from being cleaned up during yields - this allows us to only check the
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index febed50512f..cec31d5f9a5 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -45,6 +45,7 @@
#include "mongo/db/client.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/commands.h"
+#include "mongo/db/commands/run_aggregate.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/jsobj.h"
@@ -114,7 +115,7 @@ public:
virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata&,
BSONObjBuilder* out) const {
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
@@ -143,10 +144,15 @@ public:
if (!viewAggregation.isOK()) {
return viewAggregation.getStatus();
}
- std::string errmsg;
- (void)Command::findCommand("aggregate")
- ->run(opCtx, dbname, viewAggregation.getValue(), 0, errmsg, *out);
- return Status::OK();
+
+ auto viewAggRequest =
+ AggregationRequest::parseFromBSON(nss, viewAggregation.getValue(), verbosity);
+ if (!viewAggRequest.isOK()) {
+ return viewAggRequest.getStatus();
+ }
+
+ return runAggregate(
+ opCtx, nss, viewAggRequest.getValue(), viewAggregation.getValue(), *out);
}
auto executor = getExecutorDistinct(
diff --git a/src/mongo/db/commands/explain_cmd.cpp b/src/mongo/db/commands/explain_cmd.cpp
index 7c72c26977f..1f431ec6f6e 100644
--- a/src/mongo/db/commands/explain_cmd.cpp
+++ b/src/mongo/db/commands/explain_cmd.cpp
@@ -114,10 +114,9 @@ public:
int options,
std::string& errmsg,
BSONObjBuilder& result) {
- ExplainCommon::Verbosity verbosity;
- Status parseStatus = ExplainCommon::parseCmdBSON(cmdObj, &verbosity);
- if (!parseStatus.isOK()) {
- return appendCommandStatus(result, parseStatus);
+ auto verbosity = ExplainOptions::parseCmdBSON(cmdObj);
+ if (!verbosity.isOK()) {
+ return appendCommandStatus(result, verbosity.getStatus());
}
// This is the nested command which we are explaining.
@@ -157,7 +156,7 @@ public:
Status explainStatus = commToExplain->explain(opCtx,
dbname,
explainObj,
- verbosity,
+ verbosity.getValue(),
rpc::ServerSelectionMetadata::get(opCtx),
&result);
if (!explainStatus.isOK()) {
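
The explain_cmd.cpp change above swaps an out-parameter API (parseCmdBSON(cmdObj, &verbosity) returning a bare Status) for one that returns the status and the parsed value together. A simplified, self-contained sketch of that pattern; StatusWithLite is a stand-in for mongo::StatusWith, and the accepted verbosity spellings are illustrative rather than the exact BSON handling in explain_options.cpp:

    #include <iostream>
    #include <string>

    enum class Verbosity { kQueryPlanner, kExecStats, kExecAllPlans };

    // Minimal value-or-error carrier, modeled on mongo::StatusWith.
    template <typename T>
    class StatusWithLite {
    public:
        StatusWithLite(T value) : _ok(true), _value(value) {}
        StatusWithLite(std::string err) : _ok(false), _err(std::move(err)) {}
        bool isOK() const { return _ok; }
        const T& getValue() const { return _value; }
        const std::string& getStatus() const { return _err; }
    private:
        bool _ok;
        T _value{};
        std::string _err;
    };

    // Returns the verbosity directly instead of filling an out-parameter.
    StatusWithLite<Verbosity> parseVerbosity(const std::string& s) {
        if (s == "queryPlanner") return Verbosity::kQueryPlanner;
        if (s == "executionStats") return Verbosity::kExecStats;
        if (s == "allPlansExecution") return Verbosity::kExecAllPlans;
        return std::string("verbosity must be one of "
                           "{queryPlanner, executionStats, allPlansExecution}");
    }

    int main() {
        auto v = parseVerbosity("executionStats");
        if (!v.isOK()) {
            std::cout << v.getStatus() << "\n";
            return 1;
        }
        std::cout << "parsed verbosity ordinal: "
                  << static_cast<int>(v.getValue()) << "\n";
    }
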
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 3aa7eb2018c..a2deb9db62a 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -252,7 +252,7 @@ public:
Status explain(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata&,
BSONObjBuilder* out) const override {
const NamespaceString fullNs = parseNsCollectionRequired(dbName, cmdObj);
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index c3d8e88b227..5bb043d438a 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/client.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/commands.h"
+#include "mongo/db/commands/run_aggregate.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/matcher/extensions_callback_real.h"
@@ -132,7 +133,7 @@ public:
Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata&,
BSONObjBuilder* out) const override {
const NamespaceString nss(parseNs(dbname, cmdObj));
@@ -180,11 +181,17 @@ public:
if (!viewAggregationCommand.isOK())
return viewAggregationCommand.getStatus();
- Command* agg = Command::findCommand("aggregate");
- std::string errmsg;
+ // Create the agg request equivalent of the find operation, with the explain verbosity
+ // included.
+ auto aggRequest = AggregationRequest::parseFromBSON(
+ nss, viewAggregationCommand.getValue(), verbosity);
+ if (!aggRequest.isOK()) {
+ return aggRequest.getStatus();
+ }
try {
- agg->run(opCtx, dbname, viewAggregationCommand.getValue(), 0, errmsg, *out);
+ return runAggregate(
+ opCtx, nss, aggRequest.getValue(), viewAggregationCommand.getValue(), *out);
} catch (DBException& error) {
if (error.getCode() == ErrorCodes::InvalidPipelineOperator) {
return {ErrorCodes::InvalidPipelineOperator,
@@ -192,7 +199,6 @@ public:
}
return error.toStatus();
}
- return Status::OK();
}
// The collection may be NULL. If so, getExecutor() should handle it by returning an
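
The count, distinct, and find explain paths above all adopt the same pattern: rather than looking up the "aggregate" command and calling run() on it (which dropped the explain verbosity), each command parses the rewritten view query into an AggregationRequest that carries the verbosity and hands it straight to runAggregate(). A condensed sketch of that flow, with simplified stand-in types:

    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    enum class Verbosity { kQueryPlanner, kExecStats };

    // Simplified stand-in for mongo::AggregationRequest.
    struct AggregationRequest {
        std::string ns;
        std::vector<std::string> pipeline;
        std::optional<Verbosity> explainVerbosity;  // set only on the explain path
    };

    // Stand-in for runAggregate(); here it just reports what it would run.
    bool runAggregate(const AggregationRequest& req) {
        std::cout << "aggregate on " << req.ns
                  << (req.explainVerbosity ? " (explain)" : "") << "\n";
        return true;
    }

    // The explain path for a command against a view: rewrite to an
    // aggregation, attach the verbosity at parse time, and delegate.
    bool explainOnView(const std::string& viewNs, Verbosity verbosity) {
        AggregationRequest req;
        req.ns = viewNs;                   // resolved underlying namespace in reality
        req.pipeline = {"{$match: ...}"};  // view pipeline + the rewritten query
        req.explainVerbosity = verbosity;  // previously dropped; now carried through
        return runAggregate(req);
    }

    int main() {
        explainOnView("test.myView", Verbosity::kQueryPlanner);
    }
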
diff --git a/src/mongo/db/commands/group_cmd.cpp b/src/mongo/db/commands/group_cmd.cpp
index cf42c368e75..a9e6eeca6ca 100644
--- a/src/mongo/db/commands/group_cmd.cpp
+++ b/src/mongo/db/commands/group_cmd.cpp
@@ -123,7 +123,7 @@ private:
virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata&,
BSONObjBuilder* out) const {
GroupRequest groupRequest;
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index 4fcfb97f574..47db0e450c6 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -26,270 +26,21 @@
* it in the license file.
*/
-#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
-
#include "mongo/platform/basic.h"
-#include <boost/optional.hpp>
-#include <deque>
-#include <vector>
-
#include "mongo/base/init.h"
-#include "mongo/db/auth/action_set.h"
-#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/privilege.h"
-#include "mongo/db/catalog/database.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
-#include "mongo/db/curop.h"
-#include "mongo/db/db_raii.h"
-#include "mongo/db/exec/pipeline_proxy.h"
-#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/pipeline/accumulator.h"
-#include "mongo/db/pipeline/aggregation_request.h"
-#include "mongo/db/pipeline/document.h"
-#include "mongo/db/pipeline/document_source.h"
-#include "mongo/db/pipeline/expression.h"
-#include "mongo/db/pipeline/expression_context.h"
-#include "mongo/db/pipeline/lite_parsed_pipeline.h"
+#include "mongo/db/commands/run_aggregate.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/db/pipeline/pipeline.h"
-#include "mongo/db/pipeline/pipeline_d.h"
-#include "mongo/db/query/collation/collator_factory_interface.h"
-#include "mongo/db/query/cursor_response.h"
-#include "mongo/db/query/find_common.h"
-#include "mongo/db/query/get_executor.h"
-#include "mongo/db/query/plan_summary_stats.h"
#include "mongo/db/server_options.h"
-#include "mongo/db/service_context.h"
-#include "mongo/db/storage/storage_options.h"
-#include "mongo/db/views/view.h"
-#include "mongo/db/views/view_catalog.h"
-#include "mongo/db/views/view_sharding_check.h"
-#include "mongo/stdx/memory.h"
-#include "mongo/util/log.h"
-#include "mongo/util/string_map.h"
namespace mongo {
-
-using boost::intrusive_ptr;
-using std::endl;
-using std::shared_ptr;
-using std::string;
-using std::stringstream;
-using std::unique_ptr;
-using stdx::make_unique;
-
namespace {
-/**
- * Returns true if we need to keep a ClientCursor saved for this pipeline (for future getMore
- * requests). Otherwise, returns false. The passed 'nsForCursor' is only used to determine the
- * namespace used in the returned cursor. In the case of views, this can be different from that
- * in 'request'.
- */
-bool handleCursorCommand(OperationContext* opCtx,
- const string& nsForCursor,
- ClientCursor* cursor,
- PlanExecutor* exec,
- const AggregationRequest& request,
- BSONObjBuilder& result) {
- if (cursor) {
- invariant(cursor->getExecutor() == exec);
- invariant(cursor->isAggCursor());
- }
-
- long long batchSize = request.getBatchSize();
-
- // can't use result BSONObjBuilder directly since it won't handle exceptions correctly.
- BSONArrayBuilder resultsArray;
- BSONObj next;
- for (int objCount = 0; objCount < batchSize; objCount++) {
- // The initial getNext() on a PipelineProxyStage may be very expensive so we don't
- // do it when batchSize is 0 since that indicates a desire for a fast return.
- PlanExecutor::ExecState state;
- if ((state = exec->getNext(&next, NULL)) == PlanExecutor::IS_EOF) {
- // make it an obvious error to use cursor or executor after this point
- cursor = NULL;
- exec = NULL;
- break;
- }
-
- uassert(34426,
- "Plan executor error during aggregation: " + WorkingSetCommon::toStatusString(next),
- PlanExecutor::ADVANCED == state);
-
- // If adding this object will cause us to exceed the message size limit, then we stash it
- // for later.
- if (!FindCommon::haveSpaceForNext(next, objCount, resultsArray.len())) {
- exec->enqueue(next);
- break;
- }
-
- resultsArray.append(next);
- }
-
- // NOTE: exec->isEOF() can have side effects such as writing by $out. However, it should
- // be relatively quick since if there was no cursor then the input is empty. Also, this
- // violates the contract for batchSize==0. Sharding requires a cursor to be returned in that
- // case. This is ok for now however, since you can't have a sharded collection that doesn't
- // exist.
- const bool canReturnMoreBatches = cursor;
- if (!canReturnMoreBatches && exec && !exec->isEOF()) {
- // msgasserting since this shouldn't be possible to trigger from today's aggregation
- // language. The wording assumes that the only reason cursor would be null is if the
- // collection doesn't exist.
- msgasserted(
- 17391,
- str::stream() << "Aggregation has more results than fit in initial batch, but can't "
- << "create cursor since collection "
- << nsForCursor
- << " doesn't exist");
- }
-
- if (cursor) {
- // If a time limit was set on the pipeline, remaining time is "rolled over" to the
- // cursor (for use by future getmore ops).
- cursor->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
-
- CurOp::get(opCtx)->debug().cursorid = cursor->cursorid();
-
- // Cursor needs to be in a saved state while we yield locks for getmore. State
- // will be restored in getMore().
- exec->saveState();
- exec->detachFromOperationContext();
- } else {
- CurOp::get(opCtx)->debug().cursorExhausted = true;
- }
-
- const long long cursorId = cursor ? cursor->cursorid() : 0LL;
- appendCursorResponseObject(cursorId, nsForCursor, resultsArray.arr(), &result);
-
- return static_cast<bool>(cursor);
-}
-
-StatusWith<StringMap<ExpressionContext::ResolvedNamespace>> resolveInvolvedNamespaces(
- OperationContext* opCtx, const AggregationRequest& request) {
- // We intentionally do not drop and reacquire our DB lock after resolving the view definition in
- // order to prevent the definition for any view namespaces we've already resolved from changing.
- // This is necessary to prevent a cycle from being formed among the view definitions cached in
- // 'resolvedNamespaces' because we won't re-resolve a view namespace we've already encountered.
- AutoGetDb autoDb(opCtx, request.getNamespaceString().db(), MODE_IS);
- Database* const db = autoDb.getDb();
- ViewCatalog* viewCatalog = db ? db->getViewCatalog() : nullptr;
-
- const LiteParsedPipeline liteParsedPipeline(request);
- const auto& pipelineInvolvedNamespaces = liteParsedPipeline.getInvolvedNamespaces();
- std::deque<NamespaceString> involvedNamespacesQueue(pipelineInvolvedNamespaces.begin(),
- pipelineInvolvedNamespaces.end());
- StringMap<ExpressionContext::ResolvedNamespace> resolvedNamespaces;
-
- while (!involvedNamespacesQueue.empty()) {
- auto involvedNs = std::move(involvedNamespacesQueue.front());
- involvedNamespacesQueue.pop_front();
-
- if (resolvedNamespaces.find(involvedNs.coll()) != resolvedNamespaces.end()) {
- continue;
- }
-
- if (!db || db->getCollection(involvedNs.ns())) {
- // If the database exists and 'involvedNs' refers to a collection namespace, then we
- // resolve it as an empty pipeline in order to read directly from the underlying
- // collection. If the database doesn't exist, then we still resolve it as an empty
- // pipeline because 'involvedNs' doesn't refer to a view namespace in our consistent
- // snapshot of the view catalog.
- resolvedNamespaces[involvedNs.coll()] = {involvedNs, std::vector<BSONObj>{}};
- } else if (viewCatalog->lookup(opCtx, involvedNs.ns())) {
- // If 'involvedNs' refers to a view namespace, then we resolve its definition.
- auto resolvedView = viewCatalog->resolveView(opCtx, involvedNs);
- if (!resolvedView.isOK()) {
- return {ErrorCodes::FailedToParse,
- str::stream() << "Failed to resolve view '" << involvedNs.ns() << "': "
- << resolvedView.getStatus().toString()};
- }
-
- resolvedNamespaces[involvedNs.coll()] = {resolvedView.getValue().getNamespace(),
- resolvedView.getValue().getPipeline()};
-
- // We parse the pipeline corresponding to the resolved view in case we must resolve
- // other view namespaces that are also involved.
- LiteParsedPipeline resolvedViewLitePipeline(
- {resolvedView.getValue().getNamespace(), resolvedView.getValue().getPipeline()});
-
- const auto& resolvedViewInvolvedNamespaces =
- resolvedViewLitePipeline.getInvolvedNamespaces();
- involvedNamespacesQueue.insert(involvedNamespacesQueue.end(),
- resolvedViewInvolvedNamespaces.begin(),
- resolvedViewInvolvedNamespaces.end());
- } else {
- // 'involvedNs' is neither a view nor a collection, so resolve it as an empty pipeline
- // to treat it as reading from a non-existent collection.
- resolvedNamespaces[involvedNs.coll()] = {involvedNs, std::vector<BSONObj>{}};
- }
- }
-
- return resolvedNamespaces;
-}
-
-/**
- * Round trips the pipeline through serialization by calling serialize(), then Pipeline::parse().
- * fasserts if it fails to parse after being serialized.
- */
-boost::intrusive_ptr<Pipeline> reparsePipeline(
- const boost::intrusive_ptr<Pipeline>& pipeline,
- const AggregationRequest& request,
- const boost::intrusive_ptr<ExpressionContext>& expCtx) {
- auto serialized = pipeline->serialize();
-
- // Convert vector<Value> to vector<BSONObj>.
- std::vector<BSONObj> parseableSerialization;
- parseableSerialization.reserve(serialized.size());
- for (auto&& serializedStage : serialized) {
- invariant(serializedStage.getType() == BSONType::Object);
- parseableSerialization.push_back(serializedStage.getDocument().toBson());
- }
-
- auto reparsedPipeline = Pipeline::parse(parseableSerialization, expCtx);
- if (!reparsedPipeline.isOK()) {
- error() << "Aggregation command did not round trip through parsing and serialization "
- "correctly. Input pipeline: "
- << Value(request.getPipeline()) << ", serialized pipeline: " << Value(serialized);
- fassertFailedWithStatusNoTrace(40175, reparsedPipeline.getStatus());
- }
-
- reparsedPipeline.getValue()->optimizePipeline();
- return reparsedPipeline.getValue();
-}
-
-/**
- * Returns Status::OK if each view namespace in 'pipeline' has a default collator equivalent to
- * 'collator'. Otherwise, returns ErrorCodes::OptionNotSupportedOnView.
- */
-Status collatorCompatibleWithPipeline(OperationContext* opCtx,
- Database* db,
- const CollatorInterface* collator,
- const intrusive_ptr<Pipeline> pipeline) {
- if (!db || !pipeline) {
- return Status::OK();
- }
- for (auto&& potentialViewNs : pipeline->getInvolvedCollections()) {
- if (db->getCollection(potentialViewNs.ns())) {
- continue;
- }
-
- auto view = db->getViewCatalog()->lookup(opCtx, potentialViewNs.ns());
- if (!view) {
- continue;
- }
- if (!CollatorInterface::collatorsMatch(view->defaultCollator(), collator)) {
- return {ErrorCodes::OptionNotSupportedOnView,
- str::stream() << "Cannot override default collation of view "
- << potentialViewNs.ns()};
- }
- }
- return Status::OK();
-}
-
bool isMergePipeline(const std::vector<BSONObj>& pipeline) {
if (pipeline.empty()) {
return false;
@@ -323,12 +74,12 @@ public:
return ReadWriteType::kRead;
}
- virtual void help(stringstream& help) const {
- help << "{ pipeline: [ { $operator: {...}}, ... ]"
- << ", explain: <bool>"
+ virtual void help(std::stringstream& help) const {
+ help << "{ pipeline: [ { $stage: {...} }, ... ]"
<< ", allowDiskUse: <bool>"
<< ", cursor: {batchSize: <number>}"
- << " }" << endl
+ << ", ..."
+ << " }" << std::endl
<< "See http://dochub.mongodb.org/core/aggregation for more details.";
}
@@ -339,252 +90,11 @@ public:
return AuthorizationSession::get(client)->checkAuthForAggregate(nss, cmdObj);
}
- bool runParsed(OperationContext* opCtx,
- const NamespaceString& origNss,
- const AggregationRequest& request,
- BSONObj& cmdObj,
- string& errmsg,
- BSONObjBuilder& result) {
- // For operations on views, this will be the underlying namespace.
- const NamespaceString& nss = request.getNamespaceString();
-
- // Parse the user-specified collation, if any.
- std::unique_ptr<CollatorInterface> userSpecifiedCollator = request.getCollation().isEmpty()
- ? nullptr
- : uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext())
- ->makeFromBSON(request.getCollation()));
-
- boost::optional<ClientCursorPin> pin; // either this OR the exec will be non-null
- unique_ptr<PlanExecutor> exec;
- boost::intrusive_ptr<ExpressionContext> expCtx;
- boost::intrusive_ptr<Pipeline> pipeline;
- auto curOp = CurOp::get(opCtx);
- {
- // This will throw if the sharding version for this connection is out of date. If the
- // namespace is a view, the lock will be released before re-running the aggregation.
- // Otherwise, the lock must be held continuously from now until we have we created both
- // the output ClientCursor and the input executor. This ensures that both are using the
- // same sharding version that we synchronize on here. This is also why we always need to
- // create a ClientCursor even when we aren't outputting to a cursor. See the comment on
- // ShardFilterStage for more details.
- AutoGetCollectionOrViewForRead ctx(opCtx, nss);
- Collection* collection = ctx.getCollection();
-
- // If this is a view, resolve it by finding the underlying collection and stitching view
- // pipelines and this request's pipeline together. We then release our locks before
- // recursively calling run, which will re-acquire locks on the underlying collection.
- // (The lock must be released because recursively acquiring locks on the database will
- // prohibit yielding.)
- const LiteParsedPipeline liteParsedPipeline(request);
- if (ctx.getView() && !liteParsedPipeline.startsWithCollStats()) {
- // Check that the default collation of 'view' is compatible with the operation's
- // collation. The check is skipped if the 'request' has the empty collation, which
- // means that no collation was specified.
- if (!request.getCollation().isEmpty()) {
- if (!CollatorInterface::collatorsMatch(ctx.getView()->defaultCollator(),
- userSpecifiedCollator.get())) {
- return appendCommandStatus(result,
- {ErrorCodes::OptionNotSupportedOnView,
- "Cannot override a view's default collation"});
- }
- }
-
- auto viewDefinition =
- ViewShardingCheck::getResolvedViewIfSharded(opCtx, ctx.getDb(), ctx.getView());
- if (!viewDefinition.isOK()) {
- return appendCommandStatus(result, viewDefinition.getStatus());
- }
-
- if (!viewDefinition.getValue().isEmpty()) {
- ViewShardingCheck::appendShardedViewStatus(viewDefinition.getValue(), &result);
- return false;
- }
-
- auto resolvedView = ctx.getDb()->getViewCatalog()->resolveView(opCtx, nss);
- if (!resolvedView.isOK()) {
- return appendCommandStatus(result, resolvedView.getStatus());
- }
-
- auto collationSpec = ctx.getView()->defaultCollator()
- ? ctx.getView()->defaultCollator()->getSpec().toBSON().getOwned()
- : CollationSpec::kSimpleSpec;
-
- // With the view & collation resolved, we can relinquish locks.
- ctx.releaseLocksForView();
-
- // Parse the resolved view into a new aggregation request.
- auto newCmd = resolvedView.getValue().asExpandedViewAggregation(request);
- if (!newCmd.isOK()) {
- return appendCommandStatus(result, newCmd.getStatus());
- }
- auto newNss = resolvedView.getValue().getNamespace();
- auto newRequest = AggregationRequest::parseFromBSON(newNss, newCmd.getValue());
- if (!newRequest.isOK()) {
- return appendCommandStatus(result, newRequest.getStatus());
- }
- newRequest.getValue().setCollation(collationSpec);
-
- bool status = runParsed(
- opCtx, origNss, newRequest.getValue(), newCmd.getValue(), errmsg, result);
- {
- // Set the namespace of the curop back to the view namespace so ctx records
- // stats on this view namespace on destruction.
- stdx::lock_guard<Client> lk(*opCtx->getClient());
- curOp->setNS_inlock(nss.ns());
- }
- return status;
- }
-
- // Determine the appropriate collation to make the ExpressionContext.
-
- // If the pipeline does not have a user-specified collation, set it from the collection
- // default. Be careful to consult the original request BSON to check if a collation was
- // specified, since a specification of {locale: "simple"} will result in a null
- // collator.
- auto collatorToUse = std::move(userSpecifiedCollator);
- if (request.getCollation().isEmpty() && collection &&
- collection->getDefaultCollator()) {
- invariant(!collatorToUse);
- collatorToUse = collection->getDefaultCollator()->clone();
- }
-
- expCtx.reset(
- new ExpressionContext(opCtx,
- request,
- std::move(collatorToUse),
- uassertStatusOK(resolveInvolvedNamespaces(opCtx, request))));
- expCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";
-
- // Parse the pipeline.
- auto statusWithPipeline = Pipeline::parse(request.getPipeline(), expCtx);
- if (!statusWithPipeline.isOK()) {
- return appendCommandStatus(result, statusWithPipeline.getStatus());
- }
- pipeline = std::move(statusWithPipeline.getValue());
-
- // Check that the view's collation matches the collation of any views involved
- // in the pipeline.
- auto pipelineCollationStatus =
- collatorCompatibleWithPipeline(opCtx, ctx.getDb(), expCtx->getCollator(), pipeline);
- if (!pipelineCollationStatus.isOK()) {
- return appendCommandStatus(result, pipelineCollationStatus);
- }
-
- pipeline->optimizePipeline();
-
- if (kDebugBuild && !expCtx->isExplain && !expCtx->inShard) {
- // Make sure all operations round-trip through Pipeline::serialize() correctly by
- // re-parsing every command in debug builds. This is important because sharded
- // aggregations rely on this ability. Skipping when inShard because this has
- // already been through the transformation (and this un-sets expCtx->inShard).
- pipeline = reparsePipeline(pipeline, request, expCtx);
- }
-
- // This does mongod-specific stuff like creating the input PlanExecutor and adding
- // it to the front of the pipeline if needed.
- PipelineD::prepareCursorSource(collection, &request, pipeline);
-
- // Create the PlanExecutor which returns results from the pipeline. The WorkingSet
- // ('ws') and the PipelineProxyStage ('proxy') will be owned by the created
- // PlanExecutor.
- auto ws = make_unique<WorkingSet>();
- auto proxy = make_unique<PipelineProxyStage>(opCtx, pipeline, ws.get());
-
- auto statusWithPlanExecutor = (NULL == collection)
- ? PlanExecutor::make(
- opCtx, std::move(ws), std::move(proxy), nss.ns(), PlanExecutor::YIELD_MANUAL)
- : PlanExecutor::make(opCtx,
- std::move(ws),
- std::move(proxy),
- collection,
- PlanExecutor::YIELD_MANUAL);
- invariant(statusWithPlanExecutor.isOK());
- exec = std::move(statusWithPlanExecutor.getValue());
-
- {
- auto planSummary = Explain::getPlanSummary(exec.get());
- stdx::lock_guard<Client> lk(*opCtx->getClient());
- curOp->setPlanSummary_inlock(std::move(planSummary));
- }
-
- if (collection) {
- const bool isAggCursor = true; // enable special locking behavior
- pin.emplace(collection->getCursorManager()->registerCursor(
- {exec.release(),
- nss.ns(),
- opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
- 0,
- cmdObj.getOwned(),
- isAggCursor}));
- // Don't add any code between here and the start of the try block.
- }
-
- // At this point, it is safe to release the collection lock.
- // - In the case where we have a collection: we will need to reacquire the
- // collection lock later when cleaning up our ClientCursorPin.
- // - In the case where we don't have a collection: our PlanExecutor won't be
- // registered, so it will be safe to clean it up outside the lock.
- invariant(!exec || !collection);
- }
-
- try {
- // Unless set to true, the ClientCursor created above will be deleted on block exit.
- bool keepCursor = false;
-
- // If both explain and cursor are specified, explain wins.
- if (expCtx->isExplain) {
- result << "stages" << Value(pipeline->writeExplainOps());
- } else {
- // Cursor must be specified, if explain is not.
- keepCursor = handleCursorCommand(opCtx,
- origNss.ns(),
- pin ? pin->getCursor() : nullptr,
- pin ? pin->getCursor()->getExecutor() : exec.get(),
- request,
- result);
- }
-
- if (!expCtx->isExplain) {
- PlanSummaryStats stats;
- Explain::getSummaryStats(pin ? *pin->getCursor()->getExecutor() : *exec.get(),
- &stats);
- curOp->debug().setPlanSummaryMetrics(stats);
- curOp->debug().nreturned = stats.nReturned;
- }
-
- // Clean up our ClientCursorPin, if needed. We must reacquire the collection lock
- // in order to do so.
- if (pin) {
- // We acquire locks here with DBLock and CollectionLock instead of using
- // AutoGetCollectionForRead. AutoGetCollectionForRead will throw if the
- // sharding version is out of date, and we don't care if the sharding version
- // has changed.
- Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_IS);
- Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);
- if (keepCursor) {
- pin->release();
- } else {
- pin->deleteUnderlying();
- }
- }
- } catch (...) {
- // On our way out of scope, we clean up our ClientCursorPin if needed.
- if (pin) {
- Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_IS);
- Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);
- pin->deleteUnderlying();
- }
- throw;
- }
- // Any code that needs the cursor pinned must be inside the try block, above.
- return appendCommandStatus(result, Status::OK());
- }
-
virtual bool run(OperationContext* opCtx,
- const string& db,
+ const std::string& db,
BSONObj& cmdObj,
int options,
- string& errmsg,
+ std::string& errmsg,
BSONObjBuilder& result) {
const NamespaceString nss(parseNsCollectionRequired(db, cmdObj));
@@ -610,7 +120,38 @@ public:
"http://dochub.mongodb.org/core/3.4-feature-compatibility."));
}
- return runParsed(opCtx, nss, request.getValue(), cmdObj, errmsg, result);
+ return appendCommandStatus(result,
+ runAggregate(opCtx, nss, request.getValue(), cmdObj, result));
+ }
+
+ Status explain(OperationContext* opCtx,
+ const std::string& dbName,
+ const BSONObj& cmdObj,
+ ExplainOptions::Verbosity verbosity,
+ const rpc::ServerSelectionMetadata& serverSelectionMetadata,
+ BSONObjBuilder* out) const override {
+ const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
+
+ // Parse the options for this request, supplying the explain verbosity separately.
+ auto request = AggregationRequest::parseFromBSON(nss, cmdObj, verbosity);
+ if (!request.isOK()) {
+ return request.getStatus();
+ }
+ // If the featureCompatibilityVersion is 3.2, we disallow collation from the user. However,
+ // operations should still respect the collection default collation. The mongos attaches the
+ // collection default collation to the merger pipeline, since the merger may not have the
+ // collection metadata. So the merger needs to accept a collation, and we rely on the shards
+ // to reject collations from the user.
+ if (!request.getValue().getCollation().isEmpty() &&
+ serverGlobalParams.featureCompatibility.version.load() ==
+ ServerGlobalParams::FeatureCompatibility::Version::k32 &&
+ !isMergePipeline(request.getValue().getPipeline())) {
+ return Status(ErrorCodes::InvalidOptions,
+ "The featureCompatibilityVersion must be 3.4 to use collation. See "
+ "http://dochub.mongodb.org/core/3.4-feature-compatibility.");
+ }
+
+ return runAggregate(opCtx, nss, request.getValue(), cmdObj, *out);
}
};
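
Among the helpers moved out of this file (and into run_aggregate.cpp below) is resolveInvolvedNamespaces(), which walks the namespaces a pipeline touches using a memoized worklist, so that cyclic view definitions cannot recurse forever. A self-contained sketch of that algorithm, with a plain map standing in for the view catalog:

    #include <deque>
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
        // viewName -> namespaces its pipeline refers to ($lookup, etc.).
        // Note viewB refers back to viewA: a cycle the memoization absorbs.
        std::map<std::string, std::vector<std::string>> viewCatalog = {
            {"viewA", {"viewB", "coll1"}},
            {"viewB", {"viewA", "coll2"}},
        };

        std::deque<std::string> worklist = {"viewA"};
        std::map<std::string, bool> resolved;  // memoized (true == was a view)

        while (!worklist.empty()) {
            std::string ns = worklist.front();
            worklist.pop_front();
            if (resolved.count(ns))
                continue;  // already resolved; this is what breaks the cycle

            auto it = viewCatalog.find(ns);
            resolved[ns] = (it != viewCatalog.end());
            if (it != viewCatalog.end()) {
                // A view: enqueue every namespace its definition involves.
                for (const auto& dep : it->second)
                    worklist.push_back(dep);
            }
        }

        for (const auto& [ns, isView] : resolved)
            std::cout << ns << (isView ? " (view)" : " (collection)") << "\n";
    }
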
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
new file mode 100644
index 00000000000..684b954b0c5
--- /dev/null
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -0,0 +1,511 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/commands/run_aggregate.h"
+
+#include <boost/optional.hpp>
+#include <vector>
+
+#include "mongo/db/catalog/database.h"
+#include "mongo/db/curop.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/exec/pipeline_proxy.h"
+#include "mongo/db/exec/working_set_common.h"
+#include "mongo/db/pipeline/accumulator.h"
+#include "mongo/db/pipeline/document.h"
+#include "mongo/db/pipeline/document_source.h"
+#include "mongo/db/pipeline/expression.h"
+#include "mongo/db/pipeline/expression_context.h"
+#include "mongo/db/pipeline/lite_parsed_pipeline.h"
+#include "mongo/db/pipeline/pipeline.h"
+#include "mongo/db/pipeline/pipeline_d.h"
+#include "mongo/db/query/collation/collator_factory_interface.h"
+#include "mongo/db/query/cursor_response.h"
+#include "mongo/db/query/find_common.h"
+#include "mongo/db/query/get_executor.h"
+#include "mongo/db/query/plan_summary_stats.h"
+#include "mongo/db/service_context.h"
+#include "mongo/db/storage/storage_options.h"
+#include "mongo/db/views/view.h"
+#include "mongo/db/views/view_catalog.h"
+#include "mongo/db/views/view_sharding_check.h"
+#include "mongo/stdx/memory.h"
+#include "mongo/util/log.h"
+#include "mongo/util/string_map.h"
+
+namespace mongo {
+
+using boost::intrusive_ptr;
+using std::endl;
+using std::shared_ptr;
+using std::string;
+using std::stringstream;
+using std::unique_ptr;
+using stdx::make_unique;
+
+namespace {
+/**
+ * Returns true if we need to keep a ClientCursor saved for this pipeline (for future getMore
+ * requests). Otherwise, returns false. The passed 'nsForCursor' is only used to determine the
+ * namespace used in the returned cursor. In the case of views, this can be different from that
+ * in 'request'.
+ */
+bool handleCursorCommand(OperationContext* opCtx,
+ const string& nsForCursor,
+ ClientCursor* cursor,
+ PlanExecutor* exec,
+ const AggregationRequest& request,
+ BSONObjBuilder& result) {
+ if (cursor) {
+ invariant(cursor->getExecutor() == exec);
+ invariant(cursor->isAggCursor());
+ }
+
+ long long batchSize = request.getBatchSize();
+
+ // can't use result BSONObjBuilder directly since it won't handle exceptions correctly.
+ BSONArrayBuilder resultsArray;
+ BSONObj next;
+ for (int objCount = 0; objCount < batchSize; objCount++) {
+ // The initial getNext() on a PipelineProxyStage may be very expensive so we don't
+ // do it when batchSize is 0 since that indicates a desire for a fast return.
+ PlanExecutor::ExecState state;
+ if ((state = exec->getNext(&next, NULL)) == PlanExecutor::IS_EOF) {
+ // make it an obvious error to use cursor or executor after this point
+ cursor = NULL;
+ exec = NULL;
+ break;
+ }
+
+ uassert(34426,
+ "Plan executor error during aggregation: " + WorkingSetCommon::toStatusString(next),
+ PlanExecutor::ADVANCED == state);
+
+ // If adding this object will cause us to exceed the message size limit, then we stash it
+ // for later.
+ if (!FindCommon::haveSpaceForNext(next, objCount, resultsArray.len())) {
+ exec->enqueue(next);
+ break;
+ }
+
+ resultsArray.append(next);
+ }
+
+ // NOTE: exec->isEOF() can have side effects such as writing by $out. However, it should
+ // be relatively quick since if there was no cursor then the input is empty. Also, this
+ // violates the contract for batchSize==0. Sharding requires a cursor to be returned in that
+ // case. This is ok for now however, since you can't have a sharded collection that doesn't
+ // exist.
+ const bool canReturnMoreBatches = cursor;
+ if (!canReturnMoreBatches && exec && !exec->isEOF()) {
+ // msgasserting since this shouldn't be possible to trigger from today's aggregation
+ // language. The wording assumes that the only reason cursor would be null is if the
+ // collection doesn't exist.
+ msgasserted(
+ 17391,
+ str::stream() << "Aggregation has more results than fit in initial batch, but can't "
+ << "create cursor since collection "
+ << nsForCursor
+ << " doesn't exist");
+ }
+
+ if (cursor) {
+ // If a time limit was set on the pipeline, remaining time is "rolled over" to the
+ // cursor (for use by future getmore ops).
+ cursor->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
+
+ CurOp::get(opCtx)->debug().cursorid = cursor->cursorid();
+
+ // Cursor needs to be in a saved state while we yield locks for getmore. State
+ // will be restored in getMore().
+ exec->saveState();
+ exec->detachFromOperationContext();
+ } else {
+ CurOp::get(opCtx)->debug().cursorExhausted = true;
+ }
+
+ const long long cursorId = cursor ? cursor->cursorid() : 0LL;
+ appendCursorResponseObject(cursorId, nsForCursor, resultsArray.arr(), &result);
+
+ return static_cast<bool>(cursor);
+}
+
+StatusWith<StringMap<ExpressionContext::ResolvedNamespace>> resolveInvolvedNamespaces(
+ OperationContext* opCtx, const AggregationRequest& request) {
+ // We intentionally do not drop and reacquire our DB lock after resolving the view definition in
+ // order to prevent the definition for any view namespaces we've already resolved from changing.
+ // This is necessary to prevent a cycle from being formed among the view definitions cached in
+ // 'resolvedNamespaces' because we won't re-resolve a view namespace we've already encountered.
+ AutoGetDb autoDb(opCtx, request.getNamespaceString().db(), MODE_IS);
+ Database* const db = autoDb.getDb();
+ ViewCatalog* viewCatalog = db ? db->getViewCatalog() : nullptr;
+
+ const LiteParsedPipeline liteParsedPipeline(request);
+ const auto& pipelineInvolvedNamespaces = liteParsedPipeline.getInvolvedNamespaces();
+ std::deque<NamespaceString> involvedNamespacesQueue(pipelineInvolvedNamespaces.begin(),
+ pipelineInvolvedNamespaces.end());
+ StringMap<ExpressionContext::ResolvedNamespace> resolvedNamespaces;
+
+ while (!involvedNamespacesQueue.empty()) {
+ auto involvedNs = std::move(involvedNamespacesQueue.front());
+ involvedNamespacesQueue.pop_front();
+
+ if (resolvedNamespaces.find(involvedNs.coll()) != resolvedNamespaces.end()) {
+ continue;
+ }
+
+ if (!db || db->getCollection(involvedNs.ns())) {
+ // If the database exists and 'involvedNs' refers to a collection namespace, then we
+ // resolve it as an empty pipeline in order to read directly from the underlying
+ // collection. If the database doesn't exist, then we still resolve it as an empty
+ // pipeline because 'involvedNs' doesn't refer to a view namespace in our consistent
+ // snapshot of the view catalog.
+ resolvedNamespaces[involvedNs.coll()] = {involvedNs, std::vector<BSONObj>{}};
+ } else if (viewCatalog->lookup(opCtx, involvedNs.ns())) {
+ // If 'involvedNs' refers to a view namespace, then we resolve its definition.
+ auto resolvedView = viewCatalog->resolveView(opCtx, involvedNs);
+ if (!resolvedView.isOK()) {
+ return {ErrorCodes::FailedToParse,
+ str::stream() << "Failed to resolve view '" << involvedNs.ns() << "': "
+ << resolvedView.getStatus().toString()};
+ }
+
+ resolvedNamespaces[involvedNs.coll()] = {resolvedView.getValue().getNamespace(),
+ resolvedView.getValue().getPipeline()};
+
+ // We parse the pipeline corresponding to the resolved view in case we must resolve
+ // other view namespaces that are also involved.
+ LiteParsedPipeline resolvedViewLitePipeline(
+ {resolvedView.getValue().getNamespace(), resolvedView.getValue().getPipeline()});
+
+ const auto& resolvedViewInvolvedNamespaces =
+ resolvedViewLitePipeline.getInvolvedNamespaces();
+ involvedNamespacesQueue.insert(involvedNamespacesQueue.end(),
+ resolvedViewInvolvedNamespaces.begin(),
+ resolvedViewInvolvedNamespaces.end());
+ } else {
+ // 'involvedNs' is neither a view nor a collection, so resolve it as an empty pipeline
+ // to treat it as reading from a non-existent collection.
+ resolvedNamespaces[involvedNs.coll()] = {involvedNs, std::vector<BSONObj>{}};
+ }
+ }
+
+ return resolvedNamespaces;
+}
+
+/**
+ * Round trips the pipeline through serialization by calling serialize(), then Pipeline::parse().
+ * fasserts if it fails to parse after being serialized.
+ */
+boost::intrusive_ptr<Pipeline> reparsePipeline(
+ const boost::intrusive_ptr<Pipeline>& pipeline,
+ const AggregationRequest& request,
+ const boost::intrusive_ptr<ExpressionContext>& expCtx) {
+ auto serialized = pipeline->serialize();
+
+ // Convert vector<Value> to vector<BSONObj>.
+ std::vector<BSONObj> parseableSerialization;
+ parseableSerialization.reserve(serialized.size());
+ for (auto&& serializedStage : serialized) {
+ invariant(serializedStage.getType() == BSONType::Object);
+ parseableSerialization.push_back(serializedStage.getDocument().toBson());
+ }
+
+ auto reparsedPipeline = Pipeline::parse(parseableSerialization, expCtx);
+ if (!reparsedPipeline.isOK()) {
+ error() << "Aggregation command did not round trip through parsing and serialization "
+ "correctly. Input pipeline: "
+ << Value(request.getPipeline()) << ", serialized pipeline: " << Value(serialized);
+ fassertFailedWithStatusNoTrace(40175, reparsedPipeline.getStatus());
+ }
+
+ reparsedPipeline.getValue()->optimizePipeline();
+ return reparsedPipeline.getValue();
+}
+
+/**
+ * Returns Status::OK if each view namespace in 'pipeline' has a default collator equivalent to
+ * 'collator'. Otherwise, returns ErrorCodes::OptionNotSupportedOnView.
+ */
+Status collatorCompatibleWithPipeline(OperationContext* opCtx,
+ Database* db,
+ const CollatorInterface* collator,
+ const intrusive_ptr<Pipeline> pipeline) {
+ if (!db || !pipeline) {
+ return Status::OK();
+ }
+ for (auto&& potentialViewNs : pipeline->getInvolvedCollections()) {
+ if (db->getCollection(potentialViewNs.ns())) {
+ continue;
+ }
+
+ auto view = db->getViewCatalog()->lookup(opCtx, potentialViewNs.ns());
+ if (!view) {
+ continue;
+ }
+ if (!CollatorInterface::collatorsMatch(view->defaultCollator(), collator)) {
+ return {ErrorCodes::OptionNotSupportedOnView,
+ str::stream() << "Cannot override default collation of view "
+ << potentialViewNs.ns()};
+ }
+ }
+ return Status::OK();
+}
+} // namespace
+
+Status runAggregate(OperationContext* opCtx,
+ const NamespaceString& origNss,
+ const AggregationRequest& request,
+ const BSONObj& cmdObj,
+ BSONObjBuilder& result) {
+ // For operations on views, this will be the underlying namespace.
+ const NamespaceString& nss = request.getNamespaceString();
+
+ // Parse the user-specified collation, if any.
+ std::unique_ptr<CollatorInterface> userSpecifiedCollator = request.getCollation().isEmpty()
+ ? nullptr
+ : uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext())
+ ->makeFromBSON(request.getCollation()));
+
+ boost::optional<ClientCursorPin> pin; // either this OR the exec will be non-null
+ unique_ptr<PlanExecutor> exec;
+ boost::intrusive_ptr<ExpressionContext> expCtx;
+ boost::intrusive_ptr<Pipeline> pipeline;
+ auto curOp = CurOp::get(opCtx);
+ {
+ // This will throw if the sharding version for this connection is out of date. If the
+ // namespace is a view, the lock will be released before re-running the aggregation.
+ // Otherwise, the lock must be held continuously from now until we have created both
+ // the output ClientCursor and the input executor. This ensures that both are using the
+ // same sharding version that we synchronize on here. This is also why we always need to
+ // create a ClientCursor even when we aren't outputting to a cursor. See the comment on
+ // ShardFilterStage for more details.
+ AutoGetCollectionOrViewForRead ctx(opCtx, nss);
+ Collection* collection = ctx.getCollection();
+
+ // If this is a view, resolve it by finding the underlying collection and stitching view
+ // pipelines and this request's pipeline together. We then release our locks before
+ // recursively calling runAggregate(), which will re-acquire locks on the underlying
+ // collection. (The lock must be released because recursively acquiring locks on the
+ // database will prohibit yielding.)
+ const LiteParsedPipeline liteParsedPipeline(request);
+ if (ctx.getView() && !liteParsedPipeline.startsWithCollStats()) {
+ // Check that the default collation of 'view' is compatible with the operation's
+ // collation. The check is skipped if the 'request' has the empty collation, which
+ // means that no collation was specified.
+ if (!request.getCollation().isEmpty()) {
+ if (!CollatorInterface::collatorsMatch(ctx.getView()->defaultCollator(),
+ userSpecifiedCollator.get())) {
+ return {ErrorCodes::OptionNotSupportedOnView,
+ "Cannot override a view's default collation"};
+ }
+ }
+
+ auto viewDefinition =
+ ViewShardingCheck::getResolvedViewIfSharded(opCtx, ctx.getDb(), ctx.getView());
+ if (!viewDefinition.isOK()) {
+ return viewDefinition.getStatus();
+ }
+
+ if (!viewDefinition.getValue().isEmpty()) {
+ return ViewShardingCheck::appendShardedViewResponse(viewDefinition.getValue(),
+ &result);
+ }
+
+ auto resolvedView = ctx.getDb()->getViewCatalog()->resolveView(opCtx, nss);
+ if (!resolvedView.isOK()) {
+ return resolvedView.getStatus();
+ }
+
+ auto collationSpec = ctx.getView()->defaultCollator()
+ ? ctx.getView()->defaultCollator()->getSpec().toBSON().getOwned()
+ : CollationSpec::kSimpleSpec;
+
+ // With the view & collation resolved, we can relinquish locks.
+ ctx.releaseLocksForView();
+
+ // Parse the resolved view into a new aggregation request.
+ auto newRequest = resolvedView.getValue().asExpandedViewAggregation(request);
+ newRequest.setCollation(collationSpec);
+ auto newCmd = newRequest.serializeToCommandObj().toBson();
+
+ auto status = runAggregate(opCtx, origNss, newRequest, newCmd, result);
+ {
+ // Set the namespace of the curop back to the view namespace so ctx records
+ // stats on this view namespace on destruction.
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ curOp->setNS_inlock(nss.ns());
+ }
+ return status;
+ }
+
+ // Determine the appropriate collation to make the ExpressionContext.
+
+ // If the pipeline does not have a user-specified collation, set it from the collection
+ // default. Be careful to consult the original request BSON to check if a collation was
+ // specified, since a specification of {locale: "simple"} will result in a null
+ // collator.
+ auto collatorToUse = std::move(userSpecifiedCollator);
+ if (request.getCollation().isEmpty() && collection && collection->getDefaultCollator()) {
+ invariant(!collatorToUse);
+ collatorToUse = collection->getDefaultCollator()->clone();
+ }
+
+ expCtx.reset(
+ new ExpressionContext(opCtx,
+ request,
+ std::move(collatorToUse),
+ uassertStatusOK(resolveInvolvedNamespaces(opCtx, request))));
+ expCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";
+
+ // Parse the pipeline.
+ auto statusWithPipeline = Pipeline::parse(request.getPipeline(), expCtx);
+ if (!statusWithPipeline.isOK()) {
+ return statusWithPipeline.getStatus();
+ }
+ pipeline = std::move(statusWithPipeline.getValue());
+
+ // Check that the view's collation matches the collation of any views involved
+ // in the pipeline.
+ auto pipelineCollationStatus =
+ collatorCompatibleWithPipeline(opCtx, ctx.getDb(), expCtx->getCollator(), pipeline);
+ if (!pipelineCollationStatus.isOK()) {
+ return pipelineCollationStatus;
+ }
+
+ pipeline->optimizePipeline();
+
+ if (kDebugBuild && !expCtx->explain && !expCtx->inShard) {
+ // Make sure all operations round-trip through Pipeline::serialize() correctly by
+ // re-parsing every command in debug builds. This is important because sharded
+ // aggregations rely on this ability. Skipping when inShard because this has
+ // already been through the transformation (and this un-sets expCtx->inShard).
+ pipeline = reparsePipeline(pipeline, request, expCtx);
+ }
+
+ // This does mongod-specific stuff like creating the input PlanExecutor and adding
+ // it to the front of the pipeline if needed.
+ PipelineD::prepareCursorSource(collection, &request, pipeline);
+
+ // Create the PlanExecutor which returns results from the pipeline. The WorkingSet
+ // ('ws') and the PipelineProxyStage ('proxy') will be owned by the created
+ // PlanExecutor.
+ auto ws = make_unique<WorkingSet>();
+ auto proxy = make_unique<PipelineProxyStage>(opCtx, pipeline, ws.get());
+
+ auto statusWithPlanExecutor = (NULL == collection)
+ ? PlanExecutor::make(
+ opCtx, std::move(ws), std::move(proxy), nss.ns(), PlanExecutor::YIELD_MANUAL)
+ : PlanExecutor::make(
+ opCtx, std::move(ws), std::move(proxy), collection, PlanExecutor::YIELD_MANUAL);
+ invariant(statusWithPlanExecutor.isOK());
+ exec = std::move(statusWithPlanExecutor.getValue());
+
+ {
+ auto planSummary = Explain::getPlanSummary(exec.get());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ curOp->setPlanSummary_inlock(std::move(planSummary));
+ }
+
+ if (collection) {
+ const bool isAggCursor = true; // enable special locking behavior
+ pin.emplace(collection->getCursorManager()->registerCursor(
+ {exec.release(),
+ nss.ns(),
+ opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
+ 0,
+ cmdObj.getOwned(),
+ isAggCursor}));
+ // Don't add any code between here and the start of the try block.
+ }
+
+ // At this point, it is safe to release the collection lock.
+ // - In the case where we have a collection: we will need to reacquire the
+ // collection lock later when cleaning up our ClientCursorPin.
+ // - In the case where we don't have a collection: our PlanExecutor won't be
+ // registered, so it will be safe to clean it up outside the lock.
+ invariant(!exec || !collection);
+ }
+
+ try {
+        // Unless 'keepCursor' is set to true below, the ClientCursor created above (if any)
+        // will be deleted when this block exits.
+ bool keepCursor = false;
+
+ // If both explain and cursor are specified, explain wins.
+ if (expCtx->explain) {
+ result << "stages" << Value(pipeline->writeExplainOps(*expCtx->explain));
+ } else {
+            // A cursor must be specified if explain is not.
+ keepCursor = handleCursorCommand(opCtx,
+ origNss.ns(),
+ pin ? pin->getCursor() : nullptr,
+ pin ? pin->getCursor()->getExecutor() : exec.get(),
+ request,
+ result);
+ }
+
+ if (!expCtx->explain) {
+ PlanSummaryStats stats;
+ Explain::getSummaryStats(pin ? *pin->getCursor()->getExecutor() : *exec.get(), &stats);
+ curOp->debug().setPlanSummaryMetrics(stats);
+ curOp->debug().nreturned = stats.nReturned;
+ }
+
+ // Clean up our ClientCursorPin, if needed. We must reacquire the collection lock
+ // in order to do so.
+ if (pin) {
+ // We acquire locks here with DBLock and CollectionLock instead of using
+ // AutoGetCollectionForRead. AutoGetCollectionForRead will throw if the
+ // sharding version is out of date, and we don't care if the sharding version
+ // has changed.
+ Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_IS);
+ Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);
+ if (keepCursor) {
+ pin->release();
+ } else {
+ pin->deleteUnderlying();
+ }
+ }
+ } catch (...) {
+ // On our way out of scope, we clean up our ClientCursorPin if needed.
+ if (pin) {
+ Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_IS);
+ Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);
+ pin->deleteUnderlying();
+ }
+ throw;
+ }
+ // Any code that needs the cursor pinned must be inside the try block, above.
+ return Status::OK();
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/commands/run_aggregate.h b/src/mongo/db/commands/run_aggregate.h
new file mode 100644
index 00000000000..1412768ddcf
--- /dev/null
+++ b/src/mongo/db/commands/run_aggregate.h
@@ -0,0 +1,53 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/bson/bsonobj.h"
+#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/db/pipeline/aggregation_request.h"
+
+namespace mongo {
+
+/**
+ * Executes the aggregation 'request' over the specified namespace 'nss' using context 'opCtx'.
+ *
+ * The raw aggregate command parameters should be passed in 'cmdObj', and will be reported as the
+ * originatingCommand in subsequent getMores on the resulting agg cursor.
+ *
+ * On success, fills out 'result' with the command response.
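+ *
+ * A typical call looks like the following (an illustrative sketch; the caller-side setup
+ * is hypothetical):
+ *
+ *   auto request = uassertStatusOK(AggregationRequest::parseFromBSON(nss, cmdObj));
+ *   BSONObjBuilder result;
+ *   uassertStatusOK(runAggregate(opCtx, nss, request, cmdObj, result));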
+ */
+Status runAggregate(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const AggregationRequest& request,
+ const BSONObj& cmdObj,
+ BSONObjBuilder& result);
+
+} // namespace mongo
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index 4093bcf083e..3c30ddc9e66 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -304,7 +304,7 @@ public:
Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata&,
BSONObjBuilder* out) const final {
const auto batch = parseUpdateCommand(dbname, cmdObj);
@@ -376,7 +376,7 @@ public:
Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
const rpc::ServerSelectionMetadata&,
BSONObjBuilder* out) const final {
const auto batch = parseDeleteCommand(dbname, cmdObj);
diff --git a/src/mongo/db/pipeline/SConscript b/src/mongo/db/pipeline/SConscript
index 49783bc5def..e243adda24a 100644
--- a/src/mongo/db/pipeline/SConscript
+++ b/src/mongo/db/pipeline/SConscript
@@ -85,13 +85,15 @@ env.Library(
'aggregation_request.cpp',
],
LIBDEPS=[
- 'document_value',
'$BUILD_DIR/mongo/base',
- '$BUILD_DIR/mongo/db/query/command_request_response',
'$BUILD_DIR/mongo/db/namespace_string',
+ '$BUILD_DIR/mongo/db/query/command_request_response',
+ '$BUILD_DIR/mongo/db/query/explain_options',
'$BUILD_DIR/mongo/db/query/query_request',
'$BUILD_DIR/mongo/db/repl/read_concern_args',
'$BUILD_DIR/mongo/db/storage/storage_options',
+ '$BUILD_DIR/mongo/db/write_concern_options',
+ 'document_value',
]
)
diff --git a/src/mongo/db/pipeline/aggregation_request.cpp b/src/mongo/db/pipeline/aggregation_request.cpp
index 947cfbb3f81..920073c6558 100644
--- a/src/mongo/db/pipeline/aggregation_request.cpp
+++ b/src/mongo/db/pipeline/aggregation_request.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/query/query_request.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/storage/storage_options.h"
+#include "mongo/db/write_concern_options.h"
namespace mongo {
@@ -61,8 +62,10 @@ const long long AggregationRequest::kDefaultBatchSize = 101;
AggregationRequest::AggregationRequest(NamespaceString nss, std::vector<BSONObj> pipeline)
: _nss(std::move(nss)), _pipeline(std::move(pipeline)), _batchSize(kDefaultBatchSize) {}
-StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(NamespaceString nss,
- const BSONObj& cmdObj) {
+StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(
+ NamespaceString nss,
+ const BSONObj& cmdObj,
+ boost::optional<ExplainOptions::Verbosity> explainVerbosity) {
// Parse required parameters.
auto pipelineElem = cmdObj[kPipelineName];
if (pipelineElem.eoo() || pipelineElem.type() != BSONType::Array) {
@@ -81,12 +84,13 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(NamespaceString
const std::initializer_list<StringData> optionsParsedElseWhere = {
QueryRequest::cmdOptionMaxTimeMS,
- "writeConcern"_sd,
+ WriteConcernOptions::kWriteConcernField,
kPipelineName,
kCommandName,
repl::ReadConcernArgs::kReadConcernFieldName};
bool hasCursorElem = false;
+ bool hasExplainElem = false;
// Parse optional parameters.
for (auto&& elem : cmdObj) {
@@ -138,7 +142,11 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(NamespaceString
str::stream() << kExplainName << " must be a boolean, not a "
<< typeName(elem.type())};
}
- request.setExplain(elem.Bool());
+
+ hasExplainElem = true;
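+            // A bare {explain: true} maps to the lowest verbosity, queryPlanner;
+            // {explain: false} leaves this as a normal (non-explain) request.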
+ if (elem.Bool()) {
+ request.setExplain(ExplainOptions::Verbosity::kQueryPlanner);
+ }
} else if (kFromRouterName == fieldName) {
if (elem.type() != BSONType::Bool) {
return {ErrorCodes::TypeMismatch,
@@ -165,11 +173,35 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(NamespaceString
}
}
- if (!hasCursorElem && !request.isExplain()) {
+ if (explainVerbosity) {
+ if (hasExplainElem) {
+ return {
+ ErrorCodes::FailedToParse,
+ str::stream() << "The '" << kExplainName
+                              << "' option is illegal when an explain verbosity is also provided"};
+ }
+
+ request.setExplain(explainVerbosity);
+ }
+
+ if (!hasCursorElem && !request.getExplain()) {
+ return {ErrorCodes::FailedToParse,
+ str::stream() << "The '" << kCursorName
+ << "' option is required, except for aggregation explain"};
+ }
+
+ if (request.getExplain() && cmdObj[repl::ReadConcernArgs::kReadConcernFieldName]) {
+ return {ErrorCodes::FailedToParse,
+ str::stream() << "Aggregation explain does not support the '"
+ << repl::ReadConcernArgs::kReadConcernFieldName
+ << "' option"};
+ }
+
+ if (request.getExplain() && cmdObj[WriteConcernOptions::kWriteConcernField]) {
return {ErrorCodes::FailedToParse,
- str::stream() << "The '" << kCursorName << "' option is required, unless '"
- << kExplainName
- << "' is true"};
+               str::stream() << "Aggregation explain does not support the '"
+ << WriteConcernOptions::kWriteConcernField
+ << "' option"};
}
return request;
@@ -181,7 +213,6 @@ Document AggregationRequest::serializeToCommandObj() const {
{kCommandName, _nss.coll()},
{kPipelineName, _pipeline},
// Only serialize booleans if different than their default.
- {kExplainName, _explain ? Value(true) : Value()},
{kAllowDiskUseName, _allowDiskUse ? Value(true) : Value()},
{kFromRouterName, _fromRouter ? Value(true) : Value()},
{bypassDocumentValidationCommandOption(),
@@ -189,7 +220,7 @@ Document AggregationRequest::serializeToCommandObj() const {
// Only serialize a collation if one was specified.
{kCollationName, _collation.isEmpty() ? Value() : Value(_collation)},
// Only serialize batchSize when explain is false.
- {kCursorName, _explain ? Value() : Value(Document{{kBatchSizeName, _batchSize}})},
+ {kCursorName, _explainMode ? Value() : Value(Document{{kBatchSizeName, _batchSize}})},
// Only serialize a hint if one was specified.
{kHintName, _hint.isEmpty() ? Value() : Value(_hint)}};
}
diff --git a/src/mongo/db/pipeline/aggregation_request.h b/src/mongo/db/pipeline/aggregation_request.h
index b25eaa020dc..156d397cbb0 100644
--- a/src/mongo/db/pipeline/aggregation_request.h
+++ b/src/mongo/db/pipeline/aggregation_request.h
@@ -34,6 +34,7 @@
#include "mongo/bson/bsonelement.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/query/explain_options.h"
namespace mongo {
@@ -62,8 +63,15 @@ public:
* Create a new instance of AggregationRequest by parsing the raw command object. Returns a
* non-OK status if a required field was missing, if there was an unrecognized field name or if
* there was a bad value for one of the fields.
+ *
+ * If we are parsing a request for an explained aggregation with an explain verbosity provided,
+ * then 'explainVerbosity' contains this information. In this case, 'cmdObj' may not itself
+ * contain the explain specifier. Otherwise, 'explainVerbosity' should be boost::none.
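+     *
+     * For example (illustrative): a plain {aggregate: ..., pipeline: [...], cursor: {}}
+     * command parses with 'explainVerbosity' == boost::none, while an aggregate wrapped in
+     * the explain command passes the wrapper's verbosity (e.g.
+     * ExplainOptions::Verbosity::kExecStats) and must not also carry an 'explain' field.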
*/
- static StatusWith<AggregationRequest> parseFromBSON(NamespaceString nss, const BSONObj& cmdObj);
+ static StatusWith<AggregationRequest> parseFromBSON(
+ NamespaceString nss,
+ const BSONObj& cmdObj,
+ boost::optional<ExplainOptions::Verbosity> explainVerbosity = boost::none);
/**
* Constructs an AggregationRequest over the given namespace with the given pipeline. All
@@ -75,6 +83,9 @@ public:
* Serializes the options to a Document. Note that this serialization includes the original
* pipeline object, as specified. Callers will likely want to override this field with a
* serialization of a parsed and optimized Pipeline object.
+ *
+ * The explain option is not serialized. Since the explain command format is {explain:
+ * {aggregate: ...}, ...}, explain options are not part of the aggregate command object.
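+     *
+     * For instance, an explained aggregate travels as
+     * {explain: {aggregate: "coll", pipeline: [...]}, verbosity: "queryPlanner"}, so the
+     * command object produced here never carries an 'explain' field of its own.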
*/
Document serializeToCommandObj() const;
@@ -97,10 +108,6 @@ public:
return _pipeline;
}
- bool isExplain() const {
- return _explain;
- }
-
bool isFromRouter() const {
return _fromRouter;
}
@@ -124,6 +131,10 @@ public:
return _hint;
}
+ boost::optional<ExplainOptions::Verbosity> getExplain() const {
+ return _explainMode;
+ }
+
//
// Setters for optional fields.
//
@@ -144,8 +155,8 @@ public:
_hint = hint.getOwned();
}
- void setExplain(bool isExplain) {
- _explain = isExplain;
+ void setExplain(boost::optional<ExplainOptions::Verbosity> verbosity) {
+ _explainMode = verbosity;
}
void setAllowDiskUse(bool allowDiskUse) {
@@ -162,7 +173,6 @@ public:
private:
// Required fields.
-
const NamespaceString _nss;
// An unparsed version of the pipeline.
@@ -181,7 +191,9 @@ private:
// {$hint: <String>}, where <String> is the index name hinted.
BSONObj _hint;
- bool _explain = false;
+ // The explain mode to use, or boost::none if this is not a request for an aggregation explain.
+ boost::optional<ExplainOptions::Verbosity> _explainMode;
+
bool _allowDiskUse = false;
bool _fromRouter = false;
bool _bypassDocumentValidation = false;
diff --git a/src/mongo/db/pipeline/aggregation_request_test.cpp b/src/mongo/db/pipeline/aggregation_request_test.cpp
index 45313fc8a1e..a40bca8d8d1 100644
--- a/src/mongo/db/pipeline/aggregation_request_test.cpp
+++ b/src/mongo/db/pipeline/aggregation_request_test.cpp
@@ -58,7 +58,8 @@ TEST(AggregationRequestTest, ShouldParseAllKnownOptions) {
"bypassDocumentValidation: true, collation: {locale: 'en_US'}, cursor: {batchSize: 10}, "
"hint: {a: 1}}");
auto request = unittest::assertGet(AggregationRequest::parseFromBSON(nss, inputBson));
- ASSERT_TRUE(request.isExplain());
+ ASSERT_TRUE(request.getExplain());
+ ASSERT(*request.getExplain() == ExplainOptions::Verbosity::kQueryPlanner);
ASSERT_TRUE(request.shouldAllowDiskUse());
ASSERT_TRUE(request.isFromRouter());
ASSERT_TRUE(request.shouldBypassDocumentValidation());
@@ -69,6 +70,33 @@ TEST(AggregationRequestTest, ShouldParseAllKnownOptions) {
<< "en_US"));
}
+TEST(AggregationRequestTest, ShouldParseExplicitExplainFalseWithCursorOption) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson = fromjson("{pipeline: [], explain: false, cursor: {batchSize: 10}}");
+ auto request = unittest::assertGet(AggregationRequest::parseFromBSON(nss, inputBson));
+ ASSERT_FALSE(request.getExplain());
+ ASSERT_EQ(request.getBatchSize(), 10);
+}
+
+TEST(AggregationRequestTest, ShouldParseWithSeparateQueryPlannerExplainModeArg) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson = fromjson("{pipeline: []}");
+ auto request = unittest::assertGet(AggregationRequest::parseFromBSON(
+ nss, inputBson, ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_TRUE(request.getExplain());
+ ASSERT(*request.getExplain() == ExplainOptions::Verbosity::kQueryPlanner);
+}
+
+TEST(AggregationRequestTest, ShouldParseWithSeparateQueryPlannerExplainModeArgAndCursorOption) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson = fromjson("{pipeline: [], cursor: {batchSize: 10}}");
+ auto request = unittest::assertGet(
+ AggregationRequest::parseFromBSON(nss, inputBson, ExplainOptions::Verbosity::kExecStats));
+ ASSERT_TRUE(request.getExplain());
+ ASSERT(*request.getExplain() == ExplainOptions::Verbosity::kExecStats);
+ ASSERT_EQ(request.getBatchSize(), 10);
+}
+
//
// Serialization
//
@@ -87,7 +115,7 @@ TEST(AggregationRequestTest, ShouldOnlySerializeRequiredFieldsIfNoOptionalFields
TEST(AggregationRequestTest, ShouldNotSerializeOptionalValuesIfEquivalentToDefault) {
NamespaceString nss("a.collection");
AggregationRequest request(nss, {});
- request.setExplain(false);
+ request.setExplain(boost::none);
request.setAllowDiskUse(false);
request.setFromRouter(false);
request.setBypassDocumentValidation(false);
@@ -104,11 +132,10 @@ TEST(AggregationRequestTest, ShouldNotSerializeOptionalValuesIfEquivalentToDefau
TEST(AggregationRequestTest, ShouldSerializeOptionalValuesIfSet) {
NamespaceString nss("a.collection");
AggregationRequest request(nss, {});
- request.setExplain(true);
request.setAllowDiskUse(true);
request.setFromRouter(true);
request.setBypassDocumentValidation(true);
- request.setBatchSize(10); // batchSize not serialzed when explain is true.
+ request.setBatchSize(10);
const auto hintObj = BSON("a" << 1);
request.setHint(hintObj);
const auto collationObj = BSON("locale"
@@ -118,11 +145,12 @@ TEST(AggregationRequestTest, ShouldSerializeOptionalValuesIfSet) {
auto expectedSerialization =
Document{{AggregationRequest::kCommandName, nss.coll()},
{AggregationRequest::kPipelineName, Value(std::vector<Value>{})},
- {AggregationRequest::kExplainName, true},
{AggregationRequest::kAllowDiskUseName, true},
{AggregationRequest::kFromRouterName, true},
{bypassDocumentValidationCommandOption(), true},
{AggregationRequest::kCollationName, collationObj},
+ {AggregationRequest::kCursorName,
+ Value(Document({{AggregationRequest::kBatchSizeName, 10}}))},
{AggregationRequest::kHintName, hintObj}};
ASSERT_DOCUMENT_EQ(request.serializeToCommandObj(), expectedSerialization);
}
@@ -159,6 +187,18 @@ TEST(AggregationRequestTest, ShouldAcceptHintAsString) {
<< "a_1"));
}
+TEST(AggregationRequestTest, ShouldNotSerializeBatchSizeOrExplainWhenExplainSet) {
+ NamespaceString nss("a.collection");
+ AggregationRequest request(nss, {});
+ request.setBatchSize(10);
+ request.setExplain(ExplainOptions::Verbosity::kQueryPlanner);
+
+ auto expectedSerialization =
+ Document{{AggregationRequest::kCommandName, nss.coll()},
+ {AggregationRequest::kPipelineName, Value(std::vector<Value>{})}};
+ ASSERT_DOCUMENT_EQ(request.serializeToCommandObj(), expectedSerialization);
+}
+
//
// Error cases.
//
@@ -201,13 +241,20 @@ TEST(AggregationRequestTest, ShouldRejectHintAsArray) {
AggregationRequest::parseFromBSON(NamespaceString("a.collection"), inputBson).getStatus());
}
-TEST(AggregationRequestTest, ShouldRejectNonBoolExplain) {
+TEST(AggregationRequestTest, ShouldRejectExplainIfNumber) {
NamespaceString nss("a.collection");
const BSONObj inputBson =
fromjson("{pipeline: [{$match: {a: 'abc'}}], cursor: {}, explain: 1}");
ASSERT_NOT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
}
+TEST(AggregationRequestTest, ShouldRejectExplainIfObject) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson =
+ fromjson("{pipeline: [{$match: {a: 'abc'}}], cursor: {}, explain: {}}");
+ ASSERT_NOT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
+}
+
TEST(AggregationRequestTest, ShouldRejectNonBoolFromRouter) {
NamespaceString nss("a.collection");
const BSONObj inputBson =
@@ -228,6 +275,52 @@ TEST(AggregationRequestTest, ShouldRejectNoCursorNoExplain) {
ASSERT_NOT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
}
+TEST(AggregationRequestTest, ShouldRejectExplainTrueWithSeparateExplainArg) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson = fromjson("{pipeline: [], explain: true}");
+ ASSERT_NOT_OK(
+ AggregationRequest::parseFromBSON(nss, inputBson, ExplainOptions::Verbosity::kExecStats)
+ .getStatus());
+}
+
+TEST(AggregationRequestTest, ShouldRejectExplainFalseWithSeparateExplainArg) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson = fromjson("{pipeline: [], explain: false}");
+ ASSERT_NOT_OK(
+ AggregationRequest::parseFromBSON(nss, inputBson, ExplainOptions::Verbosity::kExecStats)
+ .getStatus());
+}
+
+TEST(AggregationRequestTest, ShouldRejectExplainWithReadConcernMajority) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson =
+ fromjson("{pipeline: [], explain: true, readConcern: {level: 'majority'}}");
+ ASSERT_NOT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
+}
+
+TEST(AggregationRequestTest, ShouldRejectExplainExecStatsVerbosityWithReadConcernMajority) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson = fromjson("{pipeline: [], readConcern: {level: 'majority'}}");
+ ASSERT_NOT_OK(
+ AggregationRequest::parseFromBSON(nss, inputBson, ExplainOptions::Verbosity::kExecStats)
+ .getStatus());
+}
+
+TEST(AggregationRequestTest, ShouldRejectExplainWithWriteConcernMajority) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson =
+ fromjson("{pipeline: [], explain: true, writeConcern: {w: 'majority'}}");
+ ASSERT_NOT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
+}
+
+TEST(AggregationRequestTest, ShouldRejectExplainExecStatsVerbosityWithWriteConcernMajority) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson = fromjson("{pipeline: [], writeConcern: {w: 'majority'}}");
+ ASSERT_NOT_OK(
+ AggregationRequest::parseFromBSON(nss, inputBson, ExplainOptions::Verbosity::kExecStats)
+ .getStatus());
+}
+
//
// Ignore fields parsed elsewhere.
//
diff --git a/src/mongo/db/pipeline/document_source.cpp b/src/mongo/db/pipeline/document_source.cpp
index 440dc762175..ec1d8edfcb3 100644
--- a/src/mongo/db/pipeline/document_source.cpp
+++ b/src/mongo/db/pipeline/document_source.cpp
@@ -198,7 +198,8 @@ void DocumentSource::dispose() {
}
}
-void DocumentSource::serializeToArray(vector<Value>& array, bool explain) const {
+void DocumentSource::serializeToArray(vector<Value>& array,
+ boost::optional<ExplainOptions::Verbosity> explain) const {
Value entry = serialize(explain);
if (!entry.missing()) {
array.push_back(entry);
diff --git a/src/mongo/db/pipeline/document_source.h b/src/mongo/db/pipeline/document_source.h
index 0dd3b7b3154..b6ef71d1480 100644
--- a/src/mongo/db/pipeline/document_source.h
+++ b/src/mongo/db/pipeline/document_source.h
@@ -49,6 +49,7 @@
#include "mongo/db/pipeline/lite_parsed_document_source.h"
#include "mongo/db/pipeline/pipeline.h"
#include "mongo/db/pipeline/value.h"
+#include "mongo/db/query/explain_options.h"
#include "mongo/stdx/functional.h"
#include "mongo/util/intrusive_counter.h"
@@ -244,11 +245,15 @@ public:
/**
* In the default case, serializes the DocumentSource and adds it to the std::vector<Value>.
*
- * A subclass may choose to overwrite this, rather than serialize,
- * if it should output multiple stages (eg, $sort sometimes also outputs a $limit).
+     * A subclass may choose to override this, rather than serialize(), if it should output
+     * multiple stages (e.g., $sort sometimes also outputs a $limit).
+ *
+     * The 'explain' parameter indicates the explain verbosity mode, or is boost::none if no
+ * explain is requested.
*/
-
- virtual void serializeToArray(std::vector<Value>& array, bool explain = false) const;
+ virtual void serializeToArray(
+ std::vector<Value>& array,
+ boost::optional<ExplainOptions::Verbosity> explain = boost::none) const;
/**
* Returns true if doesn't require an input source (most DocumentSources do).
@@ -473,8 +478,12 @@ private:
* This is used by the default implementation of serializeToArray() to add this object
* to a pipeline being serialized. Returning a missing() Value results in no entry
* being added to the array for this stage (DocumentSource).
+ *
+     * The 'explain' parameter indicates the explain verbosity mode, or is boost::none if no
+ * explain is requested.
*/
- virtual Value serialize(bool explain = false) const = 0;
+ virtual Value serialize(
+ boost::optional<ExplainOptions::Verbosity> explain = boost::none) const = 0;
};
/** This class marks DocumentSources that should be split between the merger and the shards.
diff --git a/src/mongo/db/pipeline/document_source_bucket_auto.cpp b/src/mongo/db/pipeline/document_source_bucket_auto.cpp
index b02e8d0226f..f2015df5f64 100644
--- a/src/mongo/db/pipeline/document_source_bucket_auto.cpp
+++ b/src/mongo/db/pipeline/document_source_bucket_auto.cpp
@@ -333,10 +333,11 @@ void DocumentSourceBucketAuto::dispose() {
pSource->dispose();
}
-Value DocumentSourceBucketAuto::serialize(bool explain) const {
+Value DocumentSourceBucketAuto::serialize(
+ boost::optional<ExplainOptions::Verbosity> explain) const {
MutableDocument insides;
- insides["groupBy"] = _groupByExpression->serialize(explain);
+ insides["groupBy"] = _groupByExpression->serialize(static_cast<bool>(explain));
insides["buckets"] = Value(_nBuckets);
if (_granularityRounder) {
@@ -347,8 +348,8 @@ Value DocumentSourceBucketAuto::serialize(bool explain) const {
MutableDocument outputSpec(nOutputFields);
for (size_t i = 0; i < nOutputFields; i++) {
intrusive_ptr<Accumulator> accum = _accumulatorFactories[i](pExpCtx);
- outputSpec[_fieldNames[i]] =
- Value{Document{{accum->getOpName(), _expressions[i]->serialize(explain)}}};
+ outputSpec[_fieldNames[i]] = Value{
+ Document{{accum->getOpName(), _expressions[i]->serialize(static_cast<bool>(explain))}}};
}
insides["output"] = outputSpec.freezeToValue();
diff --git a/src/mongo/db/pipeline/document_source_bucket_auto.h b/src/mongo/db/pipeline/document_source_bucket_auto.h
index 61f7da30c71..75c3f07b683 100644
--- a/src/mongo/db/pipeline/document_source_bucket_auto.h
+++ b/src/mongo/db/pipeline/document_source_bucket_auto.h
@@ -43,7 +43,7 @@ namespace mongo {
*/
class DocumentSourceBucketAuto final : public DocumentSource, public SplittableDocumentSource {
public:
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
GetDepsReturn getDependencies(DepsTracker* deps) const final;
GetNextResult getNext() final;
void dispose() final;
diff --git a/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp b/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp
index 2d71b676024..debd2eede82 100644
--- a/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp
+++ b/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp
@@ -87,9 +87,9 @@ public:
auto bucketAutoStage = createBucketAuto(bucketAutoSpec);
assertBucketAutoType(bucketAutoStage);
- const bool explain = true;
vector<Value> explainedStages;
- bucketAutoStage->serializeToArray(explainedStages, explain);
+ bucketAutoStage->serializeToArray(explainedStages,
+ ExplainOptions::Verbosity::kQueryPlanner);
ASSERT_EQUALS(explainedStages.size(), 1UL);
Value expectedExplain = Value(expectedObj);
diff --git a/src/mongo/db/pipeline/document_source_bucket_test.cpp b/src/mongo/db/pipeline/document_source_bucket_test.cpp
index bdfb3635bb7..1a61934dcb5 100644
--- a/src/mongo/db/pipeline/document_source_bucket_test.cpp
+++ b/src/mongo/db/pipeline/document_source_bucket_test.cpp
@@ -66,7 +66,7 @@ public:
// Serialize the DocumentSourceGroup and DocumentSourceSort from $bucket so that we can
// check the explain output to make sure $group and $sort have the correct fields.
- const bool explain = true;
+ auto explain = ExplainOptions::Verbosity::kQueryPlanner;
vector<Value> explainedStages;
groupStage->serializeToArray(explainedStages, explain);
sortStage->serializeToArray(explainedStages, explain);
diff --git a/src/mongo/db/pipeline/document_source_coll_stats.cpp b/src/mongo/db/pipeline/document_source_coll_stats.cpp
index 30ebb4916fc..56ebaca69e3 100644
--- a/src/mongo/db/pipeline/document_source_coll_stats.cpp
+++ b/src/mongo/db/pipeline/document_source_coll_stats.cpp
@@ -126,7 +126,7 @@ bool DocumentSourceCollStats::isValidInitialSource() const {
return true;
}
-Value DocumentSourceCollStats::serialize(bool explain) const {
+Value DocumentSourceCollStats::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(Document{{getSourceName(), _collStatsSpec}});
}
diff --git a/src/mongo/db/pipeline/document_source_coll_stats.h b/src/mongo/db/pipeline/document_source_coll_stats.h
index d8cf2eb6718..9e5aa2a4f3f 100644
--- a/src/mongo/db/pipeline/document_source_coll_stats.h
+++ b/src/mongo/db/pipeline/document_source_coll_stats.h
@@ -63,7 +63,7 @@ public:
bool isValidInitialSource() const final;
- Value serialize(bool explain = false) const;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
static boost::intrusive_ptr<DocumentSource> createFromBson(
BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
diff --git a/src/mongo/db/pipeline/document_source_count_test.cpp b/src/mongo/db/pipeline/document_source_count_test.cpp
index 5535ee5ea83..e083399b0fa 100644
--- a/src/mongo/db/pipeline/document_source_count_test.cpp
+++ b/src/mongo/db/pipeline/document_source_count_test.cpp
@@ -63,7 +63,7 @@ public:
dynamic_cast<DocumentSourceSingleDocumentTransformation*>(result[1].get());
ASSERT(projectStage);
- const bool explain = true;
+ auto explain = ExplainOptions::Verbosity::kQueryPlanner;
vector<Value> explainedStages;
groupStage->serializeToArray(explainedStages, explain);
projectStage->serializeToArray(explainedStages, explain);
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index 276a16196bc..ae62da0a978 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -174,7 +174,7 @@ void DocumentSourceCursor::recordPlanSummaryStats() {
_planSummaryStats.hasSortStage = hasSortStage;
}
-Value DocumentSourceCursor::serialize(bool explain) const {
+Value DocumentSourceCursor::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
    // We never parse a DocumentSourceCursor, so we only serialize for explain.
if (!explain)
return Value();
@@ -187,8 +187,7 @@ Value DocumentSourceCursor::serialize(bool explain) const {
massert(17392, "No _exec. Were we disposed before explained?", _exec);
_exec->restoreState();
- Explain::explainStages(
- _exec.get(), autoColl.getCollection(), ExplainCommon::QUERY_PLANNER, &explainBuilder);
+ Explain::explainStages(_exec.get(), autoColl.getCollection(), *explain, &explainBuilder);
_exec->saveState();
}
@@ -209,6 +208,10 @@ Value DocumentSourceCursor::serialize(bool explain) const {
BSONObj explainObj = explainBuilder.obj();
invariant(explainObj.hasField("queryPlanner"));
out["queryPlanner"] = Value(explainObj["queryPlanner"]);
+ if (*explain >= ExplainOptions::Verbosity::kExecStats) {
+ invariant(explainObj.hasField("executionStats"));
+ out["executionStats"] = Value(explainObj["executionStats"]);
+ }
return Value(DOC(getSourceName() << out.freezeToValue()));
}
diff --git a/src/mongo/db/pipeline/document_source_cursor.h b/src/mongo/db/pipeline/document_source_cursor.h
index 360b855ad39..3faeea86de6 100644
--- a/src/mongo/db/pipeline/document_source_cursor.h
+++ b/src/mongo/db/pipeline/document_source_cursor.h
@@ -58,7 +58,7 @@ public:
*/
Pipeline::SourceContainer::iterator doOptimizeAt(Pipeline::SourceContainer::iterator itr,
Pipeline::SourceContainer* container) final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
bool isValidInitialSource() const final {
return true;
}
diff --git a/src/mongo/db/pipeline/document_source_facet.cpp b/src/mongo/db/pipeline/document_source_facet.cpp
index 9b03b233051..0842f7f68bf 100644
--- a/src/mongo/db/pipeline/document_source_facet.cpp
+++ b/src/mongo/db/pipeline/document_source_facet.cpp
@@ -179,11 +179,11 @@ DocumentSource::GetNextResult DocumentSourceFacet::getNext() {
return resultDoc.freeze();
}
-Value DocumentSourceFacet::serialize(bool explain) const {
+Value DocumentSourceFacet::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
MutableDocument serialized;
for (auto&& facet : _facets) {
- serialized[facet.name] =
- Value(explain ? facet.pipeline->writeExplainOps() : facet.pipeline->serialize());
+ serialized[facet.name] = Value(explain ? facet.pipeline->writeExplainOps(*explain)
+ : facet.pipeline->serialize());
}
return Value(Document{{"$facet", serialized.freezeToValue()}});
}
diff --git a/src/mongo/db/pipeline/document_source_facet.h b/src/mongo/db/pipeline/document_source_facet.h
index 921b2e87609..4da558e37a1 100644
--- a/src/mongo/db/pipeline/document_source_facet.h
+++ b/src/mongo/db/pipeline/document_source_facet.h
@@ -135,7 +135,7 @@ private:
DocumentSourceFacet(std::vector<FacetPipeline> facetPipelines,
const boost::intrusive_ptr<ExpressionContext>& expCtx);
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
boost::intrusive_ptr<TeeBuffer> _teeBuffer;
std::vector<FacetPipeline> _facets;
diff --git a/src/mongo/db/pipeline/document_source_geo_near.cpp b/src/mongo/db/pipeline/document_source_geo_near.cpp
index 17e9997bbf5..c971509e373 100644
--- a/src/mongo/db/pipeline/document_source_geo_near.cpp
+++ b/src/mongo/db/pipeline/document_source_geo_near.cpp
@@ -97,7 +97,7 @@ intrusive_ptr<DocumentSource> DocumentSourceGeoNear::getMergeSource() {
return DocumentSourceSort::create(pExpCtx, BSON(distanceField->fullPath() << 1), limit);
}
-Value DocumentSourceGeoNear::serialize(bool explain) const {
+Value DocumentSourceGeoNear::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
MutableDocument result;
if (coordsIsArray) {
diff --git a/src/mongo/db/pipeline/document_source_geo_near.h b/src/mongo/db/pipeline/document_source_geo_near.h
index c38c311a4e5..03bcf304eee 100644
--- a/src/mongo/db/pipeline/document_source_geo_near.h
+++ b/src/mongo/db/pipeline/document_source_geo_near.h
@@ -49,7 +49,7 @@ public:
bool isValidInitialSource() const final {
return true;
}
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
BSONObjSet getOutputSorts() final {
return SimpleBSONObjComparator::kInstance.makeBSONObjSet(
{BSON(distanceField->fullPath() << -1)});
diff --git a/src/mongo/db/pipeline/document_source_graph_lookup.cpp b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
index a11af0a44c3..e8e2cacc108 100644
--- a/src/mongo/db/pipeline/document_source_graph_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
@@ -390,7 +390,8 @@ void DocumentSourceGraphLookUp::checkMemoryUsage() {
_cache.evictDownTo(_maxMemoryUsageBytes - _frontierUsageBytes - _visitedUsageBytes);
}
-void DocumentSourceGraphLookUp::serializeToArray(std::vector<Value>& array, bool explain) const {
+void DocumentSourceGraphLookUp::serializeToArray(
+ std::vector<Value>& array, boost::optional<ExplainOptions::Verbosity> explain) const {
// Serialize default options.
MutableDocument spec(DOC("from" << _from.coll() << "as" << _as.fullPath() << "connectToField"
<< _connectToField.fullPath()
diff --git a/src/mongo/db/pipeline/document_source_graph_lookup.h b/src/mongo/db/pipeline/document_source_graph_lookup.h
index 6e94e98b4af..8c14a2ab8fa 100644
--- a/src/mongo/db/pipeline/document_source_graph_lookup.h
+++ b/src/mongo/db/pipeline/document_source_graph_lookup.h
@@ -45,7 +45,9 @@ public:
const char* getSourceName() const final;
void dispose() final;
BSONObjSet getOutputSorts() final;
- void serializeToArray(std::vector<Value>& array, bool explain = false) const final;
+ void serializeToArray(
+ std::vector<Value>& array,
+ boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
/**
* Returns the 'as' path, and possibly the fields modified by an absorbed $unwind.
@@ -107,7 +109,7 @@ private:
boost::optional<long long> maxDepth,
boost::optional<boost::intrusive_ptr<DocumentSourceUnwind>> unwindSrc);
- Value serialize(bool explain = false) const final {
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final {
// Should not be called; use serializeToArray instead.
MONGO_UNREACHABLE;
}
diff --git a/src/mongo/db/pipeline/document_source_group.cpp b/src/mongo/db/pipeline/document_source_group.cpp
index a70a65c06b9..d4f44850a8f 100644
--- a/src/mongo/db/pipeline/document_source_group.cpp
+++ b/src/mongo/db/pipeline/document_source_group.cpp
@@ -198,19 +198,19 @@ intrusive_ptr<DocumentSource> DocumentSourceGroup::optimize() {
return this;
}
-Value DocumentSourceGroup::serialize(bool explain) const {
+Value DocumentSourceGroup::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
MutableDocument insides;
// Add the _id.
if (_idFieldNames.empty()) {
invariant(_idExpressions.size() == 1);
- insides["_id"] = _idExpressions[0]->serialize(explain);
+ insides["_id"] = _idExpressions[0]->serialize(static_cast<bool>(explain));
} else {
// Decomposed document case.
invariant(_idExpressions.size() == _idFieldNames.size());
MutableDocument md;
for (size_t i = 0; i < _idExpressions.size(); i++) {
- md[_idFieldNames[i]] = _idExpressions[i]->serialize(explain);
+ md[_idFieldNames[i]] = _idExpressions[i]->serialize(static_cast<bool>(explain));
}
insides["_id"] = md.freezeToValue();
}
@@ -219,8 +219,8 @@ Value DocumentSourceGroup::serialize(bool explain) const {
const size_t n = vFieldName.size();
for (size_t i = 0; i < n; ++i) {
intrusive_ptr<Accumulator> accum = vpAccumulatorFactory[i](pExpCtx);
- insides[vFieldName[i]] =
- Value(DOC(accum->getOpName() << vpExpression[i]->serialize(explain)));
+ insides[vFieldName[i]] = Value(
+ DOC(accum->getOpName() << vpExpression[i]->serialize(static_cast<bool>(explain))));
}
if (_doingMerge) {
diff --git a/src/mongo/db/pipeline/document_source_group.h b/src/mongo/db/pipeline/document_source_group.h
index e79ce631817..86b973b9e04 100644
--- a/src/mongo/db/pipeline/document_source_group.h
+++ b/src/mongo/db/pipeline/document_source_group.h
@@ -48,7 +48,7 @@ public:
// Virtuals from DocumentSource.
boost::intrusive_ptr<DocumentSource> optimize() final;
GetDepsReturn getDependencies(DepsTracker* deps) const final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
GetNextResult getNext() final;
void dispose() final;
const char* getSourceName() const final;
diff --git a/src/mongo/db/pipeline/document_source_index_stats.cpp b/src/mongo/db/pipeline/document_source_index_stats.cpp
index 3256f8e5211..863c37119c3 100644
--- a/src/mongo/db/pipeline/document_source_index_stats.cpp
+++ b/src/mongo/db/pipeline/document_source_index_stats.cpp
@@ -81,7 +81,8 @@ intrusive_ptr<DocumentSource> DocumentSourceIndexStats::createFromBson(
return new DocumentSourceIndexStats(pExpCtx);
}
-Value DocumentSourceIndexStats::serialize(bool explain) const {
+Value DocumentSourceIndexStats::serialize(
+ boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(DOC(getSourceName() << Document()));
}
}
diff --git a/src/mongo/db/pipeline/document_source_index_stats.h b/src/mongo/db/pipeline/document_source_index_stats.h
index 1c1bc521aeb..26beb7edb8b 100644
--- a/src/mongo/db/pipeline/document_source_index_stats.h
+++ b/src/mongo/db/pipeline/document_source_index_stats.h
@@ -42,7 +42,7 @@ public:
// virtuals from DocumentSource
GetNextResult getNext() final;
const char* getSourceName() const final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
virtual bool isValidInitialSource() const final {
return true;
diff --git a/src/mongo/db/pipeline/document_source_limit.cpp b/src/mongo/db/pipeline/document_source_limit.cpp
index 797c6ed6652..704ba907165 100644
--- a/src/mongo/db/pipeline/document_source_limit.cpp
+++ b/src/mongo/db/pipeline/document_source_limit.cpp
@@ -85,7 +85,7 @@ DocumentSource::GetNextResult DocumentSourceLimit::getNext() {
return nextInput;
}
-Value DocumentSourceLimit::serialize(bool explain) const {
+Value DocumentSourceLimit::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(Document{{getSourceName(), _limit}});
}
diff --git a/src/mongo/db/pipeline/document_source_limit.h b/src/mongo/db/pipeline/document_source_limit.h
index 9c04213045d..c6660152d66 100644
--- a/src/mongo/db/pipeline/document_source_limit.h
+++ b/src/mongo/db/pipeline/document_source_limit.h
@@ -47,7 +47,7 @@ public:
*/
Pipeline::SourceContainer::iterator doOptimizeAt(Pipeline::SourceContainer::iterator itr,
Pipeline::SourceContainer* container) final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
GetDepsReturn getDependencies(DepsTracker* deps) const final {
return SEE_NEXT; // This doesn't affect needed fields
diff --git a/src/mongo/db/pipeline/document_source_lookup.cpp b/src/mongo/db/pipeline/document_source_lookup.cpp
index f173224f202..a474baefe6a 100644
--- a/src/mongo/db/pipeline/document_source_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup.cpp
@@ -414,7 +414,8 @@ DocumentSource::GetNextResult DocumentSourceLookUp::unwindResult() {
return output.freeze();
}
-void DocumentSourceLookUp::serializeToArray(std::vector<Value>& array, bool explain) const {
+void DocumentSourceLookUp::serializeToArray(
+ std::vector<Value>& array, boost::optional<ExplainOptions::Verbosity> explain) const {
MutableDocument output(DOC(
getSourceName() << DOC("from" << _fromNs.coll() << "as" << _as.fullPath() << "localField"
<< _localField.fullPath()
diff --git a/src/mongo/db/pipeline/document_source_lookup.h b/src/mongo/db/pipeline/document_source_lookup.h
index e179abfe73e..febdedc0472 100644
--- a/src/mongo/db/pipeline/document_source_lookup.h
+++ b/src/mongo/db/pipeline/document_source_lookup.h
@@ -49,7 +49,9 @@ public:
GetNextResult getNext() final;
const char* getSourceName() const final;
- void serializeToArray(std::vector<Value>& array, bool explain = false) const final;
+ void serializeToArray(
+ std::vector<Value>& array,
+ boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
/**
* Returns the 'as' path, and possibly fields modified by an absorbed $unwind.
@@ -120,7 +122,7 @@ private:
std::string foreignField,
const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- Value serialize(bool explain = false) const final {
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final {
// Should not be called; use serializeToArray instead.
MONGO_UNREACHABLE;
}
diff --git a/src/mongo/db/pipeline/document_source_match.cpp b/src/mongo/db/pipeline/document_source_match.cpp
index 3107e8b7677..e72377ca04e 100644
--- a/src/mongo/db/pipeline/document_source_match.cpp
+++ b/src/mongo/db/pipeline/document_source_match.cpp
@@ -57,7 +57,7 @@ const char* DocumentSourceMatch::getSourceName() const {
return "$match";
}
-Value DocumentSourceMatch::serialize(bool explain) const {
+Value DocumentSourceMatch::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(DOC(getSourceName() << Document(getQuery())));
}
diff --git a/src/mongo/db/pipeline/document_source_match.h b/src/mongo/db/pipeline/document_source_match.h
index badfe1ed8b9..faeda447cb3 100644
--- a/src/mongo/db/pipeline/document_source_match.h
+++ b/src/mongo/db/pipeline/document_source_match.h
@@ -42,7 +42,7 @@ public:
// virtuals from DocumentSource
GetNextResult getNext() final;
const char* getSourceName() const final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
boost::intrusive_ptr<DocumentSource> optimize() final;
BSONObjSet getOutputSorts() final {
return pSource ? pSource->getOutputSorts()
diff --git a/src/mongo/db/pipeline/document_source_merge_cursors.cpp b/src/mongo/db/pipeline/document_source_merge_cursors.cpp
index b03db6560df..a9f06ef6aea 100644
--- a/src/mongo/db/pipeline/document_source_merge_cursors.cpp
+++ b/src/mongo/db/pipeline/document_source_merge_cursors.cpp
@@ -86,7 +86,8 @@ intrusive_ptr<DocumentSource> DocumentSourceMergeCursors::createFromBson(
return new DocumentSourceMergeCursors(std::move(cursorDescriptors), pExpCtx);
}
-Value DocumentSourceMergeCursors::serialize(bool explain) const {
+Value DocumentSourceMergeCursors::serialize(
+ boost::optional<ExplainOptions::Verbosity> explain) const {
vector<Value> cursors;
for (size_t i = 0; i < _cursorDescriptors.size(); i++) {
cursors.push_back(
diff --git a/src/mongo/db/pipeline/document_source_merge_cursors.h b/src/mongo/db/pipeline/document_source_merge_cursors.h
index ad853a9f755..0842ca89bdd 100644
--- a/src/mongo/db/pipeline/document_source_merge_cursors.h
+++ b/src/mongo/db/pipeline/document_source_merge_cursors.h
@@ -52,7 +52,7 @@ public:
GetNextResult getNext() final;
const char* getSourceName() const final;
void dispose() final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
bool isValidInitialSource() const final {
return true;
}
diff --git a/src/mongo/db/pipeline/document_source_mock.cpp b/src/mongo/db/pipeline/document_source_mock.cpp
index 3918bbd5ad7..9f1aab2977b 100644
--- a/src/mongo/db/pipeline/document_source_mock.cpp
+++ b/src/mongo/db/pipeline/document_source_mock.cpp
@@ -54,7 +54,7 @@ const char* DocumentSourceMock::getSourceName() const {
return "mock";
}
-Value DocumentSourceMock::serialize(bool explain) const {
+Value DocumentSourceMock::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(Document{{getSourceName(), Document()}});
}
diff --git a/src/mongo/db/pipeline/document_source_mock.h b/src/mongo/db/pipeline/document_source_mock.h
index 7b966f5c418..74973f3fd27 100644
--- a/src/mongo/db/pipeline/document_source_mock.h
+++ b/src/mongo/db/pipeline/document_source_mock.h
@@ -46,7 +46,8 @@ public:
GetNextResult getNext() override;
const char* getSourceName() const override;
- Value serialize(bool explain = false) const override;
+ Value serialize(
+ boost::optional<ExplainOptions::Verbosity> explain = boost::none) const override;
void dispose() override;
bool isValidInitialSource() const override {
return true;
diff --git a/src/mongo/db/pipeline/document_source_out.cpp b/src/mongo/db/pipeline/document_source_out.cpp
index 7dab6365405..0bccc03643d 100644
--- a/src/mongo/db/pipeline/document_source_out.cpp
+++ b/src/mongo/db/pipeline/document_source_out.cpp
@@ -220,7 +220,7 @@ intrusive_ptr<DocumentSource> DocumentSourceOut::createFromBson(
return new DocumentSourceOut(outputNs, pExpCtx);
}
-Value DocumentSourceOut::serialize(bool explain) const {
+Value DocumentSourceOut::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
massert(
17000, "$out shouldn't have different db than input", _outputNs.db() == pExpCtx->ns.db());
diff --git a/src/mongo/db/pipeline/document_source_out.h b/src/mongo/db/pipeline/document_source_out.h
index 4e8a678404a..5fca80dfba1 100644
--- a/src/mongo/db/pipeline/document_source_out.h
+++ b/src/mongo/db/pipeline/document_source_out.h
@@ -41,7 +41,7 @@ public:
~DocumentSourceOut() final;
GetNextResult getNext() final;
const char* getSourceName() const final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
GetDepsReturn getDependencies(DepsTracker* deps) const final;
bool needsPrimaryShard() const final {
return true;
diff --git a/src/mongo/db/pipeline/document_source_redact.cpp b/src/mongo/db/pipeline/document_source_redact.cpp
index 0096017a2f8..ae79729f2e0 100644
--- a/src/mongo/db/pipeline/document_source_redact.cpp
+++ b/src/mongo/db/pipeline/document_source_redact.cpp
@@ -163,8 +163,8 @@ intrusive_ptr<DocumentSource> DocumentSourceRedact::optimize() {
return this;
}
-Value DocumentSourceRedact::serialize(bool explain) const {
- return Value(DOC(getSourceName() << _expression.get()->serialize(explain)));
+Value DocumentSourceRedact::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
+ return Value(DOC(getSourceName() << _expression.get()->serialize(static_cast<bool>(explain))));
}
intrusive_ptr<DocumentSource> DocumentSourceRedact::createFromBson(
diff --git a/src/mongo/db/pipeline/document_source_redact.h b/src/mongo/db/pipeline/document_source_redact.h
index b9cf6242d42..8177de12eeb 100644
--- a/src/mongo/db/pipeline/document_source_redact.h
+++ b/src/mongo/db/pipeline/document_source_redact.h
@@ -50,7 +50,7 @@ public:
static boost::intrusive_ptr<DocumentSource> createFromBson(
BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& expCtx);
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
private:
DocumentSourceRedact(const boost::intrusive_ptr<ExpressionContext>& expCtx,
diff --git a/src/mongo/db/pipeline/document_source_replace_root.cpp b/src/mongo/db/pipeline/document_source_replace_root.cpp
index 1ae82d5cf3a..521cf748784 100644
--- a/src/mongo/db/pipeline/document_source_replace_root.cpp
+++ b/src/mongo/db/pipeline/document_source_replace_root.cpp
@@ -76,8 +76,8 @@ public:
_newRoot->optimize();
}
- Document serialize(bool explain) const final {
- return Document{{"newRoot", _newRoot->serialize(explain)}};
+ Document serialize(boost::optional<ExplainOptions::Verbosity> explain) const final {
+ return Document{{"newRoot", _newRoot->serialize(static_cast<bool>(explain))}};
}
DocumentSource::GetDepsReturn addDependencies(DepsTracker* deps) const final {
diff --git a/src/mongo/db/pipeline/document_source_sample.cpp b/src/mongo/db/pipeline/document_source_sample.cpp
index c7d98db7260..5117e5c516a 100644
--- a/src/mongo/db/pipeline/document_source_sample.cpp
+++ b/src/mongo/db/pipeline/document_source_sample.cpp
@@ -83,7 +83,7 @@ DocumentSource::GetNextResult DocumentSourceSample::getNext() {
return _sortStage->getNext();
}
-Value DocumentSourceSample::serialize(bool explain) const {
+Value DocumentSourceSample::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(DOC(getSourceName() << DOC("size" << _size)));
}
diff --git a/src/mongo/db/pipeline/document_source_sample.h b/src/mongo/db/pipeline/document_source_sample.h
index 28f1dd97b05..ec88c0737a8 100644
--- a/src/mongo/db/pipeline/document_source_sample.h
+++ b/src/mongo/db/pipeline/document_source_sample.h
@@ -37,7 +37,7 @@ class DocumentSourceSample final : public DocumentSource, public SplittableDocum
public:
GetNextResult getNext() final;
const char* getSourceName() const final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
GetDepsReturn getDependencies(DepsTracker* deps) const final {
return SEE_NEXT;
diff --git a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
index 9fc3b5bf105..ff6c4e6ec16 100644
--- a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
@@ -138,7 +138,8 @@ DocumentSource::GetNextResult DocumentSourceSampleFromRandomCursor::getNextNonDu
"sporadic failure, please try again.");
}
-Value DocumentSourceSampleFromRandomCursor::serialize(bool explain) const {
+Value DocumentSourceSampleFromRandomCursor::serialize(
+ boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(DOC(getSourceName() << DOC("size" << _size)));
}
diff --git a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.h b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.h
index 0d0ac39ca49..19b7106b03d 100644
--- a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.h
+++ b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.h
@@ -41,7 +41,7 @@ class DocumentSourceSampleFromRandomCursor final : public DocumentSource {
public:
GetNextResult getNext() final;
const char* getSourceName() const final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
GetDepsReturn getDependencies(DepsTracker* deps) const final;
static boost::intrusive_ptr<DocumentSourceSampleFromRandomCursor> create(
diff --git a/src/mongo/db/pipeline/document_source_sample_test.cpp b/src/mongo/db/pipeline/document_source_sample_test.cpp
index 30a59baf22b..528676cb294 100644
--- a/src/mongo/db/pipeline/document_source_sample_test.cpp
+++ b/src/mongo/db/pipeline/document_source_sample_test.cpp
@@ -140,7 +140,7 @@ private:
* created with.
*/
void checkBsonRepresentation(const BSONObj& spec) {
- Value serialized = static_cast<DocumentSourceSample*>(sample())->serialize(false);
+ Value serialized = static_cast<DocumentSourceSample*>(sample())->serialize();
auto generatedSpec = serialized.getDocument().toBson();
ASSERT_BSONOBJ_EQ(spec, generatedSpec);
}
diff --git a/src/mongo/db/pipeline/document_source_single_document_transformation.cpp b/src/mongo/db/pipeline/document_source_single_document_transformation.cpp
index 35204d272a4..5fcd0ba5400 100644
--- a/src/mongo/db/pipeline/document_source_single_document_transformation.cpp
+++ b/src/mongo/db/pipeline/document_source_single_document_transformation.cpp
@@ -74,7 +74,8 @@ void DocumentSourceSingleDocumentTransformation::dispose() {
_parsedTransform.reset();
}
-Value DocumentSourceSingleDocumentTransformation::serialize(bool explain) const {
+Value DocumentSourceSingleDocumentTransformation::serialize(
+ boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(Document{{getSourceName(), _parsedTransform->serialize(explain)}});
}
diff --git a/src/mongo/db/pipeline/document_source_single_document_transformation.h b/src/mongo/db/pipeline/document_source_single_document_transformation.h
index 1251cb454a4..86f4607513c 100644
--- a/src/mongo/db/pipeline/document_source_single_document_transformation.h
+++ b/src/mongo/db/pipeline/document_source_single_document_transformation.h
@@ -55,7 +55,7 @@ public:
virtual ~TransformerInterface() = default;
virtual Document applyTransformation(Document input) = 0;
virtual void optimize() = 0;
- virtual Document serialize(bool explain) const = 0;
+ virtual Document serialize(boost::optional<ExplainOptions::Verbosity> explain) const = 0;
virtual DocumentSource::GetDepsReturn addDependencies(DepsTracker* deps) const = 0;
virtual GetModPathsReturn getModifiedPaths() const = 0;
};
@@ -70,7 +70,7 @@ public:
GetNextResult getNext() final;
boost::intrusive_ptr<DocumentSource> optimize() final;
void dispose() final;
- Value serialize(bool explain) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
Pipeline::SourceContainer::iterator doOptimizeAt(Pipeline::SourceContainer::iterator itr,
Pipeline::SourceContainer* container) final;
DocumentSource::GetDepsReturn getDependencies(DepsTracker* deps) const final;
diff --git a/src/mongo/db/pipeline/document_source_skip.cpp b/src/mongo/db/pipeline/document_source_skip.cpp
index 114a2f7d1b4..61125a879f4 100644
--- a/src/mongo/db/pipeline/document_source_skip.cpp
+++ b/src/mongo/db/pipeline/document_source_skip.cpp
@@ -73,7 +73,7 @@ DocumentSource::GetNextResult DocumentSourceSkip::getNext() {
return pSource->getNext();
}
-Value DocumentSourceSkip::serialize(bool explain) const {
+Value DocumentSourceSkip::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(DOC(getSourceName() << _nToSkip));
}
diff --git a/src/mongo/db/pipeline/document_source_skip.h b/src/mongo/db/pipeline/document_source_skip.h
index 92d087e3c75..46cb6a6f3ab 100644
--- a/src/mongo/db/pipeline/document_source_skip.h
+++ b/src/mongo/db/pipeline/document_source_skip.h
@@ -43,7 +43,7 @@ public:
*/
Pipeline::SourceContainer::iterator doOptimizeAt(Pipeline::SourceContainer::iterator itr,
Pipeline::SourceContainer* container) final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
boost::intrusive_ptr<DocumentSource> optimize() final;
BSONObjSet getOutputSorts() final {
return pSource ? pSource->getOutputSorts()
diff --git a/src/mongo/db/pipeline/document_source_sort.cpp b/src/mongo/db/pipeline/document_source_sort.cpp
index 6554a892016..b32b94760d9 100644
--- a/src/mongo/db/pipeline/document_source_sort.cpp
+++ b/src/mongo/db/pipeline/document_source_sort.cpp
@@ -79,16 +79,17 @@ DocumentSource::GetNextResult DocumentSourceSort::getNext() {
return _output->next().second;
}
-void DocumentSourceSort::serializeToArray(vector<Value>& array, bool explain) const {
+void DocumentSourceSort::serializeToArray(
+ std::vector<Value>& array, boost::optional<ExplainOptions::Verbosity> explain) const {
if (explain) { // always one Value for combined $sort + $limit
- array.push_back(
- Value(DOC(getSourceName()
- << DOC("sortKey" << serializeSortKey(explain) << "mergePresorted"
- << (_mergingPresorted ? Value(true) : Value())
- << "limit"
- << (limitSrc ? Value(limitSrc->getLimit()) : Value())))));
+ array.push_back(Value(
+ DOC(getSourceName() << DOC(
+ "sortKey" << serializeSortKey(static_cast<bool>(explain)) << "mergePresorted"
+ << (_mergingPresorted ? Value(true) : Value())
+ << "limit"
+ << (limitSrc ? Value(limitSrc->getLimit()) : Value())))));
} else { // one Value for $sort and maybe a Value for $limit
- MutableDocument inner(serializeSortKey(explain));
+ MutableDocument inner(serializeSortKey(static_cast<bool>(explain)));
if (_mergingPresorted)
inner["$mergePresorted"] = Value(true);
array.push_back(Value(DOC(getSourceName() << inner.freeze())));
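Note the static_cast<bool>(explain) calls: boost::optional converts explicitly to bool, so any engaged verbosity selects the explain form of the sort key while serializeSortKey() itself still takes a flag. A self-contained illustration of that conversion rule:

    #include <boost/optional.hpp>
    #include <cassert>

    int main() {
        boost::optional<int> explain;  // disengaged, like "no explain requested"
        assert(!static_cast<bool>(explain));
        explain = 0;                   // engaged, even with a zero payload
        assert(static_cast<bool>(explain));
        return 0;
    }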
diff --git a/src/mongo/db/pipeline/document_source_sort.h b/src/mongo/db/pipeline/document_source_sort.h
index d9266011a55..52d2ff2b687 100644
--- a/src/mongo/db/pipeline/document_source_sort.h
+++ b/src/mongo/db/pipeline/document_source_sort.h
@@ -43,7 +43,9 @@ public:
// virtuals from DocumentSource
GetNextResult getNext() final;
const char* getSourceName() const final;
- void serializeToArray(std::vector<Value>& array, bool explain = false) const final;
+ void serializeToArray(
+ std::vector<Value>& array,
+ boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
GetModPathsReturn getModifiedPaths() const final {
// A $sort does not modify any paths.
@@ -123,7 +125,7 @@ public:
private:
explicit DocumentSourceSort(const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- Value serialize(bool explain = false) const final {
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final {
MONGO_UNREACHABLE; // Should call serializeToArray instead.
}
diff --git a/src/mongo/db/pipeline/document_source_sort_by_count_test.cpp b/src/mongo/db/pipeline/document_source_sort_by_count_test.cpp
index 82e4aa6fb56..3e0a007c59b 100644
--- a/src/mongo/db/pipeline/document_source_sort_by_count_test.cpp
+++ b/src/mongo/db/pipeline/document_source_sort_by_count_test.cpp
@@ -68,7 +68,7 @@ public:
// Serialize the DocumentSourceGroup and DocumentSourceSort from $sortByCount so that we can
// check the explain output to make sure $group and $sort have the correct fields.
- const bool explain = true;
+ const auto explain = ExplainOptions::Verbosity::kQueryPlanner;
vector<Value> explainedStages;
groupStage->serializeToArray(explainedStages, explain);
sortStage->serializeToArray(explainedStages, explain);
diff --git a/src/mongo/db/pipeline/document_source_tee_consumer.cpp b/src/mongo/db/pipeline/document_source_tee_consumer.cpp
index 052bf7bad7b..c620fba222a 100644
--- a/src/mongo/db/pipeline/document_source_tee_consumer.cpp
+++ b/src/mongo/db/pipeline/document_source_tee_consumer.cpp
@@ -62,7 +62,8 @@ void DocumentSourceTeeConsumer::dispose() {
_bufferSource->dispose(_facetId);
}
-Value DocumentSourceTeeConsumer::serialize(bool explain) const {
+Value DocumentSourceTeeConsumer::serialize(
+ boost::optional<ExplainOptions::Verbosity> explain) const {
// This stage will be inserted at the beginning of a pipeline, but should not show up in the
// explain output.
return Value();
diff --git a/src/mongo/db/pipeline/document_source_tee_consumer.h b/src/mongo/db/pipeline/document_source_tee_consumer.h
index bfdbfbda1cb..cebeaee9a2e 100644
--- a/src/mongo/db/pipeline/document_source_tee_consumer.h
+++ b/src/mongo/db/pipeline/document_source_tee_consumer.h
@@ -63,7 +63,7 @@ public:
return GetDepsReturn::SEE_NEXT;
}
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
private:
DocumentSourceTeeConsumer(const boost::intrusive_ptr<ExpressionContext>& expCtx,
diff --git a/src/mongo/db/pipeline/document_source_unwind.cpp b/src/mongo/db/pipeline/document_source_unwind.cpp
index 683069597e0..079439bd842 100644
--- a/src/mongo/db/pipeline/document_source_unwind.cpp
+++ b/src/mongo/db/pipeline/document_source_unwind.cpp
@@ -239,7 +239,7 @@ DocumentSource::GetModPathsReturn DocumentSourceUnwind::getModifiedPaths() const
return {GetModPathsReturn::Type::kFiniteSet, std::move(modifiedFields)};
}
-Value DocumentSourceUnwind::serialize(bool explain) const {
+Value DocumentSourceUnwind::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(DOC(getSourceName() << DOC(
"path" << _unwindPath.fullPathWithPrefix() << "preserveNullAndEmptyArrays"
<< (_preserveNullAndEmptyArrays ? Value(true) : Value())
diff --git a/src/mongo/db/pipeline/document_source_unwind.h b/src/mongo/db/pipeline/document_source_unwind.h
index 9a9c466d037..53fbd33d083 100644
--- a/src/mongo/db/pipeline/document_source_unwind.h
+++ b/src/mongo/db/pipeline/document_source_unwind.h
@@ -38,7 +38,7 @@ public:
// virtuals from DocumentSource
GetNextResult getNext() final;
const char* getSourceName() const final;
- Value serialize(bool explain = false) const final;
+ Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
BSONObjSet getOutputSorts() final;
/**
diff --git a/src/mongo/db/pipeline/expression_context.cpp b/src/mongo/db/pipeline/expression_context.cpp
index 5bccef201ae..2b149b1af9c 100644
--- a/src/mongo/db/pipeline/expression_context.cpp
+++ b/src/mongo/db/pipeline/expression_context.cpp
@@ -43,7 +43,7 @@ ExpressionContext::ExpressionContext(OperationContext* opCtx,
const AggregationRequest& request,
std::unique_ptr<CollatorInterface> collator,
StringMap<ResolvedNamespace> resolvedNamespaces)
- : isExplain(request.isExplain()),
+ : explain(request.getExplain()),
inShard(request.isFromRouter()),
extSortAllowed(request.shouldAllowDiskUse()),
bypassDocumentValidation(request.shouldBypassDocumentValidation()),
@@ -74,7 +74,7 @@ void ExpressionContext::setCollator(std::unique_ptr<CollatorInterface> coll) {
intrusive_ptr<ExpressionContext> ExpressionContext::copyWith(NamespaceString ns) const {
intrusive_ptr<ExpressionContext> expCtx = new ExpressionContext();
- expCtx->isExplain = isExplain;
+ expCtx->explain = explain;
expCtx->inShard = inShard;
expCtx->inRouter = inRouter;
expCtx->extSortAllowed = extSortAllowed;
diff --git a/src/mongo/db/pipeline/expression_context.h b/src/mongo/db/pipeline/expression_context.h
index af6d7d199c9..4d4c0b75f3a 100644
--- a/src/mongo/db/pipeline/expression_context.h
+++ b/src/mongo/db/pipeline/expression_context.h
@@ -29,6 +29,7 @@
#pragma once
#include <boost/intrusive_ptr.hpp>
+#include <boost/optional.hpp>
#include <memory>
#include <string>
#include <vector>
@@ -40,6 +41,7 @@
#include "mongo/db/pipeline/document_comparator.h"
#include "mongo/db/pipeline/value_comparator.h"
#include "mongo/db/query/collation/collator_interface.h"
+#include "mongo/db/query/explain_options.h"
#include "mongo/util/intrusive_counter.h"
#include "mongo/util/string_map.h"
@@ -98,7 +100,9 @@ public:
return it->second;
};
- bool isExplain = false;
+ // The explain verbosity requested by the user, or boost::none if no explain was requested.
+ boost::optional<ExplainOptions::Verbosity> explain;
+
bool inShard = false;
bool inRouter = false;
bool extSortAllowed = false;
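With the verbosity now stored on the ExpressionContext, a stage that used to branch on isExplain can vary its output by level. A sketch of the pattern (DocumentSourceFoo and its members are hypothetical, shown only for shape):

    Value DocumentSourceFoo::serialize(boost::optional<ExplainOptions::Verbosity> explain) const {
        MutableDocument out;
        out["someOption"] = Value(_someOption);
        // Attach runtime counters only when explain was requested at
        // executionStats verbosity or higher.
        if (explain && *explain >= ExplainOptions::Verbosity::kExecStats) {
            out["docsProcessed"] = Value(_docsProcessed);
        }
        return Value(DOC(getSourceName() << out.freeze()));
    }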
diff --git a/src/mongo/db/pipeline/parsed_add_fields.h b/src/mongo/db/pipeline/parsed_add_fields.h
index 2213191f6c8..8cc3d24f913 100644
--- a/src/mongo/db/pipeline/parsed_add_fields.h
+++ b/src/mongo/db/pipeline/parsed_add_fields.h
@@ -75,7 +75,7 @@ public:
_variables = stdx::make_unique<Variables>(idGenerator.getIdCount());
}
- Document serialize(bool explain = false) const final {
+ Document serialize(boost::optional<ExplainOptions::Verbosity> explain) const final {
MutableDocument output;
_root->serialize(&output, explain);
return output.freeze();
diff --git a/src/mongo/db/pipeline/parsed_add_fields_test.cpp b/src/mongo/db/pipeline/parsed_add_fields_test.cpp
index d7b68afca1d..c9adc2b1a36 100644
--- a/src/mongo/db/pipeline/parsed_add_fields_test.cpp
+++ b/src/mongo/db/pipeline/parsed_add_fields_test.cpp
@@ -176,8 +176,13 @@ TEST(ParsedAddFieldsSerialize, SerializesToCorrectForm) {
fromjson("{a: {$add: [\"$a\", {$const: 2}]}, b: {d: {$const: 3}}, x: {y: {$const: 4}}}"));
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
// Verify that serialize treats the _id field as any other field: including when explicitly included.
@@ -190,8 +195,13 @@ TEST(ParsedAddFieldsSerialize, AddsIdToSerializeWhenExplicitlyIncluded) {
auto expectedSerialization = Document(fromjson("{_id: {$const: false}}"));
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
// Verify that serialize treats the _id field as any other field: excluded when not explicitly
@@ -207,8 +217,13 @@ TEST(ParsedAddFieldsSerialize, OmitsIdFromSerializeWhenNotIncluded) {
auto expectedSerialization = Document(fromjson("{a: {$const: true}}"));
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
// Verify that the $addFields stage optimizes expressions into simpler forms when possible.
@@ -220,8 +235,13 @@ TEST(ParsedAddFieldsOptimize, OptimizesTopLevelExpressions) {
auto expectedSerialization = Document{{"a", Document{{"$const", 3}}}};
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
// Verify that the $addFields stage optimizes expressions even when they are nested.
@@ -233,8 +253,13 @@ TEST(ParsedAddFieldsOptimize, ShouldOptimizeNestedExpressions) {
auto expectedSerialization = Document{{"a", Document{{"b", Document{{"$const", 3}}}}}};
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, addition.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ addition.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
//
diff --git a/src/mongo/db/pipeline/parsed_exclusion_projection.cpp b/src/mongo/db/pipeline/parsed_exclusion_projection.cpp
index 0f800f9a112..ae3fa4ba2dc 100644
--- a/src/mongo/db/pipeline/parsed_exclusion_projection.cpp
+++ b/src/mongo/db/pipeline/parsed_exclusion_projection.cpp
@@ -135,7 +135,8 @@ void ExclusionNode::addModifiedPaths(std::set<std::string>* modifiedPaths) const
// ParsedExclusionProjection.
//
-Document ParsedExclusionProjection::serialize(bool explain) const {
+Document ParsedExclusionProjection::serialize(
+ boost::optional<ExplainOptions::Verbosity> explain) const {
return _root->serialize();
}
diff --git a/src/mongo/db/pipeline/parsed_exclusion_projection.h b/src/mongo/db/pipeline/parsed_exclusion_projection.h
index d0988d2d2cb..74314d97439 100644
--- a/src/mongo/db/pipeline/parsed_exclusion_projection.h
+++ b/src/mongo/db/pipeline/parsed_exclusion_projection.h
@@ -103,7 +103,7 @@ public:
return ProjectionType::kExclusion;
}
- Document serialize(bool explain = false) const final;
+ Document serialize(boost::optional<ExplainOptions::Verbosity> explain) const final;
/**
* Parses the projection specification given by 'spec', populating internal data structures.
diff --git a/src/mongo/db/pipeline/parsed_exclusion_projection_test.cpp b/src/mongo/db/pipeline/parsed_exclusion_projection_test.cpp
index 76b458260d0..f0e860067f7 100644
--- a/src/mongo/db/pipeline/parsed_exclusion_projection_test.cpp
+++ b/src/mongo/db/pipeline/parsed_exclusion_projection_test.cpp
@@ -87,7 +87,7 @@ TEST(ExclusionProjection, ShouldSerializeToEquivalentProjection) {
// Converts numbers to bools, converts dotted paths to nested documents. Note order of excluded
// fields is subject to change.
- auto serialization = exclusion.serialize();
+ auto serialization = exclusion.serialize(boost::none);
ASSERT_EQ(serialization.size(), 4UL);
ASSERT_VALUE_EQ(serialization["a"], Value(false));
ASSERT_VALUE_EQ(serialization["_id"], Value(false));
diff --git a/src/mongo/db/pipeline/parsed_inclusion_projection.cpp b/src/mongo/db/pipeline/parsed_inclusion_projection.cpp
index d294761852f..da6b9e72ce9 100644
--- a/src/mongo/db/pipeline/parsed_inclusion_projection.cpp
+++ b/src/mongo/db/pipeline/parsed_inclusion_projection.cpp
@@ -54,7 +54,8 @@ void InclusionNode::optimize() {
}
}
-void InclusionNode::serialize(MutableDocument* output, bool explain) const {
+void InclusionNode::serialize(MutableDocument* output,
+ boost::optional<ExplainOptions::Verbosity> explain) const {
// Always put "_id" first if it was included (implicitly or explicitly).
if (_inclusions.find("_id") != _inclusions.end()) {
output->addField("_id", Value(true));
@@ -77,7 +78,7 @@ void InclusionNode::serialize(MutableDocument* output, bool explain) const {
} else {
auto expressionIt = _expressions.find(field);
invariant(expressionIt != _expressions.end());
- output->addField(field, expressionIt->second->serialize(explain));
+ output->addField(field, expressionIt->second->serialize(static_cast<bool>(explain)));
}
}
}
diff --git a/src/mongo/db/pipeline/parsed_inclusion_projection.h b/src/mongo/db/pipeline/parsed_inclusion_projection.h
index ec4f8d0466b..1c0a667b025 100644
--- a/src/mongo/db/pipeline/parsed_inclusion_projection.h
+++ b/src/mongo/db/pipeline/parsed_inclusion_projection.h
@@ -62,7 +62,8 @@ public:
/**
* Serialize this projection.
*/
- void serialize(MutableDocument* output, bool explain) const;
+ void serialize(MutableDocument* output,
+ boost::optional<ExplainOptions::Verbosity> explain) const;
/**
* Adds dependencies of any fields that need to be included, or that are used by any
@@ -197,7 +198,7 @@ public:
/**
* Serialize the projection.
*/
- Document serialize(bool explain = false) const final {
+ Document serialize(boost::optional<ExplainOptions::Verbosity> explain) const final {
MutableDocument output;
if (_idExcluded) {
output.addField("_id", Value(false));
diff --git a/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp b/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp
index c2610418a40..a0e7398ef6f 100644
--- a/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp
+++ b/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp
@@ -141,8 +141,13 @@ TEST(InclusionProjection, ShouldSerializeToEquivalentProjection) {
"{_id: true, a: {$add: [\"$a\", {$const: 2}]}, b: {d: true}, x: {y: {$const: 4}}}"));
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
TEST(InclusionProjection, ShouldSerializeExplicitExclusionOfId) {
@@ -154,8 +159,13 @@ TEST(InclusionProjection, ShouldSerializeExplicitExclusionOfId) {
auto expectedSerialization = Document{{"_id", false}, {"a", true}};
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
@@ -169,8 +179,13 @@ TEST(InclusionProjection, ShouldOptimizeTopLevelExpressions) {
auto expectedSerialization = Document{{"_id", true}, {"a", Document{{"$const", 3}}}};
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
TEST(InclusionProjection, ShouldOptimizeNestedExpressions) {
@@ -184,8 +199,13 @@ TEST(InclusionProjection, ShouldOptimizeNestedExpressions) {
Document{{"_id", true}, {"a", Document{{"b", Document{{"$const", 3}}}}}};
// Should be the same if we're serializing for explain or for internal use.
- ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(false));
- ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(true));
+ ASSERT_DOCUMENT_EQ(expectedSerialization, inclusion.serialize(boost::none));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_DOCUMENT_EQ(expectedSerialization,
+ inclusion.serialize(ExplainOptions::Verbosity::kExecAllPlans));
}
TEST(InclusionProjection, ShouldReportThatAllExceptIncludedFieldsAreModified) {
diff --git a/src/mongo/db/pipeline/pipeline.cpp b/src/mongo/db/pipeline/pipeline.cpp
index 5c13f710b40..027f4527aa0 100644
--- a/src/mongo/db/pipeline/pipeline.cpp
+++ b/src/mongo/db/pipeline/pipeline.cpp
@@ -320,10 +320,10 @@ boost::optional<Document> Pipeline::getNext() {
: boost::optional<Document>{nextResult.releaseDocument()};
}
-vector<Value> Pipeline::writeExplainOps() const {
+vector<Value> Pipeline::writeExplainOps(ExplainOptions::Verbosity verbosity) const {
vector<Value> array;
for (SourceContainer::const_iterator it = _sources.begin(); it != _sources.end(); ++it) {
- (*it)->serializeToArray(array, /*explain=*/true);
+ (*it)->serializeToArray(array, verbosity);
}
return array;
}
diff --git a/src/mongo/db/pipeline/pipeline.h b/src/mongo/db/pipeline/pipeline.h
index 3aceb6c28f0..4f15289efef 100644
--- a/src/mongo/db/pipeline/pipeline.h
+++ b/src/mongo/db/pipeline/pipeline.h
@@ -36,6 +36,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/pipeline/dependencies.h"
#include "mongo/db/pipeline/value.h"
+#include "mongo/db/query/explain_options.h"
#include "mongo/util/intrusive_counter.h"
#include "mongo/util/timer.h"
@@ -148,10 +149,10 @@ public:
boost::optional<Document> getNext();
/**
- * Write the pipeline's operators to a std::vector<Value>, with the
- * explain flag true (for DocumentSource::serializeToArray()).
+ * Write the pipeline's operators to a std::vector<Value>, providing the level of detail
+ * specified by 'verbosity'.
*/
- std::vector<Value> writeExplainOps() const;
+ std::vector<Value> writeExplainOps(ExplainOptions::Verbosity verbosity) const;
/**
* Returns the dependencies needed by this pipeline. 'metadataAvailable' should reflect what
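Call sites now choose the level instead of getting an implicit explain=true. For example (a sketch assuming 'pipeline' is a parsed and optimized Pipeline):

    std::vector<Value> ops =
        pipeline->writeExplainOps(ExplainOptions::Verbosity::kQueryPlanner);
    MutableDocument explainDoc;
    explainDoc["stages"] = Value(std::move(ops));  // a vector<Value> becomes a BSON array
    Document result = explainDoc.freeze();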
diff --git a/src/mongo/db/pipeline/pipeline_test.cpp b/src/mongo/db/pipeline/pipeline_test.cpp
index 098371f5cd7..aed8f8d6889 100644
--- a/src/mongo/db/pipeline/pipeline_test.cpp
+++ b/src/mongo/db/pipeline/pipeline_test.cpp
@@ -97,8 +97,9 @@ public:
auto outputPipe = uassertStatusOK(Pipeline::parse(request.getPipeline(), ctx));
outputPipe->optimizePipeline();
- ASSERT_VALUE_EQ(Value(outputPipe->writeExplainOps()),
- Value(outputPipeExpected["pipeline"]));
+ ASSERT_VALUE_EQ(
+ Value(outputPipe->writeExplainOps(ExplainOptions::Verbosity::kQueryPlanner)),
+ Value(outputPipeExpected["pipeline"]));
ASSERT_VALUE_EQ(Value(outputPipe->serialize()), Value(serializePipeExpected["pipeline"]));
}
@@ -963,8 +964,10 @@ public:
shardPipe = mergePipe->splitForSharded();
ASSERT(shardPipe != nullptr);
- ASSERT_VALUE_EQ(Value(shardPipe->writeExplainOps()), Value(shardPipeExpected["pipeline"]));
- ASSERT_VALUE_EQ(Value(mergePipe->writeExplainOps()), Value(mergePipeExpected["pipeline"]));
+ ASSERT_VALUE_EQ(Value(shardPipe->writeExplainOps(ExplainOptions::Verbosity::kQueryPlanner)),
+ Value(shardPipeExpected["pipeline"]));
+ ASSERT_VALUE_EQ(Value(mergePipe->writeExplainOps(ExplainOptions::Verbosity::kQueryPlanner)),
+ Value(mergePipeExpected["pipeline"]));
}
virtual ~Base() {}
diff --git a/src/mongo/db/query/SConscript b/src/mongo/db/query/SConscript
index 09253fa31e1..0a97fe2d7e9 100644
--- a/src/mongo/db/query/SConscript
+++ b/src/mongo/db/query/SConscript
@@ -96,18 +96,38 @@ env.Library(
source=[
"collation/collator_factory_icu_decoration.cpp",
"find_common.cpp",
- "explain_common.cpp",
"parsed_distinct.cpp",
],
LIBDEPS=[
"$BUILD_DIR/mongo/base",
"$BUILD_DIR/mongo/util/fail_point",
"collation/collator_icu",
+ "explain_options",
"query_planner",
"query_request",
],
)
+env.Library(
+ target="explain_options",
+ source=[
+ "explain_options.cpp",
+ ],
+ LIBDEPS=[
+ "$BUILD_DIR/mongo/base",
+ ],
+)
+
+env.CppUnitTest(
+ target="explain_options_test",
+ source=[
+ "explain_options_test.cpp",
+ ],
+ LIBDEPS=[
+ "explain_options",
+ ],
+)
+
env.CppUnitTest(
target="parsed_distinct_test",
source=[
diff --git a/src/mongo/db/query/count_request.cpp b/src/mongo/db/query/count_request.cpp
index 6abddd36ec1..3451cd7989d 100644
--- a/src/mongo/db/query/count_request.cpp
+++ b/src/mongo/db/query/count_request.cpp
@@ -171,10 +171,6 @@ StatusWith<BSONObj> CountRequest::asAggregationCommand() const {
pipelineBuilder.doneFast();
// Complete the command by appending the other options to count.
- if (_explain) {
- aggregationBuilder.append(kExplainField, _explain);
- }
-
if (_collation) {
aggregationBuilder.append(kCollationField, *_collation);
}
diff --git a/src/mongo/db/query/count_request_test.cpp b/src/mongo/db/query/count_request_test.cpp
index e2a40f87ccf..8508c75ead6 100644
--- a/src/mongo/db/query/count_request_test.cpp
+++ b/src/mongo/db/query/count_request_test.cpp
@@ -271,15 +271,17 @@ TEST(CountRequest, ConvertToAggregationWithQueryAndFilterAndLimit) {
SimpleBSONObjComparator::kInstance.makeEqualTo()));
}
-TEST(CountRequest, ConvertToAggregationWithExplain) {
+TEST(CountRequest, ConvertToAggregationOmitsExplain) {
CountRequest countRequest(testns, BSONObj());
countRequest.setExplain(true);
auto agg = countRequest.asAggregationCommand();
ASSERT_OK(agg);
+ ASSERT_FALSE(agg.getValue().hasField("explain"));
+
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(ar.getValue().isExplain());
+ ASSERT_FALSE(ar.getValue().getExplain());
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 7d83021dbd5..eb43caf5047 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -246,7 +246,7 @@ using mongoutils::str::stream;
// static
void Explain::statsToBSON(const PlanStageStats& stats,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
BSONObjBuilder* bob,
BSONObjBuilder* topLevelBob) {
invariant(bob);
@@ -268,7 +268,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
}
// Some top-level exec stats get pulled out of the root stage.
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("nReturned", stats.common.advanced);
bob->appendNumber("executionTimeMillisEstimate", stats.common.executionTimeMillis);
bob->appendNumber("works", stats.common.works);
@@ -285,7 +285,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
if (STAGE_AND_HASH == stats.stageType) {
AndHashStats* spec = static_cast<AndHashStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("memUsage", spec->memUsage);
bob->appendNumber("memLimit", spec->memLimit);
@@ -299,7 +299,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
} else if (STAGE_AND_SORTED == stats.stageType) {
AndSortedStats* spec = static_cast<AndSortedStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("flagged", spec->flagged);
for (size_t i = 0; i < spec->failedAnd.size(); ++i) {
bob->appendNumber(string(stream() << "failedAnd_" << i), spec->failedAnd[i]);
@@ -308,20 +308,20 @@ void Explain::statsToBSON(const PlanStageStats& stats,
} else if (STAGE_COLLSCAN == stats.stageType) {
CollectionScanStats* spec = static_cast<CollectionScanStats*>(stats.specific.get());
bob->append("direction", spec->direction > 0 ? "forward" : "backward");
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("docsExamined", spec->docsTested);
}
} else if (STAGE_COUNT == stats.stageType) {
CountStats* spec = static_cast<CountStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("nCounted", spec->nCounted);
bob->appendNumber("nSkipped", spec->nSkipped);
}
} else if (STAGE_COUNT_SCAN == stats.stageType) {
CountScanStats* spec = static_cast<CountScanStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("keysExamined", spec->keysExamined);
}
@@ -348,7 +348,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
} else if (STAGE_DELETE == stats.stageType) {
DeleteStats* spec = static_cast<DeleteStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("nWouldDelete", spec->docsDeleted);
bob->appendNumber("nInvalidateSkips", spec->nInvalidateSkips);
}
@@ -376,18 +376,18 @@ void Explain::statsToBSON(const PlanStageStats& stats,
bob->append("indexBounds", spec->indexBounds);
}
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("keysExamined", spec->keysExamined);
}
} else if (STAGE_ENSURE_SORTED == stats.stageType) {
EnsureSortedStats* spec = static_cast<EnsureSortedStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("nDropped", spec->nDropped);
}
} else if (STAGE_FETCH == stats.stageType) {
FetchStats* spec = static_cast<FetchStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("docsExamined", spec->docsExamined);
bob->appendNumber("alreadyHasObj", spec->alreadyHasObj);
}
@@ -398,7 +398,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
bob->append("indexName", spec->indexName);
bob->append("indexVersion", spec->indexVersion);
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
BSONArrayBuilder intervalsBob(bob->subarrayStart("searchIntervals"));
for (vector<IntervalStats>::const_iterator it = spec->intervalStats.begin();
it != spec->intervalStats.end();
@@ -414,12 +414,12 @@ void Explain::statsToBSON(const PlanStageStats& stats,
}
} else if (STAGE_GROUP == stats.stageType) {
GroupStats* spec = static_cast<GroupStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("nGroups", spec->nGroups);
}
} else if (STAGE_IDHACK == stats.stageType) {
IDHackStats* spec = static_cast<IDHackStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("keysExamined", spec->keysExamined);
bob->appendNumber("docsExamined", spec->docsExamined);
}
@@ -447,7 +447,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
bob->append("indexBounds", spec->indexBounds);
}
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("keysExamined", spec->keysExamined);
bob->appendNumber("seeks", spec->seeks);
bob->appendNumber("dupsTested", spec->dupsTested);
@@ -457,7 +457,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
} else if (STAGE_OR == stats.stageType) {
OrStats* spec = static_cast<OrStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("dupsTested", spec->dupsTested);
bob->appendNumber("dupsDropped", spec->dupsDropped);
bob->appendNumber("recordIdsForgotten", spec->recordIdsForgotten);
@@ -471,7 +471,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
} else if (STAGE_SHARDING_FILTER == stats.stageType) {
ShardingFilterStats* spec = static_cast<ShardingFilterStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("chunkSkips", spec->chunkSkips);
}
} else if (STAGE_SKIP == stats.stageType) {
@@ -481,7 +481,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
SortStats* spec = static_cast<SortStats*>(stats.specific.get());
bob->append("sortPattern", spec->sortPattern);
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("memUsage", spec->memUsage);
bob->appendNumber("memLimit", spec->memLimit);
}
@@ -493,7 +493,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
MergeSortStats* spec = static_cast<MergeSortStats*>(stats.specific.get());
bob->append("sortPattern", spec->sortPattern);
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("dupsTested", spec->dupsTested);
bob->appendNumber("dupsDropped", spec->dupsDropped);
}
@@ -507,19 +507,19 @@ void Explain::statsToBSON(const PlanStageStats& stats,
} else if (STAGE_TEXT_MATCH == stats.stageType) {
TextMatchStats* spec = static_cast<TextMatchStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("docsRejected", spec->docsRejected);
}
} else if (STAGE_TEXT_OR == stats.stageType) {
TextOrStats* spec = static_cast<TextOrStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("docsExamined", spec->fetches);
}
} else if (STAGE_UPDATE == stats.stageType) {
UpdateStats* spec = static_cast<UpdateStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("nMatched", spec->nMatched);
bob->appendNumber("nWouldModify", spec->nModified);
bob->appendNumber("nInvalidateSkips", spec->nInvalidateSkips);
@@ -555,7 +555,7 @@ void Explain::statsToBSON(const PlanStageStats& stats,
}
// static
-BSONObj Explain::statsToBSON(const PlanStageStats& stats, ExplainCommon::Verbosity verbosity) {
+BSONObj Explain::statsToBSON(const PlanStageStats& stats, ExplainOptions::Verbosity verbosity) {
BSONObjBuilder bob;
statsToBSON(stats, &bob, verbosity);
return bob.obj();
@@ -564,7 +564,7 @@ BSONObj Explain::statsToBSON(const PlanStageStats& stats, ExplainCommon::Verbosi
// static
void Explain::statsToBSON(const PlanStageStats& stats,
BSONObjBuilder* bob,
- ExplainCommon::Verbosity verbosity) {
+ ExplainOptions::Verbosity verbosity) {
statsToBSON(stats, verbosity, bob, bob);
}
@@ -582,7 +582,7 @@ void Explain::getWinningPlanStats(const PlanExecutor* exec, BSONObjBuilder* bob)
mps ? std::move(mps->getStats()->children[mps->bestPlanIdx()])
: std::move(exec->getRootStage()->getStats()));
- statsToBSON(*winningStats, ExplainCommon::EXEC_STATS, bob, bob);
+ statsToBSON(*winningStats, ExplainOptions::Verbosity::kExecStats, bob, bob);
}
// static
@@ -627,14 +627,14 @@ void Explain::generatePlannerInfo(PlanExecutor* exec,
}
BSONObjBuilder winningPlanBob(plannerBob.subobjStart("winningPlan"));
- statsToBSON(*winnerStats, &winningPlanBob, ExplainCommon::QUERY_PLANNER);
+ statsToBSON(*winnerStats, &winningPlanBob, ExplainOptions::Verbosity::kQueryPlanner);
winningPlanBob.doneFast();
// Generate array of rejected plans.
BSONArrayBuilder allPlansBob(plannerBob.subarrayStart("rejectedPlans"));
for (size_t i = 0; i < rejectedStats.size(); i++) {
BSONObjBuilder childBob(allPlansBob.subobjStart());
- statsToBSON(*rejectedStats[i], &childBob, ExplainCommon::QUERY_PLANNER);
+ statsToBSON(*rejectedStats[i], &childBob, ExplainOptions::Verbosity::kQueryPlanner);
}
allPlansBob.doneFast();
@@ -643,7 +643,7 @@ void Explain::generatePlannerInfo(PlanExecutor* exec,
// static
void Explain::generateExecStats(PlanStageStats* stats,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
BSONObjBuilder* out,
boost::optional<long long> totalTimeMillis) {
out->appendNumber("nReturned", stats->common.advanced);
@@ -693,7 +693,7 @@ void Explain::generateServerInfo(BSONObjBuilder* out) {
// static
void Explain::explainStages(PlanExecutor* exec,
const Collection* collection,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
BSONObjBuilder* out) {
//
// Collect plan stats, running the plan if necessary. The stats also give the structure of the
@@ -707,7 +707,7 @@ void Explain::explainStages(PlanExecutor* exec,
// Get stats of the winning plan from the trial period, if the verbosity level is high enough
// and there was a runoff between multiple plans.
unique_ptr<PlanStageStats> winningStatsTrial;
- if (verbosity >= ExplainCommon::EXEC_ALL_PLANS && mps) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecAllPlans && mps) {
winningStatsTrial = std::move(mps->getStats()->children[mps->bestPlanIdx()]);
invariant(winningStatsTrial.get());
}
@@ -726,7 +726,7 @@ void Explain::explainStages(PlanExecutor* exec,
// If we need execution stats, then run the plan in order to gather the stats.
Status executePlanStatus = Status::OK();
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
executePlanStatus = exec->executePlan();
}
@@ -746,11 +746,11 @@ void Explain::explainStages(PlanExecutor* exec,
// Use the stats trees to produce explain BSON.
//
- if (verbosity >= ExplainCommon::QUERY_PLANNER) {
+ if (verbosity >= ExplainOptions::Verbosity::kQueryPlanner) {
generatePlannerInfo(exec, collection, winningStats.get(), allPlansStats, out);
}
- if (verbosity >= ExplainCommon::EXEC_STATS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
BSONObjBuilder execBob(out->subobjStart("executionStats"));
// If there is an execution error while running the query, the error is reported under
@@ -768,7 +768,7 @@ void Explain::explainStages(PlanExecutor* exec,
// Also generate exec stats for all plans, if the verbosity level is high enough.
// These stats reflect what happened during the trial period that ranked the plans.
- if (verbosity >= ExplainCommon::EXEC_ALL_PLANS) {
+ if (verbosity >= ExplainOptions::Verbosity::kExecAllPlans) {
// If we ranked multiple plans against each other, then add stats collected
// from the trial period of the winning plan. The "allPlansExecution" section
// will contain an apples-to-apples comparison of the winning plan's stats against
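Every relational check above, e.g. verbosity >= ExplainOptions::Verbosity::kExecStats, keeps working because an enum class still compares by its underlying values and the new enumerators preserve the old 0/1/2 ordering. A compile-time guard one could add to document that dependency (illustrative, not in this patch):

    static_assert(
        ExplainOptions::Verbosity::kQueryPlanner < ExplainOptions::Verbosity::kExecStats &&
            ExplainOptions::Verbosity::kExecStats < ExplainOptions::Verbosity::kExecAllPlans,
        "explain verbosity levels must be ordered from least to most verbose");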
diff --git a/src/mongo/db/query/explain.h b/src/mongo/db/query/explain.h
index fb9b8d56c2a..8202cbfbeec 100644
--- a/src/mongo/db/query/explain.h
+++ b/src/mongo/db/query/explain.h
@@ -31,7 +31,7 @@
#include "mongo/db/exec/plan_stage.h"
#include "mongo/db/exec/plan_stats.h"
#include "mongo/db/query/canonical_query.h"
-#include "mongo/db/query/explain_common.h"
+#include "mongo/db/query/explain_options.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/query/query_planner_params.h"
#include "mongo/db/query/query_solution.h"
@@ -64,7 +64,7 @@ public:
*/
static void explainStages(PlanExecutor* exec,
const Collection* collection,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
BSONObjBuilder* out);
/**
@@ -85,8 +85,9 @@ public:
* Generates the BSON stats at a verbosity specified by 'verbosity'. Defaults
* to execution stats verbosity.
*/
- static BSONObj statsToBSON(const PlanStageStats& stats,
- ExplainCommon::Verbosity verbosity = ExplainCommon::EXEC_STATS);
+ static BSONObj statsToBSON(
+ const PlanStageStats& stats,
+ ExplainOptions::Verbosity verbosity = ExplainOptions::Verbosity::kExecStats);
/**
* This version of stats tree to BSON conversion returns the result through the
@@ -95,9 +96,10 @@ public:
* Generates the BSON stats at a verbosity specified by 'verbosity'. Defaults
* to execution stats verbosity.
*/
- static void statsToBSON(const PlanStageStats& stats,
- BSONObjBuilder* bob,
- ExplainCommon::Verbosity verbosity = ExplainCommon::EXEC_STATS);
+ static void statsToBSON(
+ const PlanStageStats& stats,
+ BSONObjBuilder* bob,
+ ExplainOptions::Verbosity verbosity = ExplainOptions::Verbosity::kExecStats);
/**
* Returns a short plan summary std::string describing the leaves of the query plan.
@@ -128,7 +130,7 @@ private:
* Not used except as a helper to the public statsToBSON(...) functions.
*/
static void statsToBSON(const PlanStageStats& stats,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
BSONObjBuilder* bob,
BSONObjBuilder* topLevelBob);
@@ -164,7 +166,7 @@ private:
* This is a helper for generating explain BSON. It is used by explainStages(...).
*/
static void generateExecStats(PlanStageStats* stats,
- ExplainCommon::Verbosity verbosity,
+ ExplainOptions::Verbosity verbosity,
BSONObjBuilder* out,
boost::optional<long long> totalTimeMillis);
diff --git a/src/mongo/db/query/explain_common.cpp b/src/mongo/db/query/explain_common.cpp
deleted file mode 100644
index 1f049de6cec..00000000000
--- a/src/mongo/db/query/explain_common.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Copyright (C) 2013-2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/query/explain_common.h"
-
-#include "mongo/util/mongoutils/str.h"
-
-namespace mongo {
-
-// static
-const char* ExplainCommon::verbosityString(ExplainCommon::Verbosity verbosity) {
- switch (verbosity) {
- case QUERY_PLANNER:
- return "queryPlanner";
- case EXEC_STATS:
- return "executionStats";
- case EXEC_ALL_PLANS:
- return "allPlansExecution";
- default:
- invariant(0);
- return "unknown";
- }
-}
-
-// static
-Status ExplainCommon::parseCmdBSON(const BSONObj& cmdObj, ExplainCommon::Verbosity* verbosity) {
- if (Object != cmdObj.firstElement().type()) {
- return Status(ErrorCodes::BadValue, "explain command requires a nested object");
- }
-
- *verbosity = ExplainCommon::EXEC_ALL_PLANS;
- if (!cmdObj["verbosity"].eoo()) {
- const char* verbStr = cmdObj["verbosity"].valuestrsafe();
- if (mongoutils::str::equals(verbStr, "queryPlanner")) {
- *verbosity = ExplainCommon::QUERY_PLANNER;
- } else if (mongoutils::str::equals(verbStr, "executionStats")) {
- *verbosity = ExplainCommon::EXEC_STATS;
- } else if (!mongoutils::str::equals(verbStr, "allPlansExecution")) {
- return Status(ErrorCodes::BadValue,
- "verbosity string must be one of "
- "{'queryPlanner', 'executionStats', 'allPlansExecution'}");
- }
- }
-
- return Status::OK();
-}
-
-} // namespace mongo
diff --git a/src/mongo/db/query/explain_common.h b/src/mongo/db/query/explain_common.h
deleted file mode 100644
index 750f207a2a5..00000000000
--- a/src/mongo/db/query/explain_common.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Copyright (C) 2013-2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include "mongo/base/status.h"
-#include "mongo/bson/bsonobj.h"
-
-namespace mongo {
-
-/**
- * Utilities used for explain implementations on both mongod and mongos.
- */
-class ExplainCommon {
-public:
- /**
- * The various supported verbosity levels for explain. The order is
- * significant: the enum values are assigned in order of increasing verbosity.
- */
- enum Verbosity {
- // At all verbosities greater than or equal to QUERY_PLANNER, we display information
- // about the plan selected and alternate rejected plans. Does not include any execution-
- // related info. String alias is "queryPlanner".
- QUERY_PLANNER = 0,
-
- // At all verbosities greater than or equal to EXEC_STATS, we display a section of
- // output containing both overall execution stats, and stats per stage in the
- // execution tree. String alias is "execStats".
- EXEC_STATS = 1,
-
- // At this verbosity level, we generate the execution stats for each rejected plan as
- // well as the winning plan. String alias is "allPlansExecution".
- EXEC_ALL_PLANS = 2,
- };
-
- /**
- * Converts an explain verbosity to its string representation.
- */
- static const char* verbosityString(ExplainCommon::Verbosity verbosity);
-
- /**
- * Does some basic validation of the command BSON, and retrieves the explain verbosity.
- *
- * Returns a non-OK status if parsing fails.
- *
- * On success, populates "verbosity".
- */
- static Status parseCmdBSON(const BSONObj& cmdObj, ExplainCommon::Verbosity* verbosity);
-};
-
-} // namespace
diff --git a/src/mongo/db/query/explain_options.cpp b/src/mongo/db/query/explain_options.cpp
new file mode 100644
index 00000000000..e81df690250
--- /dev/null
+++ b/src/mongo/db/query/explain_options.cpp
@@ -0,0 +1,93 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/query/explain_options.h"
+
+#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/util/assert_util.h"
+
+namespace mongo {
+
+constexpr StringData ExplainOptions::kVerbosityName;
+constexpr StringData ExplainOptions::kQueryPlannerVerbosityStr;
+constexpr StringData ExplainOptions::kExecStatsVerbosityStr;
+constexpr StringData ExplainOptions::kAllPlansExecutionVerbosityStr;
+
+StringData ExplainOptions::verbosityString(ExplainOptions::Verbosity verbosity) {
+ switch (verbosity) {
+ case Verbosity::kQueryPlanner:
+ return kQueryPlannerVerbosityStr;
+ case Verbosity::kExecStats:
+ return kExecStatsVerbosityStr;
+ case Verbosity::kExecAllPlans:
+ return kAllPlansExecutionVerbosityStr;
+ default:
+ MONGO_UNREACHABLE;
+ }
+}
+
+StatusWith<ExplainOptions::Verbosity> ExplainOptions::parseCmdBSON(const BSONObj& cmdObj) {
+ if (BSONType::Object != cmdObj.firstElement().type()) {
+ return Status(ErrorCodes::FailedToParse, "explain command requires a nested object");
+ }
+
+ auto verbosity = Verbosity::kExecAllPlans;
+ if (auto verbosityElt = cmdObj[kVerbosityName]) {
+ if (verbosityElt.type() != BSONType::String) {
+ return Status(ErrorCodes::FailedToParse, "explain verbosity must be a string");
+ }
+
+ auto verbStr = verbosityElt.valueStringData();
+ if (verbStr == kQueryPlannerVerbosityStr) {
+ verbosity = Verbosity::kQueryPlanner;
+ } else if (verbStr == kExecStatsVerbosityStr) {
+ verbosity = Verbosity::kExecStats;
+ } else if (verbStr != kAllPlansExecutionVerbosityStr) {
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "verbosity string must be one of {'"
+ << kQueryPlannerVerbosityStr
+ << "', '"
+ << kExecStatsVerbosityStr
+ << "', '"
+ << kAllPlansExecutionVerbosityStr
+ << "'}");
+ }
+ }
+
+ return verbosity;
+}
+
+BSONObj ExplainOptions::toBSON(ExplainOptions::Verbosity verbosity) {
+ BSONObjBuilder explainOptionsBuilder;
+ explainOptionsBuilder.append(kVerbosityName, StringData(verbosityString(verbosity)));
+ return explainOptionsBuilder.obj();
+}
+
+} // namespace mongo
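parseCmdBSON() and toBSON() round-trip a verbosity, which is useful wherever the parsed options must be re-encoded and forwarded. A usage sketch (the command object is illustrative):

    auto swVerbosity = ExplainOptions::parseCmdBSON(
        fromjson("{explain: {aggregate: 'coll'}, verbosity: 'executionStats'}"));
    invariant(swVerbosity.isOK());
    BSONObj reEncoded = ExplainOptions::toBSON(swVerbosity.getValue());
    // reEncoded is {verbosity: "executionStats"}.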
diff --git a/src/mongo/db/query/explain_options.h b/src/mongo/db/query/explain_options.h
new file mode 100644
index 00000000000..97a3c85ea65
--- /dev/null
+++ b/src/mongo/db/query/explain_options.h
@@ -0,0 +1,89 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/base/status.h"
+#include "mongo/bson/bsonobj.h"
+
+namespace mongo {
+
+/**
+ * Represents options passed to the explain command (aside from the command which is being explained
+ * and its parameters).
+ */
+class ExplainOptions {
+public:
+ /**
+ * The various supported verbosity levels for explain. The order is significant: the enum values
+ * are assigned in order of increasing verbosity.
+ */
+ enum class Verbosity {
+        // At all verbosities greater than or equal to kQueryPlanner, we display information about
+        // the plan selected and alternate rejected plans. Does not include any execution-related
+        // info. String alias is "queryPlanner".
+ kQueryPlanner = 0,
+
+        // At all verbosities greater than or equal to kExecStats, we display a section of output
+        // containing both overall execution stats, and stats per stage in the execution tree.
+        // String alias is "executionStats".
+ kExecStats = 1,
+
+ // At this verbosity level, we generate the execution stats for each rejected plan as well
+ // as the winning plan. String alias is "allPlansExecution".
+ kExecAllPlans = 2,
+ };
+
+ static constexpr StringData kVerbosityName = "verbosity"_sd;
+
+ // String representations for verbosity levels.
+ static constexpr StringData kQueryPlannerVerbosityStr = "queryPlanner"_sd;
+ static constexpr StringData kExecStatsVerbosityStr = "executionStats"_sd;
+ static constexpr StringData kAllPlansExecutionVerbosityStr = "allPlansExecution"_sd;
+
+ /**
+ * Converts an explain verbosity to its string representation.
+ */
+ static StringData verbosityString(ExplainOptions::Verbosity verbosity);
+
+ /**
+ * Does some basic validation of the command BSON, then extracts and returns the explain
+ * verbosity.
+ *
+ * Returns a non-OK status if parsing fails.
+ */
+ static StatusWith<ExplainOptions::Verbosity> parseCmdBSON(const BSONObj& cmdObj);
+
+ /**
+ * Converts 'verbosity' to its corresponding representation as a BSONObj containing explain
+ * command parameters.
+ */
+ static BSONObj toBSON(ExplainOptions::Verbosity verbosity);
+};
+
+} // namespace mongo
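One behavioral detail of this header worth pinning down: when the command omits the "verbosity" field, parseCmdBSON() falls back to kExecAllPlans, preserving the old EXEC_ALL_PLANS default. A test in the style of the suite below could cover it (a sketch, not part of this patch):

    TEST(ExplainOptionsTest, OmittedVerbosityDefaultsToAllPlansExecution) {
        auto verbosity =
            unittest::assertGet(ExplainOptions::parseCmdBSON(fromjson("{explain: {}}")));
        ASSERT(verbosity == ExplainOptions::Verbosity::kExecAllPlans);
    }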
diff --git a/src/mongo/db/query/explain_options_test.cpp b/src/mongo/db/query/explain_options_test.cpp
new file mode 100644
index 00000000000..c1a2a606263
--- /dev/null
+++ b/src/mongo/db/query/explain_options_test.cpp
@@ -0,0 +1,95 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/query/explain_options.h"
+
+#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/bson/json.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+namespace {
+
+TEST(ExplainOptionsTest, VerbosityEnumToStringReturnsCorrectValues) {
+ ASSERT_EQ(ExplainOptions::verbosityString(ExplainOptions::Verbosity::kQueryPlanner),
+ "queryPlanner"_sd);
+ ASSERT_EQ(ExplainOptions::verbosityString(ExplainOptions::Verbosity::kExecStats),
+ "executionStats"_sd);
+ ASSERT_EQ(ExplainOptions::verbosityString(ExplainOptions::Verbosity::kExecAllPlans),
+ "allPlansExecution"_sd);
+}
+
+TEST(ExplainOptionsTest, ExplainOptionsSerializeToBSONCorrectly) {
+ ASSERT_BSONOBJ_EQ(BSON("verbosity"
+ << "queryPlanner"),
+ ExplainOptions::toBSON(ExplainOptions::Verbosity::kQueryPlanner));
+ ASSERT_BSONOBJ_EQ(BSON("verbosity"
+ << "executionStats"),
+ ExplainOptions::toBSON(ExplainOptions::Verbosity::kExecStats));
+ ASSERT_BSONOBJ_EQ(BSON("verbosity"
+ << "allPlansExecution"),
+ ExplainOptions::toBSON(ExplainOptions::Verbosity::kExecAllPlans));
+}
+
+TEST(ExplainOptionsTest, CanParseExplainVerbosity) {
+ auto verbosity = unittest::assertGet(
+ ExplainOptions::parseCmdBSON(fromjson("{explain: {}, verbosity: 'queryPlanner'}")));
+ ASSERT(verbosity == ExplainOptions::Verbosity::kQueryPlanner);
+ verbosity = unittest::assertGet(
+ ExplainOptions::parseCmdBSON(fromjson("{explain: {}, verbosity: 'executionStats'}")));
+ ASSERT(verbosity == ExplainOptions::Verbosity::kExecStats);
+ verbosity = unittest::assertGet(
+ ExplainOptions::parseCmdBSON(fromjson("{explain: {}, verbosity: 'allPlansExecution'}")));
+ ASSERT(verbosity == ExplainOptions::Verbosity::kExecAllPlans);
+}
+
+TEST(ExplainOptionsTest, ParsingFailsIfVerbosityIsNotAString) {
+ ASSERT_EQ(ExplainOptions::parseCmdBSON(fromjson("{explain: {}, verbosity: 1}")).getStatus(),
+ ErrorCodes::FailedToParse);
+ ASSERT_EQ(ExplainOptions::parseCmdBSON(fromjson("{explain: {}, verbosity: {foo: 'bar'}}"))
+ .getStatus(),
+ ErrorCodes::FailedToParse);
+}
+
+TEST(ExplainOptionsTest, ParsingFailsIfVerbosityStringIsNotRecognized) {
+ ASSERT_EQ(ExplainOptions::parseCmdBSON(fromjson("{explain: {}, verbosity: 'badVerbosity'}"))
+ .getStatus(),
+ ErrorCodes::FailedToParse);
+}
+
+TEST(ExplainOptionsTest, ParsingFailsIfFirstElementIsNotAnObject) {
+ ASSERT_EQ(ExplainOptions::parseCmdBSON(fromjson("{explain: 1, verbosity: 'queryPlanner'}"))
+ .getStatus(),
+ ErrorCodes::FailedToParse);
+}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index d8c9feb8fde..9f709bd8485 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -556,7 +556,8 @@ std::string runQuery(OperationContext* opCtx,
bb.skip(sizeof(QueryResult::Value));
BSONObjBuilder explainBob;
- Explain::explainStages(exec.get(), collection, ExplainCommon::EXEC_ALL_PLANS, &explainBob);
+ Explain::explainStages(
+ exec.get(), collection, ExplainOptions::Verbosity::kExecAllPlans, &explainBob);
// Add the resulting object to the return buffer.
BSONObj explainObj = explainBob.obj();
diff --git a/src/mongo/db/query/parsed_distinct.cpp b/src/mongo/db/query/parsed_distinct.cpp
index 8bd80ff405e..3c3a75c62cd 100644
--- a/src/mongo/db/query/parsed_distinct.cpp
+++ b/src/mongo/db/query/parsed_distinct.cpp
@@ -86,9 +86,6 @@ StatusWith<BSONObj> ParsedDistinct::asAggregationCommand() const {
groupStageBuilder.doneFast();
pipelineBuilder.doneFast();
- if (_query->getQueryRequest().isExplain()) {
- aggregationBuilder.append("explain", true);
- }
aggregationBuilder.append(kCollationField, qr.getCollation());
// Specify the 'cursor' option so that aggregation uses the cursor interface.
diff --git a/src/mongo/db/query/parsed_distinct_test.cpp b/src/mongo/db/query/parsed_distinct_test.cpp
index d53457e500e..3a4f6ccb42e 100644
--- a/src/mongo/db/query/parsed_distinct_test.cpp
+++ b/src/mongo/db/query/parsed_distinct_test.cpp
@@ -60,7 +60,7 @@ TEST(ParsedDistinctTest, ConvertToAggregationNoQuery) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(!ar.getValue().isExplain());
+ ASSERT(!ar.getValue().getExplain());
ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
@@ -96,7 +96,7 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithQuery) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(!ar.getValue().isExplain());
+ ASSERT(!ar.getValue().getExplain());
ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
@@ -116,7 +116,7 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithQuery) {
SimpleBSONObjComparator::kInstance.makeEqualTo()));
}
-TEST(ParsedDistinctTest, ConvertToAggregationWithExplain) {
+TEST(ParsedDistinctTest, ExplainNotIncludedWhenConvertingToAggregationCommand) {
QueryTestServiceContext serviceContext;
auto uniqueTxn = serviceContext.makeOperationContext();
OperationContext* opCtx = uniqueTxn.get();
@@ -131,9 +131,11 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithExplain) {
auto agg = pd.getValue().asAggregationCommand();
ASSERT_OK(agg);
+ ASSERT_FALSE(agg.getValue().hasField("explain"));
+
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(ar.getValue().isExplain());
+ ASSERT(!ar.getValue().getExplain());
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
diff --git a/src/mongo/db/query/query_request.cpp b/src/mongo/db/query/query_request.cpp
index b58a0665868..dc8188ab96a 100644
--- a/src/mongo/db/query/query_request.cpp
+++ b/src/mongo/db/query/query_request.cpp
@@ -1058,9 +1058,6 @@ StatusWith<BSONObj> QueryRequest::asAggregationCommand() const {
// Other options.
aggregationBuilder.append("collation", _collation);
- if (_explain) {
- aggregationBuilder.append("explain", _explain);
- }
if (_maxTimeMS > 0) {
aggregationBuilder.append(cmdOptionMaxTimeMS, _maxTimeMS);
}
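
With the "explain" field gone from the generated command object, explain intent now travels on the request object rather than in the BSON. A rough sketch of that flow, assuming a QueryRequest 'qr' with explain set and a target namespace 'nss' (both hypothetical here):

    BSONObj aggCmd = uassertStatusOK(qr.asAggregationCommand());  // omits "explain"
    auto aggRequest = uassertStatusOK(AggregationRequest::parseFromBSON(nss, aggCmd));
    // The desired verbosity is set out-of-band on the parsed request.
    aggRequest.setExplain(ExplainOptions::Verbosity::kQueryPlanner);
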
diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp
index 66a03bc8285..8906d5c2a7d 100644
--- a/src/mongo/db/query/query_request_test.cpp
+++ b/src/mongo/db/query/query_request_test.cpp
@@ -1102,14 +1102,14 @@ TEST(QueryRequestTest, ConvertToAggregationSucceeds) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(!ar.getValue().isExplain());
+ ASSERT(!ar.getValue().getExplain());
ASSERT(ar.getValue().getPipeline().empty());
ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
}
-TEST(QueryRequestTest, ConvertToAggregationWithExplainSucceeds) {
+TEST(QueryRequestTest, ConvertToAggregationOmitsExplain) {
QueryRequest qr(testns);
qr.setExplain(true);
auto agg = qr.asAggregationCommand();
@@ -1117,7 +1117,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithExplainSucceeds) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(ar.getValue().isExplain());
+ ASSERT_FALSE(ar.getValue().getExplain());
ASSERT(ar.getValue().getPipeline().empty());
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
@@ -1245,7 +1245,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithPipeline) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(!ar.getValue().isExplain());
+ ASSERT(!ar.getValue().getExplain());
ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
@@ -1270,7 +1270,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithBatchSize) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(!ar.getValue().isExplain());
+ ASSERT(!ar.getValue().getExplain());
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_EQ(ar.getValue().getBatchSize(), 4LL);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
@@ -1288,7 +1288,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithMaxTimeMS) {
auto ar = AggregationRequest::parseFromBSON(testns, cmdObj);
ASSERT_OK(ar.getStatus());
- ASSERT(!ar.getValue().isExplain());
+ ASSERT(!ar.getValue().getExplain());
ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
@@ -1302,7 +1302,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithCollationSucceeds) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(!ar.getValue().isExplain());
+ ASSERT(!ar.getValue().getExplain());
ASSERT(ar.getValue().getPipeline().empty());
ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
diff --git a/src/mongo/db/views/resolved_view.cpp b/src/mongo/db/views/resolved_view.cpp
index 5fcdecc857e..9012e5489e4 100644
--- a/src/mongo/db/views/resolved_view.cpp
+++ b/src/mongo/db/views/resolved_view.cpp
@@ -65,54 +65,30 @@ ResolvedView ResolvedView::fromBSON(BSONObj commandResponseObj) {
return {ResolvedView(NamespaceString(viewDef["ns"].valueStringData()), pipeline)};
}
-StatusWith<BSONObj> ResolvedView::asExpandedViewAggregation(
+AggregationRequest ResolvedView::asExpandedViewAggregation(
const AggregationRequest& request) const {
- BSONObjBuilder aggregationBuilder;
- // Perform the aggregation on the resolved namespace.
- aggregationBuilder.append("aggregate", _namespace.coll());
-
- // The new pipeline consists of two parts: first, 'pipeline' in this ResolvedView;
- // then, the pipeline in 'request'.
- BSONArrayBuilder pipelineBuilder(aggregationBuilder.subarrayStart("pipeline"));
- for (auto&& item : _pipeline) {
- pipelineBuilder.append(item);
- }
-
- for (auto&& item : request.getPipeline()) {
- pipelineBuilder.append(item);
- }
- pipelineBuilder.doneFast();
-
- if (request.isExplain()) {
- aggregationBuilder.append("explain", true);
+ // Perform the aggregation on the resolved namespace. The new pipeline consists of two parts:
+ // first, 'pipeline' in this ResolvedView; then, the pipeline in 'request'.
+ std::vector<BSONObj> resolvedPipeline;
+ resolvedPipeline.reserve(_pipeline.size() + request.getPipeline().size());
+ resolvedPipeline.insert(resolvedPipeline.end(), _pipeline.begin(), _pipeline.end());
+ resolvedPipeline.insert(
+ resolvedPipeline.end(), request.getPipeline().begin(), request.getPipeline().end());
+
+ AggregationRequest expandedRequest{_namespace, resolvedPipeline};
+
+ if (request.getExplain()) {
+ expandedRequest.setExplain(request.getExplain());
} else {
- BSONObjBuilder batchSizeBuilder(aggregationBuilder.subobjStart("cursor"));
- batchSizeBuilder.append(AggregationRequest::kBatchSizeName, request.getBatchSize());
- batchSizeBuilder.doneFast();
+ expandedRequest.setBatchSize(request.getBatchSize());
}
- if (!request.getHint().isEmpty()) {
- aggregationBuilder.append(AggregationRequest::kHintName, request.getHint());
- }
-
- if (request.shouldBypassDocumentValidation()) {
- aggregationBuilder.append("bypassDocumentValidation", true);
- }
-
- if (request.shouldAllowDiskUse()) {
- aggregationBuilder.append("allowDiskUse", true);
- }
-
- return aggregationBuilder.obj();
-}
-
-StatusWith<BSONObj> ResolvedView::asExpandedViewAggregation(const BSONObj& aggCommand) const {
- auto aggRequest = AggregationRequest::parseFromBSON(_namespace, aggCommand);
- if (!aggRequest.isOK()) {
- return aggRequest.getStatus();
- }
+ expandedRequest.setHint(request.getHint());
+ expandedRequest.setBypassDocumentValidation(request.shouldBypassDocumentValidation());
+ expandedRequest.setAllowDiskUse(request.shouldAllowDiskUse());
+ expandedRequest.setCollation(request.getCollation());
- return asExpandedViewAggregation(aggRequest.getValue());
+ return expandedRequest;
}
} // namespace mongo
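
A short usage sketch of the value-returning asExpandedViewAggregation(); the namespaces and pipeline stages are made up for the example:

    ResolvedView resolved{NamespaceString("db.backing"), {BSON("$match" << BSON("a" << 1))}};
    AggregationRequest onView{NamespaceString("db.view"), {BSON("$limit" << 5)}};
    onView.setExplain(ExplainOptions::Verbosity::kExecStats);

    // The expanded request targets the backing collection, prepends the view pipeline,
    // and carries over explain, hint, collation, and the validation/disk-use flags.
    AggregationRequest expanded = resolved.asExpandedViewAggregation(onView);
    invariant(expanded.getNamespaceString() == NamespaceString("db.backing"));
    invariant(*expanded.getExplain() == ExplainOptions::Verbosity::kExecStats);
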
diff --git a/src/mongo/db/views/resolved_view.h b/src/mongo/db/views/resolved_view.h
index a5016ffc4b3..ec85b7b3254 100644
--- a/src/mongo/db/views/resolved_view.h
+++ b/src/mongo/db/views/resolved_view.h
@@ -33,11 +33,10 @@
#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/pipeline/aggregation_request.h"
namespace mongo {
-class AggregationRequest;
-
/**
* Represents a resolved definition, composed of a base collection namespace and a pipeline
* built from one or more views.
@@ -56,11 +55,10 @@ public:
static ResolvedView fromBSON(BSONObj commandResponseObj);
/**
- * Convert an aggregation command on a view to the equivalent command against the views
+ * Converts an aggregation request on a view to the equivalent request against the view's
* underlying collection.
*/
- StatusWith<BSONObj> asExpandedViewAggregation(const BSONObj& aggCommand) const;
- StatusWith<BSONObj> asExpandedViewAggregation(const AggregationRequest& aggRequest) const;
+ AggregationRequest asExpandedViewAggregation(const AggregationRequest& aggRequest) const;
const NamespaceString& getNamespace() const {
return _namespace;
diff --git a/src/mongo/db/views/resolved_view_test.cpp b/src/mongo/db/views/resolved_view_test.cpp
index 3f991868ccb..e25c5b263b7 100644
--- a/src/mongo/db/views/resolved_view_test.cpp
+++ b/src/mongo/db/views/resolved_view_test.cpp
@@ -35,6 +35,7 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/pipeline/aggregation_request.h"
+#include "mongo/db/pipeline/document.h"
#include "mongo/db/views/resolved_view.h"
#include "mongo/unittest/unittest.h"
@@ -47,99 +48,51 @@ const std::vector<BSONObj> emptyPipeline;
const BSONObj kDefaultCursorOptionDocument =
BSON(AggregationRequest::kBatchSizeName << AggregationRequest::kDefaultBatchSize);
-TEST(ResolvedViewTest, ExpandingCmdObjWithEmptyPipelineOnNoOpViewYieldsEmptyPipeline) {
- const ResolvedView resolvedView{backingNss, emptyPipeline};
- BSONObj cmdObj =
- BSON("aggregate" << viewNss.coll() << "pipeline" << BSONArray() << "cursor" << BSONObj());
-
- auto result = resolvedView.asExpandedViewAggregation(cmdObj);
- ASSERT_OK(result.getStatus());
-
- BSONObj expected =
- BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor"
- << kDefaultCursorOptionDocument);
- ASSERT_BSONOBJ_EQ(result.getValue(), expected);
-}
-
-TEST(ResolvedViewTest, ExpandingCmdObjWithNonemptyPipelineAppendsToViewPipeline) {
- std::vector<BSONObj> viewPipeline{BSON("skip" << 7)};
- const ResolvedView resolvedView{backingNss, viewPipeline};
- BSONObj cmdObj = BSON(
- "aggregate" << viewNss.coll() << "pipeline" << BSON_ARRAY(BSON("limit" << 3)) << "cursor"
- << BSONObj());
-
- auto result = resolvedView.asExpandedViewAggregation(cmdObj);
- ASSERT_OK(result.getStatus());
-
- BSONObj expected = BSON("aggregate" << backingNss.coll() << "pipeline"
- << BSON_ARRAY(BSON("skip" << 7) << BSON("limit" << 3))
- << "cursor"
- << kDefaultCursorOptionDocument);
- ASSERT_BSONOBJ_EQ(result.getValue(), expected);
-}
-
-TEST(ResolvedViewTest, ExpandingCmdObjFailsIfCmdObjIsNotAValidAggregationCommand) {
- const ResolvedView resolvedView{backingNss, emptyPipeline};
- BSONObj badCmdObj = BSON("invalid" << 0);
- ASSERT_NOT_OK(resolvedView.asExpandedViewAggregation(badCmdObj).getStatus());
-}
-
TEST(ResolvedViewTest, ExpandingAggRequestWithEmptyPipelineOnNoOpViewYieldsEmptyPipeline) {
const ResolvedView resolvedView{backingNss, emptyPipeline};
- AggregationRequest aggRequest(viewNss, {});
-
- auto result = resolvedView.asExpandedViewAggregation(aggRequest);
- ASSERT_OK(result.getStatus());
+ AggregationRequest requestOnView{viewNss, emptyPipeline};
+ auto result = resolvedView.asExpandedViewAggregation(requestOnView);
BSONObj expected =
BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor"
<< kDefaultCursorOptionDocument);
- ASSERT_BSONOBJ_EQ(result.getValue(), expected);
+ ASSERT_BSONOBJ_EQ(result.serializeToCommandObj().toBson(), expected);
}
TEST(ResolvedViewTest, ExpandingAggRequestWithNonemptyPipelineAppendsToViewPipeline) {
std::vector<BSONObj> viewPipeline{BSON("skip" << 7)};
const ResolvedView resolvedView{backingNss, viewPipeline};
+ AggregationRequest requestOnView{viewNss, std::vector<BSONObj>{BSON("limit" << 3)}};
- std::vector<BSONObj> userAggregationPipeline = {BSON("limit" << 3)};
- AggregationRequest aggRequest(viewNss, userAggregationPipeline);
-
- auto result = resolvedView.asExpandedViewAggregation(aggRequest);
- ASSERT_OK(result.getStatus());
+ auto result = resolvedView.asExpandedViewAggregation(requestOnView);
BSONObj expected = BSON("aggregate" << backingNss.coll() << "pipeline"
<< BSON_ARRAY(BSON("skip" << 7) << BSON("limit" << 3))
<< "cursor"
<< kDefaultCursorOptionDocument);
- ASSERT_BSONOBJ_EQ(result.getValue(), expected);
+ ASSERT_BSONOBJ_EQ(result.serializeToCommandObj().toBson(), expected);
}
TEST(ResolvedViewTest, ExpandingAggRequestPreservesExplain) {
const ResolvedView resolvedView{backingNss, emptyPipeline};
- AggregationRequest aggRequest(viewNss, {});
- aggRequest.setExplain(true);
+ AggregationRequest aggRequest{viewNss, {}};
+ aggRequest.setExplain(ExplainOptions::Verbosity::kExecStats);
auto result = resolvedView.asExpandedViewAggregation(aggRequest);
- ASSERT_OK(result.getStatus());
-
- BSONObj expected =
- BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "explain" << true);
- ASSERT_BSONOBJ_EQ(result.getValue(), expected);
+ ASSERT(result.getExplain());
+ ASSERT(*result.getExplain() == ExplainOptions::Verbosity::kExecStats);
}
TEST(ResolvedViewTest, ExpandingAggRequestWithCursorAndExplainOnlyPreservesExplain) {
const ResolvedView resolvedView{backingNss, emptyPipeline};
- BSONObj cmdObj =
- BSON("aggregate" << viewNss.coll() << "pipeline" << BSONArray() << "cursor" << BSONObj()
- << "explain"
- << true);
-
- auto result = resolvedView.asExpandedViewAggregation(cmdObj);
- ASSERT_OK(result.getStatus());
+ AggregationRequest aggRequest{viewNss, {}};
+ aggRequest.setBatchSize(10);
+ aggRequest.setExplain(ExplainOptions::Verbosity::kExecStats);
- BSONObj expected =
- BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "explain" << true);
- ASSERT_BSONOBJ_EQ(result.getValue(), expected);
+ auto result = resolvedView.asExpandedViewAggregation(aggRequest);
+ ASSERT(result.getExplain());
+ ASSERT(*result.getExplain() == ExplainOptions::Verbosity::kExecStats);
+ ASSERT_EQ(result.getBatchSize(), AggregationRequest::kDefaultBatchSize);
}
TEST(ResolvedViewTest, ExpandingAggRequestPreservesBypassDocumentValidation) {
@@ -148,14 +101,7 @@ TEST(ResolvedViewTest, ExpandingAggRequestPreservesBypassDocumentValidation) {
aggRequest.setBypassDocumentValidation(true);
auto result = resolvedView.asExpandedViewAggregation(aggRequest);
- ASSERT_OK(result.getStatus());
-
- BSONObj expected =
- BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor"
- << kDefaultCursorOptionDocument
- << "bypassDocumentValidation"
- << true);
- ASSERT_BSONOBJ_EQ(result.getValue(), expected);
+ ASSERT_TRUE(result.shouldBypassDocumentValidation());
}
TEST(ResolvedViewTest, ExpandingAggRequestPreservesAllowDiskUse) {
@@ -164,14 +110,19 @@ TEST(ResolvedViewTest, ExpandingAggRequestPreservesAllowDiskUse) {
aggRequest.setAllowDiskUse(true);
auto result = resolvedView.asExpandedViewAggregation(aggRequest);
- ASSERT_OK(result.getStatus());
+ ASSERT_TRUE(result.shouldAllowDiskUse());
+}
- BSONObj expected =
- BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor"
- << kDefaultCursorOptionDocument
- << "allowDiskUse"
- << true);
- ASSERT_BSONOBJ_EQ(result.getValue(), expected);
+TEST(ResolvedViewTest, ExpandingAggRequestPreservesCollation) {
+ const ResolvedView resolvedView{backingNss, emptyPipeline};
+ AggregationRequest aggRequest(viewNss, {});
+ aggRequest.setCollation(BSON("locale"
+ << "fr_CA"));
+
+ auto result = resolvedView.asExpandedViewAggregation(aggRequest);
+ ASSERT_BSONOBJ_EQ(result.getCollation(),
+ BSON("locale"
+ << "fr_CA"));
}
TEST(ResolvedViewTest, FromBSONFailsIfMissingResolvedView) {
diff --git a/src/mongo/db/views/view_sharding_check.cpp b/src/mongo/db/views/view_sharding_check.cpp
index e2144d56dac..c70cc5f5c55 100644
--- a/src/mongo/db/views/view_sharding_check.cpp
+++ b/src/mongo/db/views/view_sharding_check.cpp
@@ -76,14 +76,14 @@ StatusWith<BSONObj> ViewShardingCheck::getResolvedViewIfSharded(OperationContext
return viewDetailBob.obj();
}
-void ViewShardingCheck::appendShardedViewStatus(const BSONObj& resolvedView, BSONObjBuilder* out) {
+Status ViewShardingCheck::appendShardedViewResponse(const BSONObj& resolvedView,
+ BSONObjBuilder* out) {
invariant(out);
invariant(!resolvedView.isEmpty());
out->append("resolvedView", resolvedView);
- Status status{ErrorCodes::CommandOnShardedViewNotSupportedOnMongod,
- str::stream() << "Command on view must be executed by mongos"};
- Command::appendCommandStatus(*out, status);
+ return {ErrorCodes::CommandOnShardedViewNotSupportedOnMongod,
+ str::stream() << "Command on view must be executed by mongos"};
}
bool ViewShardingCheck::collectionIsSharded(OperationContext* opCtx, const NamespaceString& nss) {
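
A sketch of the caller's side of the new contract. Command::appendCommandStatus is taken from the code removed above; the surrounding variables are hypothetical:

    BSONObjBuilder result;
    Status status = ViewShardingCheck::appendShardedViewResponse(resolvedViewObj, &result);
    // appendShardedViewResponse() only appends "resolvedView"; the caller
    // attaches the CommandOnShardedViewNotSupportedOnMongod error itself.
    Command::appendCommandStatus(result, status);
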
diff --git a/src/mongo/db/views/view_sharding_check.h b/src/mongo/db/views/view_sharding_check.h
index d3437353212..c59455dd5aa 100644
--- a/src/mongo/db/views/view_sharding_check.h
+++ b/src/mongo/db/views/view_sharding_check.h
@@ -65,10 +65,11 @@ public:
const ViewDefinition* view);
/**
- * Appends the resolved view definition and CommandOnShardedViewNotSupportedOnMongod status to
- * 'result'.
+ * Appends the resolved view definition to 'result' and returns a
+ * CommandOnShardedViewNotSupportedOnMongod error status. The caller is responsible for
+ * appending the returned non-OK status, along with its error code, to the command response.
*/
- static void appendShardedViewStatus(const BSONObj& resolvedView, BSONObjBuilder* result);
+ static Status appendShardedViewResponse(const BSONObj& resolvedView, BSONObjBuilder* result);
private:
/**