summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJames Wahlin <james.wahlin@mongodb.com>2019-10-31 16:24:54 +0000
committerevergreen <evergreen@mongodb.com>2019-10-31 16:24:54 +0000
commit7a3d17ea6b73bc916d94e59a44d5c1a56cbcb2e5 (patch)
tree4fea36496d4351b9960951c8cce0f991208abd51
parentbc26f68ff09269816cb0118ad8f2a25ee17ad0e9 (diff)
downloadmongo-7a3d17ea6b73bc916d94e59a44d5c1a56cbcb2e5.tar.gz
Revert "SERVER-44013 MR Agg: Report plan stats and summary for currentOp/profiler/slow query logging"
This reverts commit 8e9a8b5552ae078e1890ec319909b7268adcfaac.
-rw-r--r--buildscripts/resmokeconfig/suites/core_map_reduce_agg.yaml1
-rw-r--r--jstests/core/profile_agg.js57
-rw-r--r--jstests/core/profile_mapreduce.js28
-rw-r--r--src/mongo/db/commands/SConscript1
-rw-r--r--src/mongo/db/commands/map_reduce_agg.cpp26
-rw-r--r--src/mongo/db/pipeline/document_source_cursor.cpp7
-rw-r--r--src/mongo/db/pipeline/pipeline_d.cpp10
7 files changed, 47 insertions, 83 deletions
diff --git a/buildscripts/resmokeconfig/suites/core_map_reduce_agg.yaml b/buildscripts/resmokeconfig/suites/core_map_reduce_agg.yaml
index d7aee4d28ea..8d6f68bff9a 100644
--- a/buildscripts/resmokeconfig/suites/core_map_reduce_agg.yaml
+++ b/buildscripts/resmokeconfig/suites/core_map_reduce_agg.yaml
@@ -17,7 +17,6 @@ selector:
- jstests/core/mr_undef.js
- jstests/core/mr_use_this_object.js
- jstests/core/mr_scope.js
- - jstests/core/profile_mapreduce.js
executor:
archive:
hooks:
diff --git a/jstests/core/profile_agg.js b/jstests/core/profile_agg.js
index d8e96d46646..0b67296d9c0 100644
--- a/jstests/core/profile_agg.js
+++ b/jstests/core/profile_agg.js
@@ -8,25 +8,26 @@
// For getLatestProfilerEntry and getProfilerProtocolStringForCommand
load("jstests/libs/profiler.js");
-const testDB = db.getSiblingDB("profile_agg");
+var testDB = db.getSiblingDB("profile_agg");
assert.commandWorked(testDB.dropDatabase());
-const coll = testDB.getCollection("test");
+var coll = testDB.getCollection("test");
testDB.setProfilingLevel(2);
//
// Confirm metrics for agg w/ $match.
//
-for (let i = 0; i < 10; ++i) {
+var i;
+for (i = 0; i < 10; ++i) {
assert.commandWorked(coll.insert({a: i}));
}
assert.commandWorked(coll.createIndex({a: 1}));
-assert.eq(8,
- coll.aggregate([{$match: {a: {$gte: 2}}}, {$sort: {b: 1}}, {$addFields: {c: 1}}],
- {collation: {locale: "fr"}, comment: "agg_comment"})
- .itcount());
-let profileObj = getLatestProfilerEntry(testDB);
+assert.eq(
+ 8,
+ coll.aggregate([{$match: {a: {$gte: 2}}}], {collation: {locale: "fr"}, comment: "agg_comment"})
+ .itcount());
+var profileObj = getLatestProfilerEntry(testDB);
assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
assert.eq(profileObj.op, "command", tojson(profileObj));
@@ -44,60 +45,38 @@ assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
-assert(profileObj.hasOwnProperty("hasSortStage"), tojson(profileObj));
+assert(!profileObj.hasOwnProperty("hasSortStage"), tojson(profileObj));
// Testing that 'usedDisk' is set when disk is used requires either using a lot of data or
// configuring a server parameter which could mess up other tests. This testing is
// done elsewhere so that this test can stay in the core suite
assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-// Confirm that 'hasSortStage' is not present when the sort is non-blocking.
-coll.aggregate([{$match: {a: {$gte: 2}}}, {$sort: {a: 1}}, {$addFields: {c: 1}}],
- {collation: {locale: "fr"}, comment: "agg_comment"});
-profileObj = getLatestProfilerEntry(testDB);
-assert(!profileObj.hasOwnProperty("hasSortStage"), tojson(profileObj));
-
//
// Confirm "fromMultiPlanner" metric.
//
-assert(coll.drop());
+coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.createIndex({b: 1}));
-for (let i = 0; i < 5; ++i) {
+for (i = 0; i < 5; ++i) {
assert.commandWorked(coll.insert({a: i, b: i}));
}
-assert.eq(1, coll.aggregate([{$match: {a: 3, b: 3}}, {$addFields: {c: 1}}]).itcount());
+assert.eq(1, coll.aggregate([{$match: {a: 3, b: 3}}]).itcount());
profileObj = getLatestProfilerEntry(testDB);
assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
//
-// Confirm that the correct namespace is written to the profiler when running an aggregation with a
-// $out stage.
-//
-assert(coll.drop());
-db.profile_agg_out.drop();
-for (let i = 0; i < 5; ++i) {
- assert.commandWorked(coll.insert({a: i}));
-}
-
-assert.eq(0, coll.aggregate([{$match: {a: {$gt: 0}}}, {$out: "profile_agg_out"}]).itcount());
-profileObj = getLatestProfilerEntry(testDB);
-
-assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
-
-//
// Confirm that the "hint" modifier is in the profiler document.
//
-assert(coll.drop());
+coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
-for (let i = 0; i < 5; ++i) {
+for (i = 0; i < 5; ++i) {
assert.commandWorked(coll.insert({a: i, b: i}));
}
-assert.eq(
- 1, coll.aggregate([{$match: {a: 3, b: 3}}, {$addFields: {c: 1}}], {hint: {_id: 1}}).itcount());
+assert.eq(1, coll.aggregate([{$match: {a: 3, b: 3}}], {hint: {_id: 1}}).itcount());
profileObj = getLatestProfilerEntry(testDB);
assert.eq(profileObj.command.hint, {_id: 1}, tojson(profileObj));
@@ -111,9 +90,7 @@ for (let i = 0; i < 501; i++) {
matchPredicate[i] = "a".repeat(150);
}
-assert.eq(coll.aggregate([{$match: matchPredicate}, {$addFields: {c: 1}}], {comment: "profile_agg"})
- .itcount(),
- 0);
+assert.eq(coll.aggregate([{$match: matchPredicate}], {comment: "profile_agg"}).itcount(), 0);
profileObj = getLatestProfilerEntry(testDB);
assert.eq((typeof profileObj.command.$truncated), "string", tojson(profileObj));
assert.eq(profileObj.command.comment, "profile_agg", tojson(profileObj));
diff --git a/jstests/core/profile_mapreduce.js b/jstests/core/profile_mapreduce.js
index 608971f8684..00a52871bd9 100644
--- a/jstests/core/profile_mapreduce.js
+++ b/jstests/core/profile_mapreduce.js
@@ -14,25 +14,26 @@
// For getLatestProfilerEntry and getProfilerProtocolStringForCommand
load("jstests/libs/profiler.js");
-const testDB = db.getSiblingDB("profile_mapreduce");
+var testDB = db.getSiblingDB("profile_mapreduce");
assert.commandWorked(testDB.dropDatabase());
-const conn = testDB.getMongo();
-const coll = testDB.getCollection("test");
+var conn = testDB.getMongo();
+var coll = testDB.getCollection("test");
testDB.setProfilingLevel(2);
-const mapFunction = function() {
+var mapFunction = function() {
emit(this.a, this.b);
};
-const reduceFunction = function(a, b) {
+var reduceFunction = function(a, b) {
return Array.sum(b);
};
//
// Confirm metrics for mapReduce with query.
//
-for (let i = 0; i < 3; i++) {
+coll.drop();
+for (var i = 0; i < 3; i++) {
assert.commandWorked(coll.insert({a: i, b: i}));
}
assert.commandWorked(coll.createIndex({a: 1}));
@@ -41,13 +42,14 @@ coll.mapReduce(mapFunction,
reduceFunction,
{query: {a: {$gte: 0}}, out: {inline: 1}, collation: {locale: "fr"}});
-let profileObj = getLatestProfilerEntry(testDB);
+var profileObj = getLatestProfilerEntry(testDB);
assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
assert.eq(profileObj.op, "command", tojson(profileObj));
assert.eq(profileObj.keysExamined, 3, tojson(profileObj));
assert.eq(profileObj.docsExamined, 3, tojson(profileObj));
assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
+assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
assert.eq(profileObj.protocol, getProfilerProtocolStringForCommand(conn), tojson(profileObj));
assert.eq(coll.getName(), profileObj.command.mapreduce, tojson(profileObj));
assert.eq({locale: "fr"}, profileObj.command.collation, tojson(profileObj));
@@ -60,8 +62,8 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
//
// Confirm metrics for mapReduce with sort stage.
//
-assert(coll.drop());
-for (let i = 0; i < 5; i++) {
+coll.drop();
+for (var i = 0; i < 5; i++) {
assert.commandWorked(coll.insert({a: i, b: i}));
}
@@ -74,8 +76,8 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
//
// Confirm namespace field is correct when output is a collection.
//
-assert(coll.drop());
-for (let i = 0; i < 3; i++) {
+coll.drop();
+for (var i = 0; i < 3; i++) {
assert.commandWorked(coll.insert({a: i, b: i}));
}
@@ -88,10 +90,10 @@ assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
//
// Confirm "fromMultiPlanner" metric.
//
-assert(coll.drop());
+coll.drop();
assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.createIndex({b: 1}));
-for (let i = 0; i < 5; ++i) {
+for (i = 0; i < 5; ++i) {
assert.commandWorked(coll.insert({a: i, b: i}));
}
diff --git a/src/mongo/db/commands/SConscript b/src/mongo/db/commands/SConscript
index 96e4d4c32a0..c382ad4a62a 100644
--- a/src/mongo/db/commands/SConscript
+++ b/src/mongo/db/commands/SConscript
@@ -523,7 +523,6 @@ env.Library(
'$BUILD_DIR/mongo/db/commands/servers',
'$BUILD_DIR/mongo/db/db_raii',
'$BUILD_DIR/mongo/db/pipeline/mongo_process_interface',
- '$BUILD_DIR/mongo/db/query_exec',
'$BUILD_DIR/mongo/db/query/map_reduce_output_format',
'$BUILD_DIR/mongo/idl/idl_parser',
'map_reduce_parser'
diff --git a/src/mongo/db/commands/map_reduce_agg.cpp b/src/mongo/db/commands/map_reduce_agg.cpp
index 8ca42eec7b0..68ad9cb031d 100644
--- a/src/mongo/db/commands/map_reduce_agg.cpp
+++ b/src/mongo/db/commands/map_reduce_agg.cpp
@@ -43,11 +43,9 @@
#include "mongo/db/commands/map_reduce_javascript_code.h"
#include "mongo/db/commands/map_reduce_stats.h"
#include "mongo/db/commands/mr_common.h"
-#include "mongo/db/curop.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/document_value/value.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/pipeline/document_source_cursor.h"
#include "mongo/db/pipeline/expression.h"
#include "mongo/db/pipeline/pipeline_d.h"
#include "mongo/db/query/map_reduce_output_format.h"
@@ -80,7 +78,7 @@ auto makeExpressionContext(OperationContext* opCtx, const MapReduce& parsedMr) {
// Manually build an ExpressionContext with the desired options for the translated
// aggregation. The one option worth noting here is allowDiskUse, which is required to allow
// the $group stage of the translated pipeline to spill to disk.
- auto expCtx = make_intrusive<ExpressionContext>(
+ return make_intrusive<ExpressionContext>(
opCtx,
boost::none, // explain
false, // fromMongos
@@ -93,8 +91,6 @@ auto makeExpressionContext(OperationContext* opCtx, const MapReduce& parsedMr) {
MongoProcessInterface::create(opCtx),
StringMap<ExpressionContext::ResolvedNamespace>{}, // resolvedNamespaces
uuid);
- expCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";
- return expCtx;
}
std::vector<CommonStats> extractStats(const Pipeline& pipeline) {
@@ -131,19 +127,8 @@ bool runAggregationMapReduce(OperationContext* opCtx,
expCtx, pipeline.release());
}();
- {
- auto planSummaryStr = PipelineD::getPlanSummaryStr(runnablePipeline.get());
-
- stdx::lock_guard<Client> lk(*opCtx->getClient());
- CurOp::get(opCtx)->setPlanSummary_inlock(std::move(planSummaryStr));
- }
-
auto resultArray = exhaustPipelineIntoBSONArray(runnablePipeline);
- PlanSummaryStats planSummaryStats;
- PipelineD::getPlanSummaryStats(runnablePipeline.get(), &planSummaryStats);
- CurOp::get(opCtx)->debug().setPlanSummaryMetrics(planSummaryStats);
-
MapReduceStats mapReduceStats(extractStats(*runnablePipeline),
MapReduceStats::ResponseType::kUnsharded,
boost::get_optional_value_or(parsedMr.getVerbose(), false),
@@ -162,15 +147,6 @@ bool runAggregationMapReduce(OperationContext* opCtx,
&result);
}
- // The aggregation pipeline may change the namespace of the curop and we need to set it back to
- // the original namespace to correctly report command stats. One example when the namespace can
- // be changed is when the pipeline contains an $out stage, which executes an internal command to
- // create a temp collection, changing the curop namespace to the name of this temp collection.
- {
- stdx::lock_guard<Client> lk(*opCtx->getClient());
- CurOp::get(opCtx)->setNS_inlock(parsedMr.getNamespace().ns());
- }
-
return true;
}
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index c2f9c9d1a8d..6ad22cce49d 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -161,7 +161,14 @@ void DocumentSourceCursor::_updateOplogTimestamp() {
void DocumentSourceCursor::recordPlanSummaryStats() {
invariant(_exec);
+ // Aggregation handles in-memory sort outside of the query sub-system. Given that, we need to
+ // preserve the existing value of hasSortStage rather than overwriting it with the underlying
+ // PlanExecutor's value.
+ auto hasSortStage = _planSummaryStats.hasSortStage;
+
Explain::getSummaryStats(*_exec, &_planSummaryStats);
+
+ _planSummaryStats.hasSortStage = hasSortStage;
}
Value DocumentSourceCursor::serialize(boost::optional<ExplainOptions::Verbosity> verbosity) const {
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 791968683db..db3482342d9 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -806,14 +806,18 @@ void PipelineD::getPlanSummaryStats(const Pipeline* pipeline, PlanSummaryStats*
*statsOut = docSourceCursor->getPlanSummaryStats();
}
+ bool hasSortStage{false};
+ bool usedDisk{false};
for (auto&& source : pipeline->_sources) {
if (dynamic_cast<DocumentSourceSort*>(source.get()))
- statsOut->hasSortStage = true;
+ hasSortStage = true;
- statsOut->usedDisk = statsOut->usedDisk || source->usedDisk();
- if (statsOut->usedDisk && statsOut->hasSortStage)
+ usedDisk = usedDisk || source->usedDisk();
+ if (usedDisk && hasSortStage)
break;
}
+ statsOut->hasSortStage = hasSortStage;
+ statsOut->usedDisk = usedDisk;
}
} // namespace mongo