author      David Percy <david.percy@mongodb.com>             2020-01-06 19:41:50 +0000
committer   Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-03-31 23:04:51 +0000
commit      084ae4ef6a3d050ee8599d04d9bac3ed5b7369d9 (patch)
tree        1efac672a76356744ec9cef4db9e6656249b8e65
parent      c281752a3a47f6e8654bd8bd0213b87e36c3ca76 (diff)
download    mongo-084ae4ef6a3d050ee8599d04d9bac3ed5b7369d9.tar.gz
SERVER-40805 Indicate reason for replanning
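
For context (illustrative sketch, not part of this commit): once a replan occurs on a
profiled operation, the new field can be read back from system.profile alongside the
existing boolean. The collection name and filter below are placeholders.

    // Enable profiling, run a query that triggers a replan, then inspect the entry.
    db.setProfilingLevel(2);
    db.example.find({x: 7}).sort({y: 1}).itcount();
    const entry = db.system.profile.find({op: "query", ns: "test.example"})
                      .sort({ts: -1})
                      .limit(1)
                      .next();
    // With this change, 'replanned' is accompanied by a human-readable 'replanReason'.
    printjson({replanned: entry.replanned, replanReason: entry.replanReason});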
-rw-r--r--  etc/backports_required_for_multiversion_tests.yml  |  2
-rw-r--r--  jstests/core/profile_find.js                        |  9
-rw-r--r--  jstests/noPassthrough/plan_cache_replan_sort.js     | 51
-rw-r--r--  src/mongo/db/curop.cpp                              | 14
-rw-r--r--  src/mongo/db/curop.h                                |  2
-rw-r--r--  src/mongo/db/exec/cached_plan.cpp                   | 17
-rw-r--r--  src/mongo/db/exec/cached_plan.h                     |  2
-rw-r--r--  src/mongo/db/exec/plan_stats.h                      |  4
-rw-r--r--  src/mongo/db/query/explain.cpp                      |  2
-rw-r--r--  src/mongo/db/query/plan_summary_stats.h             |  2
-rw-r--r--  src/mongo/db/sorter/sorter.cpp                      |  6
11 files changed, 92 insertions, 19 deletions
diff --git a/etc/backports_required_for_multiversion_tests.yml b/etc/backports_required_for_multiversion_tests.yml
index d978bf7991c..a2a6482b612 100644
--- a/etc/backports_required_for_multiversion_tests.yml
+++ b/etc/backports_required_for_multiversion_tests.yml
@@ -29,6 +29,8 @@ concurrency_sharded_replication_multiversion:
replica_sets_jscore_multiversion_passthrough:
- ticket: SERVER-44260
test_file: jstests/core/txns/new_transaction_waits_for_previous_txn_table_updates.js
+- ticket: SERVER-40805
+ test_file: jstests/core/profile_find.js
replica_sets_multiversion:
- ticket: SERVER-42825
diff --git a/jstests/core/profile_find.js b/jstests/core/profile_find.js
index 23bcf96e2ad..31d17448ee5 100644
--- a/jstests/core/profile_find.js
+++ b/jstests/core/profile_find.js
@@ -126,8 +126,13 @@ assert.neq(coll.findOne({a: 5, b: 15}), null);
assert.neq(coll.findOne({a: 15, b: 10}), null);
profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
-assert.eq(profileObj.replanned, true, tojson(profileObj));
-assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+assert.eq(profileObj.replanned, true, profileObj);
+assert(profileObj.hasOwnProperty('replanReason'), profileObj);
+assert(
+ profileObj.replanReason.match(
+ /cached plan was less efficient than expected: expected trial execution to take [0-9]+ works but it took at least [0-9]+ works/),
+ profileObj);
+assert.eq(profileObj.appName, "MongoDB Shell", profileObj);
//
// Confirm that query modifiers such as "hint" are in the profiler document.
diff --git a/jstests/noPassthrough/plan_cache_replan_sort.js b/jstests/noPassthrough/plan_cache_replan_sort.js
new file mode 100644
index 00000000000..cece92db2ce
--- /dev/null
+++ b/jstests/noPassthrough/plan_cache_replan_sort.js
@@ -0,0 +1,51 @@
+/**
+ * Test that when replanning happens due to blocking sort's memory limit, we include
+ * replanReason: "cached plan returned: ..." in the profiling data.
+ * @tags: [requires_profiling]
+ */
+(function() {
+"use strict";
+
+load("jstests/libs/profiler.js");
+
+const conn = MongoRunner.runMongod({
+ // Disallow blocking sort, by setting the memory limit very low (1 byte).
+ setParameter: "internalQueryExecMaxBlockingSortBytes=1"
+});
+const db = conn.getDB("test");
+const coll = db.plan_cache_replan_sort;
+coll.drop();
+
+// Ensure a plan with a sort stage gets cached.
+assert.commandWorked(coll.createIndex({x: 1}));
+assert.commandWorked(coll.createIndex({y: 1}));
+const docs = Array.from({length: 20}, (_, i) => ({x: 7, y: i}));
+assert.commandWorked(coll.insert(docs));
+// { x: 5 } is very selective because it matches 0 documents, so the x index will win.
+// This will succeed despite the low memory limit on sort, because the sort stage will see zero
+// documents.
+assert.eq(0, coll.find({x: 5}).sort({y: 1}).itcount());
+// We need to run the query twice for it to be marked "active" in the plan cache.
+assert.eq(0, coll.find({x: 5}).sort({y: 1}).itcount());
+
+const cachedPlans = coll.aggregate({$planCacheStats: {}}).toArray();
+assert.eq(1, cachedPlans.length, cachedPlans);
+assert.eq(true, cachedPlans[0].isActive, cachedPlans);
+assert.eq("SORT", cachedPlans[0].cachedPlan.stage, cachedPlans);
+
+// Assert we "replan", by running the same query with different parameters.
+// This time the filter is not selective at all.
+db.setProfilingLevel(2);
+assert.eq(20, coll.find({x: 7}).sort({y: 1}).itcount());
+
+const profileObj = getLatestProfilerEntry(db, {op: "query"});
+assert.eq(profileObj.ns, coll.getFullName(), profileObj);
+assert.eq(profileObj.replanned, true, profileObj);
+assert.eq(
+ profileObj.replanReason,
+ "cached plan returned: OperationFailed: Sort operation used more than the maximum 1 bytes of " +
+ "RAM. Add an index, or specify a smaller limit.",
+ profileObj);
+
+MongoRunner.stopMongod(conn);
+}());
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index f334f95b90c..b0fc94350bf 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -699,7 +699,11 @@ string OpDebug::report(Client* client,
OPDEBUG_TOSTRING_HELP_BOOL(hasSortStage);
OPDEBUG_TOSTRING_HELP_BOOL(usedDisk);
OPDEBUG_TOSTRING_HELP_BOOL(fromMultiPlanner);
- OPDEBUG_TOSTRING_HELP_BOOL(replanned);
+ if (replanReason) {
+ bool replanned = true;
+ OPDEBUG_TOSTRING_HELP_BOOL(replanned);
+ s << " replanReason:\"" << str::escape(redact(*replanReason)) << "\"";
+ }
OPDEBUG_TOSTRING_HELP_OPTIONAL("nMatched", additiveMetrics.nMatched);
OPDEBUG_TOSTRING_HELP_OPTIONAL("nModified", additiveMetrics.nModified);
OPDEBUG_TOSTRING_HELP_OPTIONAL("ninserted", additiveMetrics.ninserted);
@@ -801,7 +805,11 @@ void OpDebug::append(const CurOp& curop,
OPDEBUG_APPEND_BOOL(hasSortStage);
OPDEBUG_APPEND_BOOL(usedDisk);
OPDEBUG_APPEND_BOOL(fromMultiPlanner);
- OPDEBUG_APPEND_BOOL(replanned);
+ if (replanReason) {
+ bool replanned = true;
+ OPDEBUG_APPEND_BOOL(replanned);
+ b.append("replanReason", *replanReason);
+ }
OPDEBUG_APPEND_OPTIONAL("nMatched", additiveMetrics.nMatched);
OPDEBUG_APPEND_OPTIONAL("nModified", additiveMetrics.nModified);
OPDEBUG_APPEND_OPTIONAL("ninserted", additiveMetrics.ninserted);
@@ -868,7 +876,7 @@ void OpDebug::setPlanSummaryMetrics(const PlanSummaryStats& planSummaryStats) {
hasSortStage = planSummaryStats.hasSortStage;
usedDisk = planSummaryStats.usedDisk;
fromMultiPlanner = planSummaryStats.fromMultiPlanner;
- replanned = planSummaryStats.replanned;
+ replanReason = planSummaryStats.replanReason;
}
BSONObj OpDebug::makeFlowControlObject(FlowControlTicketholder::CurOp stats) const {
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index d74f0096c1b..e14d98f35c5 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -208,7 +208,7 @@ public:
bool fromMultiPlanner{false};
// True if a replan was triggered during the execution of this operation.
- bool replanned{false};
+ std::optional<std::string> replanReason;
bool upsert{false}; // true if the update actually did an insert
bool cursorExhausted{
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index f47a253669b..d483a64e43e 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -146,7 +146,10 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
<< " status: " << redact(statusObj);
const bool shouldCache = false;
- return replan(yieldPolicy, shouldCache);
+ return replan(yieldPolicy,
+ shouldCache,
+ str::stream() << "cached plan returned: "
+ << WorkingSetCommon::toStatusString(statusObj));
} else {
invariant(PlanStage::NEED_TIME == state);
}
@@ -161,7 +164,13 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
<< " plan summary before replan: " << Explain::getPlanSummary(child().get());
const bool shouldCache = true;
- return replan(yieldPolicy, shouldCache);
+ return replan(
+ yieldPolicy,
+ shouldCache,
+ str::stream()
+ << "cached plan was less efficient than expected: expected trial execution to take "
+ << _decisionWorks << " works but it took at least " << maxWorksBeforeReplan
+ << " works");
}
Status CachedPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
@@ -178,7 +187,7 @@ Status CachedPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
return Status::OK();
}
-Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
+Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache, std::string reason) {
// We're going to start over with a new plan. Clear out info from our old plan.
{
std::queue<WorkingSetID> emptyQueue;
@@ -187,7 +196,7 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
_ws->clear();
_children.clear();
- _specificStats.replanned = true;
+ _specificStats.replanReason = std::move(reason);
if (shouldCache) {
// Deactivate the current cache entry.
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index 45ca2ed166c..cb1788521e7 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -106,7 +106,7 @@ private:
*
* We only modify the plan cache if 'shouldCache' is true.
*/
- Status replan(PlanYieldPolicy* yieldPolicy, bool shouldCache);
+ Status replan(PlanYieldPolicy* yieldPolicy, bool shouldCache, std::string reason);
/**
* May yield during the cached plan stage's trial period or replanning phases.
diff --git a/src/mongo/db/exec/plan_stats.h b/src/mongo/db/exec/plan_stats.h
index 9ff69efff9d..43053e27bee 100644
--- a/src/mongo/db/exec/plan_stats.h
+++ b/src/mongo/db/exec/plan_stats.h
@@ -201,7 +201,7 @@ struct AndSortedStats : public SpecificStats {
};
struct CachedPlanStats : public SpecificStats {
- CachedPlanStats() : replanned(false) {}
+ CachedPlanStats() = default;
SpecificStats* clone() const final {
return new CachedPlanStats(*this);
@@ -211,7 +211,7 @@ struct CachedPlanStats : public SpecificStats {
return sizeof(*this);
}
- bool replanned;
+ std::optional<std::string> replanReason;
};
struct CollectionScanStats : public SpecificStats {
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 5cd84b98fa8..6af8da19082 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -1001,7 +1001,7 @@ void Explain::getSummaryStats(const PlanExecutor& exec, PlanSummaryStats* statsO
const CachedPlanStage* cachedPlan = static_cast<const CachedPlanStage*>(stages[i]);
const CachedPlanStats* cachedStats =
static_cast<const CachedPlanStats*>(cachedPlan->getSpecificStats());
- statsOut->replanned = cachedStats->replanned;
+ statsOut->replanReason = cachedStats->replanReason;
} else if (STAGE_MULTI_PLAN == stages[i]->stageType()) {
statsOut->fromMultiPlanner = true;
}
diff --git a/src/mongo/db/query/plan_summary_stats.h b/src/mongo/db/query/plan_summary_stats.h
index 35deb4d83c0..340a101e094 100644
--- a/src/mongo/db/query/plan_summary_stats.h
+++ b/src/mongo/db/query/plan_summary_stats.h
@@ -64,7 +64,7 @@ struct PlanSummaryStats {
bool fromMultiPlanner = false;
// Was a replan triggered during the execution of this query?
- bool replanned = false;
+ std::optional<std::string> replanReason;
};
} // namespace mongo
diff --git a/src/mongo/db/sorter/sorter.cpp b/src/mongo/db/sorter/sorter.cpp
index 37b8d98aa02..2f2dc44f1c8 100644
--- a/src/mongo/db/sorter/sorter.cpp
+++ b/src/mongo/db/sorter/sorter.cpp
@@ -552,10 +552,8 @@ private:
// elsewhere where extSortAllowed could possibly be false, this message will
// need to be revisited.
uasserted(16819,
- str::stream()
- << "Sort exceeded memory limit of " << _opts.maxMemoryUsageBytes
- << " bytes, but did not opt in to external sorting. Aborting operation."
- << " Pass allowDiskUse:true to opt in.");
+ str::stream() << "Sort exceeded memory limit of " << _opts.maxMemoryUsageBytes
+ << " bytes, but did not opt in to external sorting.");
}
sort();