summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMihai Andrei <mihai.andrei@10gen.com>2021-09-21 21:41:36 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2021-09-21 22:11:15 +0000
commit4a43e817011e7cde84c2dce87619d81921f19c9a (patch)
treed8517e3c32a4a5a16e4d30cee14bf1660845f297
parent31f1068f5273f1dd55378eda3fb4bdc441e0a15e (diff)
downloadmongo-4a43e817011e7cde84c2dce87619d81921f19c9a.tar.gz
SERVER-60052 Truncate SBE explain plan stats if they exceed max BSON depth
-rw-r--r--jstests/noPassthrough/truncate_large_profiler_entry.js36
-rw-r--r--src/mongo/db/query/plan_explainer_sbe.cpp28
2 files changed, 57 insertions, 7 deletions
diff --git a/jstests/noPassthrough/truncate_large_profiler_entry.js b/jstests/noPassthrough/truncate_large_profiler_entry.js
new file mode 100644
index 00000000000..1cf05c27d45
--- /dev/null
+++ b/jstests/noPassthrough/truncate_large_profiler_entry.js
@@ -0,0 +1,36 @@
+/**
+ * Test which verifies that large profiler entries generated for SBE plans do not exceed the max
+ * BSON depth. Instead, they get truncated right below the max depth, so the profiler document
+ * remains valid BSON and collection validation passes.
+ */
+(function() {
+"use strict";
+
+const conn =
+    MongoRunner.runMongod({setParameter: "internalQueryEnableSlotBasedExecutionEngine=true"});
+assert.neq(null, conn, "mongod was unable to startup");
+const db = conn.getDB("test");
+const collName = jsTestName();
+const coll = db[collName];
+coll.drop();
+
+// Insert some documents so our query will perform some work.
+assert.commandWorked(coll.insert([{a: 1}, {a: 2}]));
+const longField = 'a.'.repeat(99) + 'a';
+const projectionSpec = {
+    [longField]: 1
+};
+
+// Set up the profiler so that it only records the deeply-nested projection query issued below.
+assert.commandWorked(db.setProfilingLevel(2, {slowms: 0, sampleRate: 1}, {
+    filter: {'op': 'query', 'command.projection': projectionSpec}
+}));
+
+// Run the query and verify that exactly one matching entry was captured by the profiler.
+coll.find({}, projectionSpec).toArray();
+const profilerEntry = db.system.profile.find().toArray();
+assert.eq(1, profilerEntry.length, profilerEntry);
+
+// Collection validation should detect no issues, i.e. the truncated entry is valid BSON.
+assert.commandWorked(db.system.profile.validate({full: true}));
+MongoRunner.stopMongod(conn);
+}());
diff --git a/src/mongo/db/query/plan_explainer_sbe.cpp b/src/mongo/db/query/plan_explainer_sbe.cpp
index 67690127987..442411e0123 100644
--- a/src/mongo/db/query/plan_explainer_sbe.cpp
+++ b/src/mongo/db/query/plan_explainer_sbe.cpp
@@ -191,10 +191,10 @@ void statsToBSON(const QuerySolutionNode* node,
}
childrenBob.doneFast();
}
-
-void statsToBSON(const sbe::PlanStageStats* stats,
- BSONObjBuilder* bob,
- const BSONObjBuilder* topLevelBob) {
+void statsToBSONHelper(const sbe::PlanStageStats* stats,
+ BSONObjBuilder* bob,
+ const BSONObjBuilder* topLevelBob,
+ std::uint32_t currentDepth) {
invariant(stats);
invariant(bob);
invariant(topLevelBob);
@@ -205,6 +205,14 @@ void statsToBSON(const sbe::PlanStageStats* stats,
return;
}
+ // Stop as soon as the BSON object we're building becomes too deep. Note that we go 2 less
+ // than the max depth to account for when this stage has multiple children.
+ if (currentDepth >= BSONDepth::getMaxDepthForUserStorage() - 2) {
+ bob->append("warning",
+ "stats tree exceeded BSON depth limit; omitting the rest of the tree");
+ return;
+ }
+
auto stageType = stats->common.stageType;
bob->append("stage", stageType);
bob->appendNumber("planNodeId", static_cast<long long>(stats->common.nodeId));
@@ -234,7 +242,7 @@ void statsToBSON(const sbe::PlanStageStats* stats,
// rather than 'inputStages'.
if (stats->children.size() == 1) {
BSONObjBuilder childBob(bob->subobjStart("inputStage"));
- statsToBSON(stats->children[0].get(), &childBob, topLevelBob);
+ statsToBSONHelper(stats->children[0].get(), &childBob, topLevelBob, currentDepth + 1);
return;
}
@@ -253,7 +261,7 @@ void statsToBSON(const sbe::PlanStageStats* stats,
for (size_t idx = 0; idx < stats->children.size(); ++idx) {
BSONObjBuilder childBob(bob->subobjStart(overridenNames[idx]));
- statsToBSON(stats->children[idx].get(), &childBob, topLevelBob);
+ statsToBSONHelper(stats->children[idx].get(), &childBob, topLevelBob, currentDepth + 1);
}
return;
}
@@ -263,11 +271,17 @@ void statsToBSON(const sbe::PlanStageStats* stats,
BSONArrayBuilder childrenBob(bob->subarrayStart("inputStages"_sd));
for (auto&& child : stats->children) {
BSONObjBuilder childBob(childrenBob.subobjStart());
- statsToBSON(child.get(), &childBob, topLevelBob);
+ statsToBSONHelper(child.get(), &childBob, topLevelBob, currentDepth + 2);
}
childrenBob.doneFast();
}
+// Public entry point for serializing an SBE plan-stage stats tree to BSON. Delegates to
+// statsToBSONHelper with an initial depth of zero; the helper stops recursing (emitting a
+// "warning" field instead) before the output would exceed the max BSON depth for user storage.
+void statsToBSON(const sbe::PlanStageStats* stats,
+                 BSONObjBuilder* bob,
+                 const BSONObjBuilder* topLevelBob) {
+    statsToBSONHelper(stats, bob, topLevelBob, 0);
+}
+
PlanSummaryStats collectExecutionStatsSummary(const sbe::PlanStageStats* stats) {
invariant(stats);