summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorJennifer Peshansky <jennifer.peshansky@mongodb.com>2021-12-15 15:23:07 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-01-21 00:41:16 +0000
commit0efeb8cb201e48dca15459c863decacd0ac847e7 (patch)
treeb8310f94113ab5feddf8cd918a81d6818b5719c7
parentd3daee1ab48ffe0dbd0d32cafcc339cf6bbe4f30 (diff)
downloadmongo-0efeb8cb201e48dca15459c863decacd0ac847e7.tar.gz
SERVER-59754 Prevent get_executor from overwriting information set by an outer pipeline
(cherry picked from commit 4970d9626853e3b916aab3d911bcf4aba23e8c88) (cherry picked from commit da0cacc0c3f3aa6e8b3405443aa2011c5e6d8220) (cherry picked from commit b8698458dd5420162e0dec1f5615669ad1c137b1)
-rw-r--r-- jstests/noPassthrough/logs_query_hash.js | 63
-rw-r--r-- src/mongo/db/query/get_executor.cpp     | 14
2 files changed, 72 insertions, 5 deletions
diff --git a/jstests/noPassthrough/logs_query_hash.js b/jstests/noPassthrough/logs_query_hash.js
new file mode 100644
index 00000000000..518c1b137c5
--- /dev/null
+++ b/jstests/noPassthrough/logs_query_hash.js
@@ -0,0 +1,63 @@
+/**
+ * Tests that the queryHash and planCacheKey are logged correctly.
+ */
+(function() {
+"use strict";
+
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
+const db = conn.getDB("jstests_logs_query_hash");
+
+// Set logLevel to 1 so that all queries will be logged.
+assert.commandWorked(db.setLogLevel(1));
+
+// Set up collections foo and bar.
+db.foo.drop();
+assert.commandWorked(db.foo.insert({a: 1, b: 1, c: 1}));
+assert.commandWorked(db.foo.createIndexes([{a: 1, b: 1}, {b: 1, a: 1}]));
+
+db.bar.drop();
+assert.commandWorked(db.bar.insert({a: 1, b: 1, c: 1}));
+assert.commandWorked(db.bar.createIndexes([{a: 1, b: 1, c: 1}, {b: 1, a: 1, c: 1}]));
+
+// Ensure the slow query log contains the same queryHash and planCacheKey as the explain output.
+function runAndVerifySlowQueryLog(pipeline, commentObj, hint) {
+ assert.eq(db.foo.aggregate(pipeline, commentObj, hint).itcount(), 1);
+ let queryPlanner =
+ db.foo.explain().aggregate(pipeline, commentObj, hint).stages[0].$cursor.queryPlanner;
+ let regex = new RegExp(`comment: "${commentObj.comment}"[^\n]*queryHash:${
+ queryPlanner.queryHash} planCacheKey:${queryPlanner.planCacheKey}`);
+ assert(checkLog.checkContainsOnce(db, regex),
+ "slow query logs did not contain [" + regex + "] when they should have");
+}
+
+const lookupStage = {
+ "$lookup": {
+ "from": "bar",
+ "let": {"b": {"$ifNull": ["$b", null]}},
+ "pipeline": [
+ {"$match": {"$or": [{"a": {"$exists": false}}, {"a": 1}]}},
+ {"$match": {"$expr": {"$eq": ["$b", "$$b"]}}}
+ ],
+ "as": "bar"
+ }
+};
+
+runAndVerifySlowQueryLog([{"$match": {$or: [{a: 1}, {b: 1}]}}, lookupStage],
+ {"comment": "pipeline1"});
+runAndVerifySlowQueryLog([{"$match": {a: {$in: [1, 2, 3, 4, 5]}}}, lookupStage],
+ {"comment": "pipeline2"});
+runAndVerifySlowQueryLog([{"$match": {b: 1}}, lookupStage], {"comment": "pipeline3"});
+
+const hint = {
+ "hint": {"a": 1, "b": 1}
+};
+
+runAndVerifySlowQueryLog(
+ [{"$match": {$or: [{a: 1}, {b: 1}]}}, lookupStage], {"comment": "pipelineWithHint1"}, hint);
+runAndVerifySlowQueryLog(
+ [{"$match": {a: {$in: [1, 2, 3, 4, 5]}}}, lookupStage], {"comment": "pipelineWithHint2"}, hint);
+runAndVerifySlowQueryLog([{"$match": {b: 1}}, lookupStage], {"comment": "pipelineWithHint3"}, hint);
+
+MongoRunner.stopMongod(conn);
+})();
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index eb0f93d823c..f888a10136e 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -460,13 +460,17 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
// Check that the query should be cached.
if (collection->infoCache()->getPlanCache()->shouldCacheQuery(*canonicalQuery)) {
- // Fill in opDebug information.
+ // Fill in opDebug information, unless it has already been filled by an outer pipeline.
const auto planCacheKey =
collection->infoCache()->getPlanCache()->computeKey(*canonicalQuery);
- CurOp::get(opCtx)->debug().queryHash =
- canonical_query_encoder::computeHash(planCacheKey.getStableKeyStringData());
- CurOp::get(opCtx)->debug().planCacheKey =
- canonical_query_encoder::computeHash(planCacheKey.toString());
+ OpDebug& opDebug = CurOp::get(opCtx)->debug();
+ if (!opDebug.queryHash) {
+ opDebug.queryHash =
+ canonical_query_encoder::computeHash(planCacheKey.getStableKeyStringData());
+ }
+ if (!opDebug.planCacheKey) {
+ opDebug.planCacheKey = canonical_query_encoder::computeHash(planCacheKey.toString());
+ }
// Try to look up a cached solution for the query.
if (auto cs =