author     Matt Boros <matt.boros@mongodb.com>  2023-04-11 03:30:05 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2023-04-11 04:31:55 +0000
commit     4a2fe90f565894a439ccb70713892311a3a0ee1d (patch)
tree       9ff035e31af7188ade678af3fb67edc244aff235
parent     5facb9a7c4e98a7636e061544ad9d359302d9847 (diff)
download   mongo-4a2fe90f565894a439ccb70713892311a3a0ee1d.tar.gz
SERVER-75506 Cache $lookup inner query plans with single solution
-rw-r--r--  jstests/aggregation/sources/lookup/lookup_single_solution_cache.js | 67
-rw-r--r--  src/mongo/db/pipeline/document_source_lookup.cpp | 1
-rw-r--r--  src/mongo/db/pipeline/expression_context.h | 4
-rw-r--r--  src/mongo/db/query/get_executor.cpp | 4
4 files changed, 75 insertions(+), 1 deletion(-)
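
This change makes $lookup cache the query plan of its inner (foreign) side even when the planner finds only one solution: DocumentSourceLookUp sets a new forcePlanCache flag on the foreign ExpressionContext, and get_executor.cpp then sends the lone solution through the multi-planner, which is what writes plan cache entries. A rough mongo shell sketch of the observable effect, mirroring the new jstest (collection and field names are illustrative):

// Unindexed foreign collection: the inner query's only plan is a COLLSCAN.
db.outer.insertMany([{a: 0}, {a: 1}, {a: 2}]);
db.inner.insertMany([{b: 0}, {b: 1}, {b: 2}]);
db.outer.aggregate([
    {$lookup: {from: "inner", localField: "a", foreignField: "b", as: "docs",
               pipeline: [{$project: {_id: 0}}]}}
]).toArray();
// With this change the foreign side leaves an entry in inner's plan cache.
printjson(db.inner.getPlanCache().list());
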
diff --git a/jstests/aggregation/sources/lookup/lookup_single_solution_cache.js b/jstests/aggregation/sources/lookup/lookup_single_solution_cache.js
new file mode 100644
index 00000000000..fd692d8cf48
--- /dev/null
+++ b/jstests/aggregation/sources/lookup/lookup_single_solution_cache.js
@@ -0,0 +1,67 @@
+// Test the plan cache entries for a $lookup with a single query solution. It should produce a
+// single cache entry.
+// @tags: [
+// assumes_unsharded_collection,
+// do_not_wrap_aggregations_in_facets,
+// assumes_read_concern_unchanged,
+// assumes_read_preference_unchanged,
+// tenant_migration_incompatible
+// ]
+(function() {
+"use strict";
+
+const outer = db.outer;
+const inner = db.inner;
+
+outer.drop();
+inner.drop();
+
+assert.commandWorked(outer.insert([{_id: 0, a: 0}, {_id: 1, a: 1}, {_id: 2, a: 2}]));
+assert.commandWorked(inner.insert([{_id: 3, b: 0}, {_id: 4, b: 1}, {_id: 5, b: 2}]));
+
+const expectedResults = [{a: 0, docs: [{b: 0}]}, {a: 1, docs: [{b: 1}]}, {a: 2, docs: [{b: 2}]}];
+
+{
+ // Sort by "a" so the inner queries run in a deterministic order; the second run (b=1) is the one whose plan ends up cached.
+ const pipeline = [
+ {$sort: {a: 1}},
+ {$lookup: {from: "inner", localField: "a", foreignField: "b", as: "docs", pipeline: [{$project: {_id: 0}}]}},
+ {$project: {_id: 0}}
+ ];
+
+ assert.eq(outer.aggregate(pipeline).toArray(), expectedResults);
+
+ assert.eq(inner.getPlanCache().list().length, 1);
+ const innerPlan = inner.getPlanCache().list()[0];
+ assert(innerPlan.isActive);
+ assert.eq(innerPlan.cachedPlan.inputStage.stage, "COLLSCAN");
+ assert.eq(innerPlan.cachedPlan.inputStage.filter, {b: {$eq: 1}});
+}
+
+inner.getPlanCache().clear();
+
+{
+ // Test with "let" syntax.
+ const pipeline = [
+ {$sort: {a: 1}},
+ {$lookup: {
+ from: "inner",
+ let: {aRenamed: "$a"},
+ pipeline: [
+ {$match: {$expr: {$eq: ["$b", "$$aRenamed"]}}},
+ {$project: {_id: 0}},
+ ],
+ as: "docs"
+ }},
+ {$project: {_id: 0}}
+ ];
+
+ assert.eq(outer.aggregate(pipeline).toArray(), expectedResults);
+
+ assert.eq(inner.getPlanCache().list().length, 1);
+ const innerPlan = inner.getPlanCache().list()[0];
+ assert(innerPlan.isActive);
+ assert.eq(innerPlan.cachedPlan.inputStage.stage, "COLLSCAN");
+ assert.eq(Object.keys(innerPlan.cachedPlan.inputStage.filter["$and"][0]), ["$expr"]);
+}
+}());
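
For reference, getPlanCache().list() in the assertions above is a shell wrapper around the $planCacheStats aggregation stage, so the same checks can be written directly against that stage. A sketch, assuming the plan cache output shape used by the asserts above:

// Inspect the foreign collection's cache entries via $planCacheStats directly.
const entries = db.inner.aggregate([{$planCacheStats: {}}]).toArray();
assert.eq(entries.length, 1);
assert(entries[0].isActive);
assert.eq(entries[0].cachedPlan.inputStage.stage, "COLLSCAN");
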
diff --git a/src/mongo/db/pipeline/document_source_lookup.cpp b/src/mongo/db/pipeline/document_source_lookup.cpp
index ec8dfda206b..b34b88368e6 100644
--- a/src/mongo/db/pipeline/document_source_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup.cpp
@@ -595,6 +595,7 @@ std::unique_ptr<Pipeline, PipelineDeleter> DocumentSourceLookUp::buildPipeline(
const Document& inputDoc) {
// Copy all 'let' variables into the foreign pipeline's expression context.
_variables.copyToExpCtx(_variablesParseState, _fromExpCtx.get());
+ _fromExpCtx->forcePlanCache = true;
// Resolve the 'let' variables to values per the given input document.
resolveLetVariables(inputDoc, &_fromExpCtx->variables);
diff --git a/src/mongo/db/pipeline/expression_context.h b/src/mongo/db/pipeline/expression_context.h
index 3482c86b510..dcbe7016b56 100644
--- a/src/mongo/db/pipeline/expression_context.h
+++ b/src/mongo/db/pipeline/expression_context.h
@@ -542,6 +542,10 @@ public:
return getCollatorBSON().woCompare(CollationSpec::kSimpleSpec) == 0;
}
+ // Forces the plan cache to be used even if there's only one solution available. Queries that
+ // are ineligible will still not be cached.
+ bool forcePlanCache = false;
+
protected:
static const int kInterruptCheckPeriod = 128;
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 72a72af4bac..ba526bb6c72 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -824,7 +824,9 @@ public:
}
}
- if (1 == solutions.size()) {
+ // Force multiplanning (and therefore caching) if 'forcePlanCache' is set. We could manually
+ // update the plan cache instead, without multiplanning, but this is simpler.
+ if (1 == solutions.size() && !_cq->getExpCtxRaw()->forcePlanCache) {
// Only one possible plan. Build the stages from the solution.
auto result = releaseResult();
solutions[0]->indexFilterApplied = _plannerParams.indexFiltersApplied;
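
The flag is needed because, as the branch above shows, a query with exactly one solution is normally built directly and never reaches the multi-planner, so it never produces a plan cache entry; forcePlanCache overrides that for $lookup inner queries only. A shell sketch of the contrast, assuming the classic engine and no secondary indexes on the inner collection:

// A plain single-solution find (COLLSCAN only) is built directly and, on the
// classic-engine path shown above, writes nothing to the plan cache.
db.inner.getPlanCache().clear();
db.inner.find({b: 1}).toArray();
assert.eq(db.inner.getPlanCache().list().length, 0);
// The same predicate executed as the inner side of the $lookup above is now
// forced through the multi-planner by forcePlanCache, so it does get cached.
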