-rw-r--r--  jstests/core/explode_for_sort_plan_cache.js    101
-rw-r--r--  src/mongo/db/exec/plan_cache_util.cpp             4
-rw-r--r--  src/mongo/db/exec/plan_cache_util.h               5
-rw-r--r--  src/mongo/db/query/planner_analysis.cpp          11
-rw-r--r--  src/mongo/db/query/planner_analysis.h             7
-rw-r--r--  src/mongo/db/query/query_solution.h               5
6 files changed, 128 insertions, 5 deletions
diff --git a/jstests/core/explode_for_sort_plan_cache.js b/jstests/core/explode_for_sort_plan_cache.js
new file mode 100644
index 00000000000..f4e5a87be7f
--- /dev/null
+++ b/jstests/core/explode_for_sort_plan_cache.js
@@ -0,0 +1,101 @@
+/**
+ * Confirms that explode for sort plans are properly cached and recovered from the plan cache,
+ * yielding correct results after the query is auto-parameterized.
+ *
+ * @tags: [
+ * # Since the plan cache is per-node state, this test assumes that all operations are happening
+ * # against the same mongod.
+ * assumes_read_preference_unchanged,
+ * assumes_read_concern_unchanged,
+ * does_not_support_stepdowns,
+ * # If all chunks are moved off of a shard, it can cause the plan cache to miss commands.
+ * assumes_balancer_off,
+ * assumes_unsharded_collection,
+ * ]
+ */
+(function() {
+"use strict";
+
+load("jstests/libs/analyze_plan.js");
+load("jstests/libs/sbe_util.js");
+
+const coll = db.explode_for_sort_plan_cache;
+coll.drop();
+
+// Create two indexes to ensure the multi-planner kicks in and the query plan gets cached.
+assert.commandWorked(coll.createIndex({a: 1, b: 1, c: 1}));
+assert.commandWorked(coll.createIndex({a: 1, b: 1, c: 1, d: 1}));
+
+assert.commandWorked(coll.insert({a: 2, b: 3}));
+
+// A helper function to look up a cache entry in the plan cache based on the given filter
+// and sort specs.
+function getPlanForCacheEntry(query, sort) {
+ const keyHash = getPlanCacheKeyFromShape({query: query, sort: sort, collection: coll, db: db});
+
+ const res =
+ coll.aggregate([{$planCacheStats: {}}, {$match: {planCacheKey: keyHash}}]).toArray();
+ // We expect exactly one matching cache entry.
+ assert.eq(1, res.length, () => tojson(coll.aggregate([{$planCacheStats: {}}]).toArray()));
+ return res[0];
+}
+
+// A helper function to assert that a cache entry doesn't exist in the plan cache based on the
+// given filter and sort specs.
+function assertCacheEntryDoesNotExist(query, sort) {
+ const keyHash = getPlanCacheKeyFromShape({query: query, sort: sort, collection: coll, db: db});
+ const res =
+ coll.aggregate([{$planCacheStats: {}}, {$match: {planCacheKey: keyHash}}]).toArray();
+ assert.eq(0, res.length, () => tojson(coll.aggregate([{$planCacheStats: {}}]).toArray()));
+}
+
+let querySpec = {a: {$eq: 2}, b: {$in: [99, 4]}};
+const sortSpec = {
+ c: 1
+};
+
+// TODO SERVER-67576: remove this branch once explode for sort plans are supported by the SBE plan
+// cache.
+if (checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
+ // Run the query for the first time and make sure the plan hasn't been cached.
+ assert.eq(0, coll.find(querySpec).sort(sortSpec).itcount());
+ assertCacheEntryDoesNotExist(querySpec, sortSpec);
+
+ // Run the query again and make sure it's still not cached.
+ assert.eq(0, coll.find(querySpec).sort(sortSpec).itcount());
+ assertCacheEntryDoesNotExist(querySpec, sortSpec);
+
+ // Run a query that returns one document in the collection and verify that the plan is still not cached.
+ querySpec = {a: {$eq: 2}, b: {$in: [3, 4]}};
+ assert.eq(1, coll.find(querySpec).sort(sortSpec).itcount());
+ assertCacheEntryDoesNotExist(querySpec, sortSpec);
+} else {
+ // Run the query for the first time to create an inactive plan cache entry.
+ assert.eq(0, coll.find(querySpec).sort(sortSpec).itcount());
+ const inactiveEntry = getPlanForCacheEntry(querySpec, sortSpec);
+ assert.eq(inactiveEntry.isActive, false, inactiveEntry);
+
+ // Run the query again to activate the cache entry.
+ assert.eq(0, coll.find(querySpec).sort(sortSpec).itcount());
+ const activeEntry = getPlanForCacheEntry(querySpec, sortSpec);
+ assert.eq(activeEntry.isActive, true, activeEntry);
+ assert.eq(inactiveEntry.queryHash,
+ activeEntry.queryHash,
+ `inactive=${tojson(inactiveEntry)}, active=${tojson(activeEntry)}`);
+ assert.eq(inactiveEntry.planCacheKey,
+ activeEntry.planCacheKey,
+ `inactive=${tojson(inactiveEntry)}, active=${tojson(activeEntry)}`);
+
+ // Run a query that reuses the cache entry and returns the single matching document in the collection.
+ querySpec = {a: {$eq: 2}, b: {$in: [3, 4]}};
+ assert.eq(1, coll.find(querySpec).sort(sortSpec).itcount());
+ const reusedEntry = getPlanForCacheEntry(querySpec, sortSpec);
+ assert.eq(reusedEntry.isActive, true, reusedEntry);
+ assert.eq(activeEntry.queryHash,
+ reusedEntry.queryHash,
+ `active=${tojson(activeEntry)}, reused=${tojson(reusedEntry)}`);
+ assert.eq(activeEntry.planCacheKey,
+ reusedEntry.planCacheKey,
+ `active=${tojson(activeEntry)}, reused=${tojson(reusedEntry)}`);
+}
+}());
diff --git a/src/mongo/db/exec/plan_cache_util.cpp b/src/mongo/db/exec/plan_cache_util.cpp
index a3fc5ff19d1..4aa3ab3f6c4 100644
--- a/src/mongo/db/exec/plan_cache_util.cpp
+++ b/src/mongo/db/exec/plan_cache_util.cpp
@@ -81,8 +81,10 @@ void updatePlanCache(OperationContext* opCtx,
const stage_builder::PlanStageData& data) {
// TODO SERVER-61507: Remove canUseSbePlanCache check once $group pushdown is
// integrated with SBE plan cache.
+ //
+ // TODO SERVER-67576: re-enable caching of "explode for sort" plans in the SBE cache.
if (shouldCacheQuery(query) && collections.getMainCollection() &&
- canonical_query_encoder::canUseSbePlanCache(query) &&
+ !solution.hasExplodedForSort && canonical_query_encoder::canUseSbePlanCache(query) &&
feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV()) {
auto key = plan_cache_key_factory::make(query, collections);
auto plan = std::make_unique<sbe::CachedSbePlan>(root.clone(), data);
diff --git a/src/mongo/db/exec/plan_cache_util.h b/src/mongo/db/exec/plan_cache_util.h
index 2fb16d8be89..14059d89a18 100644
--- a/src/mongo/db/exec/plan_cache_util.h
+++ b/src/mongo/db/exec/plan_cache_util.h
@@ -172,7 +172,10 @@ void updatePlanCache(
// Store the choice we just made in the cache, if the query is of a type that is safe to
// cache.
- if (shouldCacheQuery(query) && canCache) {
+ //
+ // TODO SERVER-67576: re-enable caching of "explode for sort" plans in the SBE cache.
+ if (shouldCacheQuery(query) && canCache &&
+ (!winningPlan.solution->hasExplodedForSort || std::is_same_v<PlanStageType, PlanStage*>)) {
auto rankingDecision = ranking.get();
auto cacheClassicPlan = [&]() {
auto buildDebugInfoFn = [&]() -> plan_cache_debug_info::DebugInfo {
diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp
index 921b79dc70b..b4f6eb5fbab 100644
--- a/src/mongo/db/query/planner_analysis.cpp
+++ b/src/mongo/db/query/planner_analysis.cpp
@@ -913,8 +913,13 @@ std::unique_ptr<QuerySolutionNode> QueryPlannerAnalysis::analyzeSort(
const CanonicalQuery& query,
const QueryPlannerParams& params,
std::unique_ptr<QuerySolutionNode> solnRoot,
- bool* blockingSortOut) {
+ bool* blockingSortOut,
+ bool* explodeForSortOut) {
+ invariant(blockingSortOut);
+ invariant(explodeForSortOut);
+
*blockingSortOut = false;
+ *explodeForSortOut = false;
const FindCommandRequest& findCommand = query.getFindCommandRequest();
if (params.traversalPreference) {
@@ -976,6 +981,7 @@ std::unique_ptr<QuerySolutionNode> QueryPlannerAnalysis::analyzeSort(
// index scans over point intervals to an OR of sub-scans in order to pull out a sort.
// Let's try this.
if (explodeForSort(query, params, &solnRoot)) {
+ *explodeForSortOut = true;
return solnRoot;
}
@@ -1076,7 +1082,8 @@ std::unique_ptr<QuerySolution> QueryPlannerAnalysis::analyzeDataAccess(
}
bool hasSortStage = false;
- solnRoot = analyzeSort(query, params, std::move(solnRoot), &hasSortStage);
+ solnRoot =
+ analyzeSort(query, params, std::move(solnRoot), &hasSortStage, &soln->hasExplodedForSort);
// This can happen if we need to create a blocking sort stage and we're not allowed to.
if (!solnRoot) {
diff --git a/src/mongo/db/query/planner_analysis.h b/src/mongo/db/query/planner_analysis.h
index b4b8979c29d..bd1acfec866 100644
--- a/src/mongo/db/query/planner_analysis.h
+++ b/src/mongo/db/query/planner_analysis.h
@@ -81,12 +81,17 @@ public:
/**
* Sort the results, if there is a sort required.
+ *
+ * The mandatory output parameters 'blockingSortOut' and 'explodeForSortOut' indicate whether
+ * the generated sub-plan contains a blocking QSN, such as 'SortNode', and whether the sub-plan
+ * was "exploded" to obtain an indexed sort (see QueryPlannerAnalysis::explodeForSort()).
*/
static std::unique_ptr<QuerySolutionNode> analyzeSort(
const CanonicalQuery& query,
const QueryPlannerParams& params,
std::unique_ptr<QuerySolutionNode> solnRoot,
- bool* blockingSortOut);
+ bool* blockingSortOut,
+ bool* explodeForSortOut);
/**
* Internal helper function used by analyzeSort.
diff --git a/src/mongo/db/query/query_solution.h b/src/mongo/db/query/query_solution.h
index 27a4ff33977..aa289b6c8de 100644
--- a/src/mongo/db/query/query_solution.h
+++ b/src/mongo/db/query/query_solution.h
@@ -403,6 +403,11 @@ public:
// we would want to fall back on an alternate non-blocking solution.
bool hasBlockingStage{false};
+ // Indicates whether this query solution represents an 'explode for sort' plan, i.e. a plan in
+ // which an index scan over multiple point intervals is 'exploded' into a union of index scans
+ // in order to obtain an indexed sort.
+ bool hasExplodedForSort{false};
+
// Runner executing this solution might be interested in knowing
// if the planning process for this solution was based on filtered indices.
bool indexFilterApplied{false};
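
For illustration, a minimal shell sketch (not part of the patch) of the kind of plan the new 'hasExplodedForSort' flag marks. It assumes the collection and indexes created by the test above, plus the getWinningPlan() and planHasStage() helpers loaded from jstests/libs/analyze_plan.js:

// With the index {a: 1, b: 1, c: 1} and point intervals on 'b', the planner can "explode"
// the single index scan into one scan per point interval and merge them in 'c' order,
// which shows up as a SORT_MERGE stage instead of a blocking SORT.
const explainOutput = coll.find({a: {$eq: 2}, b: {$in: [3, 4]}}).sort({c: 1}).explain();
const winningPlan = getWinningPlan(explainOutput.queryPlanner);
assert(planHasStage(db, winningPlan, "SORT_MERGE"), tojson(winningPlan));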