author      Alexander Ignatyev <alexander.ignatyev@mongodb.com>  2021-11-03 13:34:30 +0000
committer   Evergreen Agent <no-reply@evergreen.mongodb.com>     2021-11-03 14:17:31 +0000
commit      aebaaf20a7dea071770976aed8533f011c722b8c (patch)
tree        cea93e4b21410c5e1186af1d8349afd5b6a33623
parent      14cd3f8d8d89a2bdc33374f80ed04d1e326f8fc6 (diff)
download    mongo-aebaaf20a7dea071770976aed8533f011c722b8c.tar.gz
SERVER-60066 Remove plan cache entries on collection drop
-rw-r--r--  src/mongo/db/catalog/SConscript | 1
-rw-r--r--  src/mongo/db/commands/index_filter_commands.cpp | 6
-rw-r--r--  src/mongo/db/commands/index_filter_commands_test.cpp | 2
-rw-r--r--  src/mongo/db/commands/plan_cache_clear_command.cpp | 2
-rw-r--r--  src/mongo/db/commands/plan_cache_commands_test.cpp | 2
-rw-r--r--  src/mongo/db/exec/cached_plan.cpp | 2
-rw-r--r--  src/mongo/db/exec/multi_plan.cpp | 2
-rw-r--r--  src/mongo/db/exec/plan_cache_util.h | 7
-rw-r--r--  src/mongo/db/exec/subplan.cpp | 2
-rw-r--r--  src/mongo/db/query/SConscript | 2
-rw-r--r--  src/mongo/db/query/canonical_query_encoder_test.cpp | 2
-rw-r--r--  src/mongo/db/query/canonical_query_test_util.cpp | 167
-rw-r--r--  src/mongo/db/query/canonical_query_test_util.h | 63
-rw-r--r--  src/mongo/db/query/classic_plan_cache.cpp | 8
-rw-r--r--  src/mongo/db/query/classic_plan_cache.h | 44
-rw-r--r--  src/mongo/db/query/collection_query_info.cpp | 63
-rw-r--r--  src/mongo/db/query/collection_query_info.h | 55
-rw-r--r--  src/mongo/db/query/explain.cpp | 10
-rw-r--r--  src/mongo/db/query/get_executor.cpp | 3
-rw-r--r--  src/mongo/db/query/lru_key_value.h | 21
-rw-r--r--  src/mongo/db/query/lru_key_value_test.cpp | 31
-rw-r--r--  src/mongo/db/query/plan_cache.h | 16
-rw-r--r--  src/mongo/db/query/plan_cache_invalidator.cpp | 78
-rw-r--r--  src/mongo/db/query/plan_cache_invalidator.h | 87
-rw-r--r--  src/mongo/db/query/plan_cache_key_factory.cpp | 24
-rw-r--r--  src/mongo/db/query/plan_cache_key_factory.h | 25
-rw-r--r--  src/mongo/db/query/plan_cache_key_info.h (renamed from src/mongo/db/query/plan_cache_key.h) | 62
-rw-r--r--  src/mongo/db/query/plan_cache_key_info_test.cpp | 642
-rw-r--r--  src/mongo/db/query/plan_cache_test.cpp | 738
-rw-r--r--  src/mongo/db/query/sbe_cached_solution_planner.cpp | 2
-rw-r--r--  src/mongo/db/query/sbe_plan_cache.cpp | 32
-rw-r--r--  src/mongo/db/query/sbe_plan_cache.h | 59
-rw-r--r--  src/mongo/db/query/sbe_sub_planner.cpp | 2
-rw-r--r--  src/mongo/dbtests/plan_ranking.cpp | 3
-rw-r--r--  src/mongo/dbtests/query_stage_cached_plan.cpp | 24
-rw-r--r--  src/mongo/dbtests/query_stage_multiplan.cpp | 6
36 files changed, 1433 insertions, 862 deletions
diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript
index ca5635e34a8..bc19fa684c8 100644
--- a/src/mongo/db/catalog/SConscript
+++ b/src/mongo/db/catalog/SConscript
@@ -487,6 +487,7 @@ env.Library(
source=[
"$BUILD_DIR/mongo/db/query/collection_query_info.cpp",
"$BUILD_DIR/mongo/db/query/collection_index_usage_tracker_decoration.cpp",
+ "$BUILD_DIR/mongo/db/query/plan_cache_invalidator.cpp",
"$BUILD_DIR/mongo/db/query/plan_cache_key_factory.cpp",
"$BUILD_DIR/mongo/db/query/query_settings_decoration.cpp",
],
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index e2b150bd9c4..2d93bd8a062 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -261,7 +261,7 @@ Status ClearFilters::clear(OperationContext* opCtx,
querySettings->removeAllowedIndices(cq->encodeKey());
// Remove entry from plan cache
- planCache->remove(plan_cache_key_factory::make(*cq, collection));
+ planCache->remove(plan_cache_key_factory::make<PlanCacheKey>(*cq, collection));
LOGV2(20479,
"Removed index filter on {query}",
@@ -320,7 +320,7 @@ Status ClearFilters::clear(OperationContext* opCtx,
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
// Remove plan cache entry.
- planCache->remove(plan_cache_key_factory::make(*cq, collection));
+ planCache->remove(plan_cache_key_factory::make<PlanCacheKey>(*cq, collection));
}
LOGV2(20480,
@@ -401,7 +401,7 @@ Status SetFilter::set(OperationContext* opCtx,
querySettings->setAllowedIndices(*cq, indexes, indexNames);
// Remove entry from plan cache.
- planCache->remove(plan_cache_key_factory::make(*cq, collection));
+ planCache->remove(plan_cache_key_factory::make<PlanCacheKey>(*cq, collection));
LOGV2(20481,
"Index filter set on {query} {indexes}",
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index 93964a03ae1..932c2280f92 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -57,7 +57,7 @@ static const NamespaceString nss("test.collection");
PlanCacheKey makeKey(const CanonicalQuery& cq) {
CollectionMock coll(nss);
- return plan_cache_key_factory::make(cq, &coll);
+ return plan_cache_key_factory::make<PlanCacheKey>(cq, &coll);
}
/**
diff --git a/src/mongo/db/commands/plan_cache_clear_command.cpp b/src/mongo/db/commands/plan_cache_clear_command.cpp
index 2ed23cfa12b..53879f32ff8 100644
--- a/src/mongo/db/commands/plan_cache_clear_command.cpp
+++ b/src/mongo/db/commands/plan_cache_clear_command.cpp
@@ -79,7 +79,7 @@ Status clear(OperationContext* opCtx,
auto cq = std::move(statusWithCQ.getValue());
- planCache->remove(plan_cache_key_factory::make(*cq, collection));
+ planCache->remove(plan_cache_key_factory::make<PlanCacheKey>(*cq, collection));
return Status::OK();
}
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 4572cf029a4..4ef229c36f4 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -42,7 +42,7 @@ static const NamespaceString nss{"test.collection"_sd};
PlanCacheKey makeKey(const CanonicalQuery& cq) {
CollectionMock coll(nss);
- return plan_cache_key_factory::make(cq, &coll);
+ return plan_cache_key_factory::make<PlanCacheKey>(cq, &coll);
}
TEST(PlanCacheCommandsTest, CannotCanonicalizeWithMissingQueryField) {
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 9c28ded79c4..211ddbdd4da 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -209,7 +209,7 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache, s
// Deactivate the current cache entry.
const auto& coll = collection();
auto cache = CollectionQueryInfo::get(coll).getPlanCache();
- cache->deactivate(plan_cache_key_factory::make(*_canonicalQuery, coll));
+ cache->deactivate(plan_cache_key_factory::make<PlanCacheKey>(*_canonicalQuery, coll));
}
// Use the query planning module to plan the whole query.
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 43c729e1109..d1f7dfc9e64 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -133,7 +133,7 @@ PlanStage::StageState MultiPlanStage::doWork(WorkingSetID* out) {
CollectionQueryInfo::get(collection())
.getPlanCache()
- ->remove(plan_cache_key_factory::make(*_query, collection()));
+ ->remove(plan_cache_key_factory::make<PlanCacheKey>(*_query, collection()));
_bestPlanIdx = _backupPlanIdx;
_backupPlanIdx = kNoSuchPlan;
diff --git a/src/mongo/db/exec/plan_cache_util.h b/src/mongo/db/exec/plan_cache_util.h
index 43a0cbb80b7..676a341b229 100644
--- a/src/mongo/db/exec/plan_cache_util.h
+++ b/src/mongo/db/exec/plan_cache_util.h
@@ -162,7 +162,7 @@ void updatePlanCache(
PlanCacheLoggingCallbacks<PlanCacheKey, SolutionCacheData> callbacks{query};
uassertStatusOK(CollectionQueryInfo::get(collection)
.getPlanCache()
- ->set(plan_cache_key_factory::make(query, collection),
+ ->set(plan_cache_key_factory::make<PlanCacheKey>(query, collection),
winningPlan.solution->cacheData->clone(),
std::move(ranking),
opCtx->getServiceContext()->getPreciseClockSource()->now(),
@@ -177,9 +177,10 @@ void updatePlanCache(
auto cachedPlan = std::make_unique<sbe::CachedSbePlan>(
winningPlan.root->clone(), winningPlan.data);
- PlanCacheLoggingCallbacks<PlanCacheKey, sbe::CachedSbePlan> callbacks{query};
+ PlanCacheLoggingCallbacks<sbe::PlanCacheKey, sbe::CachedSbePlan> callbacks{
+ query};
uassertStatusOK(sbe::getPlanCache(opCtx).set(
- plan_cache_key_factory::make(query, collection),
+ plan_cache_key_factory::make<sbe::PlanCacheKey>(query, collection),
std::move(cachedPlan),
std::move(ranking),
opCtx->getServiceContext()->getPreciseClockSource()->now(),
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index a960b99c378..30f61103b4b 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -169,7 +169,7 @@ Status SubplanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
std::function<PlanCacheKey(const CanonicalQuery& cq, const CollectionPtr& coll)>
createPlanCacheKey = [](const CanonicalQuery& cq, const CollectionPtr& coll) {
- return plan_cache_key_factory::make(cq, coll);
+ return plan_cache_key_factory::make<PlanCacheKey>(cq, coll);
};
// Plan each branch of the $or.
diff --git a/src/mongo/db/query/SConscript b/src/mongo/db/query/SConscript
index 9cab5d71a92..352bf2a7181 100644
--- a/src/mongo/db/query/SConscript
+++ b/src/mongo/db/query/SConscript
@@ -319,6 +319,7 @@ env.CppUnitTest(
source=[
"canonical_query_encoder_test.cpp",
"canonical_query_test.cpp",
+ "canonical_query_test_util.cpp",
"classic_stage_builder_test.cpp",
"count_command_test.cpp",
"cursor_response_test.cpp",
@@ -340,6 +341,7 @@ env.CppUnitTest(
"parsed_distinct_test.cpp",
"plan_cache_indexability_test.cpp",
"plan_cache_size_parameter_test.cpp",
+ "plan_cache_key_info_test.cpp",
"plan_cache_test.cpp",
"plan_ranker_test.cpp",
"planner_access_test.cpp",
diff --git a/src/mongo/db/query/canonical_query_encoder_test.cpp b/src/mongo/db/query/canonical_query_encoder_test.cpp
index b34cdcfea1d..10d610d80a6 100644
--- a/src/mongo/db/query/canonical_query_encoder_test.cpp
+++ b/src/mongo/db/query/canonical_query_encoder_test.cpp
@@ -50,7 +50,7 @@ static const NamespaceString nss("testdb.testcoll");
PlanCacheKey makeKey(const CanonicalQuery& cq) {
CollectionMock coll(nss);
- return plan_cache_key_factory::make(cq, &coll);
+ return plan_cache_key_factory::make<PlanCacheKey>(cq, &coll);
}
/**
diff --git a/src/mongo/db/query/canonical_query_test_util.cpp b/src/mongo/db/query/canonical_query_test_util.cpp
new file mode 100644
index 00000000000..7203c7e9992
--- /dev/null
+++ b/src/mongo/db/query/canonical_query_test_util.cpp
@@ -0,0 +1,167 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/query/canonical_query_test_util.h"
+
+#include "mongo/db/query/query_test_service_context.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+/**
+ * Utility functions to create a CanonicalQuery
+ */
+std::unique_ptr<CanonicalQuery> canonicalize(const BSONObj& queryObj) {
+ QueryTestServiceContext serviceContext;
+ auto opCtx = serviceContext.makeOperationContext();
+
+ auto findCommand = std::make_unique<FindCommandRequest>(nss);
+ findCommand->setFilter(queryObj);
+ const boost::intrusive_ptr<ExpressionContext> expCtx;
+ auto statusWithCQ =
+ CanonicalQuery::canonicalize(opCtx.get(),
+ std::move(findCommand),
+ false,
+ expCtx,
+ ExtensionsCallbackNoop(),
+ MatchExpressionParser::kAllowAllSpecialFeatures);
+ ASSERT_OK(statusWithCQ.getStatus());
+ return std::move(statusWithCQ.getValue());
+}
+
+std::unique_ptr<CanonicalQuery> canonicalize(StringData queryStr) {
+ BSONObj queryObj = fromjson(queryStr.toString());
+ return canonicalize(queryObj);
+}
+
+std::unique_ptr<CanonicalQuery> canonicalize(BSONObj query,
+ BSONObj sort,
+ BSONObj proj,
+ BSONObj collation) {
+ QueryTestServiceContext serviceContext;
+ auto opCtx = serviceContext.makeOperationContext();
+
+ auto findCommand = std::make_unique<FindCommandRequest>(nss);
+ findCommand->setFilter(query);
+ findCommand->setSort(sort);
+ findCommand->setProjection(proj);
+ findCommand->setCollation(collation);
+ const boost::intrusive_ptr<ExpressionContext> expCtx;
+ auto statusWithCQ =
+ CanonicalQuery::canonicalize(opCtx.get(),
+ std::move(findCommand),
+ false,
+ expCtx,
+ ExtensionsCallbackNoop(),
+ MatchExpressionParser::kAllowAllSpecialFeatures);
+ ASSERT_OK(statusWithCQ.getStatus());
+ return std::move(statusWithCQ.getValue());
+}
+
+std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ const char* collationStr) {
+ return canonicalize(
+ fromjson(queryStr), fromjson(sortStr), fromjson(projStr), fromjson(collationStr));
+}
+
+std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ long long skip,
+ long long limit,
+ const char* hintStr,
+ const char* minStr,
+ const char* maxStr) {
+ QueryTestServiceContext serviceContext;
+ auto opCtx = serviceContext.makeOperationContext();
+
+ auto findCommand = std::make_unique<FindCommandRequest>(nss);
+ findCommand->setFilter(fromjson(queryStr));
+ findCommand->setSort(fromjson(sortStr));
+ findCommand->setProjection(fromjson(projStr));
+ if (skip) {
+ findCommand->setSkip(skip);
+ }
+ if (limit) {
+ findCommand->setLimit(limit);
+ }
+ findCommand->setHint(fromjson(hintStr));
+ findCommand->setMin(fromjson(minStr));
+ findCommand->setMax(fromjson(maxStr));
+ const boost::intrusive_ptr<ExpressionContext> expCtx;
+ auto statusWithCQ =
+ CanonicalQuery::canonicalize(opCtx.get(),
+ std::move(findCommand),
+ false,
+ expCtx,
+ ExtensionsCallbackNoop(),
+ MatchExpressionParser::kAllowAllSpecialFeatures);
+ ASSERT_OK(statusWithCQ.getStatus());
+ return std::move(statusWithCQ.getValue());
+}
+
+std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ long long skip,
+ long long limit,
+ const char* hintStr,
+ const char* minStr,
+ const char* maxStr,
+ bool explain) {
+ QueryTestServiceContext serviceContext;
+ auto opCtx = serviceContext.makeOperationContext();
+
+ auto findCommand = std::make_unique<FindCommandRequest>(nss);
+ findCommand->setFilter(fromjson(queryStr));
+ findCommand->setSort(fromjson(sortStr));
+ findCommand->setProjection(fromjson(projStr));
+ if (skip) {
+ findCommand->setSkip(skip);
+ }
+ if (limit) {
+ findCommand->setLimit(limit);
+ }
+ findCommand->setHint(fromjson(hintStr));
+ findCommand->setMin(fromjson(minStr));
+ findCommand->setMax(fromjson(maxStr));
+ const boost::intrusive_ptr<ExpressionContext> expCtx;
+ auto statusWithCQ =
+ CanonicalQuery::canonicalize(opCtx.get(),
+ std::move(findCommand),
+ explain,
+ expCtx,
+ ExtensionsCallbackNoop(),
+ MatchExpressionParser::kAllowAllSpecialFeatures);
+ ASSERT_OK(statusWithCQ.getStatus());
+ return std::move(statusWithCQ.getValue());
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/query/canonical_query_test_util.h b/src/mongo/db/query/canonical_query_test_util.h
new file mode 100644
index 00000000000..17dedf7045f
--- /dev/null
+++ b/src/mongo/db/query/canonical_query_test_util.h
@@ -0,0 +1,63 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/query/canonical_query.h"
+
+namespace mongo {
+
+const NamespaceString nss("test.collection");
+
+std::unique_ptr<CanonicalQuery> canonicalize(const BSONObj& queryObj);
+std::unique_ptr<CanonicalQuery> canonicalize(StringData queryStr);
+std::unique_ptr<CanonicalQuery> canonicalize(BSONObj query,
+ BSONObj sort,
+ BSONObj proj,
+ BSONObj collation);
+std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ const char* collationStr);
+std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ long long skip,
+ long long limit,
+ const char* hintStr,
+ const char* minStr,
+ const char* maxStr);
+std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ long long skip,
+ long long limit,
+ const char* hintStr,
+ const char* minStr,
+ const char* maxStr,
+ bool explain);
+} // namespace mongo
diff --git a/src/mongo/db/query/classic_plan_cache.cpp b/src/mongo/db/query/classic_plan_cache.cpp
index 3acad6c9510..6a373a0c76d 100644
--- a/src/mongo/db/query/classic_plan_cache.cpp
+++ b/src/mongo/db/query/classic_plan_cache.cpp
@@ -37,17 +37,11 @@ ServerStatusMetricField<Counter64> totalPlanCacheSizeEstimateBytesMetric(
"query.planCacheTotalSizeEstimateBytes", &PlanCacheEntry::planCacheTotalSizeEstimateBytes);
} // namespace
-
std::ostream& operator<<(std::ostream& stream, const PlanCacheKey& key) {
- stream << key.stringData();
+ stream << key.toString();
return stream;
}
-StringBuilder& operator<<(StringBuilder& builder, const PlanCacheKey& key) {
- builder << key.stringData();
- return builder;
-}
-
void PlanCacheIndexTree::setIndexEntry(const IndexEntry& ie) {
entry = std::make_unique<IndexEntry>(ie);
}
diff --git a/src/mongo/db/query/classic_plan_cache.h b/src/mongo/db/query/classic_plan_cache.h
index a8fdcd2fed5..d6a05895b5d 100644
--- a/src/mongo/db/query/classic_plan_cache.h
+++ b/src/mongo/db/query/classic_plan_cache.h
@@ -34,9 +34,53 @@
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/index_entry.h"
#include "mongo/db/query/plan_cache.h"
+#include "mongo/db/query/plan_cache_key_info.h"
namespace mongo {
+/**
+ * Represents the "key" used in the PlanCache mapping from query shape -> query plan.
+ */
+class PlanCacheKey {
+public:
+ PlanCacheKey(PlanCacheKeyInfo&& info) : _info{std::move(info)} {}
+
+ bool operator==(const PlanCacheKey& other) const {
+ return other._info == _info;
+ }
+
+ bool operator!=(const PlanCacheKey& other) const {
+ return !(*this == other);
+ }
+
+ CanonicalQuery::QueryShapeString getQueryShape() const {
+ return _info.getQueryShape();
+ }
+
+ uint32_t queryHash() const {
+ return _info.queryHash();
+ }
+
+ uint32_t planCacheKeyHash() const {
+ return _info.planCacheKeyHash();
+ }
+
+ const std::string& toString() const {
+ return _info.toString();
+ }
+
+private:
+ PlanCacheKeyInfo _info;
+};
+
+std::ostream& operator<<(std::ostream& stream, const PlanCacheKey& key);
+
+class PlanCacheKeyHasher {
+public:
+ std::size_t operator()(const PlanCacheKey& k) const {
+ return k.planCacheKeyHash();
+ }
+};
class PlanCachePartitioner {
public:
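The classic PlanCacheKey above is now a thin wrapper around PlanCacheKeyInfo, and PlanCacheKeyHasher forwards to planCacheKeyHash(). A minimal, self-contained sketch of how such a key/hasher pair plugs into a hash container follows; the types here are simplified placeholders, not the real MongoDB classes.

    #include <cstddef>
    #include <cstdint>
    #include <string>
    #include <unordered_map>

    // Placeholder stand-ins for PlanCacheKeyInfo / PlanCacheKey from the diff above.
    struct KeyInfo {
        std::string key;  // query shape followed by the indexability string
        bool operator==(const KeyInfo& other) const { return other.key == key; }
        std::uint32_t planCacheKeyHash() const {
            return static_cast<std::uint32_t>(std::hash<std::string>{}(key));
        }
    };

    struct Key {
        KeyInfo info;
        bool operator==(const Key& other) const { return other.info == info; }
    };

    struct KeyHasher {
        std::size_t operator()(const Key& k) const { return k.info.planCacheKeyHash(); }
    };

    int main() {
        // The hasher is supplied explicitly, much as the plan cache's map would do.
        std::unordered_map<Key, int, KeyHasher> cache;
        cache[Key{KeyInfo{"{a: 1}<1>"}}] = 42;
        return cache.size() == 1 ? 0 : 1;
    }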
diff --git a/src/mongo/db/query/collection_query_info.cpp b/src/mongo/db/query/collection_query_info.cpp
index 84d4cdcaba6..a19bb3b4365 100644
--- a/src/mongo/db/query/collection_query_info.cpp
+++ b/src/mongo/db/query/collection_query_info.cpp
@@ -77,16 +77,37 @@ CoreIndexInfo indexInfoFromIndexCatalogEntry(const IndexCatalogEntry& ice) {
projExec};
}
-std::shared_ptr<PlanCache> makePlanCache() {
- return std::make_shared<PlanCache>(internalQueryCacheMaxEntriesPerCollection.load());
+} // namespace
+
+CollectionQueryInfo::PlanCacheState::PlanCacheState()
+ : classicPlanCache{static_cast<size_t>(internalQueryCacheMaxEntriesPerCollection.load())} {}
+
+CollectionQueryInfo::PlanCacheState::PlanCacheState(OperationContext* opCtx,
+ const CollectionPtr& collection)
+ : classicPlanCache{static_cast<size_t>(internalQueryCacheMaxEntriesPerCollection.load())},
+ planCacheInvalidator{collection, opCtx->getServiceContext()} {
+ std::vector<CoreIndexInfo> indexCores;
+
+ // TODO We shouldn't need to include unfinished indexes, but we must here because the index
+ // catalog may be in an inconsistent state. SERVER-18346.
+ const bool includeUnfinishedIndexes = true;
+ std::unique_ptr<IndexCatalog::IndexIterator> ii =
+ collection->getIndexCatalog()->getIndexIterator(opCtx, includeUnfinishedIndexes);
+ while (ii->more()) {
+ const IndexCatalogEntry* ice = ii->next();
+ indexCores.emplace_back(indexInfoFromIndexCatalogEntry(*ice));
+ }
+
+ planCacheIndexabilityState.updateDiscriminators(indexCores);
}
-} // namespace
+void CollectionQueryInfo::PlanCacheState::clearPlanCache() {
+ classicPlanCache.clear();
+ planCacheInvalidator.clearPlanCache();
+}
CollectionQueryInfo::CollectionQueryInfo()
- : _keysComputed(false),
- _planCacheIndexabilityState(std::make_shared<PlanCacheIndexabilityState>()),
- _planCache(makePlanCache()) {}
+ : _keysComputed{false}, _planCacheState{std::make_shared<PlanCacheState>()} {}
const UpdateIndexData& CollectionQueryInfo::getIndexKeys(OperationContext* opCtx) const {
invariant(_keysComputed);
@@ -186,13 +207,13 @@ void CollectionQueryInfo::clearQueryCache(OperationContext* opCtx, const Collect
// We are operating on a cloned collection, the use_count can only be 1 if we've created a new
// PlanCache instance for this collection clone. Checking the refcount can't race as we can't
// start readers on this collection while it is writable
- if (_planCache.use_count() == 1) {
+ if (_planCacheState.use_count() == 1) {
LOGV2_DEBUG(5014501,
1,
"Clearing plan cache - collection info cache cleared",
"namespace"_attr = coll->ns());
- _planCache->clear();
+ _planCacheState->clearPlanCache();
} else {
LOGV2_DEBUG(5014502,
1,
@@ -208,34 +229,12 @@ void CollectionQueryInfo::clearQueryCacheForSetMultikey(const CollectionPtr& col
1,
"Clearing plan cache for multikey - collection info cache cleared",
"namespace"_attr = coll->ns());
- _planCache->clear();
-}
-
-PlanCache* CollectionQueryInfo::getPlanCache() const {
- return _planCache.get();
-}
-
-const PlanCacheIndexabilityState& CollectionQueryInfo::getPlanCacheIndexabilityState() const {
- return *_planCacheIndexabilityState;
+ _planCacheState->clearPlanCache();
}
void CollectionQueryInfo::updatePlanCacheIndexEntries(OperationContext* opCtx,
const CollectionPtr& coll) {
- std::vector<CoreIndexInfo> indexCores;
-
- // TODO We shouldn't need to include unfinished indexes, but we must here because the index
- // catalog may be in an inconsistent state. SERVER-18346.
- const bool includeUnfinishedIndexes = true;
- std::unique_ptr<IndexCatalog::IndexIterator> ii =
- coll->getIndexCatalog()->getIndexIterator(opCtx, includeUnfinishedIndexes);
- while (ii->more()) {
- const IndexCatalogEntry* ice = ii->next();
- indexCores.emplace_back(indexInfoFromIndexCatalogEntry(*ice));
- }
-
- _planCache = makePlanCache();
- _planCacheIndexabilityState = std::make_shared<PlanCacheIndexabilityState>();
- _planCacheIndexabilityState->updateDiscriminators(indexCores);
+ _planCacheState = std::make_shared<PlanCacheState>(opCtx, coll);
}
void CollectionQueryInfo::init(OperationContext* opCtx, const CollectionPtr& coll) {
diff --git a/src/mongo/db/query/collection_query_info.h b/src/mongo/db/query/collection_query_info.h
index 017bbc96948..e5fbffe720f 100644
--- a/src/mongo/db/query/collection_query_info.h
+++ b/src/mongo/db/query/collection_query_info.h
@@ -32,6 +32,7 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/query/classic_plan_cache.h"
#include "mongo/db/query/plan_cache_indexability.h"
+#include "mongo/db/query/plan_cache_invalidator.h"
#include "mongo/db/query/plan_summary_stats.h"
#include "mongo/db/update_index_data.h"
@@ -59,14 +60,25 @@ public:
}
/**
- * Get the PlanCache for this collection.
+ * Gets the PlanCache for this collection.
*/
- PlanCache* getPlanCache() const;
+ PlanCache* getPlanCache() const {
+ return &_planCacheState->classicPlanCache;
+ }
+
+ /**
+ * Gets the number of the current collection version used for Plan Cache invalidation.
+ */
+ size_t getPlanCacheInvalidatorVersion() const {
+ return _planCacheState->planCacheInvalidator.versionNumber();
+ }
/**
- * Get the "indexability discriminators" used in the PlanCache for generating plan cache keys.
+ * Gets the "indexability discriminators" used in the PlanCache for generating plan cache keys.
*/
- const PlanCacheIndexabilityState& getPlanCacheIndexabilityState() const;
+ const PlanCacheIndexabilityState& getPlanCacheIndexabilityState() const {
+ return _planCacheState->planCacheIndexabilityState;
+ }
/* get set of index keys for this namespace. handy to quickly check if a given
field is indexed (Note it might be a secondary component of a compound index.)
@@ -105,6 +117,34 @@ public:
const PlanSummaryStats& summaryStats) const;
private:
+ /**
+ * Stores Classic and SBE PlanCache-related state. Classic Plan Cache is stored per collection
+ * and represented by a mongo::PlanCache object. SBE PlanCache is stored in a process-global
+ * object, therefore, it is represented here as a PlanCacheInvalidator which knows what
+ * collection version to invalidate.
+ */
+ struct PlanCacheState {
+ PlanCacheState();
+
+ PlanCacheState(OperationContext* opCtx, const CollectionPtr& collection);
+
+ /**
+ * Clears classic and SBE cache entries with the current collection version.
+ */
+ void clearPlanCache();
+
+ // Per collection version classic plan cache.
+ PlanCache classicPlanCache;
+
+ // SBE PlanCacheInvalidator which can invalidate cache entries associated with a particular
+ // version of a collection.
+ PlanCacheInvalidator planCacheInvalidator;
+
+ // Holds computed information about the collection's indexes. Used for generating plan
+ // cache keys.
+ PlanCacheIndexabilityState planCacheIndexabilityState;
+ };
+
void computeIndexKeys(OperationContext* opCtx, const CollectionPtr& coll);
void updatePlanCacheIndexEntries(OperationContext* opCtx, const CollectionPtr& coll);
@@ -112,12 +152,7 @@ private:
bool _keysComputed;
UpdateIndexData _indexedPaths;
- // Holds computed information about the collection's indexes. Used for generating plan
- // cache keys.
- std::shared_ptr<PlanCacheIndexabilityState> _planCacheIndexabilityState;
-
- // A cache for query plans. Shared across cloned Collection instances.
- std::shared_ptr<PlanCache> _planCache;
+ std::shared_ptr<PlanCacheState> _planCacheState;
};
} // namespace mongo
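The clearQueryCache() hunk above keys its behavior off shared_ptr::use_count(): cloned Collection instances share one PlanCacheState until updatePlanCacheIndexEntries() installs a fresh one, so a use_count of 1 means the clone owns a private state that is safe to clear. A small stand-alone illustration of that ownership check, with hypothetical names and assuming the copy-on-write cloning described in the comments:

    #include <cassert>
    #include <memory>

    struct PlanCacheState {
        int entries = 3;
        void clear() { entries = 0; }
    };

    struct QueryInfo {
        std::shared_ptr<PlanCacheState> state = std::make_shared<PlanCacheState>();

        void clearIfUniquelyOwned() {
            // Mirrors CollectionQueryInfo::clearQueryCache(): only clear when no other
            // collection clone shares this PlanCacheState.
            if (state.use_count() == 1) {
                state->clear();
            }
        }
    };

    int main() {
        QueryInfo original;
        QueryInfo clone = original;   // the clone shares the state (use_count == 2)
        clone.clearIfUniquelyOwned(); // no-op: still shared with 'original'
        assert(original.state->entries == 3);

        clone.state = std::make_shared<PlanCacheState>();  // e.g. after an index change
        clone.clearIfUniquelyOwned();                      // now uniquely owned, so it clears
        assert(clone.state->entries == 0);
        return 0;
    }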
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index f1d75379be6..31ded552406 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -94,13 +94,13 @@ void generatePlannerInfo(PlanExecutor* exec,
if (collection && exec->getCanonicalQuery()) {
const QuerySettings* querySettings =
QuerySettingsDecoration::get(collection->getSharedDecorations());
- const PlanCacheKey planCacheKey =
- plan_cache_key_factory::make(*exec->getCanonicalQuery(), collection);
- planCacheKeyHash = planCacheKey.planCacheKeyHash();
- queryHash = planCacheKey.queryHash();
+ const auto planCacheKeyInfo =
+ plan_cache_key_factory::make<PlanCacheKey>(*exec->getCanonicalQuery(), collection);
+ planCacheKeyHash = planCacheKeyInfo.planCacheKeyHash();
+ queryHash = planCacheKeyInfo.queryHash();
if (auto allowedIndicesFilter =
- querySettings->getAllowedIndicesFilter(planCacheKey.getQueryShape())) {
+ querySettings->getAllowedIndicesFilter(planCacheKeyInfo.getQueryShape())) {
// Found an index filter set on the query shape.
indexFilterSet = true;
}
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index c156ac6f963..4e10cb9eff6 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -566,7 +566,8 @@ public:
}
// Fill in some opDebug information.
- const PlanCacheKey planCacheKey = plan_cache_key_factory::make(*_cq, _collection);
+ const PlanCacheKey planCacheKey =
+ plan_cache_key_factory::make<PlanCacheKey>(*_cq, _collection);
CurOp::get(_opCtx)->debug().queryHash = planCacheKey.queryHash();
// Check that the query should be cached.
diff --git a/src/mongo/db/query/lru_key_value.h b/src/mongo/db/query/lru_key_value.h
index 489d0f43791..206dd042c55 100644
--- a/src/mongo/db/query/lru_key_value.h
+++ b/src/mongo/db/query/lru_key_value.h
@@ -193,6 +193,27 @@ public:
}
/**
+ * Remove all the entries for keys for which the predicate returns true. Returns the number of
+ * removed entries.
+ */
+ template <typename UnaryPredicate>
+ size_t removeIf(UnaryPredicate predicate) {
+ size_t removed = 0;
+ for (auto it = _kvList.begin(); it != _kvList.end();) {
+ if (predicate(it->first)) {
+ std::unique_ptr<V> entryToRemove{it->second};
+ _budgetTracker.onRemove(*entryToRemove);
+ _kvMap.erase(it->first);
+ it = _kvList.erase(it);
+ ++removed;
+ } else {
+ ++it;
+ }
+ }
+ return removed;
+ }
+
+ /**
* Deletes all entries in the kv-store.
*/
void clear() {
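removeIf() above walks the recency list, erasing matching entries from both the list and the lookup map in one pass. A minimal sketch of that erase-while-iterating pattern over a paired list/map store, with a simplified value type (the real store also frees the owned entry and updates its budget tracker):

    #include <cassert>
    #include <cstddef>
    #include <list>
    #include <unordered_map>
    #include <utility>

    int main() {
        using KV = std::pair<int, int>;
        std::list<KV> kvList;                                     // recency order
        std::unordered_map<int, std::list<KV>::iterator> kvMap;   // key -> list position

        for (int i = 0; i < 10; ++i) {
            kvList.push_front({i, i});
            kvMap[i] = kvList.begin();
        }

        // Remove every entry whose key satisfies the predicate, as LRUKeyValue::removeIf does.
        std::size_t removed = 0;
        for (auto it = kvList.begin(); it != kvList.end();) {
            if (it->first % 2 == 0) {
                kvMap.erase(it->first);
                it = kvList.erase(it);  // erase() returns the next valid iterator
                ++removed;
            } else {
                ++it;
            }
        }

        assert(removed == 5);
        assert(kvMap.size() == 5 && kvList.size() == 5);
        return 0;
    }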
diff --git a/src/mongo/db/query/lru_key_value_test.cpp b/src/mongo/db/query/lru_key_value_test.cpp
index a82dda3f1d4..99d8037a35b 100644
--- a/src/mongo/db/query/lru_key_value_test.cpp
+++ b/src/mongo/db/query/lru_key_value_test.cpp
@@ -46,8 +46,10 @@ namespace {
//
struct TrivialBudgetEstimator {
+ static constexpr size_t kSize = 1;
+
size_t operator()(int) {
- return 1;
+ return kSize;
}
};
@@ -269,4 +271,31 @@ TEST(LRUKeyValueTest, IterationTest) {
ASSERT(i == cache.end());
}
+TEST(LRUKeyValueTest, RemoveIfTest) {
+ TestKeyValue cache{10};
+ for (int i = 0; i < 10; ++i) {
+ cache.add(i, new int(i));
+ }
+
+ size_t sizeBefore = cache.size();
+
+ // Remove all even keys.
+ size_t nRemoved = cache.removeIf([](int key) { return key % 2 == 0; });
+ ASSERT_EQ(5, nRemoved);
+
+ // Assert that all odd keys are in store.
+ for (int i = 1; i < 10; i += 2) {
+ assertInKVStore(cache, i, i);
+ }
+
+ // Assert that all even keys are not in store.
+ for (int i = 0; i < 10; i += 2) {
+ assertNotInKVStore(cache, i);
+ }
+
+ size_t sizeAfter = cache.size();
+
+ ASSERT_EQ(sizeAfter + nRemoved * TrivialBudgetEstimator::kSize, sizeBefore);
+}
+
} // namespace
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index 72f2b6c995f..7b0369a6fd9 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -33,7 +33,6 @@
#include "mongo/db/query/lru_key_value.h"
#include "mongo/db/query/plan_cache_callbacks.h"
#include "mongo/db/query/plan_cache_debug_info.h"
-#include "mongo/db/query/plan_cache_key.h"
#include "mongo/platform/mutex.h"
#include "mongo/util/container_size_helper.h"
@@ -447,6 +446,21 @@ public:
_partitionedCache->erase(key);
}
+
+ /**
+ * Remove all the entries for keys for which the predicate returns true. Return the number of
+ * removed entries.
+ */
+ template <typename UnaryPredicate>
+ size_t removeIf(UnaryPredicate predicate) {
+ size_t nRemoved = 0;
+ for (size_t partitionId = 0; partitionId < _numPartitions; ++partitionId) {
+ auto lockedPartition = _partitionedCache->lockOnePartitionById(partitionId);
+ nRemoved += lockedPartition->removeIf(predicate);
+ }
+ return nRemoved;
+ }
+
/**
* Remove *all* cached plans. Does not clear index information.
*/
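The partitioned removeIf() above is what makes targeted invalidation possible: a caller can evict only the entries that belong to a particular collection version. The invalidation callback registered for the process-global SBE cache (see sbe_plan_cache.cpp in the file list) can pass a predicate of roughly this shape. The key accessors below are hypothetical placeholders, since the sbe::PlanCacheKey definition is not shown in this excerpt, and a plain vector stands in for the cache:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Hypothetical stand-in for an SBE-style plan cache key that remembers which
    // collection (UUID) and collection version it was built against.
    struct FakeSbeKey {
        int collectionUuid;  // placeholder for a UUID
        std::size_t collectionVersion;
    };

    int main() {
        std::vector<FakeSbeKey> keys{{1, 0}, {1, 1}, {2, 0}, {1, 1}};

        const int droppedUuid = 1;
        const std::size_t droppedVersion = 1;

        // Predicate of the kind removeIf() expects: true for entries to evict.
        auto matchesDroppedCollection = [&](const FakeSbeKey& key) {
            return key.collectionUuid == droppedUuid && key.collectionVersion == droppedVersion;
        };

        keys.erase(std::remove_if(keys.begin(), keys.end(), matchesDroppedCollection), keys.end());
        assert(keys.size() == 2);
        return 0;
    }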
diff --git a/src/mongo/db/query/plan_cache_invalidator.cpp b/src/mongo/db/query/plan_cache_invalidator.cpp
new file mode 100644
index 00000000000..7e673379e99
--- /dev/null
+++ b/src/mongo/db/query/plan_cache_invalidator.cpp
@@ -0,0 +1,78 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery
+
+#include "mongo/db/query/plan_cache_invalidator.h"
+
+#include "mongo/logv2/log.h"
+
+namespace mongo {
+namespace {
+
+const auto getCollectionVersionNumber =
+ SharedCollectionDecorations::declareDecoration<AtomicWord<size_t>>();
+
+const auto getCallback =
+ ServiceContext::declareDecoration<std::unique_ptr<PlanCacheInvalidatorCallback>>();
+} // namespace
+
+void PlanCacheInvalidatorCallback::set(ServiceContext* serviceContext,
+ std::unique_ptr<PlanCacheInvalidatorCallback> callback) {
+ getCallback(serviceContext) = std::move(callback);
+}
+
+PlanCacheInvalidator::PlanCacheInvalidator(const CollectionPtr& collection,
+ ServiceContext* serviceContext)
+ : _version{getCollectionVersionNumber(collection->getSharedDecorations()).fetchAndAdd(1u)},
+ _uuid{collection->uuid()},
+ _callback{getCallback(serviceContext).get()} {}
+
+PlanCacheInvalidator::~PlanCacheInvalidator() {
+ try {
+ clearPlanCache();
+ } catch (const DBException& ex) {
+ LOGV2_WARNING(6006610, "DBException occurred on clearing plan cache", "exception"_attr = ex);
+ } catch (const std::exception& ex) {
+ LOGV2_WARNING(
+ 6006611, "Exception occurred on clearing plan cache", "message"_attr = ex.what());
+ } catch (...) {
+ LOGV2_WARNING(6006612, "Unknown exception occurred on clearing plan cache");
+ }
+}
+
+void PlanCacheInvalidator::clearPlanCache() const {
+ // Some unit tests cannot properly initialize CollectionQueryInfo but rely on it partially
+ // initialized to make PlanCacheKeys.
+ if (_callback && _uuid) {
+ _callback->invalidateCacheEntriesWith(*_uuid, _version);
+ }
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/query/plan_cache_invalidator.h b/src/mongo/db/query/plan_cache_invalidator.h
new file mode 100644
index 00000000000..62beae0e494
--- /dev/null
+++ b/src/mongo/db/query/plan_cache_invalidator.h
@@ -0,0 +1,87 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/catalog/collection.h"
+
+namespace mongo {
+
+/**
+ * Encapsulates a callback function called on the SBE Plan Cache invalidation.
+ */
+class PlanCacheInvalidatorCallback {
+public:
+ /**
+ * Stores the callback as a service context decoration.
+ */
+ static void set(ServiceContext* serviceContext,
+ std::unique_ptr<PlanCacheInvalidatorCallback> callback);
+
+ virtual ~PlanCacheInvalidatorCallback() = default;
+
+ virtual void invalidateCacheEntriesWith(UUID collectionUuid, size_t oldVersion) = 0;
+};
+
+/**
+ * Controls the lifetime of PlanCache entries associated with a particular version of a
+ * particular collection. A new copy of the collection is created each time the collection
+ * changes (copy-on-write policy). The collection version is incremented only after changes that
+ * require invalidating the plan cache (for example, creating an index or deleting the collection).
+ * If the catalog change does not require invalidation of plan cache entries (for example, changing
+ * the document validator), then the collection version remains unchanged.
+ */
+class PlanCacheInvalidator {
+public:
+ PlanCacheInvalidator() = default;
+ PlanCacheInvalidator(const CollectionPtr& collection, ServiceContext* serviceContext);
+
+ ~PlanCacheInvalidator();
+
+ /**
+ * Forces SBE PlanCache invalidation for the collection UUID and version stored in this
+ * invalidator.
+ */
+ void clearPlanCache() const;
+
+ size_t versionNumber() const {
+ return _version;
+ }
+
+private:
+ // "Version" of the collection, increased any time we need to invalidate PlanCache.
+ const size_t _version{};
+
+ // The collection's UUID.
+ const boost::optional<UUID> _uuid{};
+
+ // A callback to be called when we need to clean PlanCache.
+ PlanCacheInvalidatorCallback* const _callback{};
+};
+} // namespace mongo
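plan_cache_invalidator.cpp (above) shows the two halves of the mechanism: the constructor claims a version by bumping a per-collection counter stored as a SharedCollectionDecorations decoration, and the destructor calls clearPlanCache(), which forwards the UUID and version to whatever PlanCacheInvalidatorCallback was registered on the ServiceContext. A stripped-down sketch of that RAII flow, using standalone types rather than the real ServiceContext/decoration machinery:

    #include <cassert>
    #include <cstddef>
    #include <functional>

    // Stand-in for the registered PlanCacheInvalidatorCallback.
    std::function<void(int /*uuid*/, std::size_t /*version*/)> gInvalidateCallback;

    class Invalidator {
    public:
        Invalidator(int uuid, std::size_t& collectionVersionCounter)
            : _uuid(uuid), _version(collectionVersionCounter++) {}  // constructor claims a version

        ~Invalidator() {
            // Destroying the state for this collection version evicts its SBE cache entries.
            if (gInvalidateCallback) {
                gInvalidateCallback(_uuid, _version);
            }
        }

        std::size_t versionNumber() const { return _version; }

    private:
        int _uuid;
        std::size_t _version;
    };

    int main() {
        std::size_t versionCounter = 0;
        int lastInvalidatedVersion = -1;
        gInvalidateCallback = [&](int, std::size_t version) {
            lastInvalidatedVersion = static_cast<int>(version);
        };

        {
            Invalidator invalidator(/*uuid=*/42, versionCounter);
            assert(invalidator.versionNumber() == 0);
        }  // destructor fires here, invalidating entries tagged with version 0

        assert(lastInvalidatedVersion == 0);
        return 0;
    }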
diff --git a/src/mongo/db/query/plan_cache_key_factory.cpp b/src/mongo/db/query/plan_cache_key_factory.cpp
index 15757a0521c..d8db1b80010 100644
--- a/src/mongo/db/query/plan_cache_key_factory.cpp
+++ b/src/mongo/db/query/plan_cache_key_factory.cpp
@@ -29,6 +29,7 @@
#include "mongo/db/query/plan_cache_key_factory.h"
+#include "mongo/db/query/collection_query_info.h"
#include "mongo/db/query/planner_ixselect.h"
namespace mongo {
@@ -73,10 +74,9 @@ void encodeIndexability(const MatchExpression* tree,
encodeIndexability(tree->getChild(i), indexabilityState, keyBuilder);
}
}
-} // namespace plan_cache_detail
-namespace plan_cache_key_factory {
-PlanCacheKey make(const CanonicalQuery& query, const CollectionPtr& collection) {
+PlanCacheKeyInfo makePlanCacheKeyInfo(const CanonicalQuery& query,
+ const CollectionPtr& collection) {
const auto shapeString = query.encodeKey();
StringBuilder indexabilityKeyBuilder;
@@ -85,7 +85,21 @@ PlanCacheKey make(const CanonicalQuery& query, const CollectionPtr& collection)
CollectionQueryInfo::get(collection).getPlanCacheIndexabilityState(),
&indexabilityKeyBuilder);
- return PlanCacheKey(shapeString, indexabilityKeyBuilder.str());
+ return PlanCacheKeyInfo(shapeString, indexabilityKeyBuilder.str());
+}
+
+PlanCacheKey make(const CanonicalQuery& query,
+ const CollectionPtr& collection,
+ PlanCacheKeyTag<PlanCacheKey>) {
+ return {makePlanCacheKeyInfo(query, collection)};
}
-} // namespace plan_cache_key_factory
+
+sbe::PlanCacheKey make(const CanonicalQuery& query,
+ const CollectionPtr& collection,
+ PlanCacheKeyTag<sbe::PlanCacheKey>) {
+ auto collectionVersion = CollectionQueryInfo::get(collection).getPlanCacheInvalidatorVersion();
+
+ return {makePlanCacheKeyInfo(query, collection), collection->uuid(), collectionVersion};
+}
+} // namespace plan_cache_detail
} // namespace mongo
diff --git a/src/mongo/db/query/plan_cache_key_factory.h b/src/mongo/db/query/plan_cache_key_factory.h
index 1380c542cee..8c81b5f1301 100644
--- a/src/mongo/db/query/plan_cache_key_factory.h
+++ b/src/mongo/db/query/plan_cache_key_factory.h
@@ -44,12 +44,35 @@ namespace plan_cache_detail {
void encodeIndexability(const MatchExpression* tree,
const PlanCacheIndexabilityState& indexabilityState,
StringBuilder* keyBuilder);
+
+/**
+ * A dispatch tag for the factory functions below.
+ */
+template <typename KeyType>
+struct PlanCacheKeyTag {};
+
+/**
+ * Creates a key for the classic plan cache from the canonical query and collection instances.
+ */
+PlanCacheKey make(const CanonicalQuery& query,
+ const CollectionPtr& collection,
+ PlanCacheKeyTag<PlanCacheKey> tag);
+
+/**
+ * Creates a key for the SBE plan cache from the canonical query and collection instances.
+ */
+sbe::PlanCacheKey make(const CanonicalQuery& query,
+ const CollectionPtr& collection,
+ PlanCacheKeyTag<sbe::PlanCacheKey> tag);
} // namespace plan_cache_detail
namespace plan_cache_key_factory {
/**
* A factory helper to make a plan cache key of the given type.
*/
-PlanCacheKey make(const CanonicalQuery& query, const CollectionPtr& collection);
+template <typename Key>
+Key make(const CanonicalQuery& query, const CollectionPtr& collection) {
+ return plan_cache_detail::make(query, collection, plan_cache_detail::PlanCacheKeyTag<Key>{});
+}
} // namespace plan_cache_key_factory
} // namespace mongo
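With the templated helper above, call sites pick the key type explicitly, e.g. plan_cache_key_factory::make<PlanCacheKey>(*cq, collection) for the classic cache or make<sbe::PlanCacheKey>(*cq, collection) for the SBE cache, and overload resolution on the empty PlanCacheKeyTag selects the matching plan_cache_detail overload. A compilable sketch of the same tag-dispatch idiom with placeholder key types (not the real MongoDB classes):

    #include <cstddef>
    #include <string>

    namespace detail {
    template <typename KeyType>
    struct Tag {};  // empty dispatch tag, like plan_cache_detail::PlanCacheKeyTag

    struct ClassicKey { std::string shape; };
    struct SbeKey { std::string shape; std::size_t collectionVersion; };

    ClassicKey make(const std::string& shape, Tag<ClassicKey>) {
        return {shape};
    }

    SbeKey make(const std::string& shape, Tag<SbeKey>) {
        // The real SBE overload also folds in the collection UUID and version.
        return {shape, /*collectionVersion=*/0};
    }
    }  // namespace detail

    // Single public entry point, mirroring plan_cache_key_factory::make<Key>().
    template <typename Key>
    Key make(const std::string& shape) {
        return detail::make(shape, detail::Tag<Key>{});
    }

    int main() {
        auto classic = make<detail::ClassicKey>("{a: 1}");
        auto sbe = make<detail::SbeKey>("{a: 1}");
        return classic.shape == sbe.shape ? 0 : 1;
    }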
diff --git a/src/mongo/db/query/plan_cache_key.h b/src/mongo/db/query/plan_cache_key_info.h
index 1aae00b6f55..ff97ef6a57c 100644
--- a/src/mongo/db/query/plan_cache_key.h
+++ b/src/mongo/db/query/plan_cache_key_info.h
@@ -33,54 +33,54 @@
namespace mongo {
/**
- * Represents the "key" used in the PlanCache mapping from query shape -> query plan.
+ * Encapsulates PlanCacheKey-related information used to look up entries in the PlanCache.
*/
-class PlanCacheKey {
+class PlanCacheKeyInfo {
public:
- PlanCacheKey(CanonicalQuery::QueryShapeString shapeString, std::string indexabilityString) {
- _lengthOfQueryShape = shapeString.size();
+ PlanCacheKeyInfo(CanonicalQuery::QueryShapeString shapeString, std::string indexabilityString)
+ : _lengthOfQueryShape{shapeString.size()} {
_key = std::move(shapeString);
_key += indexabilityString;
- }
+ };
CanonicalQuery::QueryShapeString getQueryShape() const {
return std::string(_key, 0, _lengthOfQueryShape);
}
- StringData getQueryShapeStringData() const {
- return StringData(_key.c_str(), _lengthOfQueryShape);
+ bool operator==(const PlanCacheKeyInfo& other) const {
+ return other._lengthOfQueryShape == _lengthOfQueryShape && other._key == _key;
}
- /**
- * Return the 'indexability discriminators', that is, the plan cache key component after the
- * stable key, but before the boolean indicating whether we are using the classic engine.
- */
- StringData getIndexabilityDiscriminators() const {
- return StringData(_key.c_str() + _lengthOfQueryShape, _key.size() - _lengthOfQueryShape);
+ bool operator!=(const PlanCacheKeyInfo& other) const {
+ return !(*this == other);
}
- StringData stringData() const {
- return _key;
+ uint32_t queryHash() const {
+ return canonical_query_encoder::computeHash(getQueryShapeStringData());
}
- const std::string& toString() const {
- return _key;
+ uint32_t planCacheKeyHash() const {
+ return canonical_query_encoder::computeHash(stringData());
}
- bool operator==(const PlanCacheKey& other) const {
- return other._key == _key && other._lengthOfQueryShape == _lengthOfQueryShape;
+ const std::string& toString() const {
+ return _key;
}
- bool operator!=(const PlanCacheKey& other) const {
- return !(*this == other);
+ /**
+ * Return the 'indexability discriminators', that is, the plan cache key component after the
+ * stable key, but before the boolean indicating whether we are using the classic engine.
+ */
+ StringData getIndexabilityDiscriminators() const {
+ return StringData(_key.c_str() + _lengthOfQueryShape, _key.size() - _lengthOfQueryShape);
}
- uint32_t queryHash() const {
- return canonical_query_encoder::computeHash(getQueryShapeStringData());
+ StringData getQueryShapeStringData() const {
+ return StringData(_key.c_str(), _lengthOfQueryShape);
}
- uint32_t planCacheKeyHash() const {
- return canonical_query_encoder::computeHash(stringData());
+ StringData stringData() const {
+ return _key;
}
private:
@@ -89,16 +89,6 @@ private:
std::string _key;
// How long the "query shape" is.
- size_t _lengthOfQueryShape;
-};
-
-std::ostream& operator<<(std::ostream& stream, const PlanCacheKey& key);
-StringBuilder& operator<<(StringBuilder& builder, const PlanCacheKey& key);
-
-class PlanCacheKeyHasher {
-public:
- std::size_t operator()(const PlanCacheKey& k) const {
- return std::hash<std::string>{}(k.toString());
- }
+ const size_t _lengthOfQueryShape;
};
} // namespace mongo
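PlanCacheKeyInfo stores a single string (the query shape followed by the indexability discriminators) plus the shape's length, so queryHash() covers only the shape prefix while planCacheKeyHash() covers the whole key. A tiny illustration of that layout, using std::hash in place of canonical_query_encoder::computeHash:

    #include <cassert>
    #include <cstddef>
    #include <functional>
    #include <string>

    struct KeyInfo {
        KeyInfo(std::string shape, const std::string& indexability)
            : lengthOfQueryShape(shape.size()), key(std::move(shape) + indexability) {}

        std::string queryShape() const { return key.substr(0, lengthOfQueryShape); }

        std::size_t queryHash() const {         // hash of the shape only
            return std::hash<std::string>{}(queryShape());
        }
        std::size_t planCacheKeyHash() const {  // hash of shape + discriminators
            return std::hash<std::string>{}(key);
        }

        std::size_t lengthOfQueryShape;
        std::string key;
    };

    int main() {
        // Same shape, different indexability discriminators (e.g. "<0>" vs "<1>").
        KeyInfo a("{a: 1}", "<0>");
        KeyInfo b("{a: 1}", "<1>");
        assert(a.queryHash() == b.queryHash());  // shape prefixes are identical
        assert(a.key != b.key);                  // full keys differ, so planCacheKeyHash() differs
        return 0;
    }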
diff --git a/src/mongo/db/query/plan_cache_key_info_test.cpp b/src/mongo/db/query/plan_cache_key_info_test.cpp
new file mode 100644
index 00000000000..13d37bd7b12
--- /dev/null
+++ b/src/mongo/db/query/plan_cache_key_info_test.cpp
@@ -0,0 +1,642 @@
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/index/wildcard_key_generator.h"
+#include "mongo/db/pipeline/expression_context_for_test.h"
+#include "mongo/db/query/canonical_query_test_util.h"
+#include "mongo/db/query/collation/collator_interface_mock.h"
+#include "mongo/db/query/plan_cache_key_factory.h"
+#include "mongo/db/query/plan_cache_key_info.h"
+#include "mongo/idl/server_parameter_test_util.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+
+using std::string;
+using std::unique_ptr;
+using std::vector;
+
+using unittest::assertGet;
+
+std::ostream& operator<<(std::ostream& stream, const PlanCacheKeyInfo& key) {
+ stream << key.toString();
+ return stream;
+}
+
+namespace {
+PlanCacheKeyInfo makeKey(const CanonicalQuery& cq,
+ const std::vector<CoreIndexInfo>& indexCores = {}) {
+ PlanCacheIndexabilityState indexabilityState;
+ indexabilityState.updateDiscriminators(indexCores);
+
+ StringBuilder indexabilityKeyBuilder;
+ plan_cache_detail::encodeIndexability(cq.root(), indexabilityState, &indexabilityKeyBuilder);
+
+ return {cq.encodeKey(), indexabilityKeyBuilder.str()};
+}
+
+/**
+ * Utility function to create MatchExpression
+ */
+unique_ptr<MatchExpression> parseMatchExpression(const BSONObj& obj) {
+ boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
+ StatusWithMatchExpression status =
+ MatchExpressionParser::parse(obj,
+ std::move(expCtx),
+ ExtensionsCallbackNoop(),
+ MatchExpressionParser::kAllowAllSpecialFeatures);
+ if (!status.isOK()) {
+ str::stream ss;
+ ss << "failed to parse query: " << obj.toString()
+ << ". Reason: " << status.getStatus().toString();
+ FAIL(ss);
+ }
+
+ return std::move(status.getValue());
+}
+
+
+// A version of the above for CoreIndexInfo, used for plan cache update tests.
+std::pair<CoreIndexInfo, std::unique_ptr<WildcardProjection>> makeWildcardUpdate(
+ BSONObj keyPattern) {
+ auto wcProj = std::make_unique<WildcardProjection>(
+ WildcardKeyGenerator::createProjectionExecutor(keyPattern, {}));
+ return {CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ false, // sparse
+ IndexEntry::Identifier{"indexName"}, // name
+ nullptr, // filterExpr
+ nullptr, // collation
+ wcProj.get()), // wildcard
+ std::move(wcProj)};
+}
+
+/**
+ * Check that the stable keys of 'a' and 'b' are not equal because of the last character.
+ */
+void assertPlanCacheKeysUnequalDueToForceClassicEngineValue(const PlanCacheKeyInfo& a,
+ const PlanCacheKeyInfo& b) {
+ auto aUnstablePart = a.getIndexabilityDiscriminators();
+ auto bUnstablePart = b.getIndexabilityDiscriminators();
+ auto aStablePart = a.getQueryShape();
+ auto bStablePart = b.getQueryShape();
+
+ ASSERT_EQ(aUnstablePart, bUnstablePart);
+ // The last character of the stable part encodes the engine that uses this PlanCacheKey. So the
+ // stable parts except for the last character should be identical.
+ ASSERT_EQ(aStablePart.substr(0, aStablePart.size() - 1),
+ bStablePart.substr(0, bStablePart.size() - 1));
+
+ // Should have at least 1 byte to represent whether we must use the classic engine.
+ ASSERT_GTE(aStablePart.size(), 1);
+
+ // The indexability discriminators should match.
+ ASSERT_EQ(a.getIndexabilityDiscriminators(), b.getIndexabilityDiscriminators());
+
+ // The stable parts should not match because of the last character.
+ ASSERT_NE(aStablePart, bStablePart);
+ ASSERT_NE(aStablePart.back(), bStablePart.back());
+}
+
+/**
+ * Check that the stable keys of 'a' and 'b' are equal, but the index discriminators are not.
+ */
+void assertPlanCacheKeysUnequalDueToDiscriminators(const PlanCacheKeyInfo& a,
+ const PlanCacheKeyInfo& b) {
+ ASSERT_EQ(a.getQueryShapeStringData(), b.getQueryShapeStringData());
+ ASSERT_EQ(a.getIndexabilityDiscriminators().size(), b.getIndexabilityDiscriminators().size());
+ ASSERT_NE(a.getIndexabilityDiscriminators(), b.getIndexabilityDiscriminators());
+
+ // Should always have the begin and end delimiters.
+ ASSERT_GTE(a.getIndexabilityDiscriminators().size(), 2u);
+}
+
+} // namespace
+
+// When a sparse index is present, computeKey() should generate different keys depending on
+// whether or not the predicates in the given query can use the index.
+TEST(PlanCacheKeyInfoTest, ComputeKeySparseIndex) {
+ const auto keyPattern = BSON("a" << 1);
+ const std::vector<CoreIndexInfo> indexCores = {
+ CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ true, // sparse
+ IndexEntry::Identifier{""})}; // name
+
+ unique_ptr<CanonicalQuery> cqEqNumber(canonicalize("{a: 0}}"));
+ unique_ptr<CanonicalQuery> cqEqString(canonicalize("{a: 'x'}}"));
+ unique_ptr<CanonicalQuery> cqEqNull(canonicalize("{a: null}}"));
+
+ // 'cqEqNumber' and 'cqEqString' get the same key, since both are compatible with this
+ // index.
+ const auto eqNumberKey = makeKey(*cqEqNumber, indexCores);
+ const auto eqStringKey = makeKey(*cqEqString, indexCores);
+ ASSERT_EQ(eqNumberKey, eqStringKey);
+
+ // 'cqEqNull' gets a different key, since it is not compatible with this index.
+ const auto eqNullKey = makeKey(*cqEqNull, indexCores);
+ ASSERT_NOT_EQUALS(eqNullKey, eqNumberKey);
+
+ assertPlanCacheKeysUnequalDueToDiscriminators(eqNullKey, eqNumberKey);
+ assertPlanCacheKeysUnequalDueToDiscriminators(eqNullKey, eqStringKey);
+}
+
+// When a partial index is present, computeKey() should generate different keys depending on
+// whether or not the predicates in the given query "match" the predicates in the partial index
+// filter.
+TEST(PlanCacheKeyInfoTest, ComputeKeyPartialIndex) {
+ BSONObj filterObj = BSON("f" << BSON("$gt" << 0));
+ unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
+
+ const auto keyPattern = BSON("a" << 1);
+ const std::vector<CoreIndexInfo> indexCores = {
+ CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ false, // sparse
+ IndexEntry::Identifier{""}, // name
+ filterExpr.get())}; // filterExpr
+
+ unique_ptr<CanonicalQuery> cqGtNegativeFive(canonicalize("{f: {$gt: -5}}"));
+ unique_ptr<CanonicalQuery> cqGtZero(canonicalize("{f: {$gt: 0}}"));
+ unique_ptr<CanonicalQuery> cqGtFive(canonicalize("{f: {$gt: 5}}"));
+
+ // 'cqGtZero' and 'cqGtFive' get the same key, since both are compatible with this index.
+ ASSERT_EQ(makeKey(*cqGtZero, indexCores), makeKey(*cqGtFive, indexCores));
+
+ // 'cqGtNegativeFive' gets a different key, since it is not compatible with this index.
+ assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*cqGtNegativeFive, indexCores),
+ makeKey(*cqGtZero, indexCores));
+}
+
+// Query shapes should get the same plan cache key if they have the same collation indexability.
+TEST(PlanCacheKeyInfoTest, ComputeKeyCollationIndex) {
+ CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+
+ const auto keyPattern = BSON("a" << 1);
+ const std::vector<CoreIndexInfo> indexCores = {
+ CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ false, // sparse
+ IndexEntry::Identifier{""}, // name
+ nullptr, // filterExpr
+ &collator)}; // collation
+
+ unique_ptr<CanonicalQuery> containsString(canonicalize("{a: 'abc'}"));
+ unique_ptr<CanonicalQuery> containsObject(canonicalize("{a: {b: 'abc'}}"));
+ unique_ptr<CanonicalQuery> containsArray(canonicalize("{a: ['abc', 'xyz']}"));
+ unique_ptr<CanonicalQuery> noStrings(canonicalize("{a: 5}"));
+ unique_ptr<CanonicalQuery> containsStringHasCollation(
+ canonicalize("{a: 'abc'}", "{}", "{}", "{locale: 'mock_reverse_string'}"));
+
+ // 'containsString', 'containsObject', and 'containsArray' have the same key, since none are
+ // compatible with the index.
+ ASSERT_EQ(makeKey(*containsString, indexCores), makeKey(*containsObject, indexCores));
+ ASSERT_EQ(makeKey(*containsString, indexCores), makeKey(*containsArray, indexCores));
+
+ // 'noStrings' gets a different key since it is compatible with the index.
+ assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*containsString, indexCores),
+ makeKey(*noStrings, indexCores));
+ ASSERT_EQ(makeKey(*containsString, indexCores).getIndexabilityDiscriminators(), "<0>");
+ ASSERT_EQ(makeKey(*noStrings, indexCores).getIndexabilityDiscriminators(), "<1>");
+
+ // 'noStrings' and 'containsStringHasCollation' get different keys, since the collation
+ // specified in the query is considered part of its shape. However, they have the same index
+ // compatibility, so the unstable part of their PlanCacheKeys should be the same.
+ auto noStringKey = makeKey(*noStrings, indexCores);
+ auto withStringAndCollationKey = makeKey(*containsStringHasCollation, indexCores);
+ ASSERT_NE(noStringKey, withStringAndCollationKey);
+ ASSERT_EQ(noStringKey.getIndexabilityDiscriminators(),
+ withStringAndCollationKey.getIndexabilityDiscriminators());
+ ASSERT_NE(noStringKey.getQueryShapeStringData(),
+ withStringAndCollationKey.getQueryShapeStringData());
+
+ unique_ptr<CanonicalQuery> inContainsString(canonicalize("{a: {$in: [1, 'abc', 2]}}"));
+ unique_ptr<CanonicalQuery> inContainsObject(canonicalize("{a: {$in: [1, {b: 'abc'}, 2]}}"));
+ unique_ptr<CanonicalQuery> inContainsArray(canonicalize("{a: {$in: [1, ['abc', 'xyz'], 2]}}"));
+ unique_ptr<CanonicalQuery> inNoStrings(canonicalize("{a: {$in: [1, 2]}}"));
+ unique_ptr<CanonicalQuery> inContainsStringHasCollation(
+ canonicalize("{a: {$in: [1, 'abc', 2]}}", "{}", "{}", "{locale: 'mock_reverse_string'}"));
+
+ // 'inContainsString', 'inContainsObject', and 'inContainsArray' have the same key, since none
+ // are compatible with the index.
+ ASSERT_EQ(makeKey(*inContainsString, indexCores), makeKey(*inContainsObject, indexCores));
+ ASSERT_EQ(makeKey(*inContainsString, indexCores), makeKey(*inContainsArray, indexCores));
+
+ // 'inNoStrings' gets a different key since it is compatible with the index.
+ assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*inContainsString, indexCores),
+ makeKey(*inNoStrings, indexCores));
+ ASSERT_EQ(makeKey(*inContainsString, indexCores).getIndexabilityDiscriminators(), "<0>");
+ ASSERT_EQ(makeKey(*inNoStrings, indexCores).getIndexabilityDiscriminators(), "<1>");
+
+ // 'inNoStrings' and 'inContainsStringHasCollation' get different keys, since the collation
+ // specified in the query is part of its shape. However, both are compatible with the index,
+ // so their indexability discriminators are the same.
+ ASSERT_NE(makeKey(*inNoStrings, indexCores),
+ makeKey(*inContainsStringHasCollation, indexCores));
+ ASSERT_EQ(makeKey(*inNoStrings, indexCores).getIndexabilityDiscriminators(),
+ makeKey(*inContainsStringHasCollation, indexCores).getIndexabilityDiscriminators());
+}
+
+TEST(PlanCacheKeyInfoTest, ComputeKeyWildcardIndex) {
+ auto entryProjUpdatePair = makeWildcardUpdate(BSON("a.$**" << 1));
+
+ const std::vector<CoreIndexInfo> indexCores = {entryProjUpdatePair.first};
+
+ // Used to check that two queries have the same shape when no indexes are present.
+ PlanCache planCacheWithNoIndexes(5000);
+
+ // Compatible with index.
+ unique_ptr<CanonicalQuery> usesPathWithScalar(canonicalize("{a: 'abcdef'}"));
+ unique_ptr<CanonicalQuery> usesPathWithEmptyArray(canonicalize("{a: []}"));
+
+ // Not compatible with index.
+ unique_ptr<CanonicalQuery> usesPathWithObject(canonicalize("{a: {b: 'abc'}}"));
+ unique_ptr<CanonicalQuery> usesPathWithArray(canonicalize("{a: [1, 2]}"));
+ unique_ptr<CanonicalQuery> usesPathWithArrayContainingObject(canonicalize("{a: [1, {b: 1}]}"));
+ unique_ptr<CanonicalQuery> usesPathWithEmptyObject(canonicalize("{a: {}}"));
+ unique_ptr<CanonicalQuery> doesNotUsePath(canonicalize("{b: 1234}"));
+
+ // Check that the queries which are compatible with the index have the same key.
+ ASSERT_EQ(makeKey(*usesPathWithScalar, indexCores),
+ makeKey(*usesPathWithEmptyArray, indexCores));
+
+ // Check that the queries which have the same path as the index, but aren't supported, have
+ // different keys.
+ ASSERT_EQ(makeKey(*usesPathWithScalar), makeKey(*usesPathWithObject));
+ assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*usesPathWithScalar, indexCores),
+ makeKey(*usesPathWithObject, indexCores));
+ ASSERT_EQ(makeKey(*usesPathWithScalar, indexCores).getIndexabilityDiscriminators(), "<1>");
+ ASSERT_EQ(makeKey(*usesPathWithObject, indexCores).getIndexabilityDiscriminators(), "<0>");
+
+ ASSERT_EQ(makeKey(*usesPathWithObject, indexCores), makeKey(*usesPathWithArray, indexCores));
+ ASSERT_EQ(makeKey(*usesPathWithObject, indexCores),
+ makeKey(*usesPathWithArrayContainingObject, indexCores));
+
+ // The query on 'b' should have a completely different plan cache key (both with and without a
+ // wildcard index).
+ ASSERT_NE(makeKey(*usesPathWithScalar), makeKey(*doesNotUsePath));
+ ASSERT_NE(makeKey(*usesPathWithScalar, indexCores), makeKey(*doesNotUsePath, indexCores));
+ ASSERT_NE(makeKey(*usesPathWithObject), makeKey(*doesNotUsePath));
+ ASSERT_NE(makeKey(*usesPathWithObject, indexCores), makeKey(*doesNotUsePath, indexCores));
+
+ // More complex queries with similar shapes. This is to ensure that plan cache key encoding
+ // correctly traverses the expression tree.
+ auto orQueryWithOneBranchAllowed = canonicalize("{$or: [{a: 3}, {a: {$gt: [1,2]}}]}");
+ // Same shape except 'a' is compared to an object.
+ auto orQueryWithNoBranchesAllowed =
+ canonicalize("{$or: [{a: {someobject: 1}}, {a: {$gt: [1,2]}}]}");
+ // The two queries should have the same shape when no indexes are present, but different shapes
+ // when a $** index is present.
+ ASSERT_EQ(makeKey(*orQueryWithOneBranchAllowed), makeKey(*orQueryWithNoBranchesAllowed));
+ assertPlanCacheKeysUnequalDueToDiscriminators(
+ makeKey(*orQueryWithOneBranchAllowed, indexCores),
+ makeKey(*orQueryWithNoBranchesAllowed, indexCores));
+ ASSERT_EQ(makeKey(*orQueryWithOneBranchAllowed, indexCores).getIndexabilityDiscriminators(),
+ "<1><0>");
+ ASSERT_EQ(makeKey(*orQueryWithNoBranchesAllowed, indexCores).getIndexabilityDiscriminators(),
+ "<0><0>");
+}
+
+TEST(PlanCacheKeyInfoTest, ComputeKeyWildcardIndexDiscriminatesEqualityToEmptyObj) {
+ auto entryProjUpdatePair = makeWildcardUpdate(BSON("a.$**" << 1));
+
+ const std::vector<CoreIndexInfo> indexCores = {entryProjUpdatePair.first};
+
+ // Equality to empty obj and equality to non-empty obj have different plan cache keys.
+ std::unique_ptr<CanonicalQuery> equalsEmptyObj(canonicalize("{a: {}}"));
+ std::unique_ptr<CanonicalQuery> equalsNonEmptyObj(canonicalize("{a: {b: 1}}"));
+ assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*equalsEmptyObj, indexCores),
+ makeKey(*equalsNonEmptyObj, indexCores));
+ ASSERT_EQ(makeKey(*equalsNonEmptyObj, indexCores).getIndexabilityDiscriminators(), "<0>");
+ ASSERT_EQ(makeKey(*equalsEmptyObj, indexCores).getIndexabilityDiscriminators(), "<1>");
+
+ // $in with empty obj and $in with non-empty obj have different plan cache keys.
+ std::unique_ptr<CanonicalQuery> inWithEmptyObj(canonicalize("{a: {$in: [{}]}}"));
+ std::unique_ptr<CanonicalQuery> inWithNonEmptyObj(canonicalize("{a: {$in: [{b: 1}]}}"));
+ assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*inWithEmptyObj, indexCores),
+ makeKey(*inWithNonEmptyObj, indexCores));
+ ASSERT_EQ(makeKey(*inWithNonEmptyObj, indexCores).getIndexabilityDiscriminators(), "<0>");
+ ASSERT_EQ(makeKey(*inWithEmptyObj, indexCores).getIndexabilityDiscriminators(), "<1>");
+}
+
+TEST(PlanCacheKeyInfoTest, ComputeKeyWildcardDiscriminatesCorrectlyBasedOnPartialFilterExpression) {
+ BSONObj filterObj = BSON("x" << BSON("$gt" << 0));
+ std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
+
+ auto entryProjUpdatePair = makeWildcardUpdate(BSON("$**" << 1));
+ auto indexInfo = std::move(entryProjUpdatePair.first);
+ indexInfo.filterExpr = filterExpr.get();
+
+ const std::vector<CoreIndexInfo> indexCores = {indexInfo};
+
+ // Test that queries on field 'x' are discriminated based on their relationship with the partial
+ // filter expression.
+ {
+ auto compatibleWithFilter = canonicalize("{x: {$eq: 5}}");
+ auto incompatibleWithFilter = canonicalize("{x: {$eq: -5}}");
+ auto compatibleKey = makeKey(*compatibleWithFilter, indexCores);
+ auto incompatibleKey = makeKey(*incompatibleWithFilter, indexCores);
+
+ assertPlanCacheKeysUnequalDueToDiscriminators(compatibleKey, incompatibleKey);
+ // The discriminator strings have the format "<xx>". That is, there are two discriminator
+ // bits for the "x" predicate, the first pertaining to the partialFilterExpression and the
+ // second around applicability to the wildcard index.
+ ASSERT_EQ(compatibleKey.getIndexabilityDiscriminators(), "<11>");
+ ASSERT_EQ(incompatibleKey.getIndexabilityDiscriminators(), "<01>");
+ }
+
+ // The partialFilterExpression should lead to a discriminator over field 'x', but not over 'y'.
+ // (Separately, there are wildcard-related discriminator bits for both 'x' and 'y'.)
+ {
+ auto compatibleWithFilter = canonicalize("{x: {$eq: 5}, y: 1}");
+ auto incompatibleWithFilter = canonicalize("{x: {$eq: -5}, y: 1}");
+ auto compatibleKey = makeKey(*compatibleWithFilter, indexCores);
+ auto incompatibleKey = makeKey(*incompatibleWithFilter, indexCores);
+
+ assertPlanCacheKeysUnequalDueToDiscriminators(compatibleKey, incompatibleKey);
+ // The discriminator strings have the format "<xx><y>". That is, there are two discriminator
+ // bits for the "x" predicate (the first pertaining to the partialFilterExpression, the
+ // second around applicability to the wildcard index) and one discriminator bit for "y".
+ ASSERT_EQ(compatibleKey.getIndexabilityDiscriminators(), "<11><1>");
+ ASSERT_EQ(incompatibleKey.getIndexabilityDiscriminators(), "<01><1>");
+ }
+
+ // $eq:null predicates cannot be assigned to a wildcard index. Make sure that this is
+ // discriminated correctly. This test is designed to reproduce SERVER-48614.
+ {
+ auto compatibleQuery = canonicalize("{x: {$eq: 5}, y: 1}");
+ auto incompatibleQuery = canonicalize("{x: {$eq: 5}, y: null}");
+ auto compatibleKey = makeKey(*compatibleQuery, indexCores);
+ auto incompatibleKey = makeKey(*incompatibleQuery, indexCores);
+
+ assertPlanCacheKeysUnequalDueToDiscriminators(compatibleKey, incompatibleKey);
+ // The discriminator strings have the format "<xx><y>". That is, there are two discriminator
+ // bits for the "x" predicate (the first pertaining to the partialFilterExpression, the
+ // second around applicability to the wildcard index) and one discriminator bit for "y".
+ ASSERT_EQ(compatibleKey.getIndexabilityDiscriminators(), "<11><1>");
+ ASSERT_EQ(incompatibleKey.getIndexabilityDiscriminators(), "<11><0>");
+ }
+
+ // Test that the discriminators are correct for an $eq:null predicate on 'x'. This predicate is
+ // incompatible for two reasons: null equality predicates cannot be answered by wildcard
+ // indexes, and the predicate is not compatible with the partial filter expression. This should
+ // result in two "0" bits inside the discriminator string.
+ {
+ auto key = makeKey(*canonicalize("{x: {$eq: null}}"), indexCores);
+ ASSERT_EQ(key.getIndexabilityDiscriminators(), "<00>");
+ }
+}
+
+TEST(PlanCacheKeyInfoTest, DifferentQueryEngines) {
+ const auto keyPattern = BSON("a" << 1);
+ const std::vector<CoreIndexInfo> indexCores = {
+ CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ false, // sparse
+ IndexEntry::Identifier{""})}; // name
+
+ // Helper to construct a plan cache key given the 'forceClassicEngine' flag.
+ auto constructPlanCacheKey = [&](bool forceClassicEngine) {
+ RAIIServerParameterControllerForTest controller{"internalQueryForceClassicEngine",
+ forceClassicEngine};
+ const auto queryStr = "{a: 0}";
+ unique_ptr<CanonicalQuery> cq(canonicalize(queryStr));
+ return makeKey(*cq, indexCores);
+ };
+
+ const auto classicEngineKey = constructPlanCacheKey(false);
+ const auto noClassicEngineKey = constructPlanCacheKey(true);
+
+ // Check that the two plan cache keys are not equal because the plans were created under
+ // different engines.
+ assertPlanCacheKeysUnequalDueToForceClassicEngineValue(classicEngineKey, noClassicEngineKey);
+}
+
+TEST(PlanCacheKeyInfoTest, ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFilterAndExpression) {
+ // Partial filter is an AND of multiple conditions.
+ BSONObj filterObj = BSON("x" << BSON("$gt" << 0) << "y" << BSON("$gt" << 0));
+ std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
+
+ auto entryProjUpdatePair = makeWildcardUpdate(BSON("$**" << 1));
+ auto indexInfo = std::move(entryProjUpdatePair.first);
+ indexInfo.filterExpr = filterExpr.get();
+
+ const std::vector<CoreIndexInfo> indexCores = {indexInfo};
+
+ {
+ // The discriminators should have the format <xx><yy><z>. The 'z' predicate has just one
+ // discriminator because it is not referenced in the partial filter expression. All
+ // predicates are compatible.
+ auto key = makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, z: {$eq: 3}}"), indexCores);
+ ASSERT_EQ(key.getIndexabilityDiscriminators(), "<11><11><1>");
+ }
+
+ {
+ // The discriminators should have the format <xx><yy><z>. The 'y' predicate is not
+ // compatible with the partial filter expression, leading to one of the 'y' bits being set
+ // to zero.
+ auto key = makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: -2}, z: {$eq: 3}}"), indexCores);
+ ASSERT_EQ(key.getIndexabilityDiscriminators(), "<11><01><1>");
+ }
+}
+
+TEST(PlanCacheKeyInfoTest, ComputeKeyDiscriminatesCorrectlyWithPartialFilterAndWildcardProjection) {
+ BSONObj filterObj = BSON("x" << BSON("$gt" << 0));
+ std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
+
+ auto entryProjUpdatePair = makeWildcardUpdate(BSON("y.$**" << 1));
+ auto indexInfo = std::move(entryProjUpdatePair.first);
+ indexInfo.filterExpr = filterExpr.get();
+
+ const std::vector<CoreIndexInfo> indexCores = {indexInfo};
+
+ {
+ // The discriminators have the format <x><y>. The discriminator for 'x' indicates whether
+ // the predicate is compatible with the partial filter expression, whereas the discriminator
+ // for 'y' is about compatibility with the wildcard index.
+ auto key = makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, z: {$eq: 3}}"), indexCores);
+ ASSERT_EQ(key.getIndexabilityDiscriminators(), "<1><1>");
+ }
+
+ {
+ // Similar to the previous case, except with an 'x' predicate that is incompatible with the
+ // partial filter expression.
+ auto key = makeKey(*canonicalize("{x: {$eq: -1}, y: {$eq: 2}, z: {$eq: 3}}"), indexCores);
+ ASSERT_EQ(key.getIndexabilityDiscriminators(), "<0><1>");
+ }
+
+ {
+ // Case where the 'y' predicate is not compatible with the wildcard index.
+ auto key = makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: null}, z: {$eq: 3}}"), indexCores);
+ ASSERT_EQ(key.getIndexabilityDiscriminators(), "<1><0>");
+ }
+}
+
+TEST(PlanCacheKeyInfoTest, ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFilterOnNestedField) {
+ BSONObj filterObj = BSON("x.y" << BSON("$gt" << 0));
+ std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
+
+ auto entryProjUpdatePair = makeWildcardUpdate(BSON("$**" << 1));
+ auto indexInfo = std::move(entryProjUpdatePair.first);
+ indexInfo.filterExpr = filterExpr.get();
+
+ const std::vector<CoreIndexInfo> indexCores = {indexInfo};
+
+ {
+ // The discriminators have the format <x><(x.y)(x.y)><y>. All predicates are compatible.
+ auto key =
+ makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, 'x.y': {$eq: 3}}"), indexCores);
+ ASSERT_EQ(key.getIndexabilityDiscriminators(), "<1><11><1>");
+ }
+
+ {
+ // Here, the predicate on "x.y" is not compatible with the partial filter expression.
+ auto key =
+ makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, 'x.y': {$eq: -3}}"), indexCores);
+ ASSERT_EQ(key.getIndexabilityDiscriminators(), "<1><01><1>");
+ }
+}
+
+TEST(PlanCacheKeyInfoTest, StableKeyDoesNotChangeAcrossIndexCreation) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 0}}"));
+ const auto preIndexKey = makeKey(*cq);
+ const auto preIndexStableKey = preIndexKey.getQueryShape();
+ ASSERT_EQ(preIndexKey.getIndexabilityDiscriminators(), "");
+
+ const auto keyPattern = BSON("a" << 1);
+ // Create a sparse index (which requires a discriminator).
+ const std::vector<CoreIndexInfo> indexCores = {
+ CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ true, // sparse
+ IndexEntry::Identifier{""})}; // name
+
+ const auto postIndexKey = makeKey(*cq, indexCores);
+ const auto postIndexStableKey = postIndexKey.getQueryShape();
+ ASSERT_NE(preIndexKey, postIndexKey);
+ ASSERT_EQ(preIndexStableKey, postIndexStableKey);
+ ASSERT_EQ(postIndexKey.getIndexabilityDiscriminators(), "<1>");
+}
+
+TEST(PlanCacheKeyInfoTest, ComputeKeyNotEqualsArray) {
+ unique_ptr<CanonicalQuery> cqNeArray(canonicalize("{a: {$ne: [1]}}"));
+ unique_ptr<CanonicalQuery> cqNeScalar(canonicalize("{a: {$ne: 123}}"));
+
+ const auto noIndexNeArrayKey = makeKey(*cqNeArray);
+ const auto noIndexNeScalarKey = makeKey(*cqNeScalar);
+ ASSERT_EQ(noIndexNeArrayKey.getIndexabilityDiscriminators(), "<0>");
+ ASSERT_EQ(noIndexNeScalarKey.getIndexabilityDiscriminators(), "<1>");
+ ASSERT_EQ(noIndexNeScalarKey.getQueryShape(), noIndexNeArrayKey.getQueryShape());
+
+ const auto keyPattern = BSON("a" << 1);
+ // Create a normal btree index. It will have a discriminator.
+ const std::vector<CoreIndexInfo> indexCores = {
+ CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ false, // sparse
+ IndexEntry::Identifier{""})}; // name
+
+ const auto withIndexNeArrayKey = makeKey(*cqNeArray, indexCores);
+ const auto withIndexNeScalarKey = makeKey(*cqNeScalar, indexCores);
+
+ ASSERT_NE(noIndexNeArrayKey, withIndexNeArrayKey);
+ ASSERT_EQ(noIndexNeArrayKey.getQueryShape(), withIndexNeArrayKey.getQueryShape());
+
+ ASSERT_EQ(noIndexNeScalarKey.getQueryShape(), withIndexNeScalarKey.getQueryShape());
+ // There will be one discriminator for the $not and another for the leaf node ({$eq: 123}).
+ ASSERT_EQ(withIndexNeScalarKey.getIndexabilityDiscriminators(), "<1><1>");
+ // There will be one discriminator for the $not and another for the leaf node ({$eq: [1]}).
+ // Since the index can support equality to an array, the second discriminator will have a value
+ // of '1'.
+ ASSERT_EQ(withIndexNeArrayKey.getIndexabilityDiscriminators(), "<0><1>");
+}
+
+TEST(PlanCacheKeyInfoTest, ComputeKeyNinArray) {
+ unique_ptr<CanonicalQuery> cqNinArray(canonicalize("{a: {$nin: [123, [1]]}}"));
+ unique_ptr<CanonicalQuery> cqNinScalar(canonicalize("{a: {$nin: [123, 456]}}"));
+
+ const auto noIndexNinArrayKey = makeKey(*cqNinArray);
+ const auto noIndexNinScalarKey = makeKey(*cqNinScalar);
+ ASSERT_EQ(noIndexNinArrayKey.getIndexabilityDiscriminators(), "<0>");
+ ASSERT_EQ(noIndexNinScalarKey.getIndexabilityDiscriminators(), "<1>");
+ ASSERT_EQ(noIndexNinScalarKey.getQueryShape(), noIndexNinArrayKey.getQueryShape());
+
+ const auto keyPattern = BSON("a" << 1);
+ // Create a normal btree index. It will have a discriminator.
+ const std::vector<CoreIndexInfo> indexCores = {
+ CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ false, // sparse
+ IndexEntry::Identifier{""})}; // name
+
+ const auto withIndexNinArrayKey = makeKey(*cqNinArray, indexCores);
+ const auto withIndexNinScalarKey = makeKey(*cqNinScalar, indexCores);
+
+ // The unstable part of the key for $nin: [<array>] should have changed. The stable part,
+ // however, should not.
+ ASSERT_EQ(noIndexNinArrayKey.getQueryShape(), withIndexNinArrayKey.getQueryShape());
+ ASSERT_NE(noIndexNinArrayKey.getIndexabilityDiscriminators(),
+ withIndexNinArrayKey.getIndexabilityDiscriminators());
+
+ ASSERT_EQ(noIndexNinScalarKey.getQueryShape(), withIndexNinScalarKey.getQueryShape());
+ ASSERT_EQ(withIndexNinArrayKey.getIndexabilityDiscriminators(), "<0><1>");
+ ASSERT_EQ(withIndexNinScalarKey.getIndexabilityDiscriminators(), "<1><1>");
+}
+
+// Test for a bug which would be easy to introduce. If we only inserted discriminators for some
+// nodes, we would have a problem. For example if our "stable" key was:
+// (or[nt[eqa],nt[eqa]])
+// And there was just one discriminator:
+// <0>
+//
+// Whether the discriminator referred to the first not-eq node or the second would be
+// ambiguous. This would make it possible for two queries with different shapes (and different
+// plans) to get the same plan cache key. We test that this does not happen for a simple example.
+TEST(PlanCacheKeyInfoTest, PlanCacheKeyCollision) {
+ unique_ptr<CanonicalQuery> cqNeA(canonicalize("{$or: [{a: {$ne: 5}}, {a: {$ne: [12]}}]}"));
+ unique_ptr<CanonicalQuery> cqNeB(canonicalize("{$or: [{a: {$ne: [12]}}, {a: {$ne: 5}}]}"));
+
+ const auto keyA = makeKey(*cqNeA);
+ const auto keyB = makeKey(*cqNeB);
+ ASSERT_EQ(keyA.getQueryShape(), keyB.getQueryShape());
+ ASSERT_NE(keyA.getIndexabilityDiscriminators(), keyB.getIndexabilityDiscriminators());
+ const auto keyPattern = BSON("a" << 1);
+ // Create a normal btree index. It will have a discriminator.
+ std::vector<CoreIndexInfo> indexCores = {
+ CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ false, // sparse
+ IndexEntry::Identifier{""})}; // name
+ const auto keyAWithIndex = makeKey(*cqNeA, indexCores);
+ const auto keyBWithIndex = makeKey(*cqNeB, indexCores);
+
+ ASSERT_EQ(keyAWithIndex.getQueryShape(), keyBWithIndex.getQueryShape());
+ ASSERT_NE(keyAWithIndex.getIndexabilityDiscriminators(),
+ keyBWithIndex.getIndexabilityDiscriminators());
+}
+} // namespace mongo
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 4ec8387bb75..cf9ef5e9e6f 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -45,6 +45,7 @@
#include "mongo/db/matcher/extensions_callback_noop.h"
#include "mongo/db/pipeline/expression_context_for_test.h"
#include "mongo/db/query/canonical_query_encoder.h"
+#include "mongo/db/query/canonical_query_test_util.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/plan_ranker.h"
@@ -70,8 +71,6 @@ using std::string;
using std::unique_ptr;
using std::vector;
-static const NamespaceString nss("test.collection");
-
PlanCacheKey makeKey(const CanonicalQuery& cq, const std::vector<CoreIndexInfo>& indexCores = {}) {
PlanCacheIndexabilityState indexabilityState;
indexabilityState.updateDiscriminators(indexCores);
@@ -79,210 +78,7 @@ PlanCacheKey makeKey(const CanonicalQuery& cq, const std::vector<CoreIndexInfo>&
StringBuilder indexabilityKeyBuilder;
plan_cache_detail::encodeIndexability(cq.root(), indexabilityState, &indexabilityKeyBuilder);
- return {cq.encodeKey(), indexabilityKeyBuilder.str()};
-}
-
-/**
- * Utility functions to create a CanonicalQuery
- */
-unique_ptr<CanonicalQuery> canonicalize(const BSONObj& queryObj) {
- QueryTestServiceContext serviceContext;
- auto opCtx = serviceContext.makeOperationContext();
-
- auto findCommand = std::make_unique<FindCommandRequest>(nss);
- findCommand->setFilter(queryObj);
- const boost::intrusive_ptr<ExpressionContext> expCtx;
- auto statusWithCQ =
- CanonicalQuery::canonicalize(opCtx.get(),
- std::move(findCommand),
- false,
- expCtx,
- ExtensionsCallbackNoop(),
- MatchExpressionParser::kAllowAllSpecialFeatures);
- ASSERT_OK(statusWithCQ.getStatus());
- return std::move(statusWithCQ.getValue());
-}
-
-unique_ptr<CanonicalQuery> canonicalize(StringData queryStr) {
- BSONObj queryObj = fromjson(queryStr.toString());
- return canonicalize(queryObj);
-}
-
-unique_ptr<CanonicalQuery> canonicalize(BSONObj query,
- BSONObj sort,
- BSONObj proj,
- BSONObj collation) {
- QueryTestServiceContext serviceContext;
- auto opCtx = serviceContext.makeOperationContext();
-
- auto findCommand = std::make_unique<FindCommandRequest>(nss);
- findCommand->setFilter(query);
- findCommand->setSort(sort);
- findCommand->setProjection(proj);
- findCommand->setCollation(collation);
- const boost::intrusive_ptr<ExpressionContext> expCtx;
- auto statusWithCQ =
- CanonicalQuery::canonicalize(opCtx.get(),
- std::move(findCommand),
- false,
- expCtx,
- ExtensionsCallbackNoop(),
- MatchExpressionParser::kAllowAllSpecialFeatures);
- ASSERT_OK(statusWithCQ.getStatus());
- return std::move(statusWithCQ.getValue());
-}
-
-unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
- const char* sortStr,
- const char* projStr,
- const char* collationStr) {
- return canonicalize(
- fromjson(queryStr), fromjson(sortStr), fromjson(projStr), fromjson(collationStr));
-}
-
-unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
- const char* sortStr,
- const char* projStr,
- long long skip,
- long long limit,
- const char* hintStr,
- const char* minStr,
- const char* maxStr) {
- QueryTestServiceContext serviceContext;
- auto opCtx = serviceContext.makeOperationContext();
-
- auto findCommand = std::make_unique<FindCommandRequest>(nss);
- findCommand->setFilter(fromjson(queryStr));
- findCommand->setSort(fromjson(sortStr));
- findCommand->setProjection(fromjson(projStr));
- if (skip) {
- findCommand->setSkip(skip);
- }
- if (limit) {
- findCommand->setLimit(limit);
- }
- findCommand->setHint(fromjson(hintStr));
- findCommand->setMin(fromjson(minStr));
- findCommand->setMax(fromjson(maxStr));
- const boost::intrusive_ptr<ExpressionContext> expCtx;
- auto statusWithCQ =
- CanonicalQuery::canonicalize(opCtx.get(),
- std::move(findCommand),
- false,
- expCtx,
- ExtensionsCallbackNoop(),
- MatchExpressionParser::kAllowAllSpecialFeatures);
- ASSERT_OK(statusWithCQ.getStatus());
- return std::move(statusWithCQ.getValue());
-}
-
-unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
- const char* sortStr,
- const char* projStr,
- long long skip,
- long long limit,
- const char* hintStr,
- const char* minStr,
- const char* maxStr,
- bool explain) {
- QueryTestServiceContext serviceContext;
- auto opCtx = serviceContext.makeOperationContext();
-
- auto findCommand = std::make_unique<FindCommandRequest>(nss);
- findCommand->setFilter(fromjson(queryStr));
- findCommand->setSort(fromjson(sortStr));
- findCommand->setProjection(fromjson(projStr));
- if (skip) {
- findCommand->setSkip(skip);
- }
- if (limit) {
- findCommand->setLimit(limit);
- }
- findCommand->setHint(fromjson(hintStr));
- findCommand->setMin(fromjson(minStr));
- findCommand->setMax(fromjson(maxStr));
- const boost::intrusive_ptr<ExpressionContext> expCtx;
- auto statusWithCQ =
- CanonicalQuery::canonicalize(opCtx.get(),
- std::move(findCommand),
- explain,
- expCtx,
- ExtensionsCallbackNoop(),
- MatchExpressionParser::kAllowAllSpecialFeatures);
- ASSERT_OK(statusWithCQ.getStatus());
- return std::move(statusWithCQ.getValue());
-}
-
-/**
- * Check that the stable keys of 'a' and 'b' are equal, but the index discriminators are not.
- */
-void assertPlanCacheKeysUnequalDueToDiscriminators(const PlanCacheKey& a, const PlanCacheKey& b) {
- ASSERT_EQ(a.getQueryShapeStringData(), b.getQueryShapeStringData());
- ASSERT_EQ(a.getIndexabilityDiscriminators().size(), b.getIndexabilityDiscriminators().size());
- ASSERT_NE(a.getIndexabilityDiscriminators(), b.getIndexabilityDiscriminators());
-
- // Should always have the begin and end delimiters.
- ASSERT_GTE(a.getIndexabilityDiscriminators().size(), 2u);
-}
-
-/**
- * Check that the stable keys of 'a' and 'b' are not equal because of the last character.
- */
-void assertPlanCacheKeysUnequalDueToForceClassicEngineValue(const PlanCacheKey& a,
- const PlanCacheKey& b) {
- auto aUnstablePart = a.getIndexabilityDiscriminators();
- auto bUnstablePart = b.getIndexabilityDiscriminators();
- auto aStablePart = a.getQueryShape();
- auto bStablePart = b.getQueryShape();
-
- ASSERT_EQ(aUnstablePart, bUnstablePart);
- // The last character of the stable part encodes the engine that uses this PlanCacheKey. So the
- // stable parts except for the last character should be identical.
- ASSERT_EQ(aStablePart.substr(0, aStablePart.size() - 1),
- bStablePart.substr(0, bStablePart.size() - 1));
-
- // Should have at least 1 byte to represent whether we must use the classic engine.
- ASSERT_GTE(aStablePart.size(), 1);
-
- // The indexability discriminators should match.
- ASSERT_EQ(a.getIndexabilityDiscriminators(), b.getIndexabilityDiscriminators());
-
- // The stable parts should not match because of the last character.
- ASSERT_NE(aStablePart, bStablePart);
- ASSERT_NE(aStablePart.back(), bStablePart.back());
-}
-
-/**
- * Utility function to create MatchExpression
- */
-unique_ptr<MatchExpression> parseMatchExpression(const BSONObj& obj) {
- boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- StatusWithMatchExpression status =
- MatchExpressionParser::parse(obj,
- std::move(expCtx),
- ExtensionsCallbackNoop(),
- MatchExpressionParser::kAllowAllSpecialFeatures);
- if (!status.isOK()) {
- str::stream ss;
- ss << "failed to parse query: " << obj.toString()
- << ". Reason: " << status.getStatus().toString();
- FAIL(ss);
- }
-
- return std::move(status.getValue());
-}
-
-void assertEquivalent(const char* queryStr,
- const MatchExpression* expected,
- const MatchExpression* actual) {
- if (actual->equivalent(expected)) {
- return;
- }
- str::stream ss;
- ss << "Match expressions are not equivalent."
- << "\nOriginal query: " << queryStr << "\nExpected: " << expected->debugString()
- << "\nActual: " << actual->debugString();
- FAIL(ss);
+ return {PlanCacheKeyInfo{cq.encodeKey(), indexabilityKeyBuilder.str()}};
}
// Helper which constructs a $** IndexEntry and returns it along with an owned ProjectionExecutor.
@@ -308,21 +104,6 @@ std::pair<IndexEntry, std::unique_ptr<WildcardProjection>> makeWildcardEntry(BSO
std::move(wcProj)};
}
-// A version of the above for CoreIndexInfo, used for plan cache update tests.
-std::pair<CoreIndexInfo, std::unique_ptr<WildcardProjection>> makeWildcardUpdate(
- BSONObj keyPattern) {
- auto wcProj = std::make_unique<WildcardProjection>(
- WildcardKeyGenerator::createProjectionExecutor(keyPattern, {}));
- return {CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- false, // sparse
- IndexEntry::Identifier{"indexName"}, // name
- nullptr, // filterExpr
- nullptr, // collation
- wcProj.get()), // wildcard
- std::move(wcProj)};
-}
-
//
// Tests for CachedSolution
//
@@ -1294,7 +1075,7 @@ protected:
};
const std::string mockKey("mock_cache_key");
-const PlanCacheKey CachePlanSelectionTest::ck(mockKey, "");
+const PlanCacheKey CachePlanSelectionTest::ck{PlanCacheKeyInfo{mockKey, ""}};
//
// Equality
@@ -1863,495 +1644,6 @@ TEST_F(CachePlanSelectionTest, ContainedOrAndIntersection) {
"]}}}}");
}
-// When a sparse index is present, computeKey() should generate different keys depending on
-// whether or not the predicates in the given query can use the index.
-TEST(PlanCacheTest, ComputeKeySparseIndex) {
- PlanCache planCache(5000);
- const auto keyPattern = BSON("a" << 1);
- const std::vector<CoreIndexInfo> indexCores = {
- CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- true, // sparse
- IndexEntry::Identifier{""})}; // name
-
- unique_ptr<CanonicalQuery> cqEqNumber(canonicalize("{a: 0}}"));
- unique_ptr<CanonicalQuery> cqEqString(canonicalize("{a: 'x'}}"));
- unique_ptr<CanonicalQuery> cqEqNull(canonicalize("{a: null}}"));
-
- // 'cqEqNumber' and 'cqEqString' get the same key, since both are compatible with this
- // index.
- const auto eqNumberKey = makeKey(*cqEqNumber, indexCores);
- const auto eqStringKey = makeKey(*cqEqString, indexCores);
- ASSERT_EQ(eqNumberKey, eqStringKey);
-
- // 'cqEqNull' gets a different key, since it is not compatible with this index.
- const auto eqNullKey = makeKey(*cqEqNull, indexCores);
- ASSERT_NOT_EQUALS(eqNullKey, eqNumberKey);
-
- assertPlanCacheKeysUnequalDueToDiscriminators(eqNullKey, eqNumberKey);
- assertPlanCacheKeysUnequalDueToDiscriminators(eqNullKey, eqStringKey);
-}
-
-// When a partial index is present, computeKey() should generate different keys depending on
-// whether or not the predicates in the given query "match" the predicates in the partial index
-// filter.
-TEST(PlanCacheTest, ComputeKeyPartialIndex) {
- BSONObj filterObj = BSON("f" << BSON("$gt" << 0));
- unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
-
- PlanCache planCache(5000);
- const auto keyPattern = BSON("a" << 1);
- const std::vector<CoreIndexInfo> indexCores = {
- CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- false, // sparse
- IndexEntry::Identifier{""}, // name
- filterExpr.get())}; // filterExpr
-
- unique_ptr<CanonicalQuery> cqGtNegativeFive(canonicalize("{f: {$gt: -5}}"));
- unique_ptr<CanonicalQuery> cqGtZero(canonicalize("{f: {$gt: 0}}"));
- unique_ptr<CanonicalQuery> cqGtFive(canonicalize("{f: {$gt: 5}}"));
-
- // 'cqGtZero' and 'cqGtFive' get the same key, since both are compatible with this index.
- ASSERT_EQ(makeKey(*cqGtZero, indexCores), makeKey(*cqGtFive, indexCores));
-
- // 'cqGtNegativeFive' gets a different key, since it is not compatible with this index.
- assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*cqGtNegativeFive, indexCores),
- makeKey(*cqGtZero, indexCores));
-}
-
-// Query shapes should get the same plan cache key if they have the same collation indexability.
-TEST(PlanCacheTest, ComputeKeyCollationIndex) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
-
- PlanCache planCache(5000);
- const auto keyPattern = BSON("a" << 1);
- const std::vector<CoreIndexInfo> indexCores = {
- CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- false, // sparse
- IndexEntry::Identifier{""}, // name
- nullptr, // filterExpr
- &collator)}; // collation
-
- unique_ptr<CanonicalQuery> containsString(canonicalize("{a: 'abc'}"));
- unique_ptr<CanonicalQuery> containsObject(canonicalize("{a: {b: 'abc'}}"));
- unique_ptr<CanonicalQuery> containsArray(canonicalize("{a: ['abc', 'xyz']}"));
- unique_ptr<CanonicalQuery> noStrings(canonicalize("{a: 5}"));
- unique_ptr<CanonicalQuery> containsStringHasCollation(
- canonicalize("{a: 'abc'}", "{}", "{}", "{locale: 'mock_reverse_string'}"));
-
- // 'containsString', 'containsObject', and 'containsArray' have the same key, since none are
- // compatible with the index.
- ASSERT_EQ(makeKey(*containsString, indexCores), makeKey(*containsObject, indexCores));
- ASSERT_EQ(makeKey(*containsString, indexCores), makeKey(*containsArray, indexCores));
-
- // 'noStrings' gets a different key since it is compatible with the index.
- assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*containsString, indexCores),
- makeKey(*noStrings, indexCores));
- ASSERT_EQ(makeKey(*containsString, indexCores).getIndexabilityDiscriminators(), "<0>");
- ASSERT_EQ(makeKey(*noStrings, indexCores).getIndexabilityDiscriminators(), "<1>");
-
- // 'noStrings' and 'containsStringHasCollation' get different keys, since the collation
- // specified in the query is considered part of its shape. However, they have the same index
- // compatibility, so the unstable part of their PlanCacheKeys should be the same.
- PlanCacheKey noStringKey = makeKey(*noStrings, indexCores);
- PlanCacheKey withStringAndCollationKey = makeKey(*containsStringHasCollation, indexCores);
- ASSERT_NE(noStringKey, withStringAndCollationKey);
- ASSERT_EQ(noStringKey.getIndexabilityDiscriminators(),
- withStringAndCollationKey.getIndexabilityDiscriminators());
- ASSERT_NE(noStringKey.getQueryShapeStringData(),
- withStringAndCollationKey.getQueryShapeStringData());
-
- unique_ptr<CanonicalQuery> inContainsString(canonicalize("{a: {$in: [1, 'abc', 2]}}"));
- unique_ptr<CanonicalQuery> inContainsObject(canonicalize("{a: {$in: [1, {b: 'abc'}, 2]}}"));
- unique_ptr<CanonicalQuery> inContainsArray(canonicalize("{a: {$in: [1, ['abc', 'xyz'], 2]}}"));
- unique_ptr<CanonicalQuery> inNoStrings(canonicalize("{a: {$in: [1, 2]}}"));
- unique_ptr<CanonicalQuery> inContainsStringHasCollation(
- canonicalize("{a: {$in: [1, 'abc', 2]}}", "{}", "{}", "{locale: 'mock_reverse_string'}"));
-
- // 'inContainsString', 'inContainsObject', and 'inContainsArray' have the same key, since none
- // are compatible with the index.
- ASSERT_EQ(makeKey(*inContainsString, indexCores), makeKey(*inContainsObject, indexCores));
- ASSERT_EQ(makeKey(*inContainsString, indexCores), makeKey(*inContainsArray, indexCores));
-
- // 'inNoStrings' gets a different key since it is compatible with the index.
- assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*inContainsString, indexCores),
- makeKey(*inNoStrings, indexCores));
- ASSERT_EQ(makeKey(*inContainsString, indexCores).getIndexabilityDiscriminators(), "<0>");
- ASSERT_EQ(makeKey(*inNoStrings, indexCores).getIndexabilityDiscriminators(), "<1>");
-
- // 'inNoStrings' and 'inContainsStringHasCollation' get the same key since they compatible with
- // the index.
- ASSERT_NE(makeKey(*inNoStrings, indexCores),
- makeKey(*inContainsStringHasCollation, indexCores));
- ASSERT_EQ(makeKey(*inNoStrings, indexCores).getIndexabilityDiscriminators(),
- makeKey(*inContainsStringHasCollation, indexCores).getIndexabilityDiscriminators());
-}
-
-TEST(PlanCacheTest, ComputeKeyWildcardIndex) {
- auto entryProjUpdatePair = makeWildcardUpdate(BSON("a.$**" << 1));
-
- PlanCache planCache(5000);
- const std::vector<CoreIndexInfo> indexCores = {entryProjUpdatePair.first};
-
- // Used to check that two queries have the same shape when no indexes are present.
- PlanCache planCacheWithNoIndexes(5000);
-
- // Compatible with index.
- unique_ptr<CanonicalQuery> usesPathWithScalar(canonicalize("{a: 'abcdef'}"));
- unique_ptr<CanonicalQuery> usesPathWithEmptyArray(canonicalize("{a: []}"));
-
- // Not compatible with index.
- unique_ptr<CanonicalQuery> usesPathWithObject(canonicalize("{a: {b: 'abc'}}"));
- unique_ptr<CanonicalQuery> usesPathWithArray(canonicalize("{a: [1, 2]}"));
- unique_ptr<CanonicalQuery> usesPathWithArrayContainingObject(canonicalize("{a: [1, {b: 1}]}"));
- unique_ptr<CanonicalQuery> usesPathWithEmptyObject(canonicalize("{a: {}}"));
- unique_ptr<CanonicalQuery> doesNotUsePath(canonicalize("{b: 1234}"));
-
- // Check that the queries which are compatible with the index have the same key.
- ASSERT_EQ(makeKey(*usesPathWithScalar, indexCores),
- makeKey(*usesPathWithEmptyArray, indexCores));
-
- // Check that the queries which have the same path as the index, but aren't supported, have
- // different keys.
- ASSERT_EQ(makeKey(*usesPathWithScalar), makeKey(*usesPathWithObject));
- assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*usesPathWithScalar, indexCores),
- makeKey(*usesPathWithObject, indexCores));
- ASSERT_EQ(makeKey(*usesPathWithScalar, indexCores).getIndexabilityDiscriminators(), "<1>");
- ASSERT_EQ(makeKey(*usesPathWithObject, indexCores).getIndexabilityDiscriminators(), "<0>");
-
- ASSERT_EQ(makeKey(*usesPathWithObject, indexCores), makeKey(*usesPathWithArray, indexCores));
- ASSERT_EQ(makeKey(*usesPathWithObject, indexCores),
- makeKey(*usesPathWithArrayContainingObject, indexCores));
-
- // The query on 'b' should have a completely different plan cache key (both with and without a
- // wildcard index).
- ASSERT_NE(makeKey(*usesPathWithScalar), makeKey(*doesNotUsePath));
- ASSERT_NE(makeKey(*usesPathWithScalar, indexCores), makeKey(*doesNotUsePath, indexCores));
- ASSERT_NE(makeKey(*usesPathWithObject), makeKey(*doesNotUsePath));
- ASSERT_NE(makeKey(*usesPathWithObject, indexCores), makeKey(*doesNotUsePath, indexCores));
-
- // More complex queries with similar shapes. This is to ensure that plan cache key encoding
- // correctly traverses the expression tree.
- auto orQueryWithOneBranchAllowed = canonicalize("{$or: [{a: 3}, {a: {$gt: [1,2]}}]}");
- // Same shape except 'a' is compared to an object.
- auto orQueryWithNoBranchesAllowed =
- canonicalize("{$or: [{a: {someobject: 1}}, {a: {$gt: [1,2]}}]}");
- // The two queries should have the same shape when no indexes are present, but different shapes
- // when a $** index is present.
- ASSERT_EQ(makeKey(*orQueryWithOneBranchAllowed), makeKey(*orQueryWithNoBranchesAllowed));
- assertPlanCacheKeysUnequalDueToDiscriminators(
- makeKey(*orQueryWithOneBranchAllowed, indexCores),
- makeKey(*orQueryWithNoBranchesAllowed, indexCores));
- ASSERT_EQ(makeKey(*orQueryWithOneBranchAllowed, indexCores).getIndexabilityDiscriminators(),
- "<1><0>");
- ASSERT_EQ(makeKey(*orQueryWithNoBranchesAllowed, indexCores).getIndexabilityDiscriminators(),
- "<0><0>");
-}
-
-TEST(PlanCacheTest, ComputeKeyWildcardIndexDiscriminatesEqualityToEmptyObj) {
- auto entryProjUpdatePair = makeWildcardUpdate(BSON("a.$**" << 1));
-
- PlanCache planCache(5000);
- const std::vector<CoreIndexInfo> indexCores = {entryProjUpdatePair.first};
-
- // Equality to empty obj and equality to non-empty obj have different plan cache keys.
- std::unique_ptr<CanonicalQuery> equalsEmptyObj(canonicalize("{a: {}}"));
- std::unique_ptr<CanonicalQuery> equalsNonEmptyObj(canonicalize("{a: {b: 1}}"));
- assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*equalsEmptyObj, indexCores),
- makeKey(*equalsNonEmptyObj, indexCores));
- ASSERT_EQ(makeKey(*equalsNonEmptyObj, indexCores).getIndexabilityDiscriminators(), "<0>");
- ASSERT_EQ(makeKey(*equalsEmptyObj, indexCores).getIndexabilityDiscriminators(), "<1>");
-
- // $in with empty obj and $in with non-empty obj have different plan cache keys.
- std::unique_ptr<CanonicalQuery> inWithEmptyObj(canonicalize("{a: {$in: [{}]}}"));
- std::unique_ptr<CanonicalQuery> inWithNonEmptyObj(canonicalize("{a: {$in: [{b: 1}]}}"));
- assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*inWithEmptyObj, indexCores),
- makeKey(*inWithNonEmptyObj, indexCores));
- ASSERT_EQ(makeKey(*inWithNonEmptyObj, indexCores).getIndexabilityDiscriminators(), "<0>");
- ASSERT_EQ(makeKey(*inWithEmptyObj, indexCores).getIndexabilityDiscriminators(), "<1>");
-}
-
-TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyBasedOnPartialFilterExpression) {
- BSONObj filterObj = BSON("x" << BSON("$gt" << 0));
- std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
-
- auto entryProjUpdatePair = makeWildcardUpdate(BSON("$**" << 1));
- auto indexInfo = std::move(entryProjUpdatePair.first);
- indexInfo.filterExpr = filterExpr.get();
-
- PlanCache planCache(5000);
- const std::vector<CoreIndexInfo> indexCores = {indexInfo};
-
- // Test that queries on field 'x' are discriminated based on their relationship with the partial
- // filter expression.
- {
- auto compatibleWithFilter = canonicalize("{x: {$eq: 5}}");
- auto incompatibleWithFilter = canonicalize("{x: {$eq: -5}}");
- auto compatibleKey = makeKey(*compatibleWithFilter, indexCores);
- auto incompatibleKey = makeKey(*incompatibleWithFilter, indexCores);
-
- assertPlanCacheKeysUnequalDueToDiscriminators(compatibleKey, incompatibleKey);
- // The discriminator strings have the format "<xx>". That is, there are two discriminator
- // bits for the "x" predicate, the first pertaining to the partialFilterExpression and the
- // second around applicability to the wildcard index.
- ASSERT_EQ(compatibleKey.getIndexabilityDiscriminators(), "<11>");
- ASSERT_EQ(incompatibleKey.getIndexabilityDiscriminators(), "<01>");
- }
-
- // The partialFilterExpression should lead to a discriminator over field 'x', but not over 'y'.
- // (Separately, there are wildcard-related discriminator bits for both 'x' and 'y'.)
- {
- auto compatibleWithFilter = canonicalize("{x: {$eq: 5}, y: 1}");
- auto incompatibleWithFilter = canonicalize("{x: {$eq: -5}, y: 1}");
- auto compatibleKey = makeKey(*compatibleWithFilter, indexCores);
- auto incompatibleKey = makeKey(*incompatibleWithFilter, indexCores);
-
- assertPlanCacheKeysUnequalDueToDiscriminators(compatibleKey, incompatibleKey);
- // The discriminator strings have the format "<xx><y>". That is, there are two discriminator
- // bits for the "x" predicate (the first pertaining to the partialFilterExpression, the
- // second around applicability to the wildcard index) and one discriminator bit for "y".
- ASSERT_EQ(compatibleKey.getIndexabilityDiscriminators(), "<11><1>");
- ASSERT_EQ(incompatibleKey.getIndexabilityDiscriminators(), "<01><1>");
- }
-
- // $eq:null predicates cannot be assigned to a wildcard index. Make sure that this is
- // discrimated correctly. This test is designed to reproduce SERVER-48614.
- {
- auto compatibleQuery = canonicalize("{x: {$eq: 5}, y: 1}");
- auto incompatibleQuery = canonicalize("{x: {$eq: 5}, y: null}");
- auto compatibleKey = makeKey(*compatibleQuery, indexCores);
- auto incompatibleKey = makeKey(*incompatibleQuery, indexCores);
-
- assertPlanCacheKeysUnequalDueToDiscriminators(compatibleKey, incompatibleKey);
- // The discriminator strings have the format "<xx><y>". That is, there are two discriminator
- // bits for the "x" predicate (the first pertaining to the partialFilterExpression, the
- // second around applicability to the wildcard index) and one discriminator bit for "y".
- ASSERT_EQ(compatibleKey.getIndexabilityDiscriminators(), "<11><1>");
- ASSERT_EQ(incompatibleKey.getIndexabilityDiscriminators(), "<11><0>");
- }
-
- // Test that the discriminators are correct for an $eq:null predicate on 'x'. This predicate is
- // imcompatible for two reasons: null equality predicates cannot be answered by wildcard
- // indexes, and the predicate is not compatible with the partial filter expression. This should
- // result in two "0" bits inside the discriminator string.
- {
- auto key = makeKey(*canonicalize("{x: {$eq: null}}"), indexCores);
- ASSERT_EQ(key.getIndexabilityDiscriminators(), "<00>");
- }
-}
-
-TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFilterAndExpression) {
- // Partial filter is an AND of multiple conditions.
- BSONObj filterObj = BSON("x" << BSON("$gt" << 0) << "y" << BSON("$gt" << 0));
- std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
-
- auto entryProjUpdatePair = makeWildcardUpdate(BSON("$**" << 1));
- auto indexInfo = std::move(entryProjUpdatePair.first);
- indexInfo.filterExpr = filterExpr.get();
-
- PlanCache planCache(5000);
- const std::vector<CoreIndexInfo> indexCores = {indexInfo};
-
- {
- // The discriminators should have the format <xx><yy><z>. The 'z' predicate has just one
- // discriminator because it is not referenced in the partial filter expression. All
- // predicates are compatible.
- auto key = makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, z: {$eq: 3}}"), indexCores);
- ASSERT_EQ(key.getIndexabilityDiscriminators(), "<11><11><1>");
- }
-
- {
- // The discriminators should have the format <xx><yy><z>. The 'y' predicate is not
- // compatible with the partial filter expression, leading to one of the 'y' bits being set
- // to zero.
- auto key = makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: -2}, z: {$eq: 3}}"), indexCores);
- ASSERT_EQ(key.getIndexabilityDiscriminators(), "<11><01><1>");
- }
-}
-
-TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFilterOnNestedField) {
- BSONObj filterObj = BSON("x.y" << BSON("$gt" << 0));
- std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
-
- auto entryProjUpdatePair = makeWildcardUpdate(BSON("$**" << 1));
- auto indexInfo = std::move(entryProjUpdatePair.first);
- indexInfo.filterExpr = filterExpr.get();
-
- PlanCache planCache(5000);
- const std::vector<CoreIndexInfo> indexCores = {indexInfo};
-
- {
- // The discriminators have the format <x><(x.y)(x.y)<y>. All predicates are compatible
- auto key =
- makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, 'x.y': {$eq: 3}}"), indexCores);
- ASSERT_EQ(key.getIndexabilityDiscriminators(), "<1><11><1>");
- }
-
- {
- // Here, the predicate on "x.y" is not compatible with the partial filter expression.
- auto key =
- makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, 'x.y': {$eq: -3}}"), indexCores);
- ASSERT_EQ(key.getIndexabilityDiscriminators(), "<1><01><1>");
- }
-}
-
-TEST(PlanCacheTest, ComputeKeyDiscriminatesCorrectlyWithPartialFilterAndWildcardProjection) {
- BSONObj filterObj = BSON("x" << BSON("$gt" << 0));
- std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
-
- auto entryProjUpdatePair = makeWildcardUpdate(BSON("y.$**" << 1));
- auto indexInfo = std::move(entryProjUpdatePair.first);
- indexInfo.filterExpr = filterExpr.get();
-
- PlanCache planCache(5000);
- const std::vector<CoreIndexInfo> indexCores = {indexInfo};
-
- {
- // The discriminators have the format <x><y>. The discriminator for 'x' indicates whether
- // the predicate is compatible with the partial filter expression, whereas the disciminator
- // for 'y' is about compatibility with the wildcard index.
- auto key = makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, z: {$eq: 3}}"), indexCores);
- ASSERT_EQ(key.getIndexabilityDiscriminators(), "<1><1>");
- }
-
- {
- // Similar to the previous case, except with an 'x' predicate that is incompatible with the
- // partial filter expression.
- auto key = makeKey(*canonicalize("{x: {$eq: -1}, y: {$eq: 2}, z: {$eq: 3}}"), indexCores);
- ASSERT_EQ(key.getIndexabilityDiscriminators(), "<0><1>");
- }
-
- {
- // Case where the 'y' predicate is not compatible with the wildcard index.
- auto key = makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: null}, z: {$eq: 3}}"), indexCores);
- ASSERT_EQ(key.getIndexabilityDiscriminators(), "<1><0>");
- }
-}
-
-TEST(PlanCacheTest, StableKeyDoesNotChangeAcrossIndexCreation) {
- PlanCache planCache(5000);
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 0}}"));
- const PlanCacheKey preIndexKey = makeKey(*cq);
- const auto preIndexStableKey = preIndexKey.getQueryShape();
- ASSERT_EQ(preIndexKey.getIndexabilityDiscriminators(), "");
-
- const auto keyPattern = BSON("a" << 1);
- // Create a sparse index (which requires a discriminator).
- const std::vector<CoreIndexInfo> indexCores = {
- CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- true, // sparse
- IndexEntry::Identifier{""})}; // name
-
- const PlanCacheKey postIndexKey = makeKey(*cq, indexCores);
- const auto postIndexStableKey = postIndexKey.getQueryShape();
- ASSERT_NE(preIndexKey, postIndexKey);
- ASSERT_EQ(preIndexStableKey, postIndexStableKey);
- ASSERT_EQ(postIndexKey.getIndexabilityDiscriminators(), "<1>");
-}
-
-TEST(PlanCacheTest, ComputeKeyNotEqualsArray) {
- PlanCache planCache(5000);
- unique_ptr<CanonicalQuery> cqNeArray(canonicalize("{a: {$ne: [1]}}"));
- unique_ptr<CanonicalQuery> cqNeScalar(canonicalize("{a: {$ne: 123}}"));
-
- const PlanCacheKey noIndexNeArrayKey = makeKey(*cqNeArray);
- const PlanCacheKey noIndexNeScalarKey = makeKey(*cqNeScalar);
- ASSERT_EQ(noIndexNeArrayKey.getIndexabilityDiscriminators(), "<0>");
- ASSERT_EQ(noIndexNeScalarKey.getIndexabilityDiscriminators(), "<1>");
- ASSERT_EQ(noIndexNeScalarKey.getQueryShape(), noIndexNeArrayKey.getQueryShape());
-
- const auto keyPattern = BSON("a" << 1);
- // Create a normal btree index. It will have a discriminator.
- const std::vector<CoreIndexInfo> indexCores = {
- CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- false, // sparse
- IndexEntry::Identifier{""})}; // name*/
-
- const PlanCacheKey withIndexNeArrayKey = makeKey(*cqNeArray, indexCores);
- const PlanCacheKey withIndexNeScalarKey = makeKey(*cqNeScalar, indexCores);
-
- ASSERT_NE(noIndexNeArrayKey, withIndexNeArrayKey);
- ASSERT_EQ(noIndexNeArrayKey.getQueryShape(), withIndexNeArrayKey.getQueryShape());
-
- ASSERT_EQ(noIndexNeScalarKey.getQueryShape(), withIndexNeScalarKey.getQueryShape());
- // There will be one discriminator for the $not and another for the leaf node ({$eq: 123}).
- ASSERT_EQ(withIndexNeScalarKey.getIndexabilityDiscriminators(), "<1><1>");
- // There will be one discriminator for the $not and another for the leaf node ({$eq: [1]}).
- // Since the index can support equality to an array, the second discriminator will have a value
- // of '1'.
- ASSERT_EQ(withIndexNeArrayKey.getIndexabilityDiscriminators(), "<0><1>");
-}
-
-TEST(PlanCacheTest, ComputeKeyNinArray) {
- PlanCache planCache(5000);
- unique_ptr<CanonicalQuery> cqNinArray(canonicalize("{a: {$nin: [123, [1]]}}"));
- unique_ptr<CanonicalQuery> cqNinScalar(canonicalize("{a: {$nin: [123, 456]}}"));
-
- const PlanCacheKey noIndexNinArrayKey = makeKey(*cqNinArray);
- const PlanCacheKey noIndexNinScalarKey = makeKey(*cqNinScalar);
- ASSERT_EQ(noIndexNinArrayKey.getIndexabilityDiscriminators(), "<0>");
- ASSERT_EQ(noIndexNinScalarKey.getIndexabilityDiscriminators(), "<1>");
- ASSERT_EQ(noIndexNinScalarKey.getQueryShape(), noIndexNinArrayKey.getQueryShape());
-
- const auto keyPattern = BSON("a" << 1);
- // Create a normal btree index. It will have a discriminator.
- const std::vector<CoreIndexInfo> indexCores = {
- CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- false, // sparse
- IndexEntry::Identifier{""})}; // name
-
- const PlanCacheKey withIndexNinArrayKey = makeKey(*cqNinArray, indexCores);
- const PlanCacheKey withIndexNinScalarKey = makeKey(*cqNinScalar, indexCores);
-
- // The unstable part of the key for $nin: [<array>] should have changed. The stable part,
- // however, should not.
- ASSERT_EQ(noIndexNinArrayKey.getQueryShape(), withIndexNinArrayKey.getQueryShape());
- ASSERT_NE(noIndexNinArrayKey.getIndexabilityDiscriminators(),
- withIndexNinArrayKey.getIndexabilityDiscriminators());
-
- ASSERT_EQ(noIndexNinScalarKey.getQueryShape(), withIndexNinScalarKey.getQueryShape());
- ASSERT_EQ(withIndexNinArrayKey.getIndexabilityDiscriminators(), "<0><1>");
- ASSERT_EQ(withIndexNinScalarKey.getIndexabilityDiscriminators(), "<1><1>");
-}
-
-// Test for a bug which would be easy to introduce. If we only inserted discriminators for some
-// nodes, we would have a problem. For example if our "stable" key was:
-// (or[nt[eqa],nt[eqa]])
-// And there was just one discriminator:
-// <0>
-
-// Whether the discriminator referred to the first not-eq node or the second would be
-// ambiguous. This would make it possible for two queries with different shapes (and different
-// plans) to get the same plan cache key. We test that this does not happen for a simple example.
-TEST(PlanCacheTest, PlanCacheKeyCollision) {
- PlanCache planCache(5000);
- unique_ptr<CanonicalQuery> cqNeA(canonicalize("{$or: [{a: {$ne: 5}}, {a: {$ne: [12]}}]}"));
- unique_ptr<CanonicalQuery> cqNeB(canonicalize("{$or: [{a: {$ne: [12]}}, {a: {$ne: 5}}]}"));
-
- const PlanCacheKey keyA = makeKey(*cqNeA);
- const PlanCacheKey keyB = makeKey(*cqNeB);
- ASSERT_EQ(keyA.getQueryShape(), keyB.getQueryShape());
- ASSERT_NE(keyA.getIndexabilityDiscriminators(), keyB.getIndexabilityDiscriminators());
- const auto keyPattern = BSON("a" << 1);
- // Create a normal btree index. It will have a discriminator.
- std::vector<CoreIndexInfo> indexCores = {
- CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- false, // sparse
- IndexEntry::Identifier{""})}; // name
- const PlanCacheKey keyAWithIndex = makeKey(*cqNeA, indexCores);
- const PlanCacheKey keyBWithIndex = makeKey(*cqNeB, indexCores);
-
- ASSERT_EQ(keyAWithIndex.getQueryShape(), keyBWithIndex.getQueryShape());
- ASSERT_NE(keyAWithIndex.getIndexabilityDiscriminators(),
- keyBWithIndex.getIndexabilityDiscriminators());
-}
-
TEST(PlanCacheTest, PlanCacheSizeWithCRUDOperations) {
PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1, b: 1}"));
@@ -2594,28 +1886,4 @@ TEST(PlanCacheTest, PlanCacheSizeWithMultiplePlanCaches) {
ASSERT_EQ(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), originalSize);
}
-TEST(PlanCacheTest, DifferentQueryEngines) {
- const auto keyPattern = BSON("a" << 1);
- const std::vector<CoreIndexInfo> indexCores = {
- CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- false, // sparse
- IndexEntry::Identifier{""})}; // name
-
- // Helper to construct a plan cache key given the 'forceClassicEngine' flag.
- auto constructPlanCacheKey = [&](bool forceClassicEngine) {
- RAIIServerParameterControllerForTest controller{"internalQueryForceClassicEngine",
- forceClassicEngine};
- const auto queryStr = "{a: 0}";
- unique_ptr<CanonicalQuery> cq(canonicalize(queryStr));
- return makeKey(*cq, indexCores);
- };
-
- const auto classicEngineKey = constructPlanCacheKey(false);
- const auto noClassicEngineKey = constructPlanCacheKey(true);
-
- // Check that the two plan cache keys are not equal because the plans were created under
- // different engines.
- assertPlanCacheKeysUnequalDueToForceClassicEngineValue(classicEngineKey, noClassicEngineKey);
-}
} // namespace
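
The refactor visible in this file constructs PlanCacheKey from a PlanCacheKeyInfo (the stable query-shape encoding plus the discriminator string) instead of from two raw strings, and the SBE key further down tags that payload with the collection UUID and version. A minimal sketch of that layering, using hypothetical stand-in types (KeyInfoSketch, SbeKeySketch) rather than the real classes:

    #include <cstddef>
    #include <iostream>
    #include <string>

    // Rough shape of the shared payload: stable query-shape encoding plus the
    // indexability discriminator string.
    struct KeyInfoSketch {
        std::string queryShape;
        std::string discriminators;

        bool operator==(const KeyInfoSketch& o) const {
            return queryShape == o.queryShape && discriminators == o.discriminators;
        }
    };

    // An SBE-style key wraps the payload and adds the collection identity, so a
    // whole collection's entries can later be matched without decoding the shape.
    struct SbeKeySketch {
        KeyInfoSketch info;
        std::string collectionUuid;  // a real UUID type in the actual code
        std::size_t collectionVersion;

        bool operator==(const SbeKeySketch& o) const {
            return info == o.info && collectionUuid == o.collectionUuid &&
                collectionVersion == o.collectionVersion;
        }
    };

    int main() {
        KeyInfoSketch info{"eqa", "<1>"};
        SbeKeySketch beforeDrop{info, "c0ffee", 1};
        SbeKeySketch afterRecreate{info, "c0ffee", 2};
        // Same shape and discriminators, but a different collection version, so
        // the keys (and therefore the cache entries) are distinct.
        std::cout << std::boolalpha << (beforeDrop == afterRecreate) << "\n";  // false
        return 0;
    }
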
diff --git a/src/mongo/db/query/sbe_cached_solution_planner.cpp b/src/mongo/db/query/sbe_cached_solution_planner.cpp
index c6fe9f03764..c5970636a1d 100644
--- a/src/mongo/db/query/sbe_cached_solution_planner.cpp
+++ b/src/mongo/db/query/sbe_cached_solution_planner.cpp
@@ -133,7 +133,7 @@ CandidatePlans CachedSolutionPlanner::replan(bool shouldCache, std::string reaso
if (shouldCache) {
// Deactivate the current cache entry.
auto cache = CollectionQueryInfo::get(_collection).getPlanCache();
- cache->deactivate(plan_cache_key_factory::make(_cq, _collection));
+ cache->deactivate(plan_cache_key_factory::make<mongo::PlanCacheKey>(_cq, _collection));
}
auto buildExecutableTree = [&](const QuerySolution& sol) {
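
Both call sites above now name the key type explicitly (plan_cache_key_factory::make<mongo::PlanCacheKey>(...)), which implies the factory dispatches on the requested key type: a classic key for the per-collection cache, and an SBE key that additionally records the collection UUID and version. A toy, self-contained sketch of that kind of dispatch; ClassicKey and SbeKey below are stand-ins, not the real mongo types, and the actual plan_cache_key_factory may be structured differently:

// Sketch only -- not the real plan_cache_key_factory implementation.
#include <cstddef>
#include <string>
#include <type_traits>

struct ToyQuery { std::string shape; };
struct ClassicKey { std::string shape; };
struct SbeKey { std::string shape; std::string uuid; std::size_t version; };

template <typename KeyType>
KeyType make(const ToyQuery& q, const std::string& uuid, std::size_t version) {
    if constexpr (std::is_same_v<KeyType, SbeKey>) {
        // The SBE-style key also records which incarnation of the collection it
        // was built against, so a drop/recreate leaves its entries unreachable.
        return SbeKey{q.shape, uuid, version};
    } else {
        static_assert(std::is_same_v<KeyType, ClassicKey>);
        return ClassicKey{q.shape};
    }
}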
diff --git a/src/mongo/db/query/sbe_plan_cache.cpp b/src/mongo/db/query/sbe_plan_cache.cpp
index 6dc300e846a..07d904b1506 100644
--- a/src/mongo/db/query/sbe_plan_cache.cpp
+++ b/src/mongo/db/query/sbe_plan_cache.cpp
@@ -31,6 +31,7 @@
#include "mongo/db/query/sbe_plan_cache.h"
+#include "mongo/db/query/plan_cache_invalidator.h"
#include "mongo/db/query/plan_cache_size_parameter.h"
#include "mongo/db/server_options.h"
#include "mongo/logv2/log.h"
@@ -42,6 +43,18 @@ namespace {
const auto sbePlanCacheDecoration =
ServiceContext::declareDecoration<std::unique_ptr<sbe::PlanCache>>();
+class SbePlanCacheInvalidatorCallback final : public PlanCacheInvalidatorCallback {
+public:
+ SbePlanCacheInvalidatorCallback(ServiceContext* serviceCtx) : _serviceCtx{serviceCtx} {}
+
+ void invalidateCacheEntriesWith(UUID collectionUuid, size_t oldVersion) override {
+ clearPlanCache(_serviceCtx, collectionUuid, oldVersion);
+ }
+
+private:
+ ServiceContext* _serviceCtx;
+};
+
size_t convertToSizeInBytes(const plan_cache_util::PlanCacheSizeParameter& param) {
constexpr size_t kBytesInMB = 1024 * 1024;
constexpr size_t kMBytesInGB = 1024;
@@ -116,6 +129,8 @@ ServiceContext::ConstructorActionRegisterer planCacheRegisterer{
plan_cache_util::sbePlanCacheSizeUpdaterDecoration(serviceCtx) =
std::make_unique<PlanCacheSizeUpdaterImpl>();
+ PlanCacheInvalidatorCallback::set(
+ serviceCtx, std::make_unique<SbePlanCacheInvalidatorCallback>(serviceCtx));
if (feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV()) {
auto status = plan_cache_util::PlanCacheSizeParameter::parse(planCacheSize.get());
uassertStatusOK(status);
@@ -142,4 +157,21 @@ sbe::PlanCache& getPlanCache(OperationContext* opCtx) {
tassert(5933400, "Cannot get the global SBE plan cache by a nullptr", opCtx);
return getPlanCache(opCtx->getServiceContext());
}
+
+void clearPlanCache(ServiceContext* serviceCtx, UUID collectionUuid, size_t collectionVersion) {
+ if (feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV()) {
+ auto removed = sbe::getPlanCache(serviceCtx)
+ .removeIf([&collectionUuid, collectionVersion](const PlanCacheKey& key) {
+ return key.getCollectionVersion() == collectionVersion &&
+ key.getCollectionUuid() == collectionUuid;
+ });
+
+ LOGV2_DEBUG(6006600,
+ 1,
+ "Clearing SBE Plan Cache",
+ "collectionUuid"_attr = collectionUuid,
+ "collectionVersion"_attr = collectionVersion,
+ "removedEntries"_attr = removed);
+ }
+}
} // namespace mongo::sbe
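
The registration above wires an SBE-specific callback into the generic PlanCacheInvalidator machinery, so catalog code can request invalidation without depending on the SBE cache headers; clearPlanCache() then drops exactly the entries whose key still carries the old (UUID, version) pair. A small self-contained sketch of that erase-by-predicate pattern, using standard containers and toy types rather than the MongoDB ones:

// Self-contained sketch of the "remove entries matching (uuid, version)" idea
// behind clearPlanCache(); ToyKey and toyCache are illustrative, not MongoDB types.
#include <cstddef>
#include <string>
#include <unordered_map>

struct ToyKey {
    std::string collectionUuid;
    std::size_t collectionVersion;
    bool operator==(const ToyKey& o) const {
        return collectionUuid == o.collectionUuid && collectionVersion == o.collectionVersion;
    }
};
struct ToyKeyHasher {
    std::size_t operator()(const ToyKey& k) const {
        return std::hash<std::string>{}(k.collectionUuid) ^ (k.collectionVersion << 1);
    }
};

// Drop every cached plan built against the old incarnation of the collection,
// returning how many entries were removed (mirroring the LOGV2_DEBUG above).
std::size_t clearForCollection(std::unordered_map<ToyKey, std::string, ToyKeyHasher>& cache,
                               const std::string& uuid,
                               std::size_t oldVersion) {
    std::size_t removed = 0;
    for (auto it = cache.begin(); it != cache.end();) {
        if (it->first.collectionUuid == uuid && it->first.collectionVersion == oldVersion) {
            it = cache.erase(it);
            ++removed;
        } else {
            ++it;
        }
    }
    return removed;
}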
diff --git a/src/mongo/db/query/sbe_plan_cache.h b/src/mongo/db/query/sbe_plan_cache.h
index b47fe15034d..8e4fee7e13a 100644
--- a/src/mongo/db/query/sbe_plan_cache.h
+++ b/src/mongo/db/query/sbe_plan_cache.h
@@ -29,16 +29,70 @@
#pragma once
+#include <boost/functional/hash.hpp>
+
#include "mongo/db/exec/sbe/stages/stages.h"
#include "mongo/db/hasher.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/query/plan_cache.h"
+#include "mongo/db/query/plan_cache_key_info.h"
#include "mongo/db/query/sbe_stage_builder.h"
#include "mongo/db/service_context.h"
namespace mongo {
namespace sbe {
+/**
+ * Represents the "key" used in the PlanCache mapping from query shape -> query plan.
+ */
+class PlanCacheKey {
+public:
+ PlanCacheKey(PlanCacheKeyInfo&& info, UUID collectionUuid, size_t collectionVersion)
+ : _info{std::move(info)},
+ _collectionUuid{collectionUuid},
+ _collectionVersion{collectionVersion} {}
+
+ const UUID& getCollectionUuid() const {
+ return _collectionUuid;
+ }
+
+ size_t getCollectionVersion() const {
+ return _collectionVersion;
+ }
+
+ bool operator==(const PlanCacheKey& other) const {
+ return other._info == _info && other._collectionUuid == _collectionUuid &&
+ other._collectionVersion == _collectionVersion;
+ }
+
+ bool operator!=(const PlanCacheKey& other) const {
+ return !(*this == other);
+ }
+
+ uint32_t queryHash() const {
+ return _info.queryHash();
+ }
+
+ uint32_t planCacheKeyHash() const {
+ size_t hash = _info.planCacheKeyHash();
+ boost::hash_combine(hash, UUID::Hash{}(_collectionUuid));
+ boost::hash_combine(hash, _collectionVersion);
+ return hash;
+ }
+
+private:
+ const PlanCacheKeyInfo _info;
+ const UUID _collectionUuid;
+ const size_t _collectionVersion;
+};
+
+class PlanCacheKeyHasher {
+public:
+ std::size_t operator()(const PlanCacheKey& k) const {
+ return k.planCacheKeyHash();
+ }
+};
+
struct PlanCachePartitioner {
// Determines the partitioning function for use with the 'Partitioned' utility.
std::size_t operator()(const PlanCacheKey& k, const std::size_t nPartitions) const {
@@ -90,5 +144,10 @@ PlanCache& getPlanCache(ServiceContext* serviceCtx);
*/
PlanCache& getPlanCache(OperationContext* opCtx);
+/**
+ * Remove cached plan entries with the given collection UUID and collection version number.
+ */
+void clearPlanCache(ServiceContext* serviceCtx, UUID collectionUuid, size_t collectionVersion);
+
} // namespace sbe
} // namespace mongo
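
The new sbe::PlanCacheKey folds the collection UUID and version into both operator== and planCacheKeyHash() via boost::hash_combine, which is what lets clearPlanCache() locate stale entries and keeps keys from different collection incarnations apart. The same idiom in miniature, with standard-library stand-ins for PlanCacheKeyInfo and the UUID (this is not the real key layout):

// Miniature version of the hash_combine idiom; std::string stands in for
// PlanCacheKeyInfo and for the UUID.
#include <boost/functional/hash.hpp>
#include <cstddef>
#include <string>

struct MiniKey {
    std::string info;     // stand-in for PlanCacheKeyInfo
    std::string uuid;     // stand-in for the collection UUID
    std::size_t version;  // bumped when the collection is dropped/recreated

    bool operator==(const MiniKey& o) const {
        return info == o.info && uuid == o.uuid && version == o.version;
    }

    std::size_t hash() const {
        std::size_t h = std::hash<std::string>{}(info);
        boost::hash_combine(h, std::hash<std::string>{}(uuid));
        boost::hash_combine(h, version);  // every field checked in == must also feed the hash
        return h;
    }
};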
diff --git a/src/mongo/db/query/sbe_sub_planner.cpp b/src/mongo/db/query/sbe_sub_planner.cpp
index 4d51c9b8672..a0ee086864f 100644
--- a/src/mongo/db/query/sbe_sub_planner.cpp
+++ b/src/mongo/db/query/sbe_sub_planner.cpp
@@ -43,7 +43,7 @@ CandidatePlans SubPlanner::plan(
std::vector<std::pair<std::unique_ptr<PlanStage>, stage_builder::PlanStageData>> roots) {
std::function<mongo::PlanCacheKey(const CanonicalQuery& cq, const CollectionPtr& coll)>
createPlanCacheKey = [](const CanonicalQuery& cq, const CollectionPtr& coll) {
- return plan_cache_key_factory::make(cq, coll);
+ return plan_cache_key_factory::make<mongo::PlanCacheKey>(cq, coll);
};
// Plan each branch of the $or.
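
SubPlanner::plan receives the key factory as a std::function rather than calling it directly, so the per-branch caching logic never needs to know which key flavor is in use; only the call site chooses the concrete PlanCacheKey type. A hedged sketch of that inversion, where planEachOrBranch and the cache lookup comment are placeholders rather than the real SubPlanner code:

// Sketch of callback-based key creation; types and helpers are placeholders.
#include <functional>
#include <vector>

template <typename Key, typename Branch>
void planEachOrBranch(const std::vector<Branch>& branches,
                      const std::function<Key(const Branch&)>& createPlanCacheKey) {
    for (const auto& branch : branches) {
        const Key key = createPlanCacheKey(branch);
        // ... consult the plan cache with 'key'; multi-plan and cache on a miss ...
        (void)key;  // placeholder for the real per-branch planning work
    }
}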
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index 1972cd204be..71fcee970c6 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -250,7 +250,8 @@ public:
StatusWith<std::unique_ptr<PlanCacheEntry>> planCacheEntryWithStatus =
CollectionQueryInfo::get(collection.getCollection())
.getPlanCache()
- ->getEntry(plan_cache_key_factory::make(*cq, collection.getCollection()));
+ ->getEntry(
+ plan_cache_key_factory::make<PlanCacheKey>(*cq, collection.getCollection()));
ASSERT_OK(planCacheEntryWithStatus.getStatus());
auto debugInfo = planCacheEntryWithStatus.getValue()->debugInfo;
ASSERT(debugInfo);
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index edc47390edd..d3cf8bd8a63 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -193,7 +193,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanFailureMemoryLimitExceeded) {
auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand));
ASSERT_OK(statusWithCQ.getStatus());
const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- auto key = plan_cache_key_factory::make(*cq, collection.getCollection());
+ auto key = plan_cache_key_factory::make<PlanCacheKey>(*cq, collection.getCollection());
// We shouldn't have anything in the plan cache for this shape yet.
PlanCache* cache = CollectionQueryInfo::get(collection.getCollection()).getPlanCache();
@@ -244,7 +244,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanHitMaxWorks) {
auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand));
ASSERT_OK(statusWithCQ.getStatus());
const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- auto key = plan_cache_key_factory::make(*cq, collection.getCollection());
+ auto key = plan_cache_key_factory::make<PlanCacheKey>(*cq, collection.getCollection());
// We shouldn't have anything in the plan cache for this shape yet.
PlanCache* cache = CollectionQueryInfo::get(collection.getCollection()).getPlanCache();
@@ -295,7 +295,8 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanAddsActiveCacheEntries) {
// CanonicalQueries created in this test will have this shape.
const auto shapeCq =
canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 123}, b: {$gte: 123}}"));
- auto planCacheKey = plan_cache_key_factory::make(*shapeCq, collection.getCollection());
+ auto planCacheKey =
+ plan_cache_key_factory::make<PlanCacheKey>(*shapeCq, collection.getCollection());
// Query can be answered by either index on "a" or index on "b".
const auto noResultsCq =
@@ -354,7 +355,8 @@ TEST_F(QueryStageCachedPlan, DeactivatesEntriesOnReplan) {
// CanonicalQueries created in this test will have this shape.
const auto shapeCq =
canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 123}, b: {$gte: 123}}"));
- auto planCacheKey = plan_cache_key_factory::make(*shapeCq, collection.getCollection());
+ auto planCacheKey =
+ plan_cache_key_factory::make<PlanCacheKey>(*shapeCq, collection.getCollection());
// Query can be answered by either index on "a" or index on "b".
const auto noResultsCq =
@@ -376,9 +378,11 @@ TEST_F(QueryStageCachedPlan, DeactivatesEntriesOnReplan) {
forceReplanning(collection.getCollection(), noResultsCq.get());
// The works should be 1 for the entry since the query we ran should not have any results.
- ASSERT_EQ(
- cache->get(plan_cache_key_factory::make(*noResultsCq, collection.getCollection())).state,
- PlanCache::CacheEntryState::kPresentActive);
+ ASSERT_EQ(cache
+ ->get(plan_cache_key_factory::make<PlanCacheKey>(*noResultsCq,
+ collection.getCollection()))
+ .state,
+ PlanCache::CacheEntryState::kPresentActive);
auto entry = assertGet(cache->getEntry(planCacheKey));
size_t works = 1U;
ASSERT_EQ(entry->works, works);
@@ -415,12 +419,14 @@ TEST_F(QueryStageCachedPlan, EntriesAreNotDeactivatedWhenInactiveEntriesDisabled
// CanonicalQueries created in this test will have this shape.
const auto shapeCq =
canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 123}, b: {$gte: 123}}"));
- auto planCacheKey = plan_cache_key_factory::make(*shapeCq, collection.getCollection());
+ auto planCacheKey =
+ plan_cache_key_factory::make<PlanCacheKey>(*shapeCq, collection.getCollection());
// Query can be answered by either index on "a" or index on "b".
const auto noResultsCq =
canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 11}, b: {$gte: 11}}"));
- auto noResultKey = plan_cache_key_factory::make(*noResultsCq, collection.getCollection());
+ auto noResultKey =
+ plan_cache_key_factory::make<PlanCacheKey>(*noResultsCq, collection.getCollection());
// We shouldn't have anything in the plan cache for this shape yet.
PlanCache* cache = CollectionQueryInfo::get(collection.getCollection()).getPlanCache();
diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp
index ec44604b6b5..eaf04add3f1 100644
--- a/src/mongo/dbtests/query_stage_multiplan.cpp
+++ b/src/mongo/dbtests/query_stage_multiplan.cpp
@@ -299,7 +299,7 @@ TEST_F(QueryStageMultiPlanTest, MPSDoesNotCreateActiveCacheEntryImmediately) {
const CollectionPtr& coll = ctx.getCollection();
const auto cq = makeCanonicalQuery(_opCtx.get(), nss, BSON("foo" << 7));
- auto key = plan_cache_key_factory::make(*cq, coll);
+ auto key = plan_cache_key_factory::make<PlanCacheKey>(*cq, coll);
// Run an index scan and collection scan, searching for {foo: 7}.
auto mps = runMultiPlanner(_expCtx.get(), nss, coll, 7);
@@ -355,7 +355,7 @@ TEST_F(QueryStageMultiPlanTest, MPSDoesCreatesActiveEntryWhenInactiveEntriesDisa
const CollectionPtr& coll = ctx.getCollection();
const auto cq = makeCanonicalQuery(_opCtx.get(), nss, BSON("foo" << 7));
- auto key = plan_cache_key_factory::make(*cq, coll);
+ auto key = plan_cache_key_factory::make<PlanCacheKey>(*cq, coll);
// Run an index scan and collection scan, searching for {foo: 7}.
auto mps = runMultiPlanner(_expCtx.get(), nss, coll, 7);
@@ -390,7 +390,7 @@ TEST_F(QueryStageMultiPlanTest, MPSBackupPlan) {
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(nullptr != cq.get());
- auto key = plan_cache_key_factory::make(*cq, collection.getCollection());
+ auto key = plan_cache_key_factory::make<PlanCacheKey>(*cq, collection.getCollection());
// Force index intersection.
bool forceIxisectOldValue = internalQueryForceIntersectionPlans.load();