path: root/src/mongo/db
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/catalog/SConscript | 7
-rw-r--r--  src/mongo/db/commands/index_filter_commands.cpp | 30
-rw-r--r--  src/mongo/db/commands/index_filter_commands.h | 4
-rw-r--r--  src/mongo/db/commands/index_filter_commands_test.cpp | 117
-rw-r--r--  src/mongo/db/commands/plan_cache_clear_command.cpp | 33
-rw-r--r--  src/mongo/db/commands/plan_cache_commands_test.cpp | 15
-rw-r--r--  src/mongo/db/exec/cached_plan.cpp | 6
-rw-r--r--  src/mongo/db/exec/multi_plan.cpp | 7
-rw-r--r--  src/mongo/db/exec/plan_cache_util.h | 10
-rw-r--r--  src/mongo/db/exec/subplan.cpp | 9
-rw-r--r--  src/mongo/db/pipeline/plan_explainer_pipeline.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/plan_explainer_pipeline.h | 2
-rw-r--r--  src/mongo/db/query/SConscript | 3
-rw-r--r--  src/mongo/db/query/canonical_query_encoder_test.cpp | 9
-rw-r--r--  src/mongo/db/query/classic_plan_cache.cpp | 35
-rw-r--r--  src/mongo/db/query/classic_plan_cache.h | 8
-rw-r--r--  src/mongo/db/query/collection_query_info.cpp | 26
-rw-r--r--  src/mongo/db/query/collection_query_info.h | 11
-rw-r--r--  src/mongo/db/query/explain.cpp | 10
-rw-r--r--  src/mongo/db/query/get_executor.cpp | 13
-rw-r--r--  src/mongo/db/query/lru_key_value.h | 7
-rw-r--r--  src/mongo/db/query/plan_cache.h | 419
-rw-r--r--  src/mongo/db/query/plan_cache_callbacks.cpp (renamed from src/mongo/db/query/plan_cache.cpp) | 82
-rw-r--r--  src/mongo/db/query/plan_cache_callbacks.h | 184
-rw-r--r--  src/mongo/db/query/plan_cache_debug_info.cpp | 54
-rw-r--r--  src/mongo/db/query/plan_cache_debug_info.h | 117
-rw-r--r--  src/mongo/db/query/plan_cache_key_factory.cpp | 96
-rw-r--r--  src/mongo/db/query/plan_cache_key_factory.h | 78
-rw-r--r--  src/mongo/db/query/plan_cache_test.cpp | 680
-rw-r--r--  src/mongo/db/query/plan_executor_factory.cpp | 1
-rw-r--r--  src/mongo/db/query/plan_explainer.h | 3
-rw-r--r--  src/mongo/db/query/plan_explainer_impl.cpp | 2
-rw-r--r--  src/mongo/db/query/plan_explainer_impl.h | 2
-rw-r--r--  src/mongo/db/query/plan_explainer_sbe.cpp | 2
-rw-r--r--  src/mongo/db/query/plan_explainer_sbe.h | 2
-rw-r--r--  src/mongo/db/query/query_planner.cpp | 122
-rw-r--r--  src/mongo/db/query/query_planner.h | 105
-rw-r--r--  src/mongo/db/query/query_planner_test_fixture.h | 1
-rw-r--r--  src/mongo/db/query/sbe_cached_solution_planner.cpp | 3
-rw-r--r--  src/mongo/db/query/sbe_cached_solution_planner.h | 1
-rw-r--r--  src/mongo/db/query/sbe_sub_planner.cpp | 9
-rw-r--r--  src/mongo/db/query/sbe_sub_planner.h | 1
42 files changed, 1396 insertions, 932 deletions
diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript
index 5a2c9253d94..554a7bf69ae 100644
--- a/src/mongo/db/catalog/SConscript
+++ b/src/mongo/db/catalog/SConscript
@@ -474,8 +474,13 @@ env.Library(
source=[
"$BUILD_DIR/mongo/db/query/collection_query_info.cpp",
"$BUILD_DIR/mongo/db/query/collection_index_usage_tracker_decoration.cpp",
+ "$BUILD_DIR/mongo/db/query/plan_cache_key_factory.cpp",
"$BUILD_DIR/mongo/db/query/query_settings_decoration.cpp",
],
+ LIBDEPS=[
+ '$BUILD_DIR/mongo/db/query/query_planner',
+ '$BUILD_DIR/mongo/db/update_index_data',
+ ],
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/db/catalog/index_catalog',
@@ -483,9 +488,7 @@ env.Library(
'$BUILD_DIR/mongo/db/concurrency/lock_manager',
'$BUILD_DIR/mongo/db/curop',
'$BUILD_DIR/mongo/db/fts/base_fts',
- '$BUILD_DIR/mongo/db/query/query_planner',
'$BUILD_DIR/mongo/db/service_context',
- '$BUILD_DIR/mongo/db/update_index_data',
],
)
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index bc2498acd60..2d93bd8a062 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -50,6 +50,7 @@
#include "mongo/db/matcher/extensions_callback_real.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/collection_query_info.h"
+#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/query_settings_decoration.h"
#include "mongo/logv2/log.h"
#include "mongo/stdx/unordered_set.h"
@@ -66,7 +67,6 @@ using namespace mongo;
*/
static Status getQuerySettingsAndPlanCache(OperationContext* opCtx,
const CollectionPtr& collection,
- const string& ns,
QuerySettings** querySettingsOut,
PlanCache** planCacheOut) {
*querySettingsOut = nullptr;
@@ -163,7 +163,7 @@ Status ListFilters::runIndexFilterCommand(OperationContext* opCtx,
QuerySettings* querySettings;
PlanCache* unused;
Status status =
- getQuerySettingsAndPlanCache(opCtx, ctx.getCollection(), ns, &querySettings, &unused);
+ getQuerySettingsAndPlanCache(opCtx, ctx.getCollection(), &querySettings, &unused);
if (!status.isOK()) {
// No collection - return empty array of filters.
BSONArrayBuilder hintsBuilder(bob->subarrayStart("filters"));
@@ -231,19 +231,19 @@ Status ClearFilters::runIndexFilterCommand(OperationContext* opCtx,
QuerySettings* querySettings;
PlanCache* planCache;
Status status =
- getQuerySettingsAndPlanCache(opCtx, ctx.getCollection(), ns, &querySettings, &planCache);
+ getQuerySettingsAndPlanCache(opCtx, ctx.getCollection(), &querySettings, &planCache);
if (!status.isOK()) {
// No collection - do nothing.
return Status::OK();
}
- return clear(opCtx, querySettings, planCache, ns, cmdObj);
+ return clear(opCtx, ctx.getCollection(), querySettings, planCache, cmdObj);
}
// static
Status ClearFilters::clear(OperationContext* opCtx,
+ const CollectionPtr& collection,
QuerySettings* querySettings,
PlanCache* planCache,
- const std::string& ns,
const BSONObj& cmdObj) {
invariant(querySettings);
@@ -252,7 +252,7 @@ Status ClearFilters::clear(OperationContext* opCtx,
// - clear hints for single query shape when a query shape is described in the
// command arguments.
if (cmdObj.hasField("query")) {
- auto statusWithCQ = plan_cache_commands::canonicalize(opCtx, ns, cmdObj);
+ auto statusWithCQ = plan_cache_commands::canonicalize(opCtx, collection->ns().ns(), cmdObj);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -261,7 +261,7 @@ Status ClearFilters::clear(OperationContext* opCtx,
querySettings->removeAllowedIndices(cq->encodeKey());
// Remove entry from plan cache
- planCache->remove(*cq).transitional_ignore();
+ planCache->remove(plan_cache_key_factory::make<PlanCacheKey>(*cq, collection));
LOGV2(20479,
"Removed index filter on {query}",
@@ -286,7 +286,7 @@ Status ClearFilters::clear(OperationContext* opCtx,
// OK to proceed with clearing entire cache.
querySettings->clearAllowedIndices();
- const NamespaceString nss(ns);
+ const NamespaceString nss(collection->ns());
const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
// Remove corresponding entries from plan cache.
@@ -320,13 +320,13 @@ Status ClearFilters::clear(OperationContext* opCtx,
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
// Remove plan cache entry.
- planCache->remove(*cq).transitional_ignore();
+ planCache->remove(plan_cache_key_factory::make<PlanCacheKey>(*cq, collection));
}
LOGV2(20480,
"Removed all index filters for collection: {namespace}",
"Removed all index filters for collection",
- "namespace"_attr = ns);
+ "namespace"_attr = collection->ns().ns());
return Status::OK();
}
@@ -346,18 +346,18 @@ Status SetFilter::runIndexFilterCommand(OperationContext* opCtx,
QuerySettings* querySettings;
PlanCache* planCache;
Status status =
- getQuerySettingsAndPlanCache(opCtx, ctx.getCollection(), ns, &querySettings, &planCache);
+ getQuerySettingsAndPlanCache(opCtx, ctx.getCollection(), &querySettings, &planCache);
if (!status.isOK()) {
return status;
}
- return set(opCtx, querySettings, planCache, ns, cmdObj);
+ return set(opCtx, ctx.getCollection(), querySettings, planCache, cmdObj);
}
// static
Status SetFilter::set(OperationContext* opCtx,
+ const CollectionPtr& collection,
QuerySettings* querySettings,
PlanCache* planCache,
- const string& ns,
const BSONObj& cmdObj) {
// indexes - required
BSONElement indexesElt = cmdObj.getField("indexes");
@@ -391,7 +391,7 @@ Status SetFilter::set(OperationContext* opCtx,
}
}
- auto statusWithCQ = plan_cache_commands::canonicalize(opCtx, ns, cmdObj);
+ auto statusWithCQ = plan_cache_commands::canonicalize(opCtx, collection->ns().ns(), cmdObj);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -401,7 +401,7 @@ Status SetFilter::set(OperationContext* opCtx,
querySettings->setAllowedIndices(*cq, indexes, indexNames);
// Remove entry from plan cache.
- planCache->remove(*cq).transitional_ignore();
+ planCache->remove(plan_cache_key_factory::make<PlanCacheKey>(*cq, collection));
LOGV2(20481,
"Index filter set on {query} {indexes}",
diff --git a/src/mongo/db/commands/index_filter_commands.h b/src/mongo/db/commands/index_filter_commands.h
index e572aa4ca49..18ce443f2a8 100644
--- a/src/mongo/db/commands/index_filter_commands.h
+++ b/src/mongo/db/commands/index_filter_commands.h
@@ -142,9 +142,9 @@ public:
* Removes corresponding entries from plan cache.
*/
static Status clear(OperationContext* opCtx,
+ const CollectionPtr& collection,
QuerySettings* querySettings,
PlanCache* planCache,
- const std::string& ns,
const BSONObj& cmdObj);
};
@@ -174,9 +174,9 @@ public:
* Removes entry for query shape from plan cache.
*/
static Status set(OperationContext* opCtx,
+ const CollectionPtr& collection,
QuerySettings* querySettings,
PlanCache* planCache,
- const std::string& ns,
const BSONObj& cmdObj);
};
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index 2c3af6d7aae..bb3e79b2ebb 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -35,9 +35,11 @@
#include <memory>
+#include "mongo/db/catalog/collection_mock.h"
#include "mongo/db/json.h"
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
+#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/plan_ranker.h"
#include "mongo/db/query/query_solution.h"
#include "mongo/db/query/query_test_service_context.h"
@@ -53,6 +55,11 @@ using std::vector;
static const NamespaceString nss("test.collection");
+PlanCacheKey makeKey(const CanonicalQuery& cq) {
+ CollectionMock coll(nss);
+ return plan_cache_key_factory::make<PlanCacheKey>(cq, &coll);
+}
+
/**
* Utility function to get list of index filters from the query settings.
*/
@@ -144,11 +151,14 @@ void addQueryShapeToPlanCache(OperationContext* opCtx,
qs.cacheData->tree.reset(new PlanCacheIndexTree());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- ASSERT_OK(planCache->set(*cq,
+ PlanCacheLoggingCallbacks<PlanCacheKey, SolutionCacheData> callbacks{*cq};
+ ASSERT_OK(planCache->set(makeKey(*cq),
qs.cacheData->clone(),
solns,
createDecision(1U),
- opCtx->getServiceContext()->getPreciseClockSource()->now()));
+ opCtx->getServiceContext()->getPreciseClockSource()->now(),
+ boost::none, /* worksGrowthCoefficient */
+ &callbacks));
}
/**
@@ -191,7 +201,7 @@ bool planCacheContains(OperationContext* opCtx,
ASSERT_OK(statusWithCurrentQuery.getStatus());
unique_ptr<CanonicalQuery> currentQuery = std::move(statusWithCurrentQuery.getValue());
- if (planCache.computeKey(*currentQuery) == planCache.computeKey(*inputQuery)) {
+ if (makeKey(*currentQuery) == makeKey(*inputQuery)) {
found = true;
}
}
@@ -216,44 +226,43 @@ TEST(IndexFilterCommandsTest, ClearFiltersInvalidParameter) {
QuerySettings empty;
PlanCache planCache(5000);
OperationContextNoop opCtx;
+ CollectionMock coll(nss);
// If present, query has to be an object.
ASSERT_NOT_OK(
- ClearFilters::clear(&opCtx, &empty, &planCache, nss.ns(), fromjson("{query: 1234}")));
+ ClearFilters::clear(&opCtx, &coll, &empty, &planCache, fromjson("{query: 1234}")));
// If present, sort must be an object.
ASSERT_NOT_OK(ClearFilters::clear(
- &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, sort: 1234}")));
+ &opCtx, &coll, &empty, &planCache, fromjson("{query: {a: 1}, sort: 1234}")));
// If present, projection must be an object.
ASSERT_NOT_OK(ClearFilters::clear(
- &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, projection: 1234}")));
+ &opCtx, &coll, &empty, &planCache, fromjson("{query: {a: 1}, projection: 1234}")));
// Query must pass canonicalization.
ASSERT_NOT_OK(ClearFilters::clear(
- &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: {$no_such_op: 1}}}")));
+ &opCtx, &coll, &empty, &planCache, fromjson("{query: {a: {$no_such_op: 1}}}")));
// Sort present without query is an error.
ASSERT_NOT_OK(
- ClearFilters::clear(&opCtx, &empty, &planCache, nss.ns(), fromjson("{sort: {a: 1}}")));
+ ClearFilters::clear(&opCtx, &coll, &empty, &planCache, fromjson("{sort: {a: 1}}")));
// Projection present without query is an error.
ASSERT_NOT_OK(ClearFilters::clear(
- &opCtx, &empty, &planCache, nss.ns(), fromjson("{projection: {_id: 0, a: 1}}")));
+ &opCtx, &coll, &empty, &planCache, fromjson("{projection: {_id: 0, a: 1}}")));
}
TEST(IndexFilterCommandsTest, ClearNonexistentHint) {
QuerySettings querySettings;
PlanCache planCache(5000);
OperationContextNoop opCtx;
+ CollectionMock coll(nss);
- ASSERT_OK(SetFilter::set(&opCtx,
- &querySettings,
- &planCache,
- nss.ns(),
- fromjson("{query: {a: 1}, indexes: [{a: 1}]}")));
+ ASSERT_OK(SetFilter::set(
+ &opCtx, &coll, &querySettings, &planCache, fromjson("{query: {a: 1}, indexes: [{a: 1}]}")));
vector<BSONObj> filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 1U);
// Clear nonexistent hint.
// Command should succeed and cache should remain unchanged.
ASSERT_OK(ClearFilters::clear(
- &opCtx, &querySettings, &planCache, nss.ns(), fromjson("{query: {b: 1}}")));
+ &opCtx, &coll, &querySettings, &planCache, fromjson("{query: {b: 1}}")));
filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 1U);
}
@@ -266,59 +275,56 @@ TEST(IndexFilterCommandsTest, SetFilterInvalidParameter) {
QuerySettings empty;
PlanCache planCache(5000);
OperationContextNoop opCtx;
+ CollectionMock coll(nss);
- ASSERT_NOT_OK(SetFilter::set(&opCtx, &empty, &planCache, nss.ns(), fromjson("{}")));
+ ASSERT_NOT_OK(SetFilter::set(&opCtx, &coll, &empty, &planCache, fromjson("{}")));
// Missing required query field.
ASSERT_NOT_OK(
- SetFilter::set(&opCtx, &empty, &planCache, nss.ns(), fromjson("{indexes: [{a: 1}]}")));
+ SetFilter::set(&opCtx, &coll, &empty, &planCache, fromjson("{indexes: [{a: 1}]}")));
// Missing required indexes field.
- ASSERT_NOT_OK(
- SetFilter::set(&opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}}")));
+ ASSERT_NOT_OK(SetFilter::set(&opCtx, &coll, &empty, &planCache, fromjson("{query: {a: 1}}")));
// Query has to be an object.
- ASSERT_NOT_OK(SetFilter::set(&opCtx,
- &empty,
- &planCache,
- nss.ns(),
- fromjson("{query: 1234, indexes: [{a: 1}, {b: 1}]}")));
+ ASSERT_NOT_OK(SetFilter::set(
+ &opCtx, &coll, &empty, &planCache, fromjson("{query: 1234, indexes: [{a: 1}, {b: 1}]}")));
// Indexes field has to be an array.
ASSERT_NOT_OK(SetFilter::set(
- &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: 1234}")));
+ &opCtx, &coll, &empty, &planCache, fromjson("{query: {a: 1}, indexes: 1234}")));
// Array indexes field cannot be empty.
ASSERT_NOT_OK(SetFilter::set(
- &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: []}")));
+ &opCtx, &coll, &empty, &planCache, fromjson("{query: {a: 1}, indexes: []}")));
// Elements in indexes have to be objects.
ASSERT_NOT_OK(SetFilter::set(
- &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: [{a: 1}, 99]}")));
+ &opCtx, &coll, &empty, &planCache, fromjson("{query: {a: 1}, indexes: [{a: 1}, 99]}")));
// Objects in indexes cannot be empty.
ASSERT_NOT_OK(SetFilter::set(
- &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: [{a: 1}, {}]}")));
+ &opCtx, &coll, &empty, &planCache, fromjson("{query: {a: 1}, indexes: [{a: 1}, {}]}")));
// If present, sort must be an object.
ASSERT_NOT_OK(
SetFilter::set(&opCtx,
+ &coll,
&empty,
&planCache,
- nss.ns(),
fromjson("{query: {a: 1}, sort: 1234, indexes: [{a: 1}, {b: 1}]}")));
// If present, projection must be an object.
ASSERT_NOT_OK(
SetFilter::set(&opCtx,
+ &coll,
&empty,
&planCache,
- nss.ns(),
fromjson("{query: {a: 1}, projection: 1234, indexes: [{a: 1}, {b: 1}]}")));
// If present, collation must be an object.
ASSERT_NOT_OK(
SetFilter::set(&opCtx,
+ &coll,
&empty,
&planCache,
- nss.ns(),
fromjson("{query: {a: 1}, collation: 1234, indexes: [{a: 1}, {b: 1}]}")));
// Query must pass canonicalization.
ASSERT_NOT_OK(
SetFilter::set(&opCtx,
+ &coll,
&empty,
&planCache,
- nss.ns(),
fromjson("{query: {a: {$no_such_op: 1}}, indexes: [{a: 1}, {b: 1}]}")));
}
@@ -327,6 +333,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
PlanCache planCache(5000);
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
+ CollectionMock coll(nss);
// Inject query shape into plan cache.
addQueryShapeToPlanCache(opCtx.get(),
@@ -343,9 +350,9 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
"{locale: 'mock_reverse_string'}"));
ASSERT_OK(SetFilter::set(opCtx.get(),
+ &coll,
&querySettings,
&planCache,
- nss.ns(),
fromjson("{query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 0, "
"a: 1}, collation: {locale: 'mock_reverse_string'}, "
"indexes: [{a: 1}]}")));
@@ -370,9 +377,9 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
// Replacing the hint for the same query shape ({a: 1, b: 1} and {b: 2, a: 3}
// share same shape) should not change the query settings size.
ASSERT_OK(SetFilter::set(opCtx.get(),
+ &coll,
&querySettings,
&planCache,
- nss.ns(),
fromjson("{query: {b: 2, a: 3}, sort: {a: -1}, projection: {_id: 0, "
"a: 1}, collation: {locale: 'mock_reverse_string'}, "
"indexes: [{a: 1, b: 1}]}")));
@@ -386,18 +393,18 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
// Add hint for different query shape.
ASSERT_OK(SetFilter::set(opCtx.get(),
+ &coll,
&querySettings,
&planCache,
- nss.ns(),
fromjson("{query: {b: 1}, indexes: [{b: 1}]}")));
filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 2U);
// Add hint for 3rd query shape. This is to prepare for ClearHint tests.
ASSERT_OK(SetFilter::set(opCtx.get(),
+ &coll,
&querySettings,
&planCache,
- nss.ns(),
fromjson("{query: {a: 1}, indexes: [{a: 1}]}")));
filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 3U);
@@ -408,7 +415,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
// Clear single hint.
ASSERT_OK(ClearFilters::clear(
- opCtx.get(), &querySettings, &planCache, nss.ns(), fromjson("{query: {a: 1}}")));
+ opCtx.get(), &coll, &querySettings, &planCache, fromjson("{query: {a: 1}}")));
filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 2U);
@@ -417,8 +424,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
ASSERT_TRUE(planCacheContains(opCtx.get(), planCache, "{b: 1}", "{}", "{}", "{}"));
// Clear all filters
- ASSERT_OK(
- ClearFilters::clear(opCtx.get(), &querySettings, &planCache, nss.ns(), fromjson("{}")));
+ ASSERT_OK(ClearFilters::clear(opCtx.get(), &coll, &querySettings, &planCache, fromjson("{}")));
filters = getFilters(querySettings);
ASSERT_TRUE(filters.empty());
@@ -430,15 +436,8 @@ TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
QuerySettings querySettings;
-
- // Create a plan cache. Add an index so that indexability is included in the plan cache keys.
+ CollectionMock coll(nss);
PlanCache planCache(5000);
- const auto keyPattern = fromjson("{a: 1}");
- planCache.notifyOfIndexUpdates(
- {CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- true, // sparse
- IndexEntry::Identifier{"index_name"})}); // name
// Inject query shapes with and without collation into plan cache.
addQueryShapeToPlanCache(
@@ -449,9 +448,9 @@ TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
ASSERT_TRUE(planCacheContains(opCtx.get(), planCache, "{a: 'foo'}", "{}", "{}", "{}"));
ASSERT_OK(SetFilter::set(opCtx.get(),
+ &coll,
&querySettings,
&planCache,
- nss.ns(),
fromjson("{query: {a: 'foo'}, sort: {}, projection: {}, collation: "
"{locale: 'mock_reverse_string'}, "
"indexes: [{a: 1}]}")));
@@ -471,9 +470,9 @@ TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
// Add filter for query shape without collation.
ASSERT_OK(SetFilter::set(opCtx.get(),
+ &coll,
&querySettings,
&planCache,
- nss.ns(),
fromjson("{query: {a: 'foo'}, indexes: [{b: 1}]}")));
filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 2U);
@@ -486,9 +485,9 @@ TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
// Clear filter for query with collation.
ASSERT_OK(ClearFilters::clear(
opCtx.get(),
+ &coll,
&querySettings,
&planCache,
- nss.ns(),
fromjson("{query: {a: 'foo'}, collation: {locale: 'mock_reverse_string'}}")));
filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 1U);
@@ -505,34 +504,22 @@ TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
TEST(IndexFilterCommandsTest, SetFilterAcceptsIndexNames) {
- CollatorInterfaceMock reverseCollator(CollatorInterfaceMock::MockType::kReverseString);
PlanCache planCache(5000);
- const auto keyPattern = fromjson("{a: 1}");
- CoreIndexInfo collatedIndex(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- false, // sparse
- IndexEntry::Identifier{"a_1:rev"}); // name
- collatedIndex.collator = &reverseCollator;
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
QuerySettings querySettings;
-
- planCache.notifyOfIndexUpdates(
- {CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- false, // sparse
- IndexEntry::Identifier{"a_1"}), // name
- collatedIndex});
+ CollectionMock coll(nss);
addQueryShapeToPlanCache(opCtx.get(), &planCache, "{a: 2}", "{}", "{}", "{}");
ASSERT_TRUE(planCacheContains(opCtx.get(), planCache, "{a: 2}", "{}", "{}", "{}"));
ASSERT_OK(SetFilter::set(opCtx.get(),
+ &coll,
&querySettings,
&planCache,
- nss.ns(),
fromjson("{query: {a: 2}, sort: {}, projection: {},"
"indexes: [{a: 1}, 'a_1:rev']}")));
+ ASSERT_FALSE(planCacheContains(opCtx.get(), planCache, "{a: 2}", "{}", "{}", "{}"));
auto filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 1U);
auto indexes = filters[0]["indexes"].Array();
diff --git a/src/mongo/db/commands/plan_cache_clear_command.cpp b/src/mongo/db/commands/plan_cache_clear_command.cpp
index b49a54fe416..53879f32ff8 100644
--- a/src/mongo/db/commands/plan_cache_clear_command.cpp
+++ b/src/mongo/db/commands/plan_cache_clear_command.cpp
@@ -41,6 +41,8 @@
#include "mongo/db/matcher/extensions_callback_real.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/collection_query_info.h"
+#include "mongo/db/query/plan_cache_callbacks.h"
+#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/plan_ranker.h"
#include "mongo/logv2/log.h"
@@ -59,6 +61,7 @@ PlanCache* getPlanCache(OperationContext* opCtx, const CollectionPtr& collection
* shape only.
*/
Status clear(OperationContext* opCtx,
+ const CollectionPtr& collection,
PlanCache* planCache,
const std::string& ns,
const BSONObj& cmdObj) {
@@ -76,33 +79,7 @@ Status clear(OperationContext* opCtx,
auto cq = std::move(statusWithCQ.getValue());
- Status result = planCache->remove(*cq);
- if (!result.isOK()) {
- invariant(result.code() == ErrorCodes::NoSuchKey);
- LOGV2_DEBUG(23906,
- 1,
- "{namespace}: Query shape doesn't exist in PlanCache - {query}"
- "(sort: {sort}; projection: {projection}; collation: {collation})",
- "Query shape doesn't exist in PlanCache",
- "namespace"_attr = ns,
- "query"_attr = redact(cq->getQueryObj()),
- "sort"_attr = cq->getFindCommandRequest().getSort(),
- "projection"_attr = cq->getFindCommandRequest().getProjection(),
- "collation"_attr = cq->getFindCommandRequest().getCollation());
- return Status::OK();
- }
-
- LOGV2_DEBUG(23907,
- 1,
- "{namespace}: Removed plan cache entry - {query}"
- "(sort: {sort}; projection: {projection}; collation: {collation})",
- "Removed plan cache entry",
- "namespace"_attr = ns,
- "query"_attr = redact(cq->getQueryObj()),
- "sort"_attr = cq->getFindCommandRequest().getSort(),
- "projection"_attr = cq->getFindCommandRequest().getProjection(),
- "collation"_attr = cq->getFindCommandRequest().getCollation());
-
+ planCache->remove(plan_cache_key_factory::make<PlanCacheKey>(*cq, collection));
return Status::OK();
}
@@ -189,7 +166,7 @@ bool PlanCacheClearCommand::run(OperationContext* opCtx,
}
auto planCache = getPlanCache(opCtx, ctx.getCollection());
- uassertStatusOK(clear(opCtx, planCache, nss.ns(), cmdObj));
+ uassertStatusOK(clear(opCtx, ctx.getCollection(), planCache, nss.ns(), cmdObj));
return true;
}
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 707a7329969..4ef229c36f4 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -27,9 +27,11 @@
* it in the license file.
*/
+#include "mongo/db/catalog/collection_mock.h"
#include "mongo/db/commands/plan_cache_commands.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/classic_plan_cache.h"
+#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/query_test_service_context.h"
#include "mongo/unittest/unittest.h"
@@ -38,6 +40,11 @@ namespace {
static const NamespaceString nss{"test.collection"_sd};
+PlanCacheKey makeKey(const CanonicalQuery& cq) {
+ CollectionMock coll(nss);
+ return plan_cache_key_factory::make<PlanCacheKey>(cq, &coll);
+}
+
TEST(PlanCacheCommandsTest, CannotCanonicalizeWithMissingQueryField) {
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
@@ -99,7 +106,7 @@ TEST(PlanCacheCommandsTest, CanCanonicalizeWithValidQuery) {
plan_cache_commands::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {b: 3, a: 4}}"));
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> equivQuery = std::move(statusWithCQ.getValue());
- ASSERT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*equivQuery));
+ ASSERT_EQUALS(makeKey(*query), makeKey(*equivQuery));
}
TEST(PlanCacheCommandsTest, SortQueryResultsInDifferentPlanCacheKeyFromUnsorted) {
@@ -117,7 +124,7 @@ TEST(PlanCacheCommandsTest, SortQueryResultsInDifferentPlanCacheKeyFromUnsorted)
opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> sortQuery = std::move(statusWithCQ.getValue());
- ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*sortQuery));
+ ASSERT_NOT_EQUALS(makeKey(*query), makeKey(*sortQuery));
}
// Regression test for SERVER-17158.
@@ -136,7 +143,7 @@ TEST(PlanCacheCommandsTest, SortsAreProperlyDelimitedInPlanCacheKey) {
opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {aab: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> sortQuery2 = std::move(statusWithCQ.getValue());
- ASSERT_NOT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery2));
+ ASSERT_NOT_EQUALS(makeKey(*sortQuery1), makeKey(*sortQuery2));
}
TEST(PlanCacheCommandsTest, ProjectQueryResultsInDifferentPlanCacheKeyFromUnprojected) {
@@ -153,7 +160,7 @@ TEST(PlanCacheCommandsTest, ProjectQueryResultsInDifferentPlanCacheKeyFromUnproj
opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, projection: {_id: 0, a: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> projectionQuery = std::move(statusWithCQ.getValue());
- ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*projectionQuery));
+ ASSERT_NOT_EQUALS(makeKey(*query), makeKey(*projectionQuery));
}
} // namespace
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 6cf9497d4d2..211ddbdd4da 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -45,6 +45,7 @@
#include "mongo/db/query/classic_plan_cache.h"
#include "mongo/db/query/collection_query_info.h"
#include "mongo/db/query/explain.h"
+#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/plan_yield_policy.h"
#include "mongo/db/query/query_knobs_gen.h"
#include "mongo/db/query/query_planner.h"
@@ -206,8 +207,9 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache, s
if (shouldCache) {
// Deactivate the current cache entry.
- PlanCache* cache = CollectionQueryInfo::get(collection()).getPlanCache();
- cache->deactivate(*_canonicalQuery);
+ const auto& coll = collection();
+ auto cache = CollectionQueryInfo::get(coll).getPlanCache();
+ cache->deactivate(plan_cache_key_factory::make<PlanCacheKey>(*_canonicalQuery, coll));
}
// Use the query planning module to plan the whole query.
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 3d1b2bea611..d1f7dfc9e64 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -48,6 +48,7 @@
#include "mongo/db/query/classic_plan_cache.h"
#include "mongo/db/query/collection_query_info.h"
#include "mongo/db/query/explain.h"
+#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/plan_ranker.h"
#include "mongo/db/query/plan_ranker_util.h"
#include "mongo/logv2/log.h"
@@ -130,9 +131,9 @@ PlanStage::StageState MultiPlanStage::doWork(WorkingSetID* out) {
LOGV2_DEBUG(20588, 5, "Best plan errored, switching to backup plan");
- // Attempt to remove the plan from the cache. This will fail if the plan has already been
- // removed, and we intentionally ignore such errors.
- CollectionQueryInfo::get(collection()).getPlanCache()->remove(*_query).ignore();
+ CollectionQueryInfo::get(collection())
+ .getPlanCache()
+ ->remove(plan_cache_key_factory::make<PlanCacheKey>(*_query, collection()));
_bestPlanIdx = _backupPlanIdx;
_backupPlanIdx = kNoSuchPlan;
diff --git a/src/mongo/db/exec/plan_cache_util.h b/src/mongo/db/exec/plan_cache_util.h
index 6b13804363a..5ef7cd8fd45 100644
--- a/src/mongo/db/exec/plan_cache_util.h
+++ b/src/mongo/db/exec/plan_cache_util.h
@@ -32,6 +32,7 @@
#include "mongo/db/exec/plan_stats.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/collection_query_info.h"
+#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/plan_explainer_factory.h"
#include "mongo/db/query/sbe_plan_ranker.h"
@@ -156,7 +157,7 @@ void updatePlanCache(
// Store the choice we just made in the cache, if the query is of a type that is safe to
// cache.
- if (PlanCache::shouldCacheQuery(query) && canCache) {
+ if (shouldCacheQuery(query) && canCache) {
// Create list of candidate solutions for the cache with the best solution at the front.
std::vector<QuerySolution*> solutions;
@@ -191,13 +192,16 @@ void updatePlanCache(
invariant(solutions[0]->cacheData);
auto plannerDataForCache = solutions[0]->cacheData->clone();
+ PlanCacheLoggingCallbacks<PlanCacheKey, SolutionCacheData> callbacks{query};
uassertStatusOK(CollectionQueryInfo::get(collection)
.getPlanCache()
- ->set(query,
+ ->set(plan_cache_key_factory::make<PlanCacheKey>(query, collection),
std::move(plannerDataForCache),
solutions,
std::move(ranking),
- opCtx->getServiceContext()->getPreciseClockSource()->now()));
+ opCtx->getServiceContext()->getPreciseClockSource()->now(),
+ boost::none, /* worksGrowthCoefficient */
+ &callbacks));
}
}
}
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 82e25f2907f..30f61103b4b 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/matcher/extensions_callback_real.h"
#include "mongo/db/query/collection_query_info.h"
#include "mongo/db/query/get_executor.h"
+#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/query/planner_access.h"
#include "mongo/db/query/planner_analysis.h"
@@ -166,11 +167,17 @@ Status SubplanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
// Dismiss the requirement that no indices can be dropped when this method returns.
ON_BLOCK_EXIT([this] { releaseAllIndicesRequirement(); });
+ std::function<PlanCacheKey(const CanonicalQuery& cq, const CollectionPtr& coll)>
+ createPlanCacheKey = [](const CanonicalQuery& cq, const CollectionPtr& coll) {
+ return plan_cache_key_factory::make<PlanCacheKey>(cq, coll);
+ };
+
// Plan each branch of the $or.
auto subplanningStatus =
QueryPlanner::planSubqueries(expCtx()->opCtx,
- collection(),
CollectionQueryInfo::get(collection()).getPlanCache(),
+ createPlanCacheKey,
+ collection(),
*_query,
_plannerParams);
if (!subplanningStatus.isOK()) {
diff --git a/src/mongo/db/pipeline/plan_explainer_pipeline.cpp b/src/mongo/db/pipeline/plan_explainer_pipeline.cpp
index 947080867ae..072643fd7e6 100644
--- a/src/mongo/db/pipeline/plan_explainer_pipeline.cpp
+++ b/src/mongo/db/pipeline/plan_explainer_pipeline.cpp
@@ -114,7 +114,7 @@ std::vector<PlanExplainer::PlanStatsDetails> PlanExplainerPipeline::getRejectedP
}
std::vector<PlanExplainer::PlanStatsDetails> PlanExplainerPipeline::getCachedPlanStats(
- const PlanCacheEntry::DebugInfo&, ExplainOptions::Verbosity) const {
+ const plan_cache_debug_info::DebugInfo&, ExplainOptions::Verbosity) const {
// Pipelines are not cached, so we should never try to rebuild the stats from a cached entry.
MONGO_UNREACHABLE;
}
diff --git a/src/mongo/db/pipeline/plan_explainer_pipeline.h b/src/mongo/db/pipeline/plan_explainer_pipeline.h
index e964b75e45b..ae2dca8239f 100644
--- a/src/mongo/db/pipeline/plan_explainer_pipeline.h
+++ b/src/mongo/db/pipeline/plan_explainer_pipeline.h
@@ -51,7 +51,7 @@ public:
PlanStatsDetails getWinningPlanTrialStats() const final;
std::vector<PlanStatsDetails> getRejectedPlansStats(
ExplainOptions::Verbosity verbosity) const final;
- std::vector<PlanStatsDetails> getCachedPlanStats(const PlanCacheEntry::DebugInfo&,
+ std::vector<PlanStatsDetails> getCachedPlanStats(const plan_cache_debug_info::DebugInfo&,
ExplainOptions::Verbosity) const final;
void incrementNReturned() {
diff --git a/src/mongo/db/query/SConscript b/src/mongo/db/query/SConscript
index 682b1a26173..2fe3d905387 100644
--- a/src/mongo/db/query/SConscript
+++ b/src/mongo/db/query/SConscript
@@ -43,8 +43,9 @@ env.Library(
"index_entry.cpp",
"index_tag.cpp",
"interval.cpp",
+ "plan_cache_callbacks.cpp",
+ "plan_cache_debug_info.cpp",
"plan_cache_indexability.cpp",
- "plan_cache.cpp",
"plan_enumerator.cpp",
"planner_access.cpp",
"planner_analysis.cpp",
diff --git a/src/mongo/db/query/canonical_query_encoder_test.cpp b/src/mongo/db/query/canonical_query_encoder_test.cpp
index d925fc227b7..df211c592b4 100644
--- a/src/mongo/db/query/canonical_query_encoder_test.cpp
+++ b/src/mongo/db/query/canonical_query_encoder_test.cpp
@@ -29,10 +29,12 @@
#include "mongo/db/query/canonical_query_encoder.h"
+#include "mongo/db/catalog/collection_mock.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
#include "mongo/db/pipeline/expression_context_for_test.h"
#include "mongo/db/query/canonical_query.h"
+#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/query_test_service_context.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
@@ -45,6 +47,11 @@ using std::unique_ptr;
static const NamespaceString nss("testdb.testcoll");
+PlanCacheKey makeKey(const CanonicalQuery& cq) {
+ CollectionMock coll(nss);
+ return plan_cache_key_factory::make<PlanCacheKey>(cq, &coll);
+}
+
/**
* Utility functions to create a CanonicalQuery
*/
@@ -223,7 +230,7 @@ TEST(CanonicalQueryEncoderTest, ComputeKeyGeoWithin) {
canonicalize("{a: {$geoWithin: "
"{$geometry: {type: 'Polygon', coordinates: "
"[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
- ASSERT_NOT_EQUALS(planCache.computeKey(*cqLegacy), planCache.computeKey(*cqNew));
+ ASSERT_NOT_EQUALS(makeKey(*cqLegacy), makeKey(*cqNew));
}
// GEO_NEAR cache keys should include information on geometry and CRS in addition
diff --git a/src/mongo/db/query/classic_plan_cache.cpp b/src/mongo/db/query/classic_plan_cache.cpp
index 4afe2264c3f..3acad6c9510 100644
--- a/src/mongo/db/query/classic_plan_cache.cpp
+++ b/src/mongo/db/query/classic_plan_cache.cpp
@@ -129,4 +129,39 @@ std::string SolutionCacheData::toString() const {
MONGO_UNREACHABLE;
}
+bool shouldCacheQuery(const CanonicalQuery& query) {
+ const FindCommandRequest& findCommand = query.getFindCommandRequest();
+ const MatchExpression* expr = query.root();
+
+ if (!query.getSortPattern() && expr->matchType() == MatchExpression::AND &&
+ expr->numChildren() == 0) {
+ return false;
+ }
+
+ if (!findCommand.getHint().isEmpty()) {
+ return false;
+ }
+
+ if (!findCommand.getMin().isEmpty()) {
+ return false;
+ }
+
+ if (!findCommand.getMax().isEmpty()) {
+ return false;
+ }
+
+ // We don't read or write from the plan cache for explain. This ensures that explain queries
+ // don't affect cache state, and it also makes sure that we can always generate information
+ // regarding rejected plans and/or trial period execution of candidate plans.
+ if (query.getExplain()) {
+ return false;
+ }
+
+ // Tailable cursors won't get cached, just turn into collscans.
+ if (query.getFindCommandRequest().getTailable()) {
+ return false;
+ }
+
+ return true;
+}
} // namespace mongo
diff --git a/src/mongo/db/query/classic_plan_cache.h b/src/mongo/db/query/classic_plan_cache.h
index 93770fcb139..daeca87857e 100644
--- a/src/mongo/db/query/classic_plan_cache.h
+++ b/src/mongo/db/query/classic_plan_cache.h
@@ -32,9 +32,12 @@
#include <string>
#include "mongo/db/query/canonical_query.h"
+#include "mongo/db/query/canonical_query_encoder.h"
+#include "mongo/db/query/index_entry.h"
#include "mongo/db/query/plan_cache.h"
namespace mongo {
+
/**
* Represents the "key" used in the PlanCache mapping from query shape -> query plan.
*/
@@ -272,4 +275,9 @@ struct BudgetEstimator {
using PlanCache =
PlanCacheBase<PlanCacheKey, SolutionCacheData, BudgetEstimator, PlanCacheKeyHasher>;
+/**
+ * We don't want to cache every possible query. This function encapsulates the criteria for what
+ * makes a canonical query suitable for inclusion in the cache.
+ */
+bool shouldCacheQuery(const CanonicalQuery& query);
} // namespace mongo
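shouldCacheQuery() is now a namespace-level function rather than a static member of PlanCache, so call sites drop the class qualifier. A short sketch of the gate, mirroring the later hunks in plan_cache_util.h and get_executor.cpp (the surrounding variables are illustrative):

    // Only write to the plan cache for shapes that pass the criteria implemented in
    // classic_plan_cache.cpp: a non-trivial predicate or sort, no hint/min/max,
    // not an explain, and not a tailable cursor.
    if (shouldCacheQuery(query) && canCache) {
        // ... build the PlanCacheKey and call planCache->set(...) as shown above.
    }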
diff --git a/src/mongo/db/query/collection_query_info.cpp b/src/mongo/db/query/collection_query_info.cpp
index 4abf7b82ecb..b4fb718964e 100644
--- a/src/mongo/db/query/collection_query_info.cpp
+++ b/src/mongo/db/query/collection_query_info.cpp
@@ -77,20 +77,23 @@ CoreIndexInfo indexInfoFromIndexCatalogEntry(const IndexCatalogEntry& ice) {
projExec};
}
+std::shared_ptr<PlanCache> makePlanCache() {
+ return std::make_shared<PlanCache>(
+ PlanCache::BudgetTracker(internalQueryCacheMaxEntriesPerCollection.load()));
+}
+
} // namespace
-CollectionQueryInfo::CollectionQueryInfo() : _keysComputed(false), _planCache(makePlanCache()) {}
+CollectionQueryInfo::CollectionQueryInfo()
+ : _keysComputed(false),
+ _planCacheIndexabilityState(std::make_shared<PlanCacheIndexabilityState>()),
+ _planCache(makePlanCache()) {}
const UpdateIndexData& CollectionQueryInfo::getIndexKeys(OperationContext* opCtx) const {
invariant(_keysComputed);
return _indexedPaths;
}
-std::shared_ptr<PlanCache> CollectionQueryInfo::makePlanCache() {
- return std::make_shared<PlanCache>(
- PlanCache::BudgetTracker(internalQueryCacheMaxEntriesPerCollection.load()));
-}
-
void CollectionQueryInfo::computeIndexKeys(OperationContext* opCtx, const CollectionPtr& coll) {
_indexedPaths.clear();
@@ -197,7 +200,6 @@ void CollectionQueryInfo::clearQueryCache(OperationContext* opCtx, const Collect
"Clearing plan cache - collection info cache reinstantiated",
"namespace"_attr = coll->ns());
- _planCache = makePlanCache();
updatePlanCacheIndexEntries(opCtx, coll);
}
}
@@ -214,6 +216,10 @@ PlanCache* CollectionQueryInfo::getPlanCache() const {
return _planCache.get();
}
+const PlanCacheIndexabilityState& CollectionQueryInfo::getPlanCacheIndexabilityState() const {
+ return *_planCacheIndexabilityState;
+}
+
void CollectionQueryInfo::updatePlanCacheIndexEntries(OperationContext* opCtx,
const CollectionPtr& coll) {
std::vector<CoreIndexInfo> indexCores;
@@ -228,7 +234,9 @@ void CollectionQueryInfo::updatePlanCacheIndexEntries(OperationContext* opCtx,
indexCores.emplace_back(indexInfoFromIndexCatalogEntry(*ice));
}
- _planCache->notifyOfIndexUpdates(indexCores);
+ _planCache = makePlanCache();
+ _planCacheIndexabilityState = std::make_shared<PlanCacheIndexabilityState>();
+ _planCacheIndexabilityState->updateDiscriminators(indexCores);
}
void CollectionQueryInfo::init(OperationContext* opCtx, const CollectionPtr& coll) {
@@ -245,8 +253,6 @@ void CollectionQueryInfo::init(OperationContext* opCtx, const CollectionPtr& col
}
void CollectionQueryInfo::rebuildIndexData(OperationContext* opCtx, const CollectionPtr& coll) {
- _planCache = makePlanCache();
-
_keysComputed = false;
computeIndexKeys(opCtx, coll);
updatePlanCacheIndexEntries(opCtx, coll);
diff --git a/src/mongo/db/query/collection_query_info.h b/src/mongo/db/query/collection_query_info.h
index ac7438b3803..017bbc96948 100644
--- a/src/mongo/db/query/collection_query_info.h
+++ b/src/mongo/db/query/collection_query_info.h
@@ -31,6 +31,7 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/query/classic_plan_cache.h"
+#include "mongo/db/query/plan_cache_indexability.h"
#include "mongo/db/query/plan_summary_stats.h"
#include "mongo/db/update_index_data.h"
@@ -62,6 +63,11 @@ public:
*/
PlanCache* getPlanCache() const;
+ /**
+ * Get the "indexability discriminators" used in the PlanCache for generating plan cache keys.
+ */
+ const PlanCacheIndexabilityState& getPlanCacheIndexabilityState() const;
+
/* get set of index keys for this namespace. handy to quickly check if a given
field is indexed (Note it might be a secondary component of a compound index.)
*/
@@ -99,7 +105,6 @@ public:
const PlanSummaryStats& summaryStats) const;
private:
- static std::shared_ptr<PlanCache> makePlanCache();
void computeIndexKeys(OperationContext* opCtx, const CollectionPtr& coll);
void updatePlanCacheIndexEntries(OperationContext* opCtx, const CollectionPtr& coll);
@@ -107,6 +112,10 @@ private:
bool _keysComputed;
UpdateIndexData _indexedPaths;
+ // Holds computed information about the collection's indexes. Used for generating plan
+ // cache keys.
+ std::shared_ptr<PlanCacheIndexabilityState> _planCacheIndexabilityState;
+
// A cache for query plans. Shared across cloned Collection instances.
std::shared_ptr<PlanCache> _planCache;
};
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 53fd3c00f06..92211645bce 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -48,6 +48,7 @@
#include "mongo/db/query/collection_query_info.h"
#include "mongo/db/query/explain_common.h"
#include "mongo/db/query/get_executor.h"
+#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/query/plan_executor_impl.h"
#include "mongo/db/query/plan_executor_sbe.h"
@@ -93,11 +94,10 @@ void generatePlannerInfo(PlanExecutor* exec,
if (collection && exec->getCanonicalQuery()) {
const QuerySettings* querySettings =
QuerySettingsDecoration::get(collection->getSharedDecorations());
- PlanCacheKey planCacheKey = CollectionQueryInfo::get(collection)
- .getPlanCache()
- ->computeKey(*exec->getCanonicalQuery());
- planCacheKeyHash = canonical_query_encoder::computeHash(planCacheKey.toString());
- queryHash = canonical_query_encoder::computeHash(planCacheKey.getStableKeyStringData());
+ const PlanCacheKey planCacheKey =
+ plan_cache_key_factory::make<PlanCacheKey>(*exec->getCanonicalQuery(), collection);
+ planCacheKeyHash = planCacheKey.planCacheKeyHash();
+ queryHash = planCacheKey.queryHash();
if (auto allowedIndicesFilter =
querySettings->getAllowedIndicesFilter(planCacheKey.getStableKey())) {
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index be4e57a46da..0db45b2aa7d 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -70,6 +70,7 @@
#include "mongo/db/query/explain.h"
#include "mongo/db/query/index_bounds_builder.h"
#include "mongo/db/query/internal_plans.h"
+#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/plan_executor_factory.h"
#include "mongo/db/query/planner_access.h"
#include "mongo/db/query/planner_analysis.h"
@@ -563,16 +564,14 @@ public:
}
// Fill in some opDebug information.
- const auto planCacheKey =
- CollectionQueryInfo::get(_collection).getPlanCache()->computeKey(*_cq);
- CurOp::get(_opCtx)->debug().queryHash =
- canonical_query_encoder::computeHash(planCacheKey.getStableKeyStringData());
+ const PlanCacheKey planCacheKey =
+ plan_cache_key_factory::make<PlanCacheKey>(*_cq, _collection);
+ CurOp::get(_opCtx)->debug().queryHash = planCacheKey.queryHash();
// Check that the query should be cached.
- if (CollectionQueryInfo::get(_collection).getPlanCache()->shouldCacheQuery(*_cq)) {
+ if (shouldCacheQuery(*_cq)) {
// Fill in the 'planCacheKey' too if the query is actually being cached.
- CurOp::get(_opCtx)->debug().planCacheKey =
- canonical_query_encoder::computeHash(planCacheKey.toString());
+ CurOp::get(_opCtx)->debug().planCacheKey = planCacheKey.planCacheKeyHash();
// Try to look up a cached solution for the query.
if (auto cs = CollectionQueryInfo::get(_collection)
diff --git a/src/mongo/db/query/lru_key_value.h b/src/mongo/db/query/lru_key_value.h
index abb5cf475fd..3d2430c1c74 100644
--- a/src/mongo/db/query/lru_key_value.h
+++ b/src/mongo/db/query/lru_key_value.h
@@ -202,18 +202,19 @@ public:
/**
* Remove the kv-store entry keyed by 'key'.
+ * Returns true if an entry for 'key' was removed; returns false if no such key exists.
*/
- Status remove(const K& key) {
+ bool remove(const K& key) {
KVMapConstIt i = _kvMap.find(key);
if (i == _kvMap.end()) {
- return Status(ErrorCodes::NoSuchKey, "no such key in LRU key-value store");
+ return false;
}
KVListIt found = i->second;
_budgetTracker.onRemove(*i->second->second);
delete found->second;
_kvMap.erase(i);
_kvList.erase(found);
- return Status::OK();
+ return true;
}
/**
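remove() on the LRU key-value store now reports a simple hit/miss instead of a Status, so the NoSuchKey error code disappears from this path. A minimal sketch of the new calling convention (the store and key names are illustrative; only the bool return comes from the hunk above):

    if (!kvStore.remove(key)) {
        // No entry for 'key' existed. Previously this surfaced as a Status carrying
        // ErrorCodes::NoSuchKey that callers had to check or deliberately ignore.
    }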
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index 8fcc4522c50..64b1db6a34c 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -29,65 +29,13 @@
#pragma once
-#include <boost/optional/optional.hpp>
-#include <set>
-
-#include "mongo/db/query/canonical_query.h"
-#include "mongo/db/query/canonical_query_encoder.h"
#include "mongo/db/query/lru_key_value.h"
-#include "mongo/db/query/plan_cache_indexability.h"
-#include "mongo/db/query/plan_ranking_decision.h"
-#include "mongo/db/query/query_planner_params.h"
-#include "mongo/platform/atomic_word.h"
+#include "mongo/db/query/plan_cache_callbacks.h"
+#include "mongo/db/query/plan_cache_debug_info.h"
#include "mongo/platform/mutex.h"
#include "mongo/util/container_size_helper.h"
namespace mongo {
-
-namespace plan_cache_detail {
-/**
- * Serializes indexability discriminators, appending them to keyBuilder. This function is used
- * during the computation of a query's plan cache key to ensure that two queries with different
- * index eligibilities will have different cache keys.
- */
-void encodeIndexability(const MatchExpression* tree,
- const PlanCacheIndexabilityState& indexabilityState,
- StringBuilder* keyBuilder);
-} // namespace plan_cache_detail
-
-// The logging facility enforces the rule that logging should not be done in a header file. Since
-// template classes and functions below must be defined in the header file and since they use the
-// logging facility, we have to define the helper functions below to perform the actual logging
-// operation from template code.
-namespace log_detail {
-void logInactiveCacheEntry(const std::string& key);
-void logCacheEviction(NamespaceString nss, std::string&& evictedEntry);
-void logCreateInactiveCacheEntry(std::string&& query,
- std::string&& queryHash,
- std::string&& planCacheKey,
- size_t newWorks);
-void logReplaceActiveCacheEntry(std::string&& query,
- std::string&& queryHash,
- std::string&& planCacheKey,
- size_t works,
- size_t newWorks);
-void logNoop(std::string&& query,
- std::string&& queryHash,
- std::string&& planCacheKey,
- size_t works,
- size_t newWorks);
-void logIncreasingWorkValue(std::string&& query,
- std::string&& queryHash,
- std::string&& planCacheKey,
- size_t works,
- size_t increasedWorks);
-void logPromoteCacheEntry(std::string&& query,
- std::string&& queryHash,
- std::string&& planCacheKey,
- size_t works,
- size_t newWorks);
-} // namespace log_detail
-
class QuerySolution;
struct QuerySolutionNode;
@@ -123,99 +71,17 @@ public:
template <class CachedPlanType>
class PlanCacheEntryBase {
public:
- /**
- * A description of the query from which a 'PlanCacheEntryBase' was created.
- */
- struct CreatedFromQuery {
- /**
- * Returns an estimate of the size of this object, including the memory allocated elsewhere
- * that it owns, in bytes.
- */
- uint64_t estimateObjectSizeInBytes() const {
- uint64_t size = 0;
- size += filter.objsize();
- size += sort.objsize();
- size += projection.objsize();
- size += collation.objsize();
- return size;
- }
-
- std::string debugString() const {
- return str::stream() << "query: " << filter.toString() << "; sort: " << sort.toString()
- << "; projection: " << projection.toString()
- << "; collation: " << collation.toString();
- }
-
- BSONObj filter;
- BSONObj sort;
- BSONObj projection;
- BSONObj collation;
- };
-
- /**
- * Per-plan cache entry information that is used strictly as debug information (e.g. is intended
- * for display by the $planCacheStats aggregation source). In order to save memory, this
- * information is sometimes discarded instead of kept in the plan cache entry. Therefore, this
- * information may not be used for any purpose outside displaying debug info, such as recovering
- * a plan from the cache or determining whether or not the cache entry is active.
- */
- struct DebugInfo {
- DebugInfo(CreatedFromQuery createdFromQuery,
- std::unique_ptr<const plan_ranker::PlanRankingDecision> decision)
- : createdFromQuery(std::move(createdFromQuery)), decision(std::move(decision)) {
- invariant(this->decision);
- }
-
- /**
- * 'DebugInfo' is copy-constructible, copy-assignable, move-constructible, and
- * move-assignable.
- */
- DebugInfo(const DebugInfo& other)
- : createdFromQuery(other.createdFromQuery), decision(other.decision->clone()) {}
-
- DebugInfo& operator=(const DebugInfo& other) {
- createdFromQuery = other.createdFromQuery;
- decision = other.decision->clone();
- return *this;
- }
-
- DebugInfo(DebugInfo&&) = default;
- DebugInfo& operator=(DebugInfo&&) = default;
-
- ~DebugInfo() = default;
-
- /**
- * Returns an estimate of the size of this object, including the memory allocated elsewhere
- * that it owns, in bytes.
- */
- uint64_t estimateObjectSizeInBytes() const {
- uint64_t size = 0;
- size += createdFromQuery.estimateObjectSizeInBytes();
- size += decision->estimateObjectSizeInBytes();
- return size;
- }
-
- CreatedFromQuery createdFromQuery;
-
- // Information that went into picking the winning plan and also why the other plans lost.
- // Never nullptr.
- std::unique_ptr<const plan_ranker::PlanRankingDecision> decision;
- };
-
- /**
- * Create a new PlanCacheEntrBase.
- * Grabs any planner-specific data required from the solutions.
- */
+ template <typename KeyType>
static std::unique_ptr<PlanCacheEntryBase<CachedPlanType>> create(
const std::vector<QuerySolution*>& solutions,
std::unique_ptr<const plan_ranker::PlanRankingDecision> decision,
- const CanonicalQuery& query,
std::unique_ptr<CachedPlanType> cachedPlan,
uint32_t queryHash,
uint32_t planCacheKey,
Date_t timeOfCreation,
bool isActive,
- size_t works) {
+ size_t works,
+ const PlanCacheCallbacks<KeyType, CachedPlanType>* callbacks) {
invariant(decision);
// If the cumulative size of the plan caches is estimated to remain within a predefined
@@ -226,25 +92,9 @@ public:
const bool includeDebugInfo = planCacheTotalSizeEstimateBytes.get() <
internalQueryCacheMaxSizeBytesBeforeStripDebugInfo.load();
- boost::optional<DebugInfo> debugInfo;
- if (includeDebugInfo) {
- // Strip projections on $-prefixed fields, as these are added by internal callers of the
- // system and are not considered part of the user projection.
- const FindCommandRequest& findCommand = query.getFindCommandRequest();
- BSONObjBuilder projBuilder;
- for (auto elem : findCommand.getProjection()) {
- if (elem.fieldName()[0] == '$') {
- continue;
- }
- projBuilder.append(elem);
- }
-
- CreatedFromQuery createdFromQuery{
- findCommand.getFilter(),
- findCommand.getSort(),
- projBuilder.obj(),
- query.getCollator() ? query.getCollator()->getSpec().toBSON() : BSONObj()};
- debugInfo.emplace(std::move(createdFromQuery), std::move(decision));
+ boost::optional<plan_cache_debug_info::DebugInfo> debugInfo;
+ if (includeDebugInfo && callbacks) {
+ debugInfo.emplace(callbacks->buildDebugInfo(std::move(decision)));
}
return std::unique_ptr<PlanCacheEntryBase<CachedPlanType>>(
@@ -265,7 +115,7 @@ public:
* Make a deep copy.
*/
std::unique_ptr<PlanCacheEntryBase<CachedPlanType>> clone() const {
- boost::optional<DebugInfo> debugInfoCopy;
+ boost::optional<plan_cache_debug_info::DebugInfo> debugInfoCopy;
if (debugInfo) {
debugInfoCopy.emplace(*debugInfo);
}
@@ -321,7 +171,7 @@ public:
//
// Once the estimated cumulative size of the mongod's plan caches exceeds a threshold, this
// debug info is omitted from new plan cache entries.
- const boost::optional<DebugInfo> debugInfo;
+ const boost::optional<plan_cache_debug_info::DebugInfo> debugInfo;
// An estimate of the size in bytes of this plan cache entry. This is the "deep size",
// calculated by recursively incorporating the size of owned objects, the objects that they in
@@ -343,7 +193,7 @@ private:
uint32_t planCacheKey,
bool isActive,
size_t works,
- boost::optional<DebugInfo> debugInfo)
+ boost::optional<plan_cache_debug_info::DebugInfo> debugInfo)
: cachedPlan(std::move(cachedPlan)),
timeOfCreation(timeOfCreation),
queryHash(queryHash),
@@ -375,9 +225,11 @@ private:
};
/**
- * Caches the best solution to a query. Aside from the (CanonicalQuery -> QuerySolution)
- * mapping, the cache contains information on why that mapping was made and statistics on the
- * cache entry's actual performance on subsequent runs.
+ * A data structure for caching execution plans, to avoid repeatedly performing query optimization
+ * and plan compilation on each invocation of a query. The cache is logically a mapping from
+ * 'KeyType' to 'CachedPlanType'. The cache key is derived from the query, and can be used to
+ * determine whether a cached plan is available. The cache has an LRU replacement policy, so it only
+ * keeps the most recently used plans.
*/
template <class KeyType,
class CachedPlanType,
@@ -419,55 +271,6 @@ public:
std::unique_ptr<CachedPlanHolder<CachedPlanType>> cachedPlanHolder;
};
- /**
- * We don't want to cache every possible query. This function
- * encapsulates the criteria for what makes a canonical query
- * suitable for lookup/inclusion in the cache.
- */
- static bool shouldCacheQuery(const CanonicalQuery& query) {
- const FindCommandRequest& findCommand = query.getFindCommandRequest();
- const MatchExpression* expr = query.root();
-
- // Collection scan
- // No sort order requested
- if (!query.getSortPattern() && expr->matchType() == MatchExpression::AND &&
- expr->numChildren() == 0) {
- return false;
- }
-
- // Hint provided
- if (!findCommand.getHint().isEmpty()) {
- return false;
- }
-
- // Min provided
- // Min queries are a special case of hinted queries.
- if (!findCommand.getMin().isEmpty()) {
- return false;
- }
-
- // Max provided
- // Similar to min, max queries are a special case of hinted queries.
- if (!findCommand.getMax().isEmpty()) {
- return false;
- }
-
- // We don't read or write from the plan cache for explain. This ensures
- // that explain queries don't affect cache state, and it also makes
- // sure that we can always generate information regarding rejected plans
- // and/or trial period execution of candidate plans.
- if (query.getExplain()) {
- return false;
- }
-
- // Tailable cursors won't get cached, just turn into collscans.
- if (query.getFindCommandRequest().getTailable()) {
- return false;
- }
-
- return true;
- }
-
PlanCacheBase(size_t size) : PlanCacheBase(BudgetTracker(size)) {}
PlanCacheBase(BudgetTracker&& budgetTracker) : _cache{std::move(budgetTracker)} {}
@@ -486,14 +289,18 @@ public:
* an inactive cache entry. If boost::none is provided, the function will use
* 'internalQueryCacheWorksGrowthCoefficient'.
*
+ * A 'callbacks' argument can be provided to perform some custom actions when the state of the
+ * plan cache or a plan cache entry has been changed.
+ *
* If the mapping was set successfully, returns Status::OK(), even if it evicted another entry.
*/
- Status set(const CanonicalQuery& query,
+ Status set(const KeyType& key,
std::unique_ptr<CachedPlanType> cachedPlan,
const std::vector<QuerySolution*>& solns,
std::unique_ptr<plan_ranker::PlanRankingDecision> why,
Date_t now,
- boost::optional<double> worksGrowthCoefficient = boost::none) {
+ boost::optional<double> worksGrowthCoefficient = boost::none,
+ const PlanCacheCallbacks<KeyType, CachedPlanType>* callbacks = nullptr) {
invariant(why);
invariant(cachedPlan);
@@ -528,56 +335,57 @@ public:
details.candidatePlanStats[0].get());
}},
why->stats);
- const auto key = computeKey(query);
+
stdx::lock_guard<Latch> cacheLock(_cacheMutex);
- bool isNewEntryActive = false;
- uint32_t queryHash;
- uint32_t planCacheKey;
- if (internalQueryCacheDisableInactiveEntries.load()) {
- // All entries are always active.
- isNewEntryActive = true;
- planCacheKey = key.planCacheKeyHash();
- queryHash = key.queryHash();
- } else {
- Entry* oldEntry = nullptr;
- Status cacheStatus = _cache.get(key, &oldEntry);
- invariant(cacheStatus.isOK() || cacheStatus == ErrorCodes::NoSuchKey);
- if (oldEntry) {
- queryHash = oldEntry->queryHash;
- planCacheKey = oldEntry->planCacheKey;
+ auto [queryHash, planCacheKey, isNewEntryActive, shouldBeCreated] = [&]() {
+ if (internalQueryCacheDisableInactiveEntries.load()) {
+ // All entries are always active.
+ return std::make_tuple(key.queryHash(),
+ key.planCacheKeyHash(),
+ true /* isNewEntryActive */,
+ true /* shouldBeCreated */);
} else {
- planCacheKey = key.planCacheKeyHash();
- queryHash = key.queryHash();
+ Entry* oldEntry = nullptr;
+ Status cacheStatus = _cache.get(key, &oldEntry);
+ invariant(cacheStatus.isOK() || cacheStatus == ErrorCodes::NoSuchKey);
+
+ const auto newState = getNewEntryState(
+ key,
+ oldEntry,
+ newWorks,
+ worksGrowthCoefficient.get_value_or(internalQueryCacheWorksGrowthCoefficient),
+ callbacks);
+
+ // Avoid recomputing the hashes if we've got an old entry to grab them from.
+ return oldEntry ? std::make_tuple(oldEntry->queryHash,
+ oldEntry->planCacheKey,
+ newState.shouldBeActive,
+ newState.shouldBeCreated)
+ : std::make_tuple(key.queryHash(),
+ key.planCacheKeyHash(),
+ newState.shouldBeActive,
+ newState.shouldBeCreated);
}
+ }();
- const auto newState = getNewEntryState(
- query,
- queryHash,
- planCacheKey,
- oldEntry,
- newWorks,
- worksGrowthCoefficient.get_value_or(internalQueryCacheWorksGrowthCoefficient));
-
- if (!newState.shouldBeCreated) {
- return Status::OK();
- }
- isNewEntryActive = newState.shouldBeActive;
+ if (!shouldBeCreated) {
+ return Status::OK();
}
auto newEntry(Entry::create(solns,
std::move(why),
- query,
std::move(cachedPlan),
queryHash,
planCacheKey,
now,
isNewEntryActive,
- newWorks));
+ newWorks,
+ callbacks));
auto evictedEntry = _cache.add(key, newEntry.release());
- if (nullptr != evictedEntry.get()) {
- log_detail::logCacheEviction(query.nss(), evictedEntry->debugString());
+ if (evictedEntry && callbacks) {
+ callbacks->onCacheEviction(*evictedEntry);
}
return Status::OK();
@@ -588,13 +396,12 @@ public:
* when the associated plan starts to perform poorly, we deactivate it, so that plans which
* perform even worse than the one already in the cache may not easily take its place.
*/
- void deactivate(const CanonicalQuery& query) {
+ void deactivate(const KeyType& key) {
if (internalQueryCacheDisableInactiveEntries.load()) {
// This is a noop if inactive entries are disabled.
return;
}
- KeyType key = computeKey(query);
stdx::lock_guard<Latch> cacheLock(_cacheMutex);
Entry* entry = nullptr;
Status cacheStatus = _cache.get(key, &entry);
@@ -607,18 +414,6 @@ public:
}
/**
- * Look up the cached data access for the provided 'query'. Used by the query planner
- * to shortcut planning.
- *
- * The return value will provide the "state" of the cache entry, as well as the CachedSolution
- * for the query (if there is one).
- */
- GetResult get(const CanonicalQuery& query) const {
- KeyType key = computeKey(query);
- return get(key);
- }
-
- /**
* Look up the cached data access for the provided key. Circumvents the recalculation
* of a plan cache key.
*
@@ -656,12 +451,12 @@ public:
}
/**
- * Remove the entry corresponding to 'cq' from the cache. Returns Status::OK() if the plan
- * was present and removed and an error status otherwise.
+ * Remove the entry with the 'key' from the cache. If there is no entry for the given key in
+ * the cache, this call is a no-op.
*/
- Status remove(const CanonicalQuery& cq) {
+ void remove(const KeyType& key) {
stdx::lock_guard<Latch> cacheLock(_cacheMutex);
- return _cache.remove(computeKey(cq));
+ [[maybe_unused]] auto ret = _cache.remove(key);
}
/**
@@ -673,33 +468,11 @@ public:
}
/**
- * Get the cache key corresponding to the given canonical query. The query need not already
- * be cached.
- *
- * This is provided in the public API simply as a convenience for consumers who need some
- * description of query shape (e.g. index filters).
- *
- * Callers must hold the collection lock when calling this method.
- */
- KeyType computeKey(const CanonicalQuery& cq) const {
- const auto shapeString = cq.encodeKey();
-
- StringBuilder indexabilityKeyBuilder;
- plan_cache_detail::encodeIndexability(
- cq.root(), _indexabilityState, &indexabilityKeyBuilder);
- return KeyType(std::move(shapeString),
- indexabilityKeyBuilder.str(),
- cq.getEnableSlotBasedExecutionEngine());
- }
-
- /**
- * Returns a copy of a cache entry, looked up by CanonicalQuery.
+ * Returns a copy of a cache entry, looked up by the plan cache key.
*
* If there is no entry in the cache for the 'query', returns an error Status.
*/
- StatusWith<std::unique_ptr<Entry>> getEntry(const CanonicalQuery& cq) const {
- KeyType key = computeKey(cq);
-
+ StatusWith<std::unique_ptr<Entry>> getEntry(const KeyType& key) const {
stdx::lock_guard<Latch> cacheLock(_cacheMutex);
Entry* entry;
Status cacheStatus = _cache.get(key, &entry);
@@ -737,16 +510,6 @@ public:
}
/**
- * Updates internal state kept about the collection's indexes. Must be called when the set
- * of indexes on the associated collection have changed.
- *
- * Callers must hold the collection lock in exclusive mode when calling this method.
- */
- void notifyOfIndexUpdates(const std::vector<CoreIndexInfo>& indexCores) {
- _indexabilityState.updateDiscriminators(indexCores);
- }
-
- /**
* Iterates over the plan cache. For each entry, serializes the PlanCacheEntryBase according to
* 'serializationFunc'. Returns a vector of all serialized entries which match 'filterFunc'.
*/
@@ -779,18 +542,16 @@ private:
* - We should create a new entry
* - The new entry should be marked 'active'
*/
- NewEntryState getNewEntryState(const CanonicalQuery& query,
- uint32_t queryHash,
- uint32_t planCacheKey,
+ NewEntryState getNewEntryState(const KeyType& key,
Entry* oldEntry,
size_t newWorks,
- double growthCoefficient) {
+ double growthCoefficient,
+ const PlanCacheCallbacks<KeyType, CachedPlanType>* callbacks) {
NewEntryState res;
if (!oldEntry) {
- log_detail::logCreateInactiveCacheEntry(query.toStringShort(),
- zeroPaddedHex(queryHash),
- zeroPaddedHex(planCacheKey),
- newWorks);
+ if (callbacks) {
+ callbacks->onCreateInactiveCacheEntry(key, oldEntry, newWorks);
+ }
res.shouldBeCreated = true;
res.shouldBeActive = false;
return res;
@@ -799,19 +560,15 @@ private:
if (oldEntry->isActive && newWorks <= oldEntry->works) {
// The new plan did better than the currently stored active plan. This case may
// occur if many MultiPlanners are run simultaneously.
- log_detail::logReplaceActiveCacheEntry(query.toStringShort(),
- zeroPaddedHex(queryHash),
- zeroPaddedHex(planCacheKey),
- oldEntry->works,
- newWorks);
+ if (callbacks) {
+ callbacks->onReplaceActiveCacheEntry(key, oldEntry, newWorks);
+ }
res.shouldBeCreated = true;
res.shouldBeActive = true;
} else if (oldEntry->isActive) {
- log_detail::logNoop(query.toStringShort(),
- zeroPaddedHex(queryHash),
- zeroPaddedHex(planCacheKey),
- oldEntry->works,
- newWorks);
+ if (callbacks) {
+ callbacks->onNoopActiveCacheEntry(key, oldEntry, newWorks);
+ }
// There is already an active cache entry with a lower works value.
// We do nothing.
res.shouldBeCreated = false;
@@ -827,11 +584,9 @@ private:
const double increasedWorks = std::max(
oldEntry->works + 1u, static_cast<size_t>(oldEntry->works * growthCoefficient));
- log_detail::logIncreasingWorkValue(query.toStringShort(),
- zeroPaddedHex(queryHash),
- zeroPaddedHex(planCacheKey),
- oldEntry->works,
- increasedWorks);
+ if (callbacks) {
+ callbacks->onIncreasingWorkValue(key, oldEntry, increasedWorks);
+ }
oldEntry->works = increasedWorks;
// Don't create a new entry.
@@ -840,11 +595,9 @@ private:
// This plan performed just as well or better than we expected, based on the
// inactive entry's works. We use this as an indicator that it's safe to
// cache (as an active entry) the plan this query used for the future.
- log_detail::logPromoteCacheEntry(query.toStringShort(),
- zeroPaddedHex(queryHash),
- zeroPaddedHex(planCacheKey),
- oldEntry->works,
- newWorks);
+ if (callbacks) {
+ callbacks->onPromoteCacheEntry(key, oldEntry, newWorks);
+ }
// We'll replace the old inactive entry with an active entry.
res.shouldBeCreated = true;
res.shouldBeActive = true;
@@ -857,12 +610,6 @@ private:
// Protects _cache.
mutable Mutex _cacheMutex = MONGO_MAKE_LATCH("PlanCache::_cacheMutex");
-
- // Holds computed information about the collection's indexes. Used for generating plan
- // cache keys.
- //
- // Concurrent access is synchronized by the collection lock. Multiple concurrent readers
- // are allowed.
- PlanCacheIndexabilityState _indexabilityState;
};
+
} // namespace mongo
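
With these plan_cache.h changes, the CanonicalQuery-taking entry points (set, get, deactivate, remove, getEntry) now accept a precomputed key, while computeKey() and notifyOfIndexUpdates() move out of the class along with the owned PlanCacheIndexabilityState. The sketch below shows how a caller might drive the new interface; it mirrors the usage that plan_cache_test.cpp adopts later in this diff and assumes the classic PlanCache/PlanCacheKey/SolutionCacheData aliases from classic_plan_cache.h, so it is illustrative rather than buildable on its own (the function name is invented for the example).

    #include "mongo/db/query/classic_plan_cache.h"
    #include "mongo/db/query/plan_cache_callbacks.h"
    #include "mongo/util/assert_util.h"

    namespace mongo {
    // Illustrative only: the key is now computed once by the caller and passed to every
    // plan cache operation; callbacks are optional and reproduce the old logging behavior.
    void cacheWinningPlan(PlanCache& planCache,
                          const CanonicalQuery& cq,
                          const PlanCacheKey& key,
                          std::unique_ptr<SolutionCacheData> cachedPlan,
                          const std::vector<QuerySolution*>& solns,
                          std::unique_ptr<plan_ranker::PlanRankingDecision> why) {
        PlanCacheLoggingCallbacks<PlanCacheKey, SolutionCacheData> callbacks{cq};

        uassertStatusOK(planCache.set(key,
                                      std::move(cachedPlan),
                                      solns,
                                      std::move(why),
                                      Date_t::now(),
                                      boost::none /* worksGrowthCoefficient */,
                                      &callbacks));

        // Lookup, deactivation and removal all take the same precomputed key.
        invariant(planCache.get(key).state != PlanCache::CacheEntryState::kNotPresent);
        planCache.deactivate(key);
        planCache.remove(key);
    }
    }  // namespace mongo
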
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache_callbacks.cpp
index d3284dca268..57303224309 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache_callbacks.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2018-present MongoDB, Inc.
+ * Copyright (C) 2021-present MongoDB, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the Server Side Public License, version 1,
@@ -29,14 +29,11 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery
-#include "mongo/db/query/plan_cache.h"
+#include "mongo/db/query/plan_cache_callbacks.h"
-#include "mongo/db/query/plan_cache_indexability.h"
-#include "mongo/db/query/planner_ixselect.h"
#include "mongo/logv2/log.h"
-namespace mongo {
-namespace log_detail {
+namespace mongo::log_detail {
void logInactiveCacheEntry(const std::string& key) {
LOGV2_DEBUG(
20936, 2, "Not using cached entry since it is inactive", "cacheKey"_attr = redact(key));
@@ -123,48 +120,39 @@ void logPromoteCacheEntry(std::string&& query,
"oldWorks"_attr = works,
"newWorks"_attr = newWorks);
}
-} // namespace log_detail
-namespace plan_cache_detail {
-// Delimiters for cache key encoding.
-const char kEncodeDiscriminatorsBegin = '<';
-const char kEncodeDiscriminatorsEnd = '>';
-
-void encodeIndexabilityForDiscriminators(const MatchExpression* tree,
- const IndexToDiscriminatorMap& discriminators,
- StringBuilder* keyBuilder) {
- for (auto&& indexAndDiscriminatorPair : discriminators) {
- *keyBuilder << indexAndDiscriminatorPair.second.isMatchCompatibleWithIndex(tree);
- }
+void logRemoveCacheEntry(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& projection,
+ const BSONObj& sort,
+ const BSONObj& collation) {
+ LOGV2_DEBUG(23907,
+ 1,
+ "{namespace}: Removed plan cache entry - {query}"
+ "(sort: {sort}; projection: {projection}; collation: {collation})",
+ "Removed plan cache entry",
+ "namespace"_attr = ns,
+ "query"_attr = redact(query),
+ "sort"_attr = sort,
+ "projection"_attr = projection,
+ "collation"_attr = collation);
}
-void encodeIndexability(const MatchExpression* tree,
- const PlanCacheIndexabilityState& indexabilityState,
- StringBuilder* keyBuilder) {
- if (!tree->path().empty()) {
- const IndexToDiscriminatorMap& discriminators =
- indexabilityState.getDiscriminators(tree->path());
- IndexToDiscriminatorMap wildcardDiscriminators =
- indexabilityState.buildWildcardDiscriminators(tree->path());
- if (!discriminators.empty() || !wildcardDiscriminators.empty()) {
- *keyBuilder << kEncodeDiscriminatorsBegin;
- // For each discriminator on this path, append the character '0' or '1'.
- encodeIndexabilityForDiscriminators(tree, discriminators, keyBuilder);
- encodeIndexabilityForDiscriminators(tree, wildcardDiscriminators, keyBuilder);
-
- *keyBuilder << kEncodeDiscriminatorsEnd;
- }
- } else if (tree->matchType() == MatchExpression::MatchType::NOT) {
- // If the node is not compatible with any type of index, add a single '0' discriminator
- // here. Otherwise add a '1'.
- *keyBuilder << kEncodeDiscriminatorsBegin;
- *keyBuilder << QueryPlannerIXSelect::logicalNodeMayBeSupportedByAnIndex(tree);
- *keyBuilder << kEncodeDiscriminatorsEnd;
- }
-
- for (size_t i = 0; i < tree->numChildren(); ++i) {
- encodeIndexability(tree->getChild(i), indexabilityState, keyBuilder);
- }
+void logMissingCacheEntry(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& projection,
+ const BSONObj& sort,
+ const BSONObj& collation) {
+ LOGV2_DEBUG(23906,
+ 1,
+ "{namespace}: Query shape doesn't exist in PlanCache - {query}"
+ "(sort: {sort}; projection: {projection}; collation: {collation})",
+ "Query shape doesn't exist in PlanCache",
+ "namespace"_attr = ns,
+ "query"_attr = redact(query),
+ "sort"_attr = sort,
+ "projection"_attr = projection,
+ "collation"_attr = collation);
}
-} // namespace plan_cache_detail
-} // namespace mongo
+
+} // namespace mongo::log_detail
diff --git a/src/mongo/db/query/plan_cache_callbacks.h b/src/mongo/db/query/plan_cache_callbacks.h
new file mode 100644
index 00000000000..728000987f8
--- /dev/null
+++ b/src/mongo/db/query/plan_cache_callbacks.h
@@ -0,0 +1,184 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/query/plan_cache_debug_info.h"
+
+namespace mongo {
+// The logging facility enforces the rule that logging should not be done in a header file. Since
+// template classes and functions below must be defined in the header file and since they use the
+// logging facility, we have to define the helper functions below to perform the actual logging
+// operation from template code.
+namespace log_detail {
+void logInactiveCacheEntry(const std::string& key);
+void logCacheEviction(NamespaceString nss, std::string&& evictedEntry);
+void logCreateInactiveCacheEntry(std::string&& query,
+ std::string&& queryHash,
+ std::string&& planCacheKey,
+ size_t newWorks);
+void logReplaceActiveCacheEntry(std::string&& query,
+ std::string&& queryHash,
+ std::string&& planCacheKey,
+ size_t works,
+ size_t newWorks);
+void logNoop(std::string&& query,
+ std::string&& queryHash,
+ std::string&& planCacheKey,
+ size_t works,
+ size_t newWorks);
+void logIncreasingWorkValue(std::string&& query,
+ std::string&& queryHash,
+ std::string&& planCacheKey,
+ size_t works,
+ size_t increasedWorks);
+void logPromoteCacheEntry(std::string&& query,
+ std::string&& queryHash,
+ std::string&& planCacheKey,
+ size_t works,
+ size_t newWorks);
+} // namespace log_detail
+
+template <class CachedPlanType>
+class PlanCacheEntryBase;
+
+/**
+ * Encapsulates callback functions used to perform a custom action when the plan cache state
+ * changes.
+ */
+template <typename KeyType, typename CachedPlanType>
+class PlanCacheCallbacks {
+public:
+ virtual ~PlanCacheCallbacks() = default;
+
+ virtual void onCacheEviction(const PlanCacheEntryBase<CachedPlanType>& entry) const = 0;
+ virtual void onCreateInactiveCacheEntry(const KeyType& key,
+ const PlanCacheEntryBase<CachedPlanType>* oldEntry,
+ size_t newWorks) const = 0;
+ virtual void onReplaceActiveCacheEntry(const KeyType& key,
+ const PlanCacheEntryBase<CachedPlanType>* oldEntry,
+ size_t newWorks) const = 0;
+ virtual void onNoopActiveCacheEntry(const KeyType& key,
+ const PlanCacheEntryBase<CachedPlanType>* oldEntry,
+ size_t newWorks) const = 0;
+ virtual void onIncreasingWorkValue(const KeyType& key,
+ const PlanCacheEntryBase<CachedPlanType>* oldEntry,
+ size_t newWorks) const = 0;
+ virtual void onPromoteCacheEntry(const KeyType& key,
+ const PlanCacheEntryBase<CachedPlanType>* oldEntry,
+ size_t newWorks) const = 0;
+ virtual plan_cache_debug_info::DebugInfo buildDebugInfo(
+ std::unique_ptr<const plan_ranker::PlanRankingDecision> decision) const = 0;
+};
+
+/**
+ * Simple logging callbacks for the plan cache.
+ */
+template <typename KeyType, typename CachedPlanType>
+class PlanCacheLoggingCallbacks : public PlanCacheCallbacks<KeyType, CachedPlanType> {
+public:
+ PlanCacheLoggingCallbacks(const CanonicalQuery& cq) : _cq{cq} {}
+
+ void onCacheEviction(const PlanCacheEntryBase<CachedPlanType>& entry) const final {
+ log_detail::logCacheEviction(_cq.nss(), entry.debugString());
+ }
+
+ void onCreateInactiveCacheEntry(const KeyType& key,
+ const PlanCacheEntryBase<CachedPlanType>* oldEntry,
+ size_t newWorks) const final {
+ auto&& [queryHash, planCacheKey] = hashes(key, oldEntry);
+ log_detail::logCreateInactiveCacheEntry(
+ _cq.toStringShort(), std::move(queryHash), std::move(planCacheKey), newWorks);
+ }
+
+ void onReplaceActiveCacheEntry(const KeyType& key,
+ const PlanCacheEntryBase<CachedPlanType>* oldEntry,
+ size_t newWorks) const final {
+ invariant(oldEntry);
+ auto&& [queryHash, planCacheKey] = hashes(key, oldEntry);
+ log_detail::logReplaceActiveCacheEntry(_cq.toStringShort(),
+ std::move(queryHash),
+ std::move(planCacheKey),
+ oldEntry->works,
+ newWorks);
+ }
+
+ void onNoopActiveCacheEntry(const KeyType& key,
+ const PlanCacheEntryBase<CachedPlanType>* oldEntry,
+ size_t newWorks) const final {
+ invariant(oldEntry);
+ auto&& [queryHash, planCacheKey] = hashes(key, oldEntry);
+ log_detail::logNoop(_cq.toStringShort(),
+ std::move(queryHash),
+ std::move(planCacheKey),
+ oldEntry->works,
+ newWorks);
+ }
+
+ void onIncreasingWorkValue(const KeyType& key,
+ const PlanCacheEntryBase<CachedPlanType>* oldEntry,
+ size_t newWorks) const final {
+ invariant(oldEntry);
+ auto&& [queryHash, planCacheKey] = hashes(key, oldEntry);
+ log_detail::logIncreasingWorkValue(_cq.toStringShort(),
+ std::move(queryHash),
+ std::move(planCacheKey),
+ oldEntry->works,
+ newWorks);
+ }
+
+ void onPromoteCacheEntry(const KeyType& key,
+ const PlanCacheEntryBase<CachedPlanType>* oldEntry,
+ size_t newWorks) const final {
+ invariant(oldEntry);
+ auto&& [queryHash, planCacheKey] = hashes(key, oldEntry);
+ log_detail::logPromoteCacheEntry(_cq.toStringShort(),
+ std::move(queryHash),
+ std::move(planCacheKey),
+ oldEntry->works,
+ newWorks);
+ }
+
+ plan_cache_debug_info::DebugInfo buildDebugInfo(
+ std::unique_ptr<const plan_ranker::PlanRankingDecision> decision) const final {
+ return plan_cache_debug_info::buildDebugInfo(_cq, std::move(decision));
+ }
+
+private:
+ auto hashes(const KeyType& key, const PlanCacheEntryBase<CachedPlanType>* oldEntry) const {
+ // Avoid recomputing the hashes if we've got an old entry to grab them from.
+ return oldEntry
+ ? std::make_pair(zeroPaddedHex(oldEntry->queryHash),
+ zeroPaddedHex(oldEntry->planCacheKey))
+ : std::make_pair(zeroPaddedHex(key.queryHash()), zeroPaddedHex(key.planCacheKeyHash()));
+ }
+
+ const CanonicalQuery& _cq;
+};
+} // namespace mongo
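
The PlanCacheCallbacks interface above is the template-friendly replacement for the direct log_detail calls that set() used to make, and PlanCacheLoggingCallbacks is the only implementation this patch adds. The sketch below is a hypothetical second implementation (the class name and counters are invented for illustration) showing what any other consumer of the hook would have to provide, since every virtual is pure.

    #include <atomic>

    #include "mongo/db/query/plan_cache_callbacks.h"

    namespace mongo {
    // Hypothetical: counts cache events instead of logging them. Not part of this patch.
    template <typename KeyType, typename CachedPlanType>
    class CountingPlanCacheCallbacks final : public PlanCacheCallbacks<KeyType, CachedPlanType> {
    public:
        explicit CountingPlanCacheCallbacks(const CanonicalQuery& cq) : _cq(cq) {}

        void onCacheEviction(const PlanCacheEntryBase<CachedPlanType>&) const final {
            _evictions.fetch_add(1);
        }
        void onCreateInactiveCacheEntry(const KeyType&,
                                        const PlanCacheEntryBase<CachedPlanType>*,
                                        size_t) const final {
            _creations.fetch_add(1);
        }
        void onReplaceActiveCacheEntry(const KeyType&,
                                       const PlanCacheEntryBase<CachedPlanType>*,
                                       size_t) const final {
            _replacements.fetch_add(1);
        }
        void onNoopActiveCacheEntry(const KeyType&,
                                    const PlanCacheEntryBase<CachedPlanType>*,
                                    size_t) const final {}
        void onIncreasingWorkValue(const KeyType&,
                                   const PlanCacheEntryBase<CachedPlanType>*,
                                   size_t) const final {}
        void onPromoteCacheEntry(const KeyType&,
                                 const PlanCacheEntryBase<CachedPlanType>*,
                                 size_t) const final {
            _promotions.fetch_add(1);
        }

        // Debug info is still built from the canonical query, exactly as the logging
        // callbacks do, by delegating to plan_cache_debug_info::buildDebugInfo().
        plan_cache_debug_info::DebugInfo buildDebugInfo(
            std::unique_ptr<const plan_ranker::PlanRankingDecision> decision) const final {
            return plan_cache_debug_info::buildDebugInfo(_cq, std::move(decision));
        }

        long long evictions() const {
            return _evictions.load();
        }

    private:
        const CanonicalQuery& _cq;
        mutable std::atomic<long long> _evictions{0};
        mutable std::atomic<long long> _creations{0};
        mutable std::atomic<long long> _replacements{0};
        mutable std::atomic<long long> _promotions{0};
    };
    }  // namespace mongo
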
diff --git a/src/mongo/db/query/plan_cache_debug_info.cpp b/src/mongo/db/query/plan_cache_debug_info.cpp
new file mode 100644
index 00000000000..4b7651e95db
--- /dev/null
+++ b/src/mongo/db/query/plan_cache_debug_info.cpp
@@ -0,0 +1,54 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/query/plan_cache_debug_info.h"
+
+namespace mongo::plan_cache_debug_info {
+DebugInfo buildDebugInfo(const CanonicalQuery& query,
+ std::unique_ptr<const plan_ranker::PlanRankingDecision> decision) {
+ // Strip projections on $-prefixed fields, as these are added by internal callers of the
+ // system and are not considered part of the user projection.
+ const FindCommandRequest& findCommand = query.getFindCommandRequest();
+ BSONObjBuilder projBuilder;
+ for (auto elem : findCommand.getProjection()) {
+ if (elem.fieldName()[0] == '$') {
+ continue;
+ }
+ projBuilder.append(elem);
+ }
+
+ CreatedFromQuery createdFromQuery{findCommand.getFilter(),
+ findCommand.getSort(),
+ projBuilder.obj(),
+ query.getCollator() ? query.getCollator()->getSpec().toBSON()
+ : BSONObj()};
+
+ return {std::move(createdFromQuery), std::move(decision)};
+}
+} // namespace mongo::plan_cache_debug_info
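
The projection-stripping loop in buildDebugInfo() means only the user-visible projection shape is recorded for $planCacheStats. The standalone snippet below repeats the same filtering logic to make the effect concrete; the "$internalField" name is made up purely for this example.

    #include "mongo/bson/bsonobjbuilder.h"

    namespace mongo {
    // Illustration of the filtering performed by buildDebugInfo(): $-prefixed fields that
    // internal callers add to the projection are dropped before the shape is stored.
    // For example, a projection of {a: 1, _id: 0, $internalField: 1} is stored as {a: 1, _id: 0}.
    BSONObj stripInternalProjectionFields(const BSONObj& projection) {
        BSONObjBuilder projBuilder;
        for (auto elem : projection) {
            if (elem.fieldName()[0] == '$') {
                continue;  // skip internally-added fields, as buildDebugInfo() does
            }
            projBuilder.append(elem);
        }
        return projBuilder.obj();
    }
    }  // namespace mongo
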
diff --git a/src/mongo/db/query/plan_cache_debug_info.h b/src/mongo/db/query/plan_cache_debug_info.h
new file mode 100644
index 00000000000..611e947c234
--- /dev/null
+++ b/src/mongo/db/query/plan_cache_debug_info.h
@@ -0,0 +1,117 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/query/canonical_query.h"
+#include "mongo/db/query/plan_ranking_decision.h"
+
+namespace mongo::plan_cache_debug_info {
+/**
+ * A description of the query from which a plan cache entry was created.
+ */
+struct CreatedFromQuery {
+ /**
+ * Returns an estimate of the size of this object, including the memory allocated elsewhere
+ * that it owns, in bytes.
+ */
+ uint64_t estimateObjectSizeInBytes() const {
+ uint64_t size = 0;
+ size += filter.objsize();
+ size += sort.objsize();
+ size += projection.objsize();
+ size += collation.objsize();
+ return size;
+ }
+
+ std::string debugString() const {
+ return str::stream() << "query: " << filter.toString() << "; sort: " << sort.toString()
+ << "; projection: " << projection.toString()
+ << "; collation: " << collation.toString();
+ }
+
+ BSONObj filter;
+ BSONObj sort;
+ BSONObj projection;
+ BSONObj collation;
+};
+
+/**
+ * Per-plan cache entry information that is used strictly as debug information (e.g. it is
+ * intended for display by the $planCacheStats aggregation source). In order to save memory, this
+ * information is sometimes discarded instead of kept in the plan cache entry. Therefore, this
+ * information may not be used for any purpose outside displaying debug info, such as recovering
+ * a plan from the cache or determining whether or not the cache entry is active.
+ */
+struct DebugInfo {
+ DebugInfo(CreatedFromQuery createdFromQuery,
+ std::unique_ptr<const plan_ranker::PlanRankingDecision> decision)
+ : createdFromQuery(std::move(createdFromQuery)), decision(std::move(decision)) {
+ invariant(this->decision);
+ }
+
+ /**
+ * 'DebugInfo' is copy-constructible, copy-assignable, move-constructible, and
+ * move-assignable.
+ */
+ DebugInfo(const DebugInfo& other)
+ : createdFromQuery(other.createdFromQuery), decision(other.decision->clone()) {}
+
+ DebugInfo& operator=(const DebugInfo& other) {
+ createdFromQuery = other.createdFromQuery;
+ decision = other.decision->clone();
+ return *this;
+ }
+
+ DebugInfo(DebugInfo&&) = default;
+ DebugInfo& operator=(DebugInfo&&) = default;
+
+ ~DebugInfo() = default;
+
+ /**
+ * Returns an estimate of the size of this object, including the memory allocated elsewhere
+ * that it owns, in bytes.
+ */
+ uint64_t estimateObjectSizeInBytes() const {
+ uint64_t size = 0;
+ size += createdFromQuery.estimateObjectSizeInBytes();
+ size += decision->estimateObjectSizeInBytes();
+ return size;
+ }
+
+ CreatedFromQuery createdFromQuery;
+
+ // Information that went into picking the winning plan and also why the other plans lost.
+ // Never nullptr.
+ std::unique_ptr<const plan_ranker::PlanRankingDecision> decision;
+};
+
+DebugInfo buildDebugInfo(const CanonicalQuery& query,
+ std::unique_ptr<const plan_ranker::PlanRankingDecision> decision);
+} // namespace mongo::plan_cache_debug_info
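
DebugInfo is deliberately copyable even though it owns a unique_ptr: the copy operations above clone the PlanRankingDecision, so a copied entry never aliases the source's decision. A small sketch of that property (the helper name is invented for illustration):

    #include "mongo/db/query/plan_cache_debug_info.h"
    #include "mongo/util/assert_util.h"

    namespace mongo {
    // Illustrative only: copying DebugInfo deep-copies the owned PlanRankingDecision via
    // clone(), so the copy owns an independent decision and can outlive the source entry.
    plan_cache_debug_info::DebugInfo copyForStats(const plan_cache_debug_info::DebugInfo& info) {
        plan_cache_debug_info::DebugInfo copy = info;           // invokes decision->clone()
        invariant(copy.decision.get() != info.decision.get());  // distinct owned decisions
        return copy;
    }
    }  // namespace mongo
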
diff --git a/src/mongo/db/query/plan_cache_key_factory.cpp b/src/mongo/db/query/plan_cache_key_factory.cpp
new file mode 100644
index 00000000000..1341ef06d3a
--- /dev/null
+++ b/src/mongo/db/query/plan_cache_key_factory.cpp
@@ -0,0 +1,96 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/query/plan_cache_key_factory.h"
+
+#include "mongo/db/query/planner_ixselect.h"
+
+namespace mongo::plan_cache_detail {
+// Delimiters for cache key encoding.
+const char kEncodeDiscriminatorsBegin = '<';
+const char kEncodeDiscriminatorsEnd = '>';
+
+void encodeIndexabilityForDiscriminators(const MatchExpression* tree,
+ const IndexToDiscriminatorMap& discriminators,
+ StringBuilder* keyBuilder) {
+ for (auto&& indexAndDiscriminatorPair : discriminators) {
+ *keyBuilder << indexAndDiscriminatorPair.second.isMatchCompatibleWithIndex(tree);
+ }
+}
+
+void encodeIndexability(const MatchExpression* tree,
+ const PlanCacheIndexabilityState& indexabilityState,
+ StringBuilder* keyBuilder) {
+ if (!tree->path().empty()) {
+ const IndexToDiscriminatorMap& discriminators =
+ indexabilityState.getDiscriminators(tree->path());
+ IndexToDiscriminatorMap wildcardDiscriminators =
+ indexabilityState.buildWildcardDiscriminators(tree->path());
+ if (!discriminators.empty() || !wildcardDiscriminators.empty()) {
+ *keyBuilder << kEncodeDiscriminatorsBegin;
+ // For each discriminator on this path, append the character '0' or '1'.
+ encodeIndexabilityForDiscriminators(tree, discriminators, keyBuilder);
+ encodeIndexabilityForDiscriminators(tree, wildcardDiscriminators, keyBuilder);
+
+ *keyBuilder << kEncodeDiscriminatorsEnd;
+ }
+ } else if (tree->matchType() == MatchExpression::MatchType::NOT) {
+ // If the node is not compatible with any type of index, add a single '0' discriminator
+ // here. Otherwise add a '1'.
+ *keyBuilder << kEncodeDiscriminatorsBegin;
+ *keyBuilder << QueryPlannerIXSelect::logicalNodeMayBeSupportedByAnIndex(tree);
+ *keyBuilder << kEncodeDiscriminatorsEnd;
+ }
+
+ for (size_t i = 0; i < tree->numChildren(); ++i) {
+ encodeIndexability(tree->getChild(i), indexabilityState, keyBuilder);
+ }
+}
+
+PlanCacheKey make(const CanonicalQuery& query,
+ const CollectionPtr& collection,
+ PlanCacheKeyTag<PlanCacheKey> tag) {
+ const auto shapeString = query.encodeKey();
+
+ StringBuilder indexabilityKeyBuilder;
+ plan_cache_detail::encodeIndexability(
+ query.root(),
+ CollectionQueryInfo::get(collection).getPlanCacheIndexabilityState(),
+ &indexabilityKeyBuilder);
+
+ return PlanCacheKey(
+ shapeString, indexabilityKeyBuilder.str(), query.getEnableSlotBasedExecutionEngine());
+}
+
+sbe::PlanCacheKey make(const CanonicalQuery& query,
+ const CollectionPtr& collection,
+ PlanCacheKeyTag<sbe::PlanCacheKey> tag) {
+ return sbe::PlanCacheKey(query.getQueryObj());
+}
+} // namespace mongo::plan_cache_detail
diff --git a/src/mongo/db/query/plan_cache_key_factory.h b/src/mongo/db/query/plan_cache_key_factory.h
new file mode 100644
index 00000000000..220eb410c4b
--- /dev/null
+++ b/src/mongo/db/query/plan_cache_key_factory.h
@@ -0,0 +1,78 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/query/canonical_query.h"
+#include "mongo/db/query/classic_plan_cache.h"
+#include "mongo/db/query/collection_query_info.h"
+#include "mongo/db/query/sbe_plan_cache.h"
+
+namespace mongo {
+namespace plan_cache_detail {
+/**
+ * Serializes indexability discriminators, appending them to keyBuilder. This function is used
+ * during the computation of a query's plan cache key to ensure that two queries with different
+ * index eligibilities will have different cache keys.
+ */
+void encodeIndexability(const MatchExpression* tree,
+ const PlanCacheIndexabilityState& indexabilityState,
+ StringBuilder* keyBuilder);
+
+/**
+ * A dispatch tag for the factory functions below.
+ */
+template <typename KeyType>
+struct PlanCacheKeyTag {};
+
+/**
+ * Creates a key for the classic plan cache from the canonical query and collection instances.
+ */
+PlanCacheKey make(const CanonicalQuery& query,
+ const CollectionPtr& collection,
+ PlanCacheKeyTag<PlanCacheKey> tag);
+
+/**
+ * Creates a key for the SBE plan cache from the canonical query and collection instances.
+ */
+sbe::PlanCacheKey make(const CanonicalQuery& query,
+ const CollectionPtr& collection,
+ PlanCacheKeyTag<sbe::PlanCacheKey> tag);
+} // namespace plan_cache_detail
+
+namespace plan_cache_key_factory {
+/**
+ * A factory helper to make a plan cache key of the given type.
+ */
+template <typename K>
+K make(const CanonicalQuery& query, const CollectionPtr& collection) {
+ return plan_cache_detail::make(query, collection, plan_cache_detail::PlanCacheKeyTag<K>{});
+}
+} // namespace plan_cache_key_factory
+} // namespace mongo
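
With the indexability state no longer owned by the plan cache, key construction is reached through this factory, and the PlanCacheKeyTag dispatch selects the classic or SBE overload from the requested key type. A minimal sketch of a call site, assuming a CanonicalQuery and CollectionPtr obtained elsewhere (the function name is illustrative):

    #include "mongo/db/query/plan_cache_key_factory.h"

    namespace mongo {
    // Illustrative only: the same factory entry point produces either key type; the
    // PlanCacheKeyTag dispatch above routes to the matching make() overload.
    void computePlanCacheKeys(const CanonicalQuery& cq, const CollectionPtr& collection) {
        PlanCacheKey classicKey = plan_cache_key_factory::make<PlanCacheKey>(cq, collection);
        sbe::PlanCacheKey sbeKey = plan_cache_key_factory::make<sbe::PlanCacheKey>(cq, collection);

        (void)classicKey;  // silence unused-variable warnings in this sketch
        (void)sbeKey;
    }
    }  // namespace mongo
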
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 1931f797320..f3ae88d7f11 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -46,6 +46,7 @@
#include "mongo/db/pipeline/expression_context_for_test.h"
#include "mongo/db/query/canonical_query_encoder.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
+#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/plan_ranker.h"
#include "mongo/db/query/query_knobs_gen.h"
#include "mongo/db/query/query_planner.h"
@@ -71,6 +72,16 @@ using std::vector;
static const NamespaceString nss("test.collection");
+PlanCacheKey makeKey(const CanonicalQuery& cq, const std::vector<CoreIndexInfo>& indexCores = {}) {
+ PlanCacheIndexabilityState indexabilityState;
+ indexabilityState.updateDiscriminators(indexCores);
+
+ StringBuilder indexabilityKeyBuilder;
+ plan_cache_detail::encodeIndexability(cq.root(), indexabilityState, &indexabilityKeyBuilder);
+
+ return {cq.encodeKey(), indexabilityKeyBuilder.str(), cq.getEnableSlotBasedExecutionEngine()};
+}
+
/**
* Utility functions to create a CanonicalQuery
*/
@@ -352,7 +363,7 @@ std::unique_ptr<plan_ranker::PlanRankingDecision> createDecision(size_t numPlans
* in the planner cache.
*/
void assertShouldCacheQuery(const CanonicalQuery& query) {
- if (PlanCache::shouldCacheQuery(query)) {
+ if (shouldCacheQuery(query)) {
return;
}
str::stream ss;
@@ -361,7 +372,7 @@ void assertShouldCacheQuery(const CanonicalQuery& query) {
}
void assertShouldNotCacheQuery(const CanonicalQuery& query) {
- if (!PlanCache::shouldCacheQuery(query)) {
+ if (!shouldCacheQuery(query)) {
return;
}
str::stream ss;
@@ -529,7 +540,7 @@ TEST(PlanCacheTest, AddEmptySolutions) {
unique_ptr<plan_ranker::PlanRankingDecision> decision(createDecision(1U));
QueryTestServiceContext serviceContext;
ASSERT_NOT_OK(planCache.set(
- *cq, std::make_unique<SolutionCacheData>(), solns, std::move(decision), Date_t{}));
+ makeKey(*cq), std::make_unique<SolutionCacheData>(), solns, std::move(decision), Date_t{}));
}
void addCacheEntryForShape(const CanonicalQuery& cq, PlanCache* planCache) {
@@ -537,7 +548,8 @@ void addCacheEntryForShape(const CanonicalQuery& cq, PlanCache* planCache) {
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
- ASSERT_OK(planCache->set(cq, qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
+ ASSERT_OK(
+ planCache->set(makeKey(cq), qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
}
TEST(PlanCacheTest, InactiveEntriesDisabled) {
@@ -549,27 +561,27 @@ TEST(PlanCacheTest, InactiveEntriesDisabled) {
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
+ auto key = makeKey(*cq);
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
QueryTestServiceContext serviceContext;
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
+ ASSERT_OK(planCache.set(key, qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
// After add, the planCache should have an _active_ entry.
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentActive);
// Call deactivate(). It should be a noop.
- planCache.deactivate(*cq);
+ planCache.deactivate(key);
// The entry should still be active.
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentActive);
// remove() the entry.
- ASSERT_OK(planCache.remove(*cq));
+ planCache.remove(key);
ASSERT_EQ(planCache.size(), 0U);
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
}
-
TEST(PlanCacheTest, PlanCacheLRUPolicyRemovesInactiveEntries) {
// Use a tiny cache size.
const size_t kCacheSize = 2;
@@ -577,31 +589,33 @@ TEST(PlanCacheTest, PlanCacheLRUPolicyRemovesInactiveEntries) {
QueryTestServiceContext serviceContext;
unique_ptr<CanonicalQuery> cqA(canonicalize("{a: 1}"));
- ASSERT_EQ(planCache.get(*cqA).state, PlanCache::CacheEntryState::kNotPresent);
+ auto keyA = makeKey(*cqA);
+ ASSERT_EQ(planCache.get(keyA).state, PlanCache::CacheEntryState::kNotPresent);
addCacheEntryForShape(*cqA.get(), &planCache);
-
// After add, the planCache should have an inactive entry.
- ASSERT_EQ(planCache.get(*cqA).state, PlanCache::CacheEntryState::kPresentInactive);
+ ASSERT_EQ(planCache.get(keyA).state, PlanCache::CacheEntryState::kPresentInactive);
// Add a cache entry for another shape.
unique_ptr<CanonicalQuery> cqB(canonicalize("{b: 1}"));
- ASSERT_EQ(planCache.get(*cqB).state, PlanCache::CacheEntryState::kNotPresent);
+ auto keyB = makeKey(*cqB);
+ ASSERT_EQ(planCache.get(keyB).state, PlanCache::CacheEntryState::kNotPresent);
addCacheEntryForShape(*cqB.get(), &planCache);
- ASSERT_EQ(planCache.get(*cqB).state, PlanCache::CacheEntryState::kPresentInactive);
+ ASSERT_EQ(planCache.get(keyB).state, PlanCache::CacheEntryState::kPresentInactive);
// Access the cached solution for the {a: 1} shape. Now the entry for {b: 1} will be the least
// recently used.
- ASSERT_EQ(planCache.get(*cqA).state, PlanCache::CacheEntryState::kPresentInactive);
+ ASSERT_EQ(planCache.get(keyA).state, PlanCache::CacheEntryState::kPresentInactive);
// Insert another entry. Since the cache size is 2, we expect the {b: 1} entry to be ejected.
unique_ptr<CanonicalQuery> cqC(canonicalize("{c: 1}"));
- ASSERT_EQ(planCache.get(*cqC).state, PlanCache::CacheEntryState::kNotPresent);
+ auto keyC = makeKey(*cqC);
+ ASSERT_EQ(planCache.get(keyC).state, PlanCache::CacheEntryState::kNotPresent);
addCacheEntryForShape(*cqC.get(), &planCache);
// Check that {b: 1} is gone, but {a: 1} and {c: 1} both still have entries.
- ASSERT_EQ(planCache.get(*cqB).state, PlanCache::CacheEntryState::kNotPresent);
- ASSERT_EQ(planCache.get(*cqA).state, PlanCache::CacheEntryState::kPresentInactive);
- ASSERT_EQ(planCache.get(*cqC).state, PlanCache::CacheEntryState::kPresentInactive);
+ ASSERT_EQ(planCache.get(keyB).state, PlanCache::CacheEntryState::kNotPresent);
+ ASSERT_EQ(planCache.get(keyA).state, PlanCache::CacheEntryState::kPresentInactive);
+ ASSERT_EQ(planCache.get(keyC).state, PlanCache::CacheEntryState::kPresentInactive);
}
TEST(PlanCacheTest, PlanCacheRemoveDeletesInactiveEntries) {
@@ -609,18 +623,19 @@ TEST(PlanCacheTest, PlanCacheRemoveDeletesInactiveEntries) {
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
+ auto key = makeKey(*cq);
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
QueryTestServiceContext serviceContext;
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
+ ASSERT_OK(planCache.set(key, qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
// After add, the planCache should have an inactive entry.
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentInactive);
// remove() the entry.
- ASSERT_OK(planCache.remove(*cq));
+ planCache.remove(key);
ASSERT_EQ(planCache.size(), 0U);
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
}
TEST(PlanCacheTest, PlanCacheFlushDeletesInactiveEntries) {
@@ -628,18 +643,19 @@ TEST(PlanCacheTest, PlanCacheFlushDeletesInactiveEntries) {
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
+ auto key = makeKey(*cq);
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
QueryTestServiceContext serviceContext;
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
+ ASSERT_OK(planCache.set(key, qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
// After add, the planCache should have an inactive entry.
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentInactive);
// Clear the plan cache. The inactive entry should now be removed.
planCache.clear();
ASSERT_EQ(planCache.size(), 0U);
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
}
TEST(PlanCacheTest, AddActiveCacheEntry) {
@@ -647,25 +663,26 @@ TEST(PlanCacheTest, AddActiveCacheEntry) {
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
+ auto key = makeKey(*cq);
// Check if key is in cache before and after set().
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
QueryTestServiceContext serviceContext;
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U, 20), Date_t{}));
+ ASSERT_OK(planCache.set(key, qs->cacheData->clone(), solns, createDecision(1U, 20), Date_t{}));
// After add, the planCache should have an inactive entry.
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentInactive);
// Calling set() again, with a solution that had a lower works value should create an active
// entry.
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U, 10), Date_t{}));
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
+ ASSERT_OK(planCache.set(key, qs->cacheData->clone(), solns, createDecision(1U, 10), Date_t{}));
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentActive);
ASSERT_EQUALS(planCache.size(), 1U);
// Clear the plan cache. The active entry should now be removed.
planCache.clear();
ASSERT_EQ(planCache.size(), 0U);
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
}
TEST(PlanCacheTest, WorksValueIncreases) {
@@ -673,45 +690,59 @@ TEST(PlanCacheTest, WorksValueIncreases) {
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
+ auto key = makeKey(*cq);
+ PlanCacheLoggingCallbacks<PlanCacheKey, SolutionCacheData> callbacks{*cq};
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
QueryTestServiceContext serviceContext;
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U, 10), Date_t{}));
+ ASSERT_OK(planCache.set(key,
+ qs->cacheData->clone(),
+ solns,
+ createDecision(1U, 10),
+ Date_t{},
+ boost::none /* worksGrowthCoefficient */,
+ &callbacks));
// After add, the planCache should have an inactive entry.
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
- auto entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentInactive);
+ auto entry = assertGet(planCache.getEntry(key));
ASSERT_EQ(entry->works, 10U);
ASSERT_FALSE(entry->isActive);
// Calling set() again, with a solution that had a higher works value. This should cause the
// works on the original entry to be increased.
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U, 50), Date_t{}));
+ ASSERT_OK(planCache.set(key, qs->cacheData->clone(), solns, createDecision(1U, 50), Date_t{}));
// The entry should still be inactive. Its works should double though.
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
- entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentInactive);
+ entry = assertGet(planCache.getEntry(key));
ASSERT_FALSE(entry->isActive);
ASSERT_EQ(entry->works, 20U);
// Calling set() again, with a solution that had a higher works value. This should cause the
// works on the original entry to be increased.
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U, 30), Date_t{}));
+ ASSERT_OK(planCache.set(key, qs->cacheData->clone(), solns, createDecision(1U, 30), Date_t{}));
// The entry should still be inactive. Its works should have doubled again.
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
- entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentInactive);
+ entry = assertGet(planCache.getEntry(key));
ASSERT_FALSE(entry->isActive);
ASSERT_EQ(entry->works, 40U);
// Calling set() again, with a solution that has a lower works value than what's currently in
// the cache.
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U, 25), Date_t{}));
+ ASSERT_OK(planCache.set(key,
+ qs->cacheData->clone(),
+ solns,
+ createDecision(1U, 25),
+ Date_t{},
+ boost::none /* worksGrowthCoefficient */,
+ &callbacks));
// The solution just run should now be in an active cache entry, with a works
// equal to the number of works the solution took.
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
- entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentActive);
+ entry = assertGet(planCache.getEntry(key));
ASSERT_TRUE(entry->isActive);
ASSERT(entry->debugInfo);
@@ -725,7 +756,7 @@ TEST(PlanCacheTest, WorksValueIncreases) {
// Clear the plan cache. The active entry should now be removed.
planCache.clear();
ASSERT_EQ(planCache.size(), 0U);
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
}
TEST(PlanCacheTest, WorksValueIncreasesByAtLeastOne) {
@@ -736,14 +767,15 @@ TEST(PlanCacheTest, WorksValueIncreasesByAtLeastOne) {
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
+ auto key = makeKey(*cq);
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
QueryTestServiceContext serviceContext;
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U, 3), Date_t{}));
+ ASSERT_OK(planCache.set(key, qs->cacheData->clone(), solns, createDecision(1U, 3), Date_t{}));
// After add, the planCache should have an inactive entry.
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
- auto entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentInactive);
+ auto entry = assertGet(planCache.getEntry(key));
ASSERT_EQ(entry->works, 3U);
ASSERT_FALSE(entry->isActive);
@@ -752,18 +784,18 @@ TEST(PlanCacheTest, WorksValueIncreasesByAtLeastOne) {
// multiplying by the value 1.10 will give a value of 3 (static_cast<size_t>(1.1 * 3) == 3).
// We check that the works value is increased 1 instead.
ASSERT_OK(planCache.set(
- *cq, qs->cacheData->clone(), solns, createDecision(1U, 50), Date_t{}, kWorksCoeff));
+ key, qs->cacheData->clone(), solns, createDecision(1U, 50), Date_t{}, kWorksCoeff));
// The entry should still be inactive. Its works should increase by 1.
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
- entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentInactive);
+ entry = assertGet(planCache.getEntry(key));
ASSERT_FALSE(entry->isActive);
ASSERT_EQ(entry->works, 4U);
// Clear the plan cache. The inactive entry should now be removed.
planCache.clear();
ASSERT_EQ(planCache.size(), 0U);
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
}
TEST(PlanCacheTest, SetIsNoopWhenNewEntryIsWorse) {
@@ -771,30 +803,31 @@ TEST(PlanCacheTest, SetIsNoopWhenNewEntryIsWorse) {
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
+ auto key = makeKey(*cq);
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
QueryTestServiceContext serviceContext;
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U, 50), Date_t{}));
+ ASSERT_OK(planCache.set(key, qs->cacheData->clone(), solns, createDecision(1U, 50), Date_t{}));
// After add, the planCache should have an inactive entry.
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
- auto entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentInactive);
+ auto entry = assertGet(planCache.getEntry(key));
ASSERT_EQ(entry->works, 50U);
ASSERT_FALSE(entry->isActive);
// Call set() again, with a solution that has a lower works value. This will result in an
// active entry being created.
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U, 20), Date_t{}));
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
- entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_OK(planCache.set(key, qs->cacheData->clone(), solns, createDecision(1U, 20), Date_t{}));
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentActive);
+ entry = assertGet(planCache.getEntry(key));
ASSERT_TRUE(entry->isActive);
ASSERT_EQ(entry->works, 20U);
// Now call set() again, but with a solution that has a higher works value. This should be
// a noop.
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U, 100), Date_t{}));
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
- entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_OK(planCache.set(key, qs->cacheData->clone(), solns, createDecision(1U, 100), Date_t{}));
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentActive);
+ entry = assertGet(planCache.getEntry(key));
ASSERT_TRUE(entry->isActive);
ASSERT_EQ(entry->works, 20U);
}
@@ -804,29 +837,30 @@ TEST(PlanCacheTest, SetOverwritesWhenNewEntryIsBetter) {
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
+ auto key = makeKey(*cq);
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
QueryTestServiceContext serviceContext;
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U, 50), Date_t{}));
+ ASSERT_OK(planCache.set(key, qs->cacheData->clone(), solns, createDecision(1U, 50), Date_t{}));
// After add, the planCache should have an inactive entry.
- auto entry = assertGet(planCache.getEntry(*cq));
+ auto entry = assertGet(planCache.getEntry(key));
ASSERT_EQ(entry->works, 50U);
ASSERT_FALSE(entry->isActive);
// Call set() again, with a solution that has a lower works value. This will result in an
// active entry being created.
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U, 20), Date_t{}));
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
- entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_OK(planCache.set(key, qs->cacheData->clone(), solns, createDecision(1U, 20), Date_t{}));
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentActive);
+ entry = assertGet(planCache.getEntry(key));
ASSERT_TRUE(entry->isActive);
ASSERT_EQ(entry->works, 20U);
// Now call set() again, with a solution that has a lower works value. The current active entry
// should be overwritten.
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U, 10), Date_t{}));
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
- entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_OK(planCache.set(key, qs->cacheData->clone(), solns, createDecision(1U, 10), Date_t{}));
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentActive);
+ entry = assertGet(planCache.getEntry(key));
ASSERT_TRUE(entry->isActive);
ASSERT_EQ(entry->works, 10U);
}
@@ -836,29 +870,30 @@ TEST(PlanCacheTest, DeactivateCacheEntry) {
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
+ auto key = makeKey(*cq);
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
QueryTestServiceContext serviceContext;
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U, 50), Date_t{}));
+ ASSERT_OK(planCache.set(key, qs->cacheData->clone(), solns, createDecision(1U, 50), Date_t{}));
// After add, the planCache should have an inactive entry.
- auto entry = assertGet(planCache.getEntry(*cq));
+ auto entry = assertGet(planCache.getEntry(key));
ASSERT_EQ(entry->works, 50U);
ASSERT_FALSE(entry->isActive);
// Call set() again, with a solution that has a lower works value. This will result in an
// active entry being created.
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U, 20), Date_t{}));
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
- entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_OK(planCache.set(key, qs->cacheData->clone(), solns, createDecision(1U, 20), Date_t{}));
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentActive);
+ entry = assertGet(planCache.getEntry(key));
ASSERT_TRUE(entry->isActive);
ASSERT_EQ(entry->works, 20U);
- planCache.deactivate(*cq);
- ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
+ planCache.deactivate(key);
+ ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentInactive);
// Be sure the entry has the same works value.
- entry = assertGet(planCache.getEntry(*cq));
+ entry = assertGet(planCache.getEntry(key));
ASSERT_FALSE(entry->isActive);
ASSERT_EQ(entry->works, 20U);
}
@@ -871,8 +906,8 @@ TEST(PlanCacheTest, GetMatchingStatsMatchesAndSerializesCorrectly) {
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
- ASSERT_OK(
- planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U, 5), Date_t{}));
+ ASSERT_OK(planCache.set(
+ makeKey(*cq), qs->cacheData->clone(), solns, createDecision(1U, 5), Date_t{}));
}
// Create a second cache entry with 3 works.
@@ -880,8 +915,8 @@ TEST(PlanCacheTest, GetMatchingStatsMatchesAndSerializesCorrectly) {
unique_ptr<CanonicalQuery> cq(canonicalize("{b: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
- ASSERT_OK(
- planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U, 3), Date_t{}));
+ ASSERT_OK(planCache.set(
+ makeKey(*cq), qs->cacheData->clone(), solns, createDecision(1U, 3), Date_t{}));
}
// Verify that the cache entries have been created.
@@ -1184,17 +1219,17 @@ protected:
std::vector<QuerySolution*> solutions;
solutions.push_back(&qs);
- uint32_t queryHash = canonical_query_encoder::computeHash(ck.stringData());
+ uint32_t queryHash = ck.queryHash();
uint32_t planCacheKey = queryHash;
- auto entry = PlanCacheEntry::create(solutions,
- createDecision(1U),
- *scopedCq,
- qs.cacheData->clone(),
- queryHash,
- planCacheKey,
- Date_t(),
- false,
- 0);
+ auto entry = PlanCacheEntry::create<PlanCacheKey>(solutions,
+ createDecision(1U),
+ qs.cacheData->clone(),
+ queryHash,
+ planCacheKey,
+ Date_t(),
+ false /* isActive */,
+ 0 /* works */,
+ nullptr /* callbacks */);
CachedSolution cachedSoln(*entry);
auto statusWithQs = QueryPlanner::planFromCache(*scopedCq, params, cachedSoln);
@@ -1873,11 +1908,11 @@ TEST_F(CachePlanSelectionTest, ContainedOrAndIntersection) {
TEST(PlanCacheTest, ComputeKeySparseIndex) {
PlanCache planCache(5000);
const auto keyPattern = BSON("a" << 1);
- planCache.notifyOfIndexUpdates(
- {CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- true, // sparse
- IndexEntry::Identifier{""})}); // name
+ const std::vector<CoreIndexInfo> indexCores = {
+ CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ true, // sparse
+ IndexEntry::Identifier{""})}; // name
unique_ptr<CanonicalQuery> cqEqNumber(canonicalize("{a: 0}}"));
unique_ptr<CanonicalQuery> cqEqString(canonicalize("{a: 'x'}}"));
@@ -1885,12 +1920,12 @@ TEST(PlanCacheTest, ComputeKeySparseIndex) {
// 'cqEqNumber' and 'cqEqString' get the same key, since both are compatible with this
// index.
- const auto eqNumberKey = planCache.computeKey(*cqEqNumber);
- const auto eqStringKey = planCache.computeKey(*cqEqString);
+ const auto eqNumberKey = makeKey(*cqEqNumber, indexCores);
+ const auto eqStringKey = makeKey(*cqEqString, indexCores);
ASSERT_EQ(eqNumberKey, eqStringKey);
// 'cqEqNull' gets a different key, since it is not compatible with this index.
- const auto eqNullKey = planCache.computeKey(*cqEqNull);
+ const auto eqNullKey = makeKey(*cqEqNull, indexCores);
ASSERT_NOT_EQUALS(eqNullKey, eqNumberKey);
assertPlanCacheKeysUnequalDueToDiscriminators(eqNullKey, eqNumberKey);
@@ -1906,23 +1941,23 @@ TEST(PlanCacheTest, ComputeKeyPartialIndex) {
PlanCache planCache(5000);
const auto keyPattern = BSON("a" << 1);
- planCache.notifyOfIndexUpdates(
- {CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- false, // sparse
- IndexEntry::Identifier{""}, // name
- filterExpr.get())}); // filterExpr
+ const std::vector<CoreIndexInfo> indexCores = {
+ CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ false, // sparse
+ IndexEntry::Identifier{""}, // name
+ filterExpr.get())}; // filterExpr
unique_ptr<CanonicalQuery> cqGtNegativeFive(canonicalize("{f: {$gt: -5}}"));
unique_ptr<CanonicalQuery> cqGtZero(canonicalize("{f: {$gt: 0}}"));
unique_ptr<CanonicalQuery> cqGtFive(canonicalize("{f: {$gt: 5}}"));
// 'cqGtZero' and 'cqGtFive' get the same key, since both are compatible with this index.
- ASSERT_EQ(planCache.computeKey(*cqGtZero), planCache.computeKey(*cqGtFive));
+ ASSERT_EQ(makeKey(*cqGtZero, indexCores), makeKey(*cqGtFive, indexCores));
// 'cqGtNegativeFive' gets a different key, since it is not compatible with this index.
- assertPlanCacheKeysUnequalDueToDiscriminators(planCache.computeKey(*cqGtNegativeFive),
- planCache.computeKey(*cqGtZero));
+ assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*cqGtNegativeFive, indexCores),
+ makeKey(*cqGtZero, indexCores));
}
// Query shapes should get the same plan cache key if they have the same collation indexability.
@@ -1931,13 +1966,13 @@ TEST(PlanCacheTest, ComputeKeyCollationIndex) {
PlanCache planCache(5000);
const auto keyPattern = BSON("a" << 1);
- planCache.notifyOfIndexUpdates(
- {CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- false, // sparse
- IndexEntry::Identifier{""}, // name
- nullptr, // filterExpr
- &collator)}); // collation
+ const std::vector<CoreIndexInfo> indexCores = {
+ CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ false, // sparse
+ IndexEntry::Identifier{""}, // name
+ nullptr, // filterExpr
+ &collator)}; // collation
unique_ptr<CanonicalQuery> containsString(canonicalize("{a: 'abc'}"));
unique_ptr<CanonicalQuery> containsObject(canonicalize("{a: {b: 'abc'}}"));
@@ -1948,20 +1983,20 @@ TEST(PlanCacheTest, ComputeKeyCollationIndex) {
// 'containsString', 'containsObject', and 'containsArray' have the same key, since none are
// compatible with the index.
- ASSERT_EQ(planCache.computeKey(*containsString), planCache.computeKey(*containsObject));
- ASSERT_EQ(planCache.computeKey(*containsString), planCache.computeKey(*containsArray));
+ ASSERT_EQ(makeKey(*containsString, indexCores), makeKey(*containsObject, indexCores));
+ ASSERT_EQ(makeKey(*containsString, indexCores), makeKey(*containsArray, indexCores));
// 'noStrings' gets a different key since it is compatible with the index.
- assertPlanCacheKeysUnequalDueToDiscriminators(planCache.computeKey(*containsString),
- planCache.computeKey(*noStrings));
- ASSERT_EQ(planCache.computeKey(*containsString).getIndexabilityDiscriminators(), "<0>");
- ASSERT_EQ(planCache.computeKey(*noStrings).getIndexabilityDiscriminators(), "<1>");
+ assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*containsString, indexCores),
+ makeKey(*noStrings, indexCores));
+ ASSERT_EQ(makeKey(*containsString, indexCores).getIndexabilityDiscriminators(), "<0>");
+ ASSERT_EQ(makeKey(*noStrings, indexCores).getIndexabilityDiscriminators(), "<1>");
// 'noStrings' and 'containsStringHasCollation' get different keys, since the collation
// specified in the query is considered part of its shape. However, they have the same index
// compatibility, so the unstable part of their PlanCacheKeys should be the same.
- PlanCacheKey noStringKey = planCache.computeKey(*noStrings);
- PlanCacheKey withStringAndCollationKey = planCache.computeKey(*containsStringHasCollation);
+ PlanCacheKey noStringKey = makeKey(*noStrings, indexCores);
+ PlanCacheKey withStringAndCollationKey = makeKey(*containsStringHasCollation, indexCores);
ASSERT_NE(noStringKey, withStringAndCollationKey);
ASSERT_EQ(noStringKey.getUnstablePart(), withStringAndCollationKey.getUnstablePart());
ASSERT_NE(noStringKey.getStableKeyStringData(),
@@ -1976,28 +2011,28 @@ TEST(PlanCacheTest, ComputeKeyCollationIndex) {
// 'inContainsString', 'inContainsObject', and 'inContainsArray' have the same key, since none
// are compatible with the index.
- ASSERT_EQ(planCache.computeKey(*inContainsString), planCache.computeKey(*inContainsObject));
- ASSERT_EQ(planCache.computeKey(*inContainsString), planCache.computeKey(*inContainsArray));
+ ASSERT_EQ(makeKey(*inContainsString, indexCores), makeKey(*inContainsObject, indexCores));
+ ASSERT_EQ(makeKey(*inContainsString, indexCores), makeKey(*inContainsArray, indexCores));
// 'inNoStrings' gets a different key since it is compatible with the index.
- assertPlanCacheKeysUnequalDueToDiscriminators(planCache.computeKey(*inContainsString),
- planCache.computeKey(*inNoStrings));
- ASSERT_EQ(planCache.computeKey(*inContainsString).getIndexabilityDiscriminators(), "<0>");
- ASSERT_EQ(planCache.computeKey(*inNoStrings).getIndexabilityDiscriminators(), "<1>");
+ assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*inContainsString, indexCores),
+ makeKey(*inNoStrings, indexCores));
+ ASSERT_EQ(makeKey(*inContainsString, indexCores).getIndexabilityDiscriminators(), "<0>");
+ ASSERT_EQ(makeKey(*inNoStrings, indexCores).getIndexabilityDiscriminators(), "<1>");
// 'inNoStrings' and 'inContainsStringHasCollation' get different keys, since the collation
// specified in the query is part of its shape. They have the same index compatibility, though,
// so the unstable parts of their keys are the same.
- ASSERT_NE(planCache.computeKey(*inNoStrings),
- planCache.computeKey(*inContainsStringHasCollation));
- ASSERT_EQ(planCache.computeKey(*inNoStrings).getUnstablePart(),
- planCache.computeKey(*inContainsStringHasCollation).getUnstablePart());
+ ASSERT_NE(makeKey(*inNoStrings, indexCores),
+ makeKey(*inContainsStringHasCollation, indexCores));
+ ASSERT_EQ(makeKey(*inNoStrings, indexCores).getUnstablePart(),
+ makeKey(*inContainsStringHasCollation, indexCores).getUnstablePart());
}
TEST(PlanCacheTest, ComputeKeyWildcardIndex) {
auto entryProjUpdatePair = makeWildcardUpdate(BSON("a.$**" << 1));
PlanCache planCache(5000);
- planCache.notifyOfIndexUpdates({entryProjUpdatePair.first});
+ const std::vector<CoreIndexInfo> indexCores = {entryProjUpdatePair.first};
// Used to check that two queries have the same shape when no indexes are present.
PlanCache planCacheWithNoIndexes(5000);
@@ -2014,30 +2049,27 @@ TEST(PlanCacheTest, ComputeKeyWildcardIndex) {
unique_ptr<CanonicalQuery> doesNotUsePath(canonicalize("{b: 1234}"));
// Check that the queries which are compatible with the index have the same key.
- ASSERT_EQ(planCache.computeKey(*usesPathWithScalar),
- planCache.computeKey(*usesPathWithEmptyArray));
+ ASSERT_EQ(makeKey(*usesPathWithScalar, indexCores),
+ makeKey(*usesPathWithEmptyArray, indexCores));
// Check that the queries which have the same path as the index, but aren't supported, have
// different keys.
- ASSERT_EQ(planCacheWithNoIndexes.computeKey(*usesPathWithScalar),
- planCacheWithNoIndexes.computeKey(*usesPathWithObject));
- assertPlanCacheKeysUnequalDueToDiscriminators(planCache.computeKey(*usesPathWithScalar),
- planCache.computeKey(*usesPathWithObject));
- ASSERT_EQ(planCache.computeKey(*usesPathWithScalar).getIndexabilityDiscriminators(), "<1>");
- ASSERT_EQ(planCache.computeKey(*usesPathWithObject).getIndexabilityDiscriminators(), "<0>");
+ ASSERT_EQ(makeKey(*usesPathWithScalar), makeKey(*usesPathWithObject));
+ assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*usesPathWithScalar, indexCores),
+ makeKey(*usesPathWithObject, indexCores));
+ ASSERT_EQ(makeKey(*usesPathWithScalar, indexCores).getIndexabilityDiscriminators(), "<1>");
+ ASSERT_EQ(makeKey(*usesPathWithObject, indexCores).getIndexabilityDiscriminators(), "<0>");
- ASSERT_EQ(planCache.computeKey(*usesPathWithObject), planCache.computeKey(*usesPathWithArray));
- ASSERT_EQ(planCache.computeKey(*usesPathWithObject),
- planCache.computeKey(*usesPathWithArrayContainingObject));
+ ASSERT_EQ(makeKey(*usesPathWithObject, indexCores), makeKey(*usesPathWithArray, indexCores));
+ ASSERT_EQ(makeKey(*usesPathWithObject, indexCores),
+ makeKey(*usesPathWithArrayContainingObject, indexCores));
// The query on 'b' should have a completely different plan cache key (both with and without a
// wildcard index).
- ASSERT_NE(planCacheWithNoIndexes.computeKey(*usesPathWithScalar),
- planCacheWithNoIndexes.computeKey(*doesNotUsePath));
- ASSERT_NE(planCache.computeKey(*usesPathWithScalar), planCache.computeKey(*doesNotUsePath));
- ASSERT_NE(planCacheWithNoIndexes.computeKey(*usesPathWithObject),
- planCacheWithNoIndexes.computeKey(*doesNotUsePath));
- ASSERT_NE(planCache.computeKey(*usesPathWithObject), planCache.computeKey(*doesNotUsePath));
+ ASSERT_NE(makeKey(*usesPathWithScalar), makeKey(*doesNotUsePath));
+ ASSERT_NE(makeKey(*usesPathWithScalar, indexCores), makeKey(*doesNotUsePath, indexCores));
+ ASSERT_NE(makeKey(*usesPathWithObject), makeKey(*doesNotUsePath));
+ ASSERT_NE(makeKey(*usesPathWithObject, indexCores), makeKey(*doesNotUsePath, indexCores));
// More complex queries with similar shapes. This is to ensure that plan cache key encoding
// correctly traverses the expression tree.
@@ -2047,14 +2079,13 @@ TEST(PlanCacheTest, ComputeKeyWildcardIndex) {
canonicalize("{$or: [{a: {someobject: 1}}, {a: {$gt: [1,2]}}]}");
// The two queries should have the same shape when no indexes are present, but different shapes
// when a $** index is present.
- ASSERT_EQ(planCacheWithNoIndexes.computeKey(*orQueryWithOneBranchAllowed),
- planCacheWithNoIndexes.computeKey(*orQueryWithNoBranchesAllowed));
+ ASSERT_EQ(makeKey(*orQueryWithOneBranchAllowed), makeKey(*orQueryWithNoBranchesAllowed));
assertPlanCacheKeysUnequalDueToDiscriminators(
- planCache.computeKey(*orQueryWithOneBranchAllowed),
- planCache.computeKey(*orQueryWithNoBranchesAllowed));
- ASSERT_EQ(planCache.computeKey(*orQueryWithOneBranchAllowed).getIndexabilityDiscriminators(),
+ makeKey(*orQueryWithOneBranchAllowed, indexCores),
+ makeKey(*orQueryWithNoBranchesAllowed, indexCores));
+ ASSERT_EQ(makeKey(*orQueryWithOneBranchAllowed, indexCores).getIndexabilityDiscriminators(),
"<1><0>");
- ASSERT_EQ(planCache.computeKey(*orQueryWithNoBranchesAllowed).getIndexabilityDiscriminators(),
+ ASSERT_EQ(makeKey(*orQueryWithNoBranchesAllowed, indexCores).getIndexabilityDiscriminators(),
"<0><0>");
}
@@ -2062,23 +2093,23 @@ TEST(PlanCacheTest, ComputeKeyWildcardIndexDiscriminatesEqualityToEmptyObj) {
auto entryProjUpdatePair = makeWildcardUpdate(BSON("a.$**" << 1));
PlanCache planCache(5000);
- planCache.notifyOfIndexUpdates({entryProjUpdatePair.first});
+ const std::vector<CoreIndexInfo> indexCores = {entryProjUpdatePair.first};
// Equality to empty obj and equality to non-empty obj have different plan cache keys.
std::unique_ptr<CanonicalQuery> equalsEmptyObj(canonicalize("{a: {}}"));
std::unique_ptr<CanonicalQuery> equalsNonEmptyObj(canonicalize("{a: {b: 1}}"));
- assertPlanCacheKeysUnequalDueToDiscriminators(planCache.computeKey(*equalsEmptyObj),
- planCache.computeKey(*equalsNonEmptyObj));
- ASSERT_EQ(planCache.computeKey(*equalsNonEmptyObj).getIndexabilityDiscriminators(), "<0>");
- ASSERT_EQ(planCache.computeKey(*equalsEmptyObj).getIndexabilityDiscriminators(), "<1>");
+ assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*equalsEmptyObj, indexCores),
+ makeKey(*equalsNonEmptyObj, indexCores));
+ ASSERT_EQ(makeKey(*equalsNonEmptyObj, indexCores).getIndexabilityDiscriminators(), "<0>");
+ ASSERT_EQ(makeKey(*equalsEmptyObj, indexCores).getIndexabilityDiscriminators(), "<1>");
// $in with empty obj and $in with non-empty obj have different plan cache keys.
std::unique_ptr<CanonicalQuery> inWithEmptyObj(canonicalize("{a: {$in: [{}]}}"));
std::unique_ptr<CanonicalQuery> inWithNonEmptyObj(canonicalize("{a: {$in: [{b: 1}]}}"));
- assertPlanCacheKeysUnequalDueToDiscriminators(planCache.computeKey(*inWithEmptyObj),
- planCache.computeKey(*inWithNonEmptyObj));
- ASSERT_EQ(planCache.computeKey(*inWithNonEmptyObj).getIndexabilityDiscriminators(), "<0>");
- ASSERT_EQ(planCache.computeKey(*inWithEmptyObj).getIndexabilityDiscriminators(), "<1>");
+ assertPlanCacheKeysUnequalDueToDiscriminators(makeKey(*inWithEmptyObj, indexCores),
+ makeKey(*inWithNonEmptyObj, indexCores));
+ ASSERT_EQ(makeKey(*inWithNonEmptyObj, indexCores).getIndexabilityDiscriminators(), "<0>");
+ ASSERT_EQ(makeKey(*inWithEmptyObj, indexCores).getIndexabilityDiscriminators(), "<1>");
}
TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyBasedOnPartialFilterExpression) {
@@ -2090,15 +2121,15 @@ TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyBasedOnPartialFilter
indexInfo.filterExpr = filterExpr.get();
PlanCache planCache(5000);
- planCache.notifyOfIndexUpdates({indexInfo});
+ const std::vector<CoreIndexInfo> indexCores = {indexInfo};
// Test that queries on field 'x' are discriminated based on their relationship with the partial
// filter expression.
{
auto compatibleWithFilter = canonicalize("{x: {$eq: 5}}");
auto incompatibleWithFilter = canonicalize("{x: {$eq: -5}}");
- auto compatibleKey = planCache.computeKey(*compatibleWithFilter);
- auto incompatibleKey = planCache.computeKey(*incompatibleWithFilter);
+ auto compatibleKey = makeKey(*compatibleWithFilter, indexCores);
+ auto incompatibleKey = makeKey(*incompatibleWithFilter, indexCores);
assertPlanCacheKeysUnequalDueToDiscriminators(compatibleKey, incompatibleKey);
// The discriminator strings have the format "<xx>". That is, there are two discriminator
@@ -2113,8 +2144,8 @@ TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyBasedOnPartialFilter
{
auto compatibleWithFilter = canonicalize("{x: {$eq: 5}, y: 1}");
auto incompatibleWithFilter = canonicalize("{x: {$eq: -5}, y: 1}");
- auto compatibleKey = planCache.computeKey(*compatibleWithFilter);
- auto incompatibleKey = planCache.computeKey(*incompatibleWithFilter);
+ auto compatibleKey = makeKey(*compatibleWithFilter, indexCores);
+ auto incompatibleKey = makeKey(*incompatibleWithFilter, indexCores);
assertPlanCacheKeysUnequalDueToDiscriminators(compatibleKey, incompatibleKey);
// The discriminator strings have the format "<xx><y>". That is, there are two discriminator
@@ -2129,8 +2160,8 @@ TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyBasedOnPartialFilter
{
auto compatibleQuery = canonicalize("{x: {$eq: 5}, y: 1}");
auto incompatibleQuery = canonicalize("{x: {$eq: 5}, y: null}");
- auto compatibleKey = planCache.computeKey(*compatibleQuery);
- auto incompatibleKey = planCache.computeKey(*incompatibleQuery);
+ auto compatibleKey = makeKey(*compatibleQuery, indexCores);
+ auto incompatibleKey = makeKey(*incompatibleQuery, indexCores);
assertPlanCacheKeysUnequalDueToDiscriminators(compatibleKey, incompatibleKey);
// The discriminator strings have the format "<xx><y>". That is, there are two discriminator
@@ -2145,7 +2176,7 @@ TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyBasedOnPartialFilter
// indexes, and the predicate is not compatible with the partial filter expression. This should
// result in two "0" bits inside the discriminator string.
{
- auto key = planCache.computeKey(*canonicalize("{x: {$eq: null}}"));
+ auto key = makeKey(*canonicalize("{x: {$eq: null}}"), indexCores);
ASSERT_EQ(key.getIndexabilityDiscriminators(), "<00>");
}
}
@@ -2160,13 +2191,13 @@ TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFilterAnd
indexInfo.filterExpr = filterExpr.get();
PlanCache planCache(5000);
- planCache.notifyOfIndexUpdates({indexInfo});
+ const std::vector<CoreIndexInfo> indexCores = {indexInfo};
{
// The discriminators should have the format <xx><yy><z>. The 'z' predicate has just one
// discriminator because it is not referenced in the partial filter expression. All
// predicates are compatible.
- auto key = planCache.computeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, z: {$eq: 3}}"));
+ auto key = makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, z: {$eq: 3}}"), indexCores);
ASSERT_EQ(key.getIndexabilityDiscriminators(), "<11><11><1>");
}
@@ -2174,7 +2205,7 @@ TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFilterAnd
// The discriminators should have the format <xx><yy><z>. The 'y' predicate is not
// compatible with the partial filter expression, leading to one of the 'y' bits being set
// to zero.
- auto key = planCache.computeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: -2}, z: {$eq: 3}}"));
+ auto key = makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: -2}, z: {$eq: 3}}"), indexCores);
ASSERT_EQ(key.getIndexabilityDiscriminators(), "<11><01><1>");
}
}
@@ -2188,19 +2219,19 @@ TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFilterOnN
indexInfo.filterExpr = filterExpr.get();
PlanCache planCache(5000);
- planCache.notifyOfIndexUpdates({indexInfo});
+ const std::vector<CoreIndexInfo> indexCores = {indexInfo};
{
// The discriminators have the format <x><(x.y)(x.y)><y>. All predicates are compatible.
auto key =
- planCache.computeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, 'x.y': {$eq: 3}}"));
+ makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, 'x.y': {$eq: 3}}"), indexCores);
ASSERT_EQ(key.getIndexabilityDiscriminators(), "<1><11><1>");
}
{
// Here, the predicate on "x.y" is not compatible with the partial filter expression.
auto key =
- planCache.computeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, 'x.y': {$eq: -3}}"));
+ makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, 'x.y': {$eq: -3}}"), indexCores);
ASSERT_EQ(key.getIndexabilityDiscriminators(), "<1><01><1>");
}
}
@@ -2214,27 +2245,26 @@ TEST(PlanCacheTest, ComputeKeyDiscriminatesCorrectlyWithPartialFilterAndWildcard
indexInfo.filterExpr = filterExpr.get();
PlanCache planCache(5000);
- planCache.notifyOfIndexUpdates({indexInfo});
+ const std::vector<CoreIndexInfo> indexCores = {indexInfo};
{
// The discriminators have the format <x><y>. The discriminator for 'x' indicates whether
// the predicate is compatible with the partial filter expression, whereas the discriminator
// for 'y' is about compatibility with the wildcard index.
- auto key = planCache.computeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, z: {$eq: 3}}"));
+ auto key = makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, z: {$eq: 3}}"), indexCores);
ASSERT_EQ(key.getIndexabilityDiscriminators(), "<1><1>");
}
{
// Similar to the previous case, except with an 'x' predicate that is incompatible with the
// partial filter expression.
- auto key = planCache.computeKey(*canonicalize("{x: {$eq: -1}, y: {$eq: 2}, z: {$eq: 3}}"));
+ auto key = makeKey(*canonicalize("{x: {$eq: -1}, y: {$eq: 2}, z: {$eq: 3}}"), indexCores);
ASSERT_EQ(key.getIndexabilityDiscriminators(), "<0><1>");
}
{
// Case where the 'y' predicate is not compatible with the wildcard index.
- auto key =
- planCache.computeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: null}, z: {$eq: 3}}"));
+ auto key = makeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: null}, z: {$eq: 3}}"), indexCores);
ASSERT_EQ(key.getIndexabilityDiscriminators(), "<1><0>");
}
}
@@ -2242,19 +2272,19 @@ TEST(PlanCacheTest, ComputeKeyDiscriminatesCorrectlyWithPartialFilterAndWildcard
TEST(PlanCacheTest, StableKeyDoesNotChangeAcrossIndexCreation) {
PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 0}}"));
- const PlanCacheKey preIndexKey = planCache.computeKey(*cq);
+ const PlanCacheKey preIndexKey = makeKey(*cq);
const auto preIndexStableKey = preIndexKey.getStableKey();
ASSERT_EQ(preIndexKey.getIndexabilityDiscriminators(), "");
const auto keyPattern = BSON("a" << 1);
// Create a sparse index (which requires a discriminator).
- planCache.notifyOfIndexUpdates(
- {CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- true, // sparse
- IndexEntry::Identifier{""})}); // name
+ const std::vector<CoreIndexInfo> indexCores = {
+ CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ true, // sparse
+ IndexEntry::Identifier{""})}; // name
- const PlanCacheKey postIndexKey = planCache.computeKey(*cq);
+ const PlanCacheKey postIndexKey = makeKey(*cq, indexCores);
const auto postIndexStableKey = postIndexKey.getStableKey();
ASSERT_NE(preIndexKey, postIndexKey);
ASSERT_EQ(preIndexStableKey, postIndexStableKey);
@@ -2266,22 +2296,22 @@ TEST(PlanCacheTest, ComputeKeyNotEqualsArray) {
unique_ptr<CanonicalQuery> cqNeArray(canonicalize("{a: {$ne: [1]}}"));
unique_ptr<CanonicalQuery> cqNeScalar(canonicalize("{a: {$ne: 123}}"));
- const PlanCacheKey noIndexNeArrayKey = planCache.computeKey(*cqNeArray);
- const PlanCacheKey noIndexNeScalarKey = planCache.computeKey(*cqNeScalar);
+ const PlanCacheKey noIndexNeArrayKey = makeKey(*cqNeArray);
+ const PlanCacheKey noIndexNeScalarKey = makeKey(*cqNeScalar);
ASSERT_EQ(noIndexNeArrayKey.getIndexabilityDiscriminators(), "<0>");
ASSERT_EQ(noIndexNeScalarKey.getIndexabilityDiscriminators(), "<1>");
ASSERT_EQ(noIndexNeScalarKey.getStableKey(), noIndexNeArrayKey.getStableKey());
const auto keyPattern = BSON("a" << 1);
// Create a normal btree index. It will have a discriminator.
- planCache.notifyOfIndexUpdates(
- {CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- false, // sparse
- IndexEntry::Identifier{""})}); // name
+ const std::vector<CoreIndexInfo> indexCores = {
+ CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ false, // sparse
+                      IndexEntry::Identifier{""})};  // name
- const PlanCacheKey withIndexNeArrayKey = planCache.computeKey(*cqNeArray);
- const PlanCacheKey withIndexNeScalarKey = planCache.computeKey(*cqNeScalar);
+ const PlanCacheKey withIndexNeArrayKey = makeKey(*cqNeArray, indexCores);
+ const PlanCacheKey withIndexNeScalarKey = makeKey(*cqNeScalar, indexCores);
ASSERT_NE(noIndexNeArrayKey, withIndexNeArrayKey);
ASSERT_EQ(noIndexNeArrayKey.getStableKey(), withIndexNeArrayKey.getStableKey());
@@ -2300,22 +2330,22 @@ TEST(PlanCacheTest, ComputeKeyNinArray) {
unique_ptr<CanonicalQuery> cqNinArray(canonicalize("{a: {$nin: [123, [1]]}}"));
unique_ptr<CanonicalQuery> cqNinScalar(canonicalize("{a: {$nin: [123, 456]}}"));
- const PlanCacheKey noIndexNinArrayKey = planCache.computeKey(*cqNinArray);
- const PlanCacheKey noIndexNinScalarKey = planCache.computeKey(*cqNinScalar);
+ const PlanCacheKey noIndexNinArrayKey = makeKey(*cqNinArray);
+ const PlanCacheKey noIndexNinScalarKey = makeKey(*cqNinScalar);
ASSERT_EQ(noIndexNinArrayKey.getIndexabilityDiscriminators(), "<0>");
ASSERT_EQ(noIndexNinScalarKey.getIndexabilityDiscriminators(), "<1>");
ASSERT_EQ(noIndexNinScalarKey.getStableKey(), noIndexNinArrayKey.getStableKey());
const auto keyPattern = BSON("a" << 1);
// Create a normal btree index. It will have a discriminator.
- planCache.notifyOfIndexUpdates(
- {CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- false, // sparse
- IndexEntry::Identifier{""})}); // name
+ const std::vector<CoreIndexInfo> indexCores = {
+ CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ false, // sparse
+ IndexEntry::Identifier{""})}; // name
- const PlanCacheKey withIndexNinArrayKey = planCache.computeKey(*cqNinArray);
- const PlanCacheKey withIndexNinScalarKey = planCache.computeKey(*cqNinScalar);
+ const PlanCacheKey withIndexNinArrayKey = makeKey(*cqNinArray, indexCores);
+ const PlanCacheKey withIndexNinScalarKey = makeKey(*cqNinScalar, indexCores);
// The unstable part of the key for $nin: [<array>] should have changed. The stable part,
// however, should not.
@@ -2341,21 +2371,19 @@ TEST(PlanCacheTest, PlanCacheKeyCollision) {
unique_ptr<CanonicalQuery> cqNeA(canonicalize("{$or: [{a: {$ne: 5}}, {a: {$ne: [12]}}]}"));
unique_ptr<CanonicalQuery> cqNeB(canonicalize("{$or: [{a: {$ne: [12]}}, {a: {$ne: 5}}]}"));
- const PlanCacheKey keyA = planCache.computeKey(*cqNeA);
- const PlanCacheKey keyB = planCache.computeKey(*cqNeB);
+ const PlanCacheKey keyA = makeKey(*cqNeA);
+ const PlanCacheKey keyB = makeKey(*cqNeB);
ASSERT_EQ(keyA.getStableKey(), keyB.getStableKey());
ASSERT_NE(keyA.getUnstablePart(), keyB.getUnstablePart());
-
const auto keyPattern = BSON("a" << 1);
// Create a normal btree index. It will have a discriminator.
- planCache.notifyOfIndexUpdates(
- {CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- false, // sparse
- IndexEntry::Identifier{""})}); // name
-
- const PlanCacheKey keyAWithIndex = planCache.computeKey(*cqNeA);
- const PlanCacheKey keyBWithIndex = planCache.computeKey(*cqNeB);
+ std::vector<CoreIndexInfo> indexCores = {
+ CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ false, // sparse
+ IndexEntry::Identifier{""})}; // name
+ const PlanCacheKey keyAWithIndex = makeKey(*cqNeA, indexCores);
+ const PlanCacheKey keyBWithIndex = makeKey(*cqNeB, indexCores);
ASSERT_EQ(keyAWithIndex.getStableKey(), keyBWithIndex.getStableKey());
ASSERT_NE(keyAWithIndex.getUnstablePart(), keyBWithIndex.getUnstablePart());
@@ -2367,26 +2395,52 @@ TEST(PlanCacheTest, PlanCacheSizeWithCRUDOperations) {
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
long long previousSize, originalSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ auto key = makeKey(*cq);
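+ // The logging callbacks are built from the query so that set() can still log query details,
+ // now that it takes a precomputed plan cache key instead of the CanonicalQuery itself.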
+ PlanCacheLoggingCallbacks<PlanCacheKey, SolutionCacheData> callbacks{*cq};
// Verify that the plan cache size increases after adding new entry to cache.
previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
+ ASSERT_OK(planCache.set(key,
+ qs->cacheData->clone(),
+ solns,
+ createDecision(1U),
+ Date_t{},
+ boost::none /* worksGrowthCoefficient */,
+ &callbacks));
ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
// Verify that trying to set the same entry won't change the plan cache size.
previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
+ ASSERT_OK(planCache.set(key,
+ qs->cacheData->clone(),
+ solns,
+ createDecision(1U),
+ Date_t{},
+ boost::none /* worksGrowthCoefficient */,
+ &callbacks));
ASSERT_EQ(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
// Verify that the plan cache size increases after updating the same entry with more solutions.
solns.push_back(qs.get());
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(2U), Date_t{}));
+ ASSERT_OK(planCache.set(key,
+ qs->cacheData->clone(),
+ solns,
+ createDecision(2U),
+ Date_t{},
+ boost::none /* worksGrowthCoefficient */,
+ &callbacks));
ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
// Verify that the plan cache size decreases after updating the same entry with fewer solutions.
solns.erase(solns.end() - 1);
previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
+ ASSERT_OK(planCache.set(key,
+ qs->cacheData->clone(),
+ solns,
+ createDecision(1U),
+ Date_t{},
+ boost::none /* worksGrowthCoefficient */,
+ &callbacks));
ASSERT_LT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), originalSize);
@@ -2398,8 +2452,13 @@ TEST(PlanCacheTest, PlanCacheSizeWithCRUDOperations) {
queryString[1] = 'b' + i;
unique_ptr<CanonicalQuery> query(canonicalize(queryString));
previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
- ASSERT_OK(
- planCache.set(*query, qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
+ ASSERT_OK(planCache.set(makeKey(*query),
+ qs->cacheData->clone(),
+ solns,
+ createDecision(1U),
+ Date_t{},
+ boost::none /* worksGrowthCoefficient */,
+ &callbacks));
ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
}
@@ -2409,7 +2468,7 @@ TEST(PlanCacheTest, PlanCacheSizeWithCRUDOperations) {
queryString[1] = 'b' + i;
unique_ptr<CanonicalQuery> query(canonicalize(queryString));
previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
- ASSERT_OK(planCache.remove(*query));
+ planCache.remove(makeKey(*query));
ASSERT_LT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
}
// Verify that the size is reset to the size when there is only one entry.
@@ -2418,11 +2477,11 @@ TEST(PlanCacheTest, PlanCacheSizeWithCRUDOperations) {
// Verify that trying to remove a non-existing key won't change the plan cache size.
previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
unique_ptr<CanonicalQuery> newQuery(canonicalize("{a: 1}"));
- ASSERT_NOT_OK(planCache.remove(*newQuery));
+ planCache.remove(makeKey(*newQuery));
ASSERT_EQ(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
// Verify that the plan cache size goes back to original size when the entry is removed.
- ASSERT_OK(planCache.remove(*cq));
+ planCache.remove(key);
ASSERT_EQ(planCache.size(), 0U);
ASSERT_EQ(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), originalSize);
}
@@ -2435,6 +2494,7 @@ TEST(PlanCacheTest, PlanCacheSizeWithEviction) {
std::vector<QuerySolution*> solns = {qs.get(), qs.get()};
long long originalSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
long long previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ auto key = makeKey(*cq);
// Add entries until plan cache is full and verify that the size keeps increasing.
std::string queryString = "{a: 1, c: 1}";
@@ -2443,50 +2503,91 @@ TEST(PlanCacheTest, PlanCacheSizeWithEviction) {
queryString[1]++;
unique_ptr<CanonicalQuery> query(canonicalize(queryString));
previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
- ASSERT_OK(
- planCache.set(*query, qs->cacheData->clone(), solns, createDecision(2U), Date_t{}));
+ PlanCacheLoggingCallbacks<PlanCacheKey, SolutionCacheData> callbacks{*cq};
+ ASSERT_OK(planCache.set(makeKey(*query),
+ qs->cacheData->clone(),
+ solns,
+ createDecision(2U),
+ Date_t{},
+ boost::none /* worksGrowthCoefficient */,
+ &callbacks));
ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
}
// Verify that adding an entry of the same size as the evicted entry won't change the plan cache size.
- queryString = "{k: 1, c: 1}";
- cq = unique_ptr<CanonicalQuery>(canonicalize(queryString));
- previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
- ASSERT_EQ(planCache.size(), kCacheSize);
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(2U), Date_t{}));
- ASSERT_EQ(planCache.size(), kCacheSize);
- ASSERT_EQ(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+ {
+ queryString = "{k: 1, c: 1}";
+ cq = unique_ptr<CanonicalQuery>(canonicalize(queryString));
+ PlanCacheLoggingCallbacks<PlanCacheKey, SolutionCacheData> callbacks{*cq};
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ ASSERT_EQ(planCache.size(), kCacheSize);
+ ASSERT_OK(planCache.set(key,
+ qs->cacheData->clone(),
+ solns,
+ createDecision(2U),
+ Date_t{},
+ boost::none /* worksGrowthCoefficient */,
+ &callbacks));
+ ASSERT_EQ(planCache.size(), kCacheSize);
+ ASSERT_EQ(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+ }
// Verify that adding an entry with a query bigger than the evicted entry's key should change the
// plan cache size.
- queryString = "{k: 1, c: 1, extraField: 1}";
- unique_ptr<CanonicalQuery> queryBiggerKey(canonicalize(queryString));
- previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
- ASSERT_OK(planCache.set(
- *queryBiggerKey, qs->cacheData->clone(), solns, createDecision(2U), Date_t{}));
- ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+ {
+ queryString = "{k: 1, c: 1, extraField: 1}";
+ unique_ptr<CanonicalQuery> queryBiggerKey(canonicalize(queryString));
+ PlanCacheLoggingCallbacks<PlanCacheKey, SolutionCacheData> callbacks{*queryBiggerKey};
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ ASSERT_OK(planCache.set(makeKey(*queryBiggerKey),
+ qs->cacheData->clone(),
+ solns,
+ createDecision(2U),
+ Date_t{},
+ boost::none /* worksGrowthCoefficient */,
+ &callbacks));
+ ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+ }
// Verify that adding an entry with query solutions larger than the evicted entry's query solutions
// should increase the plan cache size.
- queryString = "{l: 1, c: 1}";
- cq = unique_ptr<CanonicalQuery>(canonicalize(queryString));
- previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
- solns.push_back(qs.get());
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(3U), Date_t{}));
- ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+ {
+ queryString = "{l: 1, c: 1}";
+ cq = unique_ptr<CanonicalQuery>(canonicalize(queryString));
+ PlanCacheLoggingCallbacks<PlanCacheKey, SolutionCacheData> callbacks{*cq};
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ solns.push_back(qs.get());
+ ASSERT_OK(planCache.set(key,
+ qs->cacheData->clone(),
+ solns,
+ createDecision(3U),
+ Date_t{},
+ boost::none /* worksGrowthCoefficient */,
+ &callbacks));
+ ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+ }
// Verify that adding an entry with query solutions smaller than the evicted entry's query
// solutions should decrease the plan cache size.
- queryString = "{m: 1, c: 1}";
- cq = unique_ptr<CanonicalQuery>(canonicalize(queryString));
- previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
- solns = {qs.get()};
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
- ASSERT_LT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+ {
+ queryString = "{m: 1, c: 1}";
+ cq = unique_ptr<CanonicalQuery>(canonicalize(queryString));
+ PlanCacheLoggingCallbacks<PlanCacheKey, SolutionCacheData> callbacks{*cq};
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ solns = {qs.get()};
+ ASSERT_OK(planCache.set(key,
+ qs->cacheData->clone(),
+ solns,
+ createDecision(1U),
+ Date_t{},
+ boost::none /* worksGrowthCoefficient */,
+ &callbacks));
+ ASSERT_LT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
- // clear() should reset the size.
- planCache.clear();
- ASSERT_EQ(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), originalSize);
+ // clear() should reset the size.
+ planCache.clear();
+ ASSERT_EQ(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), originalSize);
+ }
}
TEST(PlanCacheTest, PlanCacheSizeWithMultiplePlanCaches) {
@@ -2504,13 +2605,13 @@ TEST(PlanCacheTest, PlanCacheSizeWithMultiplePlanCaches) {
queryString[1] = 'b' + i;
unique_ptr<CanonicalQuery> query(canonicalize(queryString));
previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
- ASSERT_OK(
- planCache1.set(*query, qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
+ ASSERT_OK(planCache1.set(
+ makeKey(*query), qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
- ASSERT_OK(
- planCache2.set(*query, qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
+ ASSERT_OK(planCache2.set(
+ makeKey(*query), qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
}
@@ -2520,7 +2621,7 @@ TEST(PlanCacheTest, PlanCacheSizeWithMultiplePlanCaches) {
queryString[1] = 'b' + i;
unique_ptr<CanonicalQuery> query(canonicalize(queryString));
previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
- ASSERT_OK(planCache1.remove(*query));
+ planCache1.remove(makeKey(*query));
ASSERT_LT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
}
@@ -2529,7 +2630,8 @@ TEST(PlanCacheTest, PlanCacheSizeWithMultiplePlanCaches) {
{
PlanCache planCache(5000);
previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
- ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
+ ASSERT_OK(planCache.set(
+ makeKey(*cq), qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
}
@@ -2547,28 +2649,24 @@ TEST(PlanCacheTest, PlanCacheSizeWithMultiplePlanCaches) {
}
TEST(PlanCacheTest, DifferentQueryEngines) {
+ const auto keyPattern = BSON("a" << 1);
+ const std::vector<CoreIndexInfo> indexCores = {
+ CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ false, // sparse
+ IndexEntry::Identifier{""})}; // name
+
// Helper to construct a plan cache key given the 'enableSlotBasedExecutionEngine' flag.
- auto constructPlanCacheKey = [](const PlanCache& pc,
- bool enableSlotBasedExecutionEngine) -> PlanCacheKey {
+ auto constructPlanCacheKey = [&](bool enableSlotBasedExecutionEngine) {
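+ // The RAII controller flips the 'internalQueryEnableSlotBasedExecutionEngine' server
+ // parameter for the duration of this lambda, so the key computed below reflects the
+ // requested engine.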
RAIIServerParameterControllerForTest controller{
"internalQueryEnableSlotBasedExecutionEngine", enableSlotBasedExecutionEngine};
const auto queryStr = "{a: 0}";
unique_ptr<CanonicalQuery> cq(canonicalize(queryStr));
- return pc.computeKey(*cq);
+ return makeKey(*cq, indexCores);
};
- PlanCache planCache(5000);
- const auto keyPattern = BSON("a" << 1);
-
- // Create a normal btree index. It will have a discriminator.
- planCache.notifyOfIndexUpdates(
- {CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- false, // sparse
- IndexEntry::Identifier{""})}); // name
-
- const auto classicEngineKey = constructPlanCacheKey(planCache, false);
- const auto slotBasedExecutionEngineKey = constructPlanCacheKey(planCache, true);
+ const auto classicEngineKey = constructPlanCacheKey(false);
+ const auto slotBasedExecutionEngineKey = constructPlanCacheKey(true);
// Check that the two plan cache keys are not equal because the plans were created under
// different engines.
diff --git a/src/mongo/db/query/plan_executor_factory.cpp b/src/mongo/db/query/plan_executor_factory.cpp
index 7384e6ce716..bd11182daa2 100644
--- a/src/mongo/db/query/plan_executor_factory.cpp
+++ b/src/mongo/db/query/plan_executor_factory.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/pipeline/plan_executor_pipeline.h"
#include "mongo/db/query/plan_executor_impl.h"
#include "mongo/db/query/plan_executor_sbe.h"
+#include "mongo/db/query/query_planner_params.h"
#include "mongo/db/query/util/make_data_structure.h"
#include "mongo/logv2/log.h"
diff --git a/src/mongo/db/query/plan_explainer.h b/src/mongo/db/query/plan_explainer.h
index b521cd6c9ff..922210fc80d 100644
--- a/src/mongo/db/query/plan_explainer.h
+++ b/src/mongo/db/query/plan_explainer.h
@@ -119,7 +119,8 @@ public:
* the plan cache.
*/
virtual std::vector<PlanStatsDetails> getCachedPlanStats(
- const PlanCacheEntry::DebugInfo& debugInfo, ExplainOptions::Verbosity verbosity) const = 0;
+ const plan_cache_debug_info::DebugInfo& debugInfo,
+ ExplainOptions::Verbosity verbosity) const = 0;
/**
* Returns an object containing what query knobs the planner hit during plan enumeration.
diff --git a/src/mongo/db/query/plan_explainer_impl.cpp b/src/mongo/db/query/plan_explainer_impl.cpp
index 9720f628292..04cd97980e9 100644
--- a/src/mongo/db/query/plan_explainer_impl.cpp
+++ b/src/mongo/db/query/plan_explainer_impl.cpp
@@ -794,7 +794,7 @@ std::vector<PlanExplainer::PlanStatsDetails> PlanExplainerImpl::getRejectedPlans
}
std::vector<PlanExplainer::PlanStatsDetails> PlanExplainerImpl::getCachedPlanStats(
- const PlanCacheEntry::DebugInfo& debugInfo, ExplainOptions::Verbosity verbosity) const {
+ const plan_cache_debug_info::DebugInfo& debugInfo, ExplainOptions::Verbosity verbosity) const {
const auto& decision = *debugInfo.decision;
std::vector<PlanStatsDetails> res;
auto winningPlanIdx = getWinningPlanIdx(_root);
diff --git a/src/mongo/db/query/plan_explainer_impl.h b/src/mongo/db/query/plan_explainer_impl.h
index 3ecdaa83588..73ef81ae825 100644
--- a/src/mongo/db/query/plan_explainer_impl.h
+++ b/src/mongo/db/query/plan_explainer_impl.h
@@ -56,7 +56,7 @@ public:
PlanStatsDetails getWinningPlanTrialStats() const final;
std::vector<PlanStatsDetails> getRejectedPlansStats(
ExplainOptions::Verbosity verbosity) const final;
- std::vector<PlanStatsDetails> getCachedPlanStats(const PlanCacheEntry::DebugInfo&,
+ std::vector<PlanStatsDetails> getCachedPlanStats(const plan_cache_debug_info::DebugInfo&,
ExplainOptions::Verbosity) const final;
private:
diff --git a/src/mongo/db/query/plan_explainer_sbe.cpp b/src/mongo/db/query/plan_explainer_sbe.cpp
index 442411e0123..1a6fff86019 100644
--- a/src/mongo/db/query/plan_explainer_sbe.cpp
+++ b/src/mongo/db/query/plan_explainer_sbe.cpp
@@ -540,7 +540,7 @@ std::vector<PlanExplainer::PlanStatsDetails> PlanExplainerSBE::getRejectedPlansS
}
std::vector<PlanExplainer::PlanStatsDetails> PlanExplainerSBE::getCachedPlanStats(
- const PlanCacheEntry::DebugInfo& debugInfo, ExplainOptions::Verbosity verbosity) const {
+ const plan_cache_debug_info::DebugInfo& debugInfo, ExplainOptions::Verbosity verbosity) const {
const auto& decision = *debugInfo.decision;
std::vector<PlanStatsDetails> res;
diff --git a/src/mongo/db/query/plan_explainer_sbe.h b/src/mongo/db/query/plan_explainer_sbe.h
index 08b05e97aa5..bff25e5db60 100644
--- a/src/mongo/db/query/plan_explainer_sbe.h
+++ b/src/mongo/db/query/plan_explainer_sbe.h
@@ -63,7 +63,7 @@ public:
PlanStatsDetails getWinningPlanTrialStats() const final;
std::vector<PlanStatsDetails> getRejectedPlansStats(
ExplainOptions::Verbosity verbosity) const final;
- std::vector<PlanStatsDetails> getCachedPlanStats(const PlanCacheEntry::DebugInfo&,
+ std::vector<PlanStatsDetails> getCachedPlanStats(const plan_cache_debug_info::DebugInfo&,
ExplainOptions::Verbosity) const final;
private:
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index fc1c8b7cd51..600f106f25f 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -46,6 +46,7 @@
#include "mongo/db/matcher/expression_text.h"
#include "mongo/db/pipeline/document_source_group.h"
#include "mongo/db/query/canonical_query.h"
+#include "mongo/db/query/classic_plan_cache.h"
#include "mongo/db/query/collation/collation_index_key.h"
#include "mongo/db/query/collation/collator_interface.h"
#include "mongo/db/query/plan_cache.h"
@@ -58,6 +59,36 @@
#include "mongo/logv2/log.h"
namespace mongo {
+namespace log_detail {
+void logSubplannerIndexEntry(const IndexEntry& entry, size_t childIndex) {
+ LOGV2_DEBUG(20598,
+ 5,
+ "Subplanner: index number and entry",
+ "indexNumber"_attr = childIndex,
+ "indexEntry"_attr = entry);
+}
+
+void logCachedPlanFound(size_t numChildren, size_t childIndex) {
+ LOGV2_DEBUG(20599,
+ 5,
+ "Subplanner: cached plan found",
+ "childIndex"_attr = childIndex,
+ "numChildren"_attr = numChildren);
+}
+
+void logCachedPlanNotFound(size_t numChildren, size_t childIndex) {
+ LOGV2_DEBUG(20600,
+ 5,
+ "Subplanner: planning child",
+ "childIndex"_attr = childIndex,
+ "numChildren"_attr = numChildren);
+}
+
+void logNumberOfSolutions(size_t numSolutions) {
+ LOGV2_DEBUG(20601, 5, "Subplanner: number of solutions", "numSolutions"_attr = numSolutions);
+}
+} // namespace log_detail
+
namespace {
/**
* On success, applies the index tags from 'branchCacheData' (which represent the winning
@@ -479,7 +510,7 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::planFromCache(
invariant(cachedSoln.cachedPlan);
// A query not suitable for caching should not have made its way into the cache.
- invariant(PlanCache::shouldCacheQuery(query));
+ invariant(shouldCacheQuery(query));
// Look up winning solution in cached solution's array.
const auto& winnerCacheData = *cachedSoln.cachedPlan;
@@ -1143,95 +1174,6 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::planForMul
return {std::move(out)};
}
-StatusWith<QueryPlanner::SubqueriesPlanningResult> QueryPlanner::planSubqueries(
- OperationContext* opCtx,
- const CollectionPtr& collection,
- const PlanCache* planCache,
- const CanonicalQuery& query,
- const QueryPlannerParams& params) {
- invariant(query.root()->matchType() == MatchExpression::OR);
- invariant(query.root()->numChildren(), "Cannot plan subqueries for an $or with no children");
-
- SubqueriesPlanningResult planningResult{query.root()->shallowClone()};
- for (size_t i = 0; i < params.indices.size(); ++i) {
- const IndexEntry& ie = params.indices[i];
- const auto insertionRes = planningResult.indexMap.insert(std::make_pair(ie.identifier, i));
- // Be sure the key was not already in the map.
- invariant(insertionRes.second);
- LOGV2_DEBUG(20598,
- 5,
- "Subplanner: index number and entry",
- "indexNumber"_attr = i,
- "indexEntry"_attr = ie);
- }
-
- for (size_t i = 0; i < planningResult.orExpression->numChildren(); ++i) {
- // We need a place to shove the results from planning this branch.
- planningResult.branches.push_back(
- std::make_unique<SubqueriesPlanningResult::BranchPlanningResult>());
- auto branchResult = planningResult.branches.back().get();
- auto orChild = planningResult.orExpression->getChild(i);
-
- // Turn the i-th child into its own query.
- auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, query, orChild);
- if (!statusWithCQ.isOK()) {
- str::stream ss;
- ss << "Can't canonicalize subchild " << orChild->debugString() << " "
- << statusWithCQ.getStatus().reason();
- return Status(ErrorCodes::BadValue, ss);
- }
-
- branchResult->canonicalQuery = std::move(statusWithCQ.getValue());
-
- // Plan the i-th child. We might be able to find a plan for the i-th child in the plan
- // cache. If there's no cached plan, then we generate and rank plans using the MPS.
-
- // Populate branchResult->cachedSolution if an active cachedSolution entry exists.
- if (planCache && planCache->shouldCacheQuery(*branchResult->canonicalQuery)) {
- auto planCacheKey = planCache->computeKey(*branchResult->canonicalQuery);
- if (auto cachedSol = planCache->getCacheEntryIfActive(planCacheKey)) {
- // We have a CachedSolution. Store it for later.
- LOGV2_DEBUG(20599,
- 5,
- "Subplanner: cached plan found",
- "childIndex"_attr = i,
- "numChildren"_attr = planningResult.orExpression->numChildren());
-
- branchResult->cachedSolution = std::move(cachedSol);
- }
- }
-
- if (!branchResult->cachedSolution) {
- // No CachedSolution found. We'll have to plan from scratch.
- LOGV2_DEBUG(20600,
- 5,
- "Subplanner: planning child",
- "childIndex"_attr = i,
- "numChildren"_attr = planningResult.orExpression->numChildren());
-
- // We don't set NO_TABLE_SCAN because peeking at the cache data will keep us from
- // considering any plan that's a collscan.
- invariant(branchResult->solutions.empty());
- auto statusWithMultiPlanSolns =
- QueryPlanner::planForMultiPlanner(*branchResult->canonicalQuery, params);
- if (!statusWithMultiPlanSolns.isOK()) {
- str::stream ss;
- ss << "Can't plan for subchild " << branchResult->canonicalQuery->toString() << " "
- << statusWithMultiPlanSolns.getStatus().reason();
- return Status(ErrorCodes::BadValue, ss);
- }
- branchResult->solutions = std::move(statusWithMultiPlanSolns.getValue());
-
- LOGV2_DEBUG(20601,
- 5,
- "Subplanner: number of solutions",
- "numSolutions"_attr = branchResult->solutions.size());
- }
- }
-
- return std::move(planningResult);
-}
-
StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::choosePlanForSubqueries(
const CanonicalQuery& query,
const QueryPlannerParams& params,
diff --git a/src/mongo/db/query/query_planner.h b/src/mongo/db/query/query_planner.h
index 6d4fc4d1500..817afff0dea 100644
--- a/src/mongo/db/query/query_planner.h
+++ b/src/mongo/db/query/query_planner.h
@@ -36,6 +36,16 @@
#include "mongo/db/query/query_solution.h"
namespace mongo {
+// The logging facility enforces the rule that logging should not be done in a header file. Since
+// template classes and functions below must be defined in the header file and since they use the
+// logging facility, we have to define the helper functions below to perform the actual logging
+// operation from template code.
+namespace log_detail {
+void logSubplannerIndexEntry(const IndexEntry& entry, size_t childIndex);
+void logCachedPlanFound(size_t numChildren, size_t childIndex);
+void logCachedPlanNotFound(size_t numChildren, size_t childIndex);
+void logNumberOfSolutions(size_t numSolutions);
+} // namespace log_detail
class Collection;
class CollectionPtr;
@@ -119,14 +129,21 @@ public:
const CachedSolution& cachedSoln);
/**
- * Plan each branch of the rooted $or query independently, and store the resulting
+ * Plan each branch of the rooted $or query independently, and return the resulting
* lists of query solutions in 'SubqueriesPlanningResult'.
+ *
+ * The 'createPlanCacheKey' callback is used to create a plan cache key of the specified
+ * 'KeyType' for each of the branches to look up the plan in the 'planCache'.
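+ *
+ * Illustrative sketch of a call site (placeholder names only; 'planCache', 'collection' and
+ * 'plannerParams' are assumed to be supplied by the caller, and the key factory shown is the
+ * classic-engine one used elsewhere in this patch):
+ *
+ *   std::function<PlanCacheKey(const CanonicalQuery&, const CollectionPtr&)> makeKeyFn =
+ *       [](const CanonicalQuery& cq, const CollectionPtr& coll) {
+ *           return plan_cache_key_factory::make<PlanCacheKey>(cq, coll);
+ *       };
+ *   auto planningResult = QueryPlanner::planSubqueries(
+ *       opCtx, planCache, makeKeyFn, collection, canonicalQuery, plannerParams);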
*/
- static StatusWith<SubqueriesPlanningResult> planSubqueries(OperationContext* opCtx,
- const CollectionPtr& collection,
- const PlanCache* planCache,
- const CanonicalQuery& query,
- const QueryPlannerParams& params);
+ template <typename KeyType, typename... Args>
+ static StatusWith<SubqueriesPlanningResult> planSubqueries(
+ OperationContext* opCtx,
+ const PlanCacheBase<KeyType, Args...>* planCache,
+ std::function<KeyType(const CanonicalQuery& cq, const CollectionPtr& coll)>
+ createPlanCacheKey,
+ const CollectionPtr& collection,
+ const CanonicalQuery& query,
+ const QueryPlannerParams& params);
/**
* Generates and returns the index tag tree that will be inserted into the plan cache. This data
@@ -174,4 +191,80 @@ public:
std::function<StatusWith<std::unique_ptr<QuerySolution>>(
CanonicalQuery* cq, std::vector<std::unique_ptr<QuerySolution>>)> multiplanCallback);
};
+
+template <typename KeyType, typename... Args>
+StatusWith<QueryPlanner::SubqueriesPlanningResult> QueryPlanner::planSubqueries(
+ OperationContext* opCtx,
+ const PlanCacheBase<KeyType, Args...>* planCache,
+ std::function<KeyType(const CanonicalQuery& cq, const CollectionPtr& coll)> createPlanCacheKey,
+ const CollectionPtr& collection,
+ const CanonicalQuery& query,
+ const QueryPlannerParams& params) {
+ invariant(query.root()->matchType() == MatchExpression::OR);
+ invariant(query.root()->numChildren(), "Cannot plan subqueries for an $or with no children");
+
+ SubqueriesPlanningResult planningResult{query.root()->shallowClone()};
+ for (size_t i = 0; i < params.indices.size(); ++i) {
+ const IndexEntry& ie = params.indices[i];
+ const auto insertionRes = planningResult.indexMap.insert(std::make_pair(ie.identifier, i));
+ // Be sure the key was not already in the map.
+ invariant(insertionRes.second);
+ log_detail::logSubplannerIndexEntry(ie, i);
+ }
+
+ for (size_t i = 0; i < planningResult.orExpression->numChildren(); ++i) {
+ // We need a place to shove the results from planning this branch.
+ planningResult.branches.push_back(
+ std::make_unique<SubqueriesPlanningResult::BranchPlanningResult>());
+ auto branchResult = planningResult.branches.back().get();
+ auto orChild = planningResult.orExpression->getChild(i);
+
+ // Turn the i-th child into its own query.
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, query, orChild);
+ if (!statusWithCQ.isOK()) {
+ str::stream ss;
+ ss << "Can't canonicalize subchild " << orChild->debugString() << " "
+ << statusWithCQ.getStatus().reason();
+ return Status(ErrorCodes::BadValue, ss);
+ }
+
+ branchResult->canonicalQuery = std::move(statusWithCQ.getValue());
+
+        // Plan the i-th child. We might be able to find a plan for the i-th child in the plan
+        // cache; if not, we generate and rank plans using the MultiPlanStage (MPS).
+
+        // Populate branchResult->cachedSolution if an active plan cache entry exists.
+ if (planCache && shouldCacheQuery(*branchResult->canonicalQuery)) {
+ if (auto cachedSol = planCache->getCacheEntryIfActive(
+ createPlanCacheKey(*branchResult->canonicalQuery, collection))) {
+ // We have a CachedSolution. Store it for later.
+ log_detail::logCachedPlanFound(planningResult.orExpression->numChildren(), i);
+
+ branchResult->cachedSolution = std::move(cachedSol);
+ }
+ }
+
+ if (!branchResult->cachedSolution) {
+ // No CachedSolution found. We'll have to plan from scratch.
+ log_detail::logCachedPlanNotFound(planningResult.orExpression->numChildren(), i);
+
+ // We don't set NO_TABLE_SCAN because peeking at the cache data will keep us from
+ // considering any plan that's a collscan.
+ invariant(branchResult->solutions.empty());
+ auto statusWithMultiPlanSolns =
+ QueryPlanner::planForMultiPlanner(*branchResult->canonicalQuery, params);
+ if (!statusWithMultiPlanSolns.isOK()) {
+ str::stream ss;
+ ss << "Can't plan for subchild " << branchResult->canonicalQuery->toString() << " "
+ << statusWithMultiPlanSolns.getStatus().reason();
+ return Status(ErrorCodes::BadValue, ss);
+ }
+ branchResult->solutions = std::move(statusWithMultiPlanSolns.getValue());
+
+ log_detail::logNumberOfSolutions(branchResult->solutions.size());
+ }
+ }
+
+ return std::move(planningResult);
+}
} // namespace mongo
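
The log_detail helpers declared in this header are deliberately declaration-only: the template body of
planSubqueries() may only call them, while the actual logging happens in a .cpp translation unit. A
minimal sketch of what those definitions might look like, assuming LOGV2 debug logging at a verbose
level; the log IDs, debug level, and attribute names below are illustrative placeholders, not the ones
used by the actual change:

    #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery

    #include "mongo/db/query/query_planner.h"
    #include "mongo/logv2/log.h"

    namespace mongo::log_detail {

    void logSubplannerIndexEntry(const IndexEntry& entry, size_t childIndex) {
        // Placeholder log ID; real LOGV2 IDs must be unique across the codebase.
        LOGV2_DEBUG(9999901, 5, "Subplanner: index entry",
                    "childIndex"_attr = childIndex,
                    "indexEntry"_attr = entry.toString());
    }

    void logCachedPlanFound(size_t numChildren, size_t childIndex) {
        LOGV2_DEBUG(9999902, 5, "Subplanner: cached plan found",
                    "childIndex"_attr = childIndex,
                    "numChildren"_attr = numChildren);
    }

    void logCachedPlanNotFound(size_t numChildren, size_t childIndex) {
        LOGV2_DEBUG(9999903, 5, "Subplanner: planning child from scratch",
                    "childIndex"_attr = childIndex,
                    "numChildren"_attr = numChildren);
    }

    void logNumberOfSolutions(size_t numSolutions) {
        LOGV2_DEBUG(9999904, 5, "Subplanner: number of solutions",
                    "numSolutions"_attr = numSolutions);
    }

    }  // namespace mongo::log_detail
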
diff --git a/src/mongo/db/query/query_planner_test_fixture.h b/src/mongo/db/query/query_planner_test_fixture.h
index b18e32d0f81..bd810e67760 100644
--- a/src/mongo/db/query/query_planner_test_fixture.h
+++ b/src/mongo/db/query/query_planner_test_fixture.h
@@ -39,6 +39,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
#include "mongo/db/query/collation/collator_interface.h"
+#include "mongo/db/query/query_planner_params.h"
#include "mongo/db/query/query_solution.h"
#include "mongo/db/query/query_test_service_context.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/db/query/sbe_cached_solution_planner.cpp b/src/mongo/db/query/sbe_cached_solution_planner.cpp
index 1ce175b001e..c5970636a1d 100644
--- a/src/mongo/db/query/sbe_cached_solution_planner.cpp
+++ b/src/mongo/db/query/sbe_cached_solution_planner.cpp
@@ -34,6 +34,7 @@
#include "mongo/db/query/collection_query_info.h"
#include "mongo/db/query/explain.h"
+#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/db/query/sbe_multi_planner.h"
#include "mongo/db/query/stage_builder_util.h"
@@ -132,7 +133,7 @@ CandidatePlans CachedSolutionPlanner::replan(bool shouldCache, std::string reaso
if (shouldCache) {
// Deactivate the current cache entry.
auto cache = CollectionQueryInfo::get(_collection).getPlanCache();
- cache->deactivate(_cq);
+ cache->deactivate(plan_cache_key_factory::make<mongo::PlanCacheKey>(_cq, _collection));
}
auto buildExecutableTree = [&](const QuerySolution& sol) {
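
With this change, callers construct the cache key explicitly through the new plan_cache_key_factory
and hand it to the cache, instead of passing the CanonicalQuery and letting the cache derive the key
itself. A minimal sketch of the factory interface as implied by the call sites in this diff; the exact
declaration in plan_cache_key_factory.h may differ:

    #include "mongo/db/catalog/collection.h"
    #include "mongo/db/query/canonical_query.h"

    namespace mongo::plan_cache_key_factory {

    // Builds a plan cache key of the requested type from a canonical query and the collection it
    // runs against, so that different cache implementations can key their entries in their own way.
    template <typename KeyType>
    KeyType make(const CanonicalQuery& query, const CollectionPtr& collection);

    }  // namespace mongo::plan_cache_key_factory
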
diff --git a/src/mongo/db/query/sbe_cached_solution_planner.h b/src/mongo/db/query/sbe_cached_solution_planner.h
index 18767bcc07c..8381d453672 100644
--- a/src/mongo/db/query/sbe_cached_solution_planner.h
+++ b/src/mongo/db/query/sbe_cached_solution_planner.h
@@ -30,6 +30,7 @@
#pragma once
#include "mongo/db/query/all_indices_required_checker.h"
+#include "mongo/db/query/query_planner_params.h"
#include "mongo/db/query/sbe_plan_ranker.h"
#include "mongo/db/query/sbe_runtime_planner.h"
diff --git a/src/mongo/db/query/sbe_sub_planner.cpp b/src/mongo/db/query/sbe_sub_planner.cpp
index 28f2b401de7..a0ee086864f 100644
--- a/src/mongo/db/query/sbe_sub_planner.cpp
+++ b/src/mongo/db/query/sbe_sub_planner.cpp
@@ -31,6 +31,7 @@
#include "mongo/db/query/sbe_sub_planner.h"
#include "mongo/db/query/collection_query_info.h"
+#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/db/query/sbe_multi_planner.h"
#include "mongo/db/query/stage_builder_util.h"
@@ -40,11 +41,17 @@ namespace mongo::sbe {
CandidatePlans SubPlanner::plan(
std::vector<std::unique_ptr<QuerySolution>> solutions,
std::vector<std::pair<std::unique_ptr<PlanStage>, stage_builder::PlanStageData>> roots) {
+ std::function<mongo::PlanCacheKey(const CanonicalQuery& cq, const CollectionPtr& coll)>
+ createPlanCacheKey = [](const CanonicalQuery& cq, const CollectionPtr& coll) {
+ return plan_cache_key_factory::make<mongo::PlanCacheKey>(cq, coll);
+ };
+
// Plan each branch of the $or.
auto subplanningStatus =
QueryPlanner::planSubqueries(_opCtx,
- _collection,
CollectionQueryInfo::get(_collection).getPlanCache(),
+ createPlanCacheKey,
+ _collection,
_cq,
_queryParams);
if (!subplanningStatus.isOK()) {
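
For orientation, the SubqueriesPlanningResult returned here carries one BranchPlanningResult per $or
child, and the caller inspects each branch to see whether the plan cache satisfied it. A minimal
sketch of that pattern, assuming 'subplanningStatus' above is OK; the variable names and comments are
illustrative, not the SubPlanner implementation itself:

    auto planningResult = std::move(subplanningStatus.getValue());
    for (auto&& branch : planningResult.branches) {
        if (branch->cachedSolution) {
            // The plan cache produced an active entry for this branch; a plan can be built from it
            // directly, skipping multi-planning for this child.
        } else {
            // No usable cache entry; 'branch->solutions' holds the candidate solutions produced by
            // QueryPlanner::planForMultiPlanner(), which are then ranked by multi-planning.
        }
    }
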
diff --git a/src/mongo/db/query/sbe_sub_planner.h b/src/mongo/db/query/sbe_sub_planner.h
index 272c9cb2e94..e9c03d3db4b 100644
--- a/src/mongo/db/query/sbe_sub_planner.h
+++ b/src/mongo/db/query/sbe_sub_planner.h
@@ -30,6 +30,7 @@
#pragma once
#include "mongo/db/query/all_indices_required_checker.h"
+#include "mongo/db/query/query_planner_params.h"
#include "mongo/db/query/sbe_plan_ranker.h"
#include "mongo/db/query/sbe_runtime_planner.h"