summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--jstests/core/plan_cache_list_plans.js21
-rw-r--r--jstests/core/profile_find.js9
-rw-r--r--src/mongo/db/commands/index_filter_commands_test.cpp6
-rw-r--r--src/mongo/db/commands/plan_cache_commands.cpp20
-rw-r--r--src/mongo/db/commands/plan_cache_commands_test.cpp79
-rw-r--r--src/mongo/db/exec/cached_plan.cpp13
-rw-r--r--src/mongo/db/exec/cached_plan.h2
-rw-r--r--src/mongo/db/exec/multi_plan.cpp4
-rw-r--r--src/mongo/db/exec/subplan.cpp10
-rw-r--r--src/mongo/db/query/get_executor.cpp7
-rw-r--r--src/mongo/db/query/plan_cache.cpp188
-rw-r--r--src/mongo/db/query/plan_cache.h101
-rw-r--r--src/mongo/db/query/plan_cache_test.cpp347
-rw-r--r--src/mongo/db/query/query_knobs.cpp11
-rw-r--r--src/mongo/db/query/query_knobs.h7
-rw-r--r--src/mongo/dbtests/query_stage_cached_plan.cpp392
-rw-r--r--src/mongo/dbtests/query_stage_multiplan.cpp296
-rw-r--r--src/mongo/dbtests/query_stage_subplan.cpp4
18 files changed, 1165 insertions, 352 deletions
diff --git a/jstests/core/plan_cache_list_plans.js b/jstests/core/plan_cache_list_plans.js
index caa1cc9cd55..9c34205c0c4 100644
--- a/jstests/core/plan_cache_list_plans.js
+++ b/jstests/core/plan_cache_list_plans.js
@@ -13,14 +13,13 @@
let t = db.jstests_plan_cache_list_plans;
t.drop();
- // Utility function to list plans for a query.
- function getPlans(query, sort, projection) {
+ function getPlansForCacheEntry(query, sort, projection) {
let key = {query: query, sort: sort, projection: projection};
let res = t.runCommand('planCacheListPlans', key);
assert.commandWorked(res, 'planCacheListPlans(' + tojson(key, '', true) + ') failed');
assert(res.hasOwnProperty('plans'),
'plans missing from planCacheListPlans(' + tojson(key, '', true) + ') result');
- return res.plans;
+ return res;
}
// Assert that timeOfCreation exists in the cache entry. The difference between the current time
@@ -48,8 +47,8 @@
t.ensureIndex({a: 1, b: 1});
// Invalid key should be an error.
- assert.eq(0,
- getPlans({unknownfield: 1}, {}, {}),
+ assert.eq([],
+ getPlansForCacheEntry({unknownfield: 1}, {}, {}).plans,
'planCacheListPlans should return empty results on unknown query shape');
// Create a cache entry.
@@ -61,7 +60,12 @@
checkTimeOfCreation({a: 1, b: 1}, {a: -1}, {_id: 0, a: 1}, now);
// Retrieve plans for valid cache entry.
- let plans = getPlans({a: 1, b: 1}, {a: -1}, {_id: 0, a: 1});
+ let entry = getPlansForCacheEntry({a: 1, b: 1}, {a: -1}, {_id: 0, a: 1});
+ assert(entry.hasOwnProperty('works'),
+ 'works missing from planCacheListPlans() result ' + tojson(entry));
+ assert.eq(entry.isActive, false);
+
+ let plans = entry.plans;
assert.eq(2, plans.length, 'unexpected number of plans cached for query');
// Print every plan
@@ -90,7 +94,10 @@
now = (new Date()).getTime();
checkTimeOfCreation({a: 3, b: 3}, {a: -1}, {_id: 0, a: 1}, now);
- plans = getPlans({a: 3, b: 3}, {a: -1}, {_id: 0, a: 1});
+ entry = getPlansForCacheEntry({a: 3, b: 3}, {a: -1}, {_id: 0, a: 1});
+ assert(entry.hasOwnProperty('works'), 'works missing from planCacheListPlans() result');
+ assert.eq(entry.isActive, true);
+ plans = entry.plans;
// This should be obvious but feedback is available only for the first (winning) plan.
print('planCacheListPlans result (after adding indexes and completing 20 executions):');
diff --git a/jstests/core/profile_find.js b/jstests/core/profile_find.js
index a87a84c34ce..6c89781bd33 100644
--- a/jstests/core/profile_find.js
+++ b/jstests/core/profile_find.js
@@ -111,7 +111,16 @@
assert.writeOK(coll.insert({a: 5, b: i}));
assert.writeOK(coll.insert({a: i, b: 10}));
}
+
+ // Until we get the failpoint described in the above comment (regarding SERVER-23620), we must
+ // run the query twice. The first time will create an inactive cache entry. The second run will
+ // take the same number of works, and create an active cache entry.
+ assert.neq(coll.findOne({a: 5, b: 15}), null);
assert.neq(coll.findOne({a: 5, b: 15}), null);
+
+ // Run a query with the same shape, but with different parameters. The plan cached for the
+ // query above will perform poorly (since the selectivities are different) and we will be
+ // forced to replan.
assert.neq(coll.findOne({a: 15, b: 10}), null);
profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index da0b0b919d0..46423af1ed3 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -103,7 +103,7 @@ vector<BSONObj> getFilters(const QuerySettings& querySettings) {
/**
* Utility function to create a PlanRankingDecision
*/
-PlanRankingDecision* createDecision(size_t numPlans) {
+std::unique_ptr<PlanRankingDecision> createDecision(size_t numPlans) {
unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
for (size_t i = 0; i < numPlans; ++i) {
CommonStats common("COLLSCAN");
@@ -113,7 +113,7 @@ PlanRankingDecision* createDecision(size_t numPlans) {
why->scores.push_back(0U);
why->candidateOrder.push_back(i);
}
- return why.release();
+ return why;
}
/**
@@ -140,7 +140,7 @@ void addQueryShapeToPlanCache(OperationContext* opCtx,
qs.cacheData->tree.reset(new PlanCacheIndexTree());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- ASSERT_OK(planCache->add(*cq,
+ ASSERT_OK(planCache->set(*cq,
solns,
createDecision(1U),
opCtx->getServiceContext()->getPreciseClockSource()->now()));
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index 703526343e7..9237700b70b 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -310,7 +310,7 @@ Status PlanCacheClear::clear(OperationContext* opCtx,
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- if (!planCache->contains(*cq)) {
+ if (planCache->get(*cq).state == PlanCache::CacheEntryState::kNotPresent) {
// Log if asked to clear non-existent query shape.
LOG(1) << ns << ": query shape doesn't exist in PlanCache - "
<< redact(cq->getQueryObj()) << "(sort: " << cq->getQueryRequest().getSort()
@@ -381,20 +381,18 @@ Status PlanCacheListPlans::list(OperationContext* opCtx,
}
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- if (!planCache.contains(*cq)) {
+ auto lookupResult = planCache.getEntry(*cq);
+
+ if (lookupResult == ErrorCodes::NoSuchKey) {
// Return empty plans in results if query shape does not
// exist in plan cache.
BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
plansBuilder.doneFast();
return Status::OK();
+ } else if (!lookupResult.isOK()) {
+ return lookupResult.getStatus();
}
-
- PlanCacheEntry* entryRaw;
- Status result = planCache.getEntry(*cq, &entryRaw);
- if (!result.isOK()) {
- return result;
- }
- unique_ptr<PlanCacheEntry> entry(entryRaw);
+ std::unique_ptr<PlanCacheEntry> entry = std::move(lookupResult.getValue());
BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
size_t numPlans = entry->plannerData.size();
@@ -443,6 +441,10 @@ Status PlanCacheListPlans::list(OperationContext* opCtx,
// Append the time the entry was inserted into the plan cache.
bob->append("timeOfCreation", entry->timeOfCreation);
+ // Append whether or not the entry is active.
+ bob->append("isActive", entry->isActive);
+ bob->append("works", static_cast<long long>(entry->works));
+
return Status::OK();
}
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 43d0f7d3830..89f0cc8b0f0 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -112,17 +112,18 @@ SolutionCacheData* createSolutionCacheData() {
/**
* Utility function to create a PlanRankingDecision
*/
-PlanRankingDecision* createDecision(size_t numPlans) {
+std::unique_ptr<PlanRankingDecision> createDecision(size_t numPlans, size_t works = 0) {
unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
for (size_t i = 0; i < numPlans; ++i) {
CommonStats common("COLLSCAN");
auto stats = stdx::make_unique<PlanStageStats>(common, STAGE_COLLSCAN);
stats->specific.reset(new CollectionScanStats());
why->stats.push_back(std::move(stats));
+ why->stats[i]->common.works = works;
why->scores.push_back(0U);
why->candidateOrder.push_back(i);
}
- return why.release();
+ return why;
}
TEST(PlanCacheCommandsTest, planCacheListQueryShapesEmpty) {
@@ -151,7 +152,7 @@ TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- ASSERT_OK(planCache.add(*cq,
+ ASSERT_OK(planCache.set(*cq,
solns,
createDecision(1U),
opCtx->getServiceContext()->getPreciseClockSource()->now()));
@@ -186,7 +187,7 @@ TEST(PlanCacheCommandsTest, planCacheClearAllShapes) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- ASSERT_OK(planCache.add(*cq,
+ ASSERT_OK(planCache.set(*cq,
solns,
createDecision(1U),
opCtx->getServiceContext()->getPreciseClockSource()->now()));
@@ -327,11 +328,11 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- ASSERT_OK(planCache.add(*cqA,
+ ASSERT_OK(planCache.set(*cqA,
solns,
createDecision(1U),
opCtx->getServiceContext()->getPreciseClockSource()->now()));
- ASSERT_OK(planCache.add(*cqB,
+ ASSERT_OK(planCache.set(*cqB,
solns,
createDecision(1U),
opCtx->getServiceContext()->getPreciseClockSource()->now()));
@@ -390,11 +391,11 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKeyCollation) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- ASSERT_OK(planCache.add(*cq,
+ ASSERT_OK(planCache.set(*cq,
solns,
createDecision(1U),
opCtx->getServiceContext()->getPreciseClockSource()->now()));
- ASSERT_OK(planCache.add(*cqCollation,
+ ASSERT_OK(planCache.set(*cqCollation,
solns,
createDecision(1U),
opCtx->getServiceContext()->getPreciseClockSource()->now()));
@@ -467,14 +468,12 @@ BSONObj getPlan(const BSONElement& elt) {
return obj.getOwned();
}
-/**
- * Utility function to get list of plan IDs for a query in the cache.
- */
-vector<BSONObj> getPlans(const PlanCache& planCache,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& projection,
- const BSONObj& collation) {
+BSONObj getCmdResult(const PlanCache& planCache,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& projection,
+ const BSONObj& collation) {
+
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
@@ -489,6 +488,22 @@ vector<BSONObj> getPlans(const PlanCache& planCache,
BSONObj cmdObj = cmdObjBuilder.obj();
ASSERT_OK(PlanCacheListPlans::list(opCtx.get(), planCache, nss.ns(), cmdObj, &bob));
BSONObj resultObj = bob.obj();
+
+ return resultObj;
+}
+
+/**
+ * Utility function to get list of plan IDs for a query in the cache.
+ */
+vector<BSONObj> getPlans(const PlanCache& planCache,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& projection,
+ const BSONObj& collation) {
+ BSONObj resultObj = getCmdResult(planCache, query, sort, projection, collation);
+ ASSERT_TRUE(resultObj.hasField("isActive"));
+ ASSERT_TRUE(resultObj.hasField("works"));
+
BSONElement plansElt = resultObj.getField("plans");
ASSERT_EQUALS(plansElt.type(), mongo::Array);
vector<BSONElement> planEltArray = plansElt.Array();
@@ -539,17 +554,20 @@ TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionTrue) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- ASSERT_OK(planCache.add(*cq,
+ ASSERT_OK(planCache.set(*cq,
solns,
- createDecision(1U),
+ createDecision(1U, 123),
opCtx->getServiceContext()->getPreciseClockSource()->now()));
- vector<BSONObj> plans = getPlans(planCache,
+ BSONObj resultObj = getCmdResult(planCache,
cq->getQueryObj(),
cq->getQueryRequest().getSort(),
cq->getQueryRequest().getProj(),
cq->getQueryRequest().getCollation());
- ASSERT_EQUALS(plans.size(), 1U);
+
+ ASSERT_EQ(resultObj["plans"].Array().size(), 1u);
+ ASSERT_EQ(resultObj.getBoolField("isActive"), false);
+ ASSERT_EQ(resultObj.getIntField("works"), 123L);
}
TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionFalse) {
@@ -571,17 +589,20 @@ TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionFalse) {
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
solns.push_back(&qs);
- ASSERT_OK(planCache.add(*cq,
+ ASSERT_OK(planCache.set(*cq,
solns,
- createDecision(2U),
+ createDecision(2U, 333),
opCtx->getServiceContext()->getPreciseClockSource()->now()));
- vector<BSONObj> plans = getPlans(planCache,
+ BSONObj resultObj = getCmdResult(planCache,
cq->getQueryObj(),
cq->getQueryRequest().getSort(),
cq->getQueryRequest().getProj(),
cq->getQueryRequest().getCollation());
- ASSERT_EQUALS(plans.size(), 2U);
+
+ ASSERT_EQ(resultObj["plans"].Array().size(), 2u);
+ ASSERT_EQ(resultObj.getBoolField("isActive"), false);
+ ASSERT_EQ(resultObj.getIntField("works"), 333);
}
@@ -611,14 +632,14 @@ TEST(PlanCacheCommandsTest, planCacheListPlansCollation) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- ASSERT_OK(planCache.add(*cq,
+ ASSERT_OK(planCache.set(*cq,
solns,
createDecision(1U),
opCtx->getServiceContext()->getPreciseClockSource()->now()));
std::vector<QuerySolution*> twoSolns;
twoSolns.push_back(&qs);
twoSolns.push_back(&qs);
- ASSERT_OK(planCache.add(*cqCollation,
+ ASSERT_OK(planCache.set(*cqCollation,
twoSolns,
createDecision(2U),
opCtx->getServiceContext()->getPreciseClockSource()->now()));
@@ -658,11 +679,9 @@ TEST(PlanCacheCommandsTest, planCacheListPlansTimeOfCreationIsCorrect) {
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
auto now = opCtx->getServiceContext()->getPreciseClockSource()->now();
- ASSERT_OK(planCache.add(*cq, solns, createDecision(1U), now));
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U), now));
- PlanCacheEntry* out;
- ASSERT_OK(planCache.getEntry(*cq, &out));
- unique_ptr<PlanCacheEntry> entry(out);
+ auto entry = unittest::assertGet(planCache.getEntry(*cq));
ASSERT_EQ(entry->timeOfCreation, now);
}
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 071f6cf7b25..33a9759f9a8 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -200,6 +200,12 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
_specificStats.replanned = true;
+ if (shouldCache) {
+ // Deactivate the current cache entry.
+ PlanCache* cache = _collection->infoCache()->getPlanCache();
+ cache->deactivate(*_canonicalQuery);
+ }
+
// Use the query planning module to plan the whole query.
auto statusWithSolutions = QueryPlanner::plan(*_canonicalQuery, _plannerParams);
if (!statusWithSolutions.isOK()) {
@@ -220,13 +226,6 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
}
if (1 == solutions.size()) {
- // If there's only one solution, it won't get cached. Make sure to evict the existing
- // cache entry if requested by the caller.
- if (shouldCache) {
- PlanCache* cache = _collection->infoCache()->getPlanCache();
- cache->remove(*_canonicalQuery).transitional_ignore();
- }
-
PlanStage* newRoot;
// Only one possible plan. Build the stages from the solution.
verify(StageBuilder::build(
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index 658312db4a0..fa2f80a5e48 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -103,7 +103,7 @@ private:
* We fallback to a new plan if updatePlanCache() tells us that the performance was worse
* than anticipated during the trial period.
*
- * We only write the result of re-planning to the plan cache if 'shouldCache' is true.
+ * We only modify the plan cache if 'shouldCache' is true.
*/
Status replan(PlanYieldPolicy* yieldPolicy, bool shouldCache);
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 5dc4bd0b863..2b026af10a2 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -326,9 +326,9 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
if (validSolutions) {
_collection->infoCache()
->getPlanCache()
- ->add(*_query,
+ ->set(*_query,
solutions,
- ranking.release(),
+ std::move(ranking),
getOpCtx()->getServiceContext()->getPreciseClockSource()->now())
.transitional_ignore();
}
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 36526c713a7..532dea64044 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -133,17 +133,13 @@ Status SubplanStage::planSubqueries() {
// Plan the i-th child. We might be able to find a plan for the i-th child in the plan
// cache. If there's no cached plan, then we generate and rank plans using the MPS.
- CachedSolution* rawCS;
- if (PlanCache::shouldCacheQuery(*branchResult->canonicalQuery) &&
- _collection->infoCache()
- ->getPlanCache()
- ->get(*branchResult->canonicalQuery, &rawCS)
- .isOK()) {
+ const auto* planCache = _collection->infoCache()->getPlanCache();
+ if (auto cachedSol = planCache->getCacheEntryIfCacheable(*branchResult->canonicalQuery)) {
// We have a CachedSolution. Store it for later.
LOG(5) << "Subplanner: cached plan found for child " << i << " of "
<< _orExpression->numChildren();
- branchResult->cachedSolution.reset(rawCS);
+ branchResult->cachedSolution = std::move(cachedSol);
} else {
// No CachedSolution found. We'll have to plan from scratch.
LOG(5) << "Subplanner: planning child " << i << " of " << _orExpression->numChildren();
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index e337f64f241..36ef683a600 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -119,7 +119,6 @@ void filterAllowedIndexEntries(const AllowedIndicesFilter& allowedIndicesFilter,
namespace {
// The body is below in the "count hack" section but getExecutor calls it.
bool turnIxscanIntoCount(QuerySolution* soln);
-
} // namespace
@@ -360,11 +359,9 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
}
// Try to look up a cached solution for the query.
- CachedSolution* rawCS;
- if (PlanCache::shouldCacheQuery(*canonicalQuery) &&
- collection->infoCache()->getPlanCache()->get(*canonicalQuery, &rawCS).isOK()) {
+ if (auto cs =
+ collection->infoCache()->getPlanCache()->getCacheEntryIfCacheable(*canonicalQuery)) {
// We have a CachedSolution. Have the planner turn it into a QuerySolution.
- unique_ptr<CachedSolution> cs(rawCS);
auto statusWithQs = QueryPlanner::planFromCache(*canonicalQuery, plannerParams, *cs);
if (statusWithQs.isOK()) {
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp
index 0a609fb43c3..6a788719f50 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache.cpp
@@ -369,7 +369,7 @@ CachedSolution::CachedSolution(const PlanCacheKey& key, const PlanCacheEntry& en
sort(entry.sort.getOwned()),
projection(entry.projection.getOwned()),
collation(entry.collation.getOwned()),
- decisionWorks(entry.decision->stats[0]->common.works) {
+ decisionWorks(entry.works) {
// CachedSolution should not have any references into the
// cache entry. All relevant data should be cloned/copied.
for (size_t i = 0; i < entry.plannerData.size(); ++i) {
@@ -432,6 +432,8 @@ PlanCacheEntry* PlanCacheEntry::clone() const {
entry->projection = projection.getOwned();
entry->collation = collation.getOwned();
entry->timeOfCreation = timeOfCreation;
+ entry->isActive = isActive;
+ entry->works = works;
// Copy performance stats.
for (size_t i = 0; i < feedback.size(); ++i) {
@@ -554,12 +556,34 @@ std::string SolutionCacheData::toString() const {
// PlanCache
//
-PlanCache::PlanCache() : _cache(internalQueryCacheSize.load()) {}
+PlanCache::PlanCache() : PlanCache(internalQueryCacheSize.load()) {}
+
+PlanCache::PlanCache(size_t size) : _cache(size) {}
PlanCache::PlanCache(const std::string& ns) : _cache(internalQueryCacheSize.load()), _ns(ns) {}
PlanCache::~PlanCache() {}
+/*
+ * Determine whether or not the cache should be used. If it shouldn't be used because the cache
+ * entry exists but is inactive, log a message.
+ */
+std::unique_ptr<CachedSolution> PlanCache::getCacheEntryIfCacheable(
+ const CanonicalQuery& cq) const {
+ if (!PlanCache::shouldCacheQuery(cq)) {
+ return nullptr;
+ }
+
+ PlanCache::GetResult res = get(cq);
+ if (res.state == PlanCache::CacheEntryState::kPresentInactive) {
+ LOG(2) << "Not using cached entry for " << redact(cq.toStringShort())
+ << " since it is inactive";
+ return nullptr;
+ }
+
+ return std::move(res.cachedSolution);
+}
+
/**
* Traverses expression tree pre-order.
* Appends an encoding of each node's match type and path name
@@ -703,10 +727,80 @@ void PlanCache::encodeKeyForProj(const BSONObj& projObj, StringBuilder* keyBuild
}
}
-Status PlanCache::add(const CanonicalQuery& query,
+/**
+ * Given a query, and an (optional) current cache entry for its shape ('oldEntry'), determine
+ * whether:
+ * - We should create a new entry
+ * - The new entry should be marked 'active'
+ */
+PlanCache::NewEntryState PlanCache::getNewEntryState(const CanonicalQuery& query,
+ PlanCacheEntry* oldEntry,
+ size_t newWorks,
+ double growthCoefficient) {
+ NewEntryState res;
+ if (!oldEntry) {
+ LOG(1) << "Creating inactive cache entry for query shape " << redact(query.toStringShort())
+ << " with works value " << newWorks;
+ res.shouldBeCreated = true;
+ res.shouldBeActive = false;
+ return res;
+ }
+
+ if (oldEntry->isActive && newWorks <= oldEntry->works) {
+ // The new plan did better than the currently stored active plan. This case may
+ // occur if many MultiPlanners are run simultaneously.
+
+ LOG(1) << "Replacing active cache entry for query " << redact(query.toStringShort())
+ << " with works " << oldEntry->works << " with a plan with works " << newWorks;
+ res.shouldBeCreated = true;
+ res.shouldBeActive = true;
+ } else if (oldEntry->isActive) {
+ LOG(1) << "Attempt to write to the planCache for query " << redact(query.toStringShort())
+ << " with a plan with works " << newWorks
+ << " is a noop, since there's already a plan with works value " << oldEntry->works;
+ // There is already an active cache entry with a higher works value.
+ // We do nothing.
+ res.shouldBeCreated = false;
+ } else if (newWorks > oldEntry->works) {
+ // This plan performed worse than expected. Rather than immediately overwriting the
+ // cache, lower the bar to what is considered good performance and keep the entry
+ // inactive.
+
+ // Be sure that 'works' always grows by at least 1, in case its current
+ // value and 'internalQueryCacheWorksGrowthCoefficient' are low enough that
+ // the old works * the growth coefficient, cast to size_t, is the same as the
+ // previous value of 'works'.
+ const double increasedWorks = std::max(
+ oldEntry->works + 1u, static_cast<size_t>(oldEntry->works * growthCoefficient));
+
+ LOG(1) << "Increasing work value associated with cache entry for query "
+ << redact(query.toStringShort()) << " from " << oldEntry->works << " to "
+ << increasedWorks;
+ oldEntry->works = increasedWorks;
+
+ // Don't create a new entry.
+ res.shouldBeCreated = false;
+ } else {
+ // This plan performed just as well or better than we expected, based on the
+ // inactive entry's works. We use this as an indicator that it's safe to
+ // cache (as an active entry) the plan this query used for the future.
+ LOG(1) << "Inactive cache entry for query " << redact(query.toStringShort())
+ << " with works " << oldEntry->works
+ << " is being promoted to active entry with works value " << newWorks;
+ // We'll replace the old inactive entry with an active entry.
+ res.shouldBeCreated = true;
+ res.shouldBeActive = true;
+ }
+
+ return res;
+}
+
+
+Status PlanCache::set(const CanonicalQuery& query,
const std::vector<QuerySolution*>& solns,
- PlanRankingDecision* why,
- Date_t now) {
+ std::unique_ptr<PlanRankingDecision> why,
+ Date_t now,
+ boost::optional<double> worksGrowthCoefficient) {
invariant(why);
if (solns.empty()) {
@@ -726,14 +820,39 @@ Status PlanCache::add(const CanonicalQuery& query,
"candidate ordering entries in decision must match solutions");
}
- PlanCacheEntry* entry = new PlanCacheEntry(solns, why);
+ const auto key = computeKey(query);
+ const size_t newWorks = why->stats[0]->common.works;
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ bool isNewEntryActive = false;
+ if (internalQueryCacheDisableInactiveEntries.load()) {
+ // All entries are always active.
+ isNewEntryActive = true;
+ } else {
+ PlanCacheEntry* oldEntry = nullptr;
+ Status cacheStatus = _cache.get(key, &oldEntry);
+ invariant(cacheStatus.isOK() || cacheStatus == ErrorCodes::NoSuchKey);
+ auto newState = getNewEntryState(
+ query,
+ oldEntry,
+ newWorks,
+ worksGrowthCoefficient.get_value_or(internalQueryCacheWorksGrowthCoefficient));
+
+ if (!newState.shouldBeCreated) {
+ return Status::OK();
+ }
+ isNewEntryActive = newState.shouldBeActive;
+ }
+
+ auto newEntry = std::make_unique<PlanCacheEntry>(solns, why.release());
const QueryRequest& qr = query.getQueryRequest();
- entry->query = qr.getFilter().getOwned();
- entry->sort = qr.getSort().getOwned();
+ newEntry->query = qr.getFilter().getOwned();
+ newEntry->sort = qr.getSort().getOwned();
+ newEntry->isActive = isNewEntryActive;
+ newEntry->works = newWorks;
if (query.getCollator()) {
- entry->collation = query.getCollator()->getSpec().toBSON();
+ newEntry->collation = query.getCollator()->getSpec().toBSON();
}
- entry->timeOfCreation = now;
+ newEntry->timeOfCreation = now;
// Strip projections on $-prefixed fields, as these are added by internal callers of the query
@@ -745,10 +864,9 @@ Status PlanCache::add(const CanonicalQuery& query,
}
projBuilder.append(elem);
}
- entry->projection = projBuilder.obj();
+ newEntry->projection = projBuilder.obj();
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- std::unique_ptr<PlanCacheEntry> evictedEntry = _cache.add(computeKey(query), entry);
+ std::unique_ptr<PlanCacheEntry> evictedEntry = _cache.add(key, newEntry.release());
if (NULL != evictedEntry.get()) {
LOG(1) << _ns << ": plan cache maximum size exceeded - "
@@ -758,21 +876,39 @@ Status PlanCache::add(const CanonicalQuery& query,
return Status::OK();
}
-Status PlanCache::get(const CanonicalQuery& query, CachedSolution** crOut) const {
- PlanCacheKey key = computeKey(query);
- verify(crOut);
+void PlanCache::deactivate(const CanonicalQuery& query) {
+ if (internalQueryCacheDisableInactiveEntries.load()) {
+ // This is a noop if inactive entries are disabled.
+ return;
+ }
+ PlanCacheKey key = computeKey(query);
stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- PlanCacheEntry* entry;
+ PlanCacheEntry* entry = nullptr;
Status cacheStatus = _cache.get(key, &entry);
if (!cacheStatus.isOK()) {
- return cacheStatus;
+ invariant(cacheStatus == ErrorCodes::NoSuchKey);
+ return;
}
invariant(entry);
+ entry->isActive = false;
+}
- *crOut = new CachedSolution(key, *entry);
+PlanCache::GetResult PlanCache::get(const CanonicalQuery& query) const {
+ PlanCacheKey key = computeKey(query);
- return Status::OK();
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ PlanCacheEntry* entry = nullptr;
+ Status cacheStatus = _cache.get(key, &entry);
+ if (!cacheStatus.isOK()) {
+ invariant(cacheStatus == ErrorCodes::NoSuchKey);
+ return {CacheEntryState::kNotPresent, nullptr};
+ }
+ invariant(entry);
+
+ auto state =
+ entry->isActive ? CacheEntryState::kPresentActive : CacheEntryState::kPresentInactive;
+ return {state, stdx::make_unique<CachedSolution>(key, *entry)};
}
Status PlanCache::feedback(const CanonicalQuery& cq, PlanCacheEntryFeedback* feedback) {
@@ -816,9 +952,8 @@ PlanCacheKey PlanCache::computeKey(const CanonicalQuery& cq) const {
return keyBuilder.str();
}
-Status PlanCache::getEntry(const CanonicalQuery& query, PlanCacheEntry** entryOut) const {
+StatusWith<std::unique_ptr<PlanCacheEntry>> PlanCache::getEntry(const CanonicalQuery& query) const {
PlanCacheKey key = computeKey(query);
- verify(entryOut);
stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
PlanCacheEntry* entry;
@@ -828,9 +963,7 @@ Status PlanCache::getEntry(const CanonicalQuery& query, PlanCacheEntry** entryOu
}
invariant(entry);
- *entryOut = entry->clone();
-
- return Status::OK();
+ return std::unique_ptr<PlanCacheEntry>(entry->clone());
}
std::vector<PlanCacheEntry*> PlanCache::getAllEntries() const {
@@ -845,11 +978,6 @@ std::vector<PlanCacheEntry*> PlanCache::getAllEntries() const {
return entries;
}
-bool PlanCache::contains(const CanonicalQuery& cq) const {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- return _cache.hasKey(computeKey(cq));
-}
-
size_t PlanCache::size() const {
stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
return _cache.size();
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index e97e57db06b..3e32ee2b46c 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -280,19 +280,55 @@ public:
// Annotations from cached runs. The CachedPlanStage provides these stats about its
// runs when they complete.
std::vector<PlanCacheEntryFeedback*> feedback;
+
+ // Whether or not the cache entry is active. Inactive cache entries should not be used for
+ // planning.
+ bool isActive = false;
+
+ // The number of "works" required for a plan to run on this shape before it becomes
+ // active. This value is also used to determine the number of works necessary in order to
+ // trigger a replan. Running a query of the same shape while this cache entry is inactive may
+ // cause this value to be increased.
+ size_t works = 0;
};
/**
* Caches the best solution to a query. Aside from the (CanonicalQuery -> QuerySolution)
* mapping, the cache contains information on why that mapping was made and statistics on the
* cache entry's actual performance on subsequent runs.
- *
*/
class PlanCache {
private:
MONGO_DISALLOW_COPYING(PlanCache);
public:
+ // We have three states for a cache entry to be in. Rather than just 'present' or 'not
+ // present', we use a notion of 'inactive entries' as a way of remembering how performant our
+ // original solution to the query was. This information is useful to prevent much slower
+ // queries from putting their plans in the cache immediately, which could cause faster queries
+ // to run with a sub-optimal plan. Since cache entries must go through the "vetting" process of
+ // being inactive, we protect ourselves from the possibility of simply adding a cache entry
+ // with a very high works value which will never be evicted.
+ enum CacheEntryState {
+ // There is no cache entry for the given query shape.
+ kNotPresent,
+
+ // There is a cache entry for the given query shape, but it is inactive, meaning that it
+ // should not be used when planning.
+ kPresentInactive,
+
+ // There is a cache entry for the given query shape, and it is active.
+ kPresentActive,
+ };
+
+ /**
+ * Encapsulates the value returned from a call to get().
+ */
+ struct GetResult {
+ CacheEntryState state;
+ std::unique_ptr<CachedSolution> cachedSolution;
+ };
+
/**
* We don't want to cache every possible query. This function
* encapsulates the criteria for what makes a canonical query
@@ -305,6 +341,8 @@ public:
*/
PlanCache();
+ PlanCache(size_t size);
+
PlanCache(const std::string& ns);
~PlanCache();
@@ -317,26 +355,40 @@ public:
* for passing the current time so that the time the plan cache entry was created is stored
* in the plan cache.
*
- * Takes ownership of 'why'.
+ * 'worksGrowthCoefficient' specifies what multiplier to use when growing the 'works' value of
+ * an inactive cache entry. If boost::none is provided, the function will use
+ * 'internalQueryCacheWorksGrowthCoefficient'.
*
- * If the mapping was added successfully, returns Status::OK().
- * If the mapping already existed or some other error occurred, returns another Status.
+ * If the mapping was set successfully, returns Status::OK(), even if it evicted another entry.
*/
- Status add(const CanonicalQuery& query,
+ Status set(const CanonicalQuery& query,
const std::vector<QuerySolution*>& solns,
- PlanRankingDecision* why,
- Date_t now);
+ std::unique_ptr<PlanRankingDecision> why,
+ Date_t now,
+ boost::optional<double> worksGrowthCoefficient = boost::none);
+
+ /**
+ * Set a cache entry back to the 'inactive' state. Rather than completely evicting an entry
+ * when the associated plan starts to perform poorly, we deactivate it, so that plans which
+ * perform even worse than the one already in the cache may not easily take its place.
+ */
+ void deactivate(const CanonicalQuery& query);
/**
* Look up the cached data access for the provided 'query'. Used by the query planner
* to shortcut planning.
*
- * If there is no entry in the cache for the 'query', returns an error Status.
- *
- * If there is an entry in the cache, populates 'crOut' and returns Status::OK(). Caller
- * owns '*crOut'.
+ * The return value will provide the "state" of the cache entry, as well as the CachedSolution
+ * for the query (if there is one).
*/
- Status get(const CanonicalQuery& query, CachedSolution** crOut) const;
+ GetResult get(const CanonicalQuery& query) const;
+
+ /**
+ * Determine whether or not the cache should be used. If it shouldn't be used because the cache
+ * entry exists but is inactive, log a message. Returns nullptr if the cache should not be
+ * used, and a CachedSolution otherwise.
+ */
+ std::unique_ptr<CachedSolution> getCacheEntryIfCacheable(const CanonicalQuery& cq) const;
/**
* When the CachedPlanStage runs a plan out of the cache, we want to record data about the
@@ -378,13 +430,10 @@ public:
/**
* Returns a copy of a cache entry.
* Used by planCacheListPlans to display plan details.
- *
- * If there is no entry in the cache for the 'query', returns an error Status.
*
- * If there is an entry in the cache, populates 'entryOut' and returns Status::OK(). Caller
- * owns '*entryOut'.
+ * If there is no entry in the cache for the 'query', returns an error Status.
*/
- Status getEntry(const CanonicalQuery& cq, PlanCacheEntry** entryOut) const;
+ StatusWith<std::unique_ptr<PlanCacheEntry>> getEntry(const CanonicalQuery& cq) const;
/**
* Returns a vector of all cache entries.
@@ -395,13 +444,7 @@ public:
std::vector<PlanCacheEntry*> getAllEntries() const;
/**
- * Returns true if there is an entry in the cache for the 'query'.
- * Internally calls hasKey() on the LRU cache.
- */
- bool contains(const CanonicalQuery& cq) const;
-
- /**
- * Returns number of entries in cache.
+ * Returns number of entries in cache. Includes inactive entries.
* Used for testing.
*/
size_t size() const;
@@ -415,6 +458,16 @@ public:
void notifyOfIndexEntries(const std::vector<IndexEntry>& indexEntries);
private:
+ struct NewEntryState {
+ bool shouldBeCreated = false;
+ bool shouldBeActive = false;
+ };
+
+ NewEntryState getNewEntryState(const CanonicalQuery& query,
+ PlanCacheEntry* oldEntry,
+ size_t newWorks,
+ double growthCoefficient);
+
void encodeKeyForMatch(const MatchExpression* tree, StringBuilder* keyBuilder) const;
void encodeKeyForSort(const BSONObj& sortObj, StringBuilder* keyBuilder) const;
void encodeKeyForProj(const BSONObj& projObj, StringBuilder* keyBuilder) const;
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 30ec089160b..cb180ef7971 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -237,17 +237,18 @@ struct GenerateQuerySolution {
/**
* Utility function to create a PlanRankingDecision
*/
-PlanRankingDecision* createDecision(size_t numPlans) {
+std::unique_ptr<PlanRankingDecision> createDecision(size_t numPlans, size_t works = 0) {
unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
for (size_t i = 0; i < numPlans; ++i) {
CommonStats common("COLLSCAN");
auto stats = stdx::make_unique<PlanStageStats>(common, STAGE_COLLSCAN);
stats->specific.reset(new CollectionScanStats());
why->stats.push_back(std::move(stats));
+ why->stats[i]->common.works = works;
why->scores.push_back(0U);
why->candidateOrder.push_back(i);
}
- return why.release();
+ return why;
}
/**
@@ -284,6 +285,13 @@ void assertShouldNotCacheQuery(const char* queryStr) {
assertShouldNotCacheQuery(*cq);
}
+std::unique_ptr<QuerySolution> getQuerySolutionForCaching() {
+ std::unique_ptr<QuerySolution> qs = std::make_unique<QuerySolution>();
+ qs->cacheData = stdx::make_unique<SolutionCacheData>();
+ qs->cacheData->tree = stdx::make_unique<PlanCacheIndexTree>();
+ return qs;
+}
+
/**
* Cacheable queries
* These queries will be added to the cache with run-time statistics
@@ -425,27 +433,336 @@ TEST(PlanCacheTest, AddEmptySolutions) {
std::vector<QuerySolution*> solns;
unique_ptr<PlanRankingDecision> decision(createDecision(1U));
QueryTestServiceContext serviceContext;
- ASSERT_NOT_OK(planCache.add(*cq, solns, decision.get(), Date_t{}));
+ ASSERT_NOT_OK(planCache.set(*cq, solns, std::move(decision), Date_t{}));
+}
+
+void addCacheEntryForShape(const CanonicalQuery& cq, PlanCache* planCache) {
+ invariant(planCache);
+ auto qs = getQuerySolutionForCaching();
+ std::vector<QuerySolution*> solns = {qs.get()};
+
+ ASSERT_OK(planCache->set(cq, solns, createDecision(1U), Date_t{}));
}
-TEST(PlanCacheTest, AddValidSolution) {
+TEST(PlanCacheTest, InactiveEntriesDisabled) {
+ // Set the global flag for disabling inactive entries.
+ internalQueryCacheDisableInactiveEntries.store(true);
+ ON_BLOCK_EXIT([] { internalQueryCacheDisableInactiveEntries.store(false); });
+
PlanCache planCache;
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
- QuerySolution qs;
- qs.cacheData.reset(new SolutionCacheData());
- qs.cacheData->tree.reset(new PlanCacheIndexTree());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
+ auto qs = getQuerySolutionForCaching();
+ std::vector<QuerySolution*> solns = {qs.get()};
+
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ QueryTestServiceContext serviceContext;
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U), Date_t{}));
+
+ // After add, the planCache should have an _active_ entry.
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
+
+ // Call deactivate(). It should be a noop.
+ planCache.deactivate(*cq);
+
+ // The entry should still be active.
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
+
+ // remove() the entry.
+ ASSERT_OK(planCache.remove(*cq));
+ ASSERT_EQ(planCache.size(), 0U);
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+}
+
+
+TEST(PlanCacheTest, PlanCacheLRUPolicyRemovesInactiveEntries) {
+ // Use a tiny cache size.
+ const size_t kCacheSize = 2;
+ PlanCache planCache(kCacheSize);
+ QueryTestServiceContext serviceContext;
+
+ unique_ptr<CanonicalQuery> cqA(canonicalize("{a: 1}"));
+ ASSERT_EQ(planCache.get(*cqA).state, PlanCache::CacheEntryState::kNotPresent);
+ addCacheEntryForShape(*cqA.get(), &planCache);
+
+ // After add, the planCache should have an inactive entry.
+ ASSERT_EQ(planCache.get(*cqA).state, PlanCache::CacheEntryState::kPresentInactive);
+
+ // Add a cache entry for another shape.
+ unique_ptr<CanonicalQuery> cqB(canonicalize("{b: 1}"));
+ ASSERT_EQ(planCache.get(*cqB).state, PlanCache::CacheEntryState::kNotPresent);
+ addCacheEntryForShape(*cqB.get(), &planCache);
+ ASSERT_EQ(planCache.get(*cqB).state, PlanCache::CacheEntryState::kPresentInactive);
+
+ // Access the cached solution for the {a: 1} shape. Now the entry for {b: 1} will be the least
+ // recently used.
+ ASSERT_EQ(planCache.get(*cqA).state, PlanCache::CacheEntryState::kPresentInactive);
+
+ // Insert another entry. Since the cache size is 2, we expect the {b: 1} entry to be ejected.
+ unique_ptr<CanonicalQuery> cqC(canonicalize("{c: 1}"));
+ ASSERT_EQ(planCache.get(*cqC).state, PlanCache::CacheEntryState::kNotPresent);
+ addCacheEntryForShape(*cqC.get(), &planCache);
+
+ // Check that {b: 1} is gone, but {a: 1} and {c: 1} both still have entries.
+ ASSERT_EQ(planCache.get(*cqB).state, PlanCache::CacheEntryState::kNotPresent);
+ ASSERT_EQ(planCache.get(*cqA).state, PlanCache::CacheEntryState::kPresentInactive);
+ ASSERT_EQ(planCache.get(*cqC).state, PlanCache::CacheEntryState::kPresentInactive);
+}
+
+TEST(PlanCacheTest, PlanCacheRemoveDeletesInactiveEntries) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ auto qs = getQuerySolutionForCaching();
+ std::vector<QuerySolution*> solns = {qs.get()};
+
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ QueryTestServiceContext serviceContext;
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U), Date_t{}));
+
+ // After add, the planCache should have an inactive entry.
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
+
+ // remove() the entry.
+ ASSERT_OK(planCache.remove(*cq));
+ ASSERT_EQ(planCache.size(), 0U);
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+}
+
+TEST(PlanCacheTest, PlanCacheFlushDeletesInactiveEntries) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ auto qs = getQuerySolutionForCaching();
+ std::vector<QuerySolution*> solns = {qs.get()};
+
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ QueryTestServiceContext serviceContext;
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U), Date_t{}));
+
+ // After add, the planCache should have an inactive entry.
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
+
+ // Clear the plan cache. The inactive entry should now be removed.
+ planCache.clear();
+ ASSERT_EQ(planCache.size(), 0U);
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+}
+
+TEST(PlanCacheTest, AddActiveCacheEntry) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ auto qs = getQuerySolutionForCaching();
+ std::vector<QuerySolution*> solns = {qs.get()};
- // Check if key is in cache before and after add().
- ASSERT_FALSE(planCache.contains(*cq));
+ // Check if key is in cache before and after set().
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
QueryTestServiceContext serviceContext;
- ASSERT_OK(planCache.add(*cq, solns, createDecision(1U), Date_t{}));
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U, 20), Date_t{}));
- ASSERT_TRUE(planCache.contains(*cq));
+ // After add, the planCache should have an inactive entry.
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
+
+ // Calling set() again with a solution that has a lower works value should create an
+ // active entry.
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U, 10), Date_t{}));
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
ASSERT_EQUALS(planCache.size(), 1U);
+
+ // Clear the plan cache. The active entry should now be removed.
+ planCache.clear();
+ ASSERT_EQ(planCache.size(), 0U);
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+}
+
+TEST(PlanCacheTest, WorksValueIncreases) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ auto qs = getQuerySolutionForCaching();
+ std::vector<QuerySolution*> solns = {qs.get()};
+
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ QueryTestServiceContext serviceContext;
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U, 10), Date_t{}));
+
+ // After add, the planCache should have an inactive entry.
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
+ auto entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_EQ(entry->works, 10U);
+ ASSERT_FALSE(entry->isActive);
+
+ // Calling set() again, with a solution that has a higher works value. This should cause the
+ // works on the original entry to be increased.
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U, 50), Date_t{}));
+
+ // The entry should still be inactive. Its works should double though.
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
+ entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_FALSE(entry->isActive);
+ ASSERT_EQ(entry->works, 20U);
+
+ // Calling set() again, with a solution that has a higher works value. This should cause the
+ // works on the original entry to be increased.
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U, 30), Date_t{}));
+
+ // The entry should still be inactive. Its works should have doubled again.
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
+ entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_FALSE(entry->isActive);
+ ASSERT_EQ(entry->works, 40U);
+
+ // Calling set() again, with a solution that has a lower works value than what's currently in
+ // the cache.
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U, 25), Date_t{}));
+
+ // The solution just run should now be in an active cache entry, with a works
+ // equal to the number of works the solution took.
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
+ entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_TRUE(entry->isActive);
+ ASSERT_EQ(entry->decision->stats[0]->common.works, 25U);
+ ASSERT_EQ(entry->works, 25U);
+
+ ASSERT_EQUALS(planCache.size(), 1U);
+
+ // Clear the plan cache. The active entry should now be removed.
+ planCache.clear();
+ ASSERT_EQ(planCache.size(), 0U);
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+}
+
+TEST(PlanCacheTest, WorksValueIncreasesByAtLeastOne) {
+ // Will use a very small growth coefficient.
+ const double kWorksCoeff = 1.10;
+
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ auto qs = getQuerySolutionForCaching();
+ std::vector<QuerySolution*> solns = {qs.get()};
+
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ QueryTestServiceContext serviceContext;
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U, 3), Date_t{}));
+
+ // After add, the planCache should have an inactive entry.
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
+ auto entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_EQ(entry->works, 3U);
+ ASSERT_FALSE(entry->isActive);
+
+ // Calling set() again, with a solution that has a higher works value. This should cause the
+ // works on the original entry to be increased. In this case, since nWorks is 3,
+ // multiplying by the value 1.10 will give a value of 3 (static_cast<size_t>(1.1 * 3) == 3).
+ // We check that the works value is increased by 1 instead.
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U, 50), Date_t{}, kWorksCoeff));
+
+ // The entry should still be inactive. Its works should increase by 1.
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
+ entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_FALSE(entry->isActive);
+ ASSERT_EQ(entry->works, 4U);
+
+ // Clear the plan cache. The inactive entry should now be removed.
+ planCache.clear();
+ ASSERT_EQ(planCache.size(), 0U);
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
}
+TEST(PlanCacheTest, SetIsNoopWhenNewEntryIsWorse) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ auto qs = getQuerySolutionForCaching();
+ std::vector<QuerySolution*> solns = {qs.get()};
+
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ QueryTestServiceContext serviceContext;
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U, 50), Date_t{}));
+
+ // After add, the planCache should have an inactive entry.
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
+ auto entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_EQ(entry->works, 50U);
+ ASSERT_FALSE(entry->isActive);
+
+ // Call set() again, with a solution that has a lower works value. This will result in an
+ // active entry being created.
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U, 20), Date_t{}));
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
+ entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_TRUE(entry->isActive);
+ ASSERT_EQ(entry->works, 20U);
+
+ // Now call set() again, but with a solution that has a higher works value. This should be
+ // a noop.
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U, 100), Date_t{}));
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
+ entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_TRUE(entry->isActive);
+ ASSERT_EQ(entry->works, 20U);
+}
+
+TEST(PlanCacheTest, SetOverwritesWhenNewEntryIsBetter) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ auto qs = getQuerySolutionForCaching();
+ std::vector<QuerySolution*> solns = {qs.get()};
+
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ QueryTestServiceContext serviceContext;
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U, 50), Date_t{}));
+
+ // After add, the planCache should have an inactive entry.
+ auto entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_EQ(entry->works, 50U);
+ ASSERT_FALSE(entry->isActive);
+
+ // Call set() again, with a solution that has a lower works value. This will result in an
+ // active entry being created.
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U, 20), Date_t{}));
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
+ entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_TRUE(entry->isActive);
+ ASSERT_EQ(entry->works, 20U);
+
+ // Now call set() again, with a solution that has a lower works value. The current active entry
+ // should be overwritten.
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U, 10), Date_t{}));
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
+ entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_TRUE(entry->isActive);
+ ASSERT_EQ(entry->works, 10U);
+}
+
+TEST(PlanCacheTest, DeactivateCacheEntry) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ auto qs = getQuerySolutionForCaching();
+ std::vector<QuerySolution*> solns = {qs.get()};
+
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+ QueryTestServiceContext serviceContext;
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U, 50), Date_t{}));
+
+ // After add, the planCache should have an inactive entry.
+ auto entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_EQ(entry->works, 50U);
+ ASSERT_FALSE(entry->isActive);
+
+ // Call set() again, with a solution that has a lower works value. This will result in an
+ // active entry being created.
+ ASSERT_OK(planCache.set(*cq, solns, createDecision(1U, 20), Date_t{}));
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
+ entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_TRUE(entry->isActive);
+ ASSERT_EQ(entry->works, 20U);
+
+ planCache.deactivate(*cq);
+ ASSERT_EQ(planCache.get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
+
+ // Be sure the entry has the same works value.
+ entry = assertGet(planCache.getEntry(*cq));
+ ASSERT_FALSE(entry->isActive);
+ ASSERT_EQ(entry->works, 20U);
+}
+
+
/**
* Each test in the CachePlanSelectionTest suite goes through
* the following flow:
@@ -557,7 +874,7 @@ protected:
// Clean up any previous state from a call to runQueryFull or runQueryAsCommand.
solns.clear();
- auto qr = stdx::make_unique<QueryRequest>(nss);
+ auto qr = std::make_unique<QueryRequest>(nss);
qr->setFilter(query);
qr->setSort(sort);
qr->setProj(proj);
@@ -685,7 +1002,7 @@ protected:
qs.cacheData.reset(soln.cacheData->clone());
std::vector<QuerySolution*> solutions;
solutions.push_back(&qs);
- PlanCacheEntry entry(solutions, createDecision(1U));
+ PlanCacheEntry entry(solutions, createDecision(1U).release());
CachedSolution cachedSoln(ck, entry);
auto statusWithQs = QueryPlanner::planFromCache(*scopedCq, params, cachedSoln);
diff --git a/src/mongo/db/query/query_knobs.cpp b/src/mongo/db/query/query_knobs.cpp
index 73f7e618769..920b04af17d 100644
--- a/src/mongo/db/query/query_knobs.cpp
+++ b/src/mongo/db/query/query_knobs.cpp
@@ -44,6 +44,17 @@ MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheFeedbacksStored, int, 20);
MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheEvictionRatio, double, 10.0);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheWorksGrowthCoefficient, double, 2.0)
+ ->withValidator([](const double& newVal) {
+ if (newVal <= 1.0) {
+ return Status(ErrorCodes::BadValue,
+ "internalQueryCacheWorksGrowthCoefficient must be > 1.0");
+ }
+ return Status::OK();
+ });
+
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheDisableInactiveEntries, bool, false);
+
MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlannerMaxIndexedSolutions, int, 64);
MONGO_EXPORT_SERVER_PARAMETER(internalQueryEnumerationMaxOrSolutions, int, 10);
diff --git a/src/mongo/db/query/query_knobs.h b/src/mongo/db/query/query_knobs.h
index 5b4d759b26a..a361e875dcc 100644
--- a/src/mongo/db/query/query_knobs.h
+++ b/src/mongo/db/query/query_knobs.h
@@ -72,6 +72,13 @@ extern AtomicInt32 internalQueryCacheFeedbacksStored;
// and replanning?
extern AtomicDouble internalQueryCacheEvictionRatio;
+// How quickly the 'works' value in an inactive cache entry will grow. It grows
+// exponentially. The value of this server parameter is the base.
+extern AtomicDouble internalQueryCacheWorksGrowthCoefficient;
+
+// Whether or not cache entries can be marked as "inactive."
+extern AtomicBool internalQueryCacheDisableInactiveEntries;
+
//
// Planning and enumeration.
//
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index c626a272b9f..f202477b2af 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -52,9 +52,21 @@ namespace QueryStageCachedPlan {
static const NamespaceString nss("unittests.QueryStageCachedPlan");
-class QueryStageCachedPlanBase {
+namespace {
+std::unique_ptr<CanonicalQuery> canonicalQueryFromFilterObj(OperationContext* opCtx,
+ const NamespaceString& nss,
+ BSONObj filter) {
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(filter);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr));
+ uassertStatusOK(statusWithCQ.getStatus());
+ return std::move(statusWithCQ.getValue());
+}
+}
+
+class QueryStageCachedPlan : public unittest::Test {
public:
- QueryStageCachedPlanBase() {
+ void setUp() {
// If collection exists already, we need to drop it.
dropCollection();
@@ -102,109 +114,33 @@ public:
return &_opCtx;
}
-protected:
- const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
- OperationContext& _opCtx = *_opCtxPtr;
- WorkingSet _ws;
-};
-
-/**
- * Test that on failure, the cached plan stage replans the query but does not create a new cache
- * entry.
- */
-class QueryStageCachedPlanFailure : public QueryStageCachedPlanBase {
-public:
- void run() {
- AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- Collection* collection = ctx.getCollection();
- ASSERT(collection);
-
- // Query can be answered by either index on "a" or index on "b".
- auto qr = stdx::make_unique<QueryRequest>(nss);
- qr->setFilter(fromjson("{a: {$gte: 8}, b: 1}"));
- auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr));
- ASSERT_OK(statusWithCQ.getStatus());
- const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
-
- // We shouldn't have anything in the plan cache for this shape yet.
- PlanCache* cache = collection->infoCache()->getPlanCache();
- ASSERT(cache);
- CachedSolution* rawCachedSolution;
- ASSERT_NOT_OK(cache->get(*cq, &rawCachedSolution));
-
- // Get planner params.
- QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
-
- // Queued data stage will return a failure during the cached plan trial period.
- auto mockChild = stdx::make_unique<QueuedDataStage>(&_opCtx, &_ws);
- mockChild->pushBack(PlanStage::FAILURE);
-
- // High enough so that we shouldn't trigger a replan based on works.
- const size_t decisionWorks = 50;
- CachedPlanStage cachedPlanStage(
- &_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release());
-
- // This should succeed after triggering a replan.
- PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
- _opCtx.getServiceContext()->getFastClockSource());
- ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy));
-
- // Make sure that we get 2 legit results back.
+ static size_t getNumResultsForStage(const WorkingSet& ws,
+ CachedPlanStage* cachedPlanStage,
+ CanonicalQuery* cq) {
size_t numResults = 0;
PlanStage::StageState state = PlanStage::NEED_TIME;
while (state != PlanStage::IS_EOF) {
WorkingSetID id = WorkingSet::INVALID_ID;
- state = cachedPlanStage.work(&id);
+ state = cachedPlanStage->work(&id);
ASSERT_NE(state, PlanStage::FAILURE);
ASSERT_NE(state, PlanStage::DEAD);
if (state == PlanStage::ADVANCED) {
- WorkingSetMember* member = _ws.get(id);
+ WorkingSetMember* member = ws.get(id);
ASSERT(cq->root()->matchesBSON(member->obj.value()));
numResults++;
}
}
- ASSERT_EQ(numResults, 2U);
-
- // Plan cache should still be empty, as we don't write to it when we replan a failed
- // query.
- ASSERT_NOT_OK(cache->get(*cq, &rawCachedSolution));
+ return numResults;
}
-};
-
-/**
- * Test that hitting the cached plan stage trial period's threshold for work cycles causes the
- * query to be replanned. Also verify that the replanning results in a new plan cache entry.
- */
-class QueryStageCachedPlanHitMaxWorks : public QueryStageCachedPlanBase {
-public:
- void run() {
- AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- Collection* collection = ctx.getCollection();
- ASSERT(collection);
-
- // Query can be answered by either index on "a" or index on "b".
- auto qr = stdx::make_unique<QueryRequest>(nss);
- qr->setFilter(fromjson("{a: {$gte: 8}, b: 1}"));
- auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr));
- ASSERT_OK(statusWithCQ.getStatus());
- const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
-
- // We shouldn't have anything in the plan cache for this shape yet.
- PlanCache* cache = collection->infoCache()->getPlanCache();
- ASSERT(cache);
- CachedSolution* rawCachedSolution;
- ASSERT_NOT_OK(cache->get(*cq, &rawCachedSolution));
+ void forceReplanning(Collection* collection, CanonicalQuery* cq) {
// Get planner params.
QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
+ fillOutPlannerParams(&_opCtx, collection, cq, &plannerParams);
- // Set up queued data stage to take a long time before returning EOF. Should be long
- // enough to trigger a replan.
const size_t decisionWorks = 10;
const size_t mockWorks =
1U + static_cast<size_t>(internalQueryCacheEvictionRatio * decisionWorks);
@@ -214,49 +150,273 @@ public:
}
CachedPlanStage cachedPlanStage(
- &_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release());
+ &_opCtx, collection, &_ws, cq, plannerParams, decisionWorks, mockChild.release());
// This should succeed after triggering a replan.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
_opCtx.getServiceContext()->getFastClockSource());
ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy));
+ }
- // Make sure that we get 2 legit results back.
- size_t numResults = 0;
- PlanStage::StageState state = PlanStage::NEED_TIME;
- while (state != PlanStage::IS_EOF) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- state = cachedPlanStage.work(&id);
+protected:
+ const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
+ OperationContext& _opCtx = *_opCtxPtr;
+ WorkingSet _ws;
+};
- ASSERT_NE(state, PlanStage::FAILURE);
- ASSERT_NE(state, PlanStage::DEAD);
+/**
+ * Test that on failure, the cached plan stage replans the query but does not create a new cache
+ * entry.
+ */
+TEST_F(QueryStageCachedPlan, QueryStageCachedPlanFailure) {
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
+ Collection* collection = ctx.getCollection();
+ ASSERT(collection);
+
+ // Query can be answered by either index on "a" or index on "b".
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson("{a: {$gte: 8}, b: 1}"));
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr));
+ ASSERT_OK(statusWithCQ.getStatus());
+ const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+
+ // We shouldn't have anything in the plan cache for this shape yet.
+ PlanCache* cache = collection->infoCache()->getPlanCache();
+ ASSERT(cache);
+ ASSERT_EQ(cache->get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+
+ // Get planner params.
+ QueryPlannerParams plannerParams;
+ fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
+
+ // Queued data stage will return a failure during the cached plan trial period.
+ auto mockChild = stdx::make_unique<QueuedDataStage>(&_opCtx, &_ws);
+ mockChild->pushBack(PlanStage::FAILURE);
+
+ // High enough so that we shouldn't trigger a replan based on works.
+ const size_t decisionWorks = 50;
+ CachedPlanStage cachedPlanStage(
+ &_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release());
+
+ // This should succeed after triggering a replan.
+ PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
+ _opCtx.getServiceContext()->getFastClockSource());
+ ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy));
+
+ ASSERT_EQ(getNumResultsForStage(_ws, &cachedPlanStage, cq.get()), 2U);
+
+ // Plan cache should still be empty, as we don't write to it when we replan a failed
+ // query.
+ ASSERT_EQ(cache->get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+}
- if (state == PlanStage::ADVANCED) {
- WorkingSetMember* member = _ws.get(id);
- ASSERT(cq->root()->matchesBSON(member->obj.value()));
- numResults++;
- }
- }
+/**
+ * Test that hitting the cached plan stage trial period's threshold for work cycles causes the
+ * query to be replanned. Also verify that the replanning results in a new plan cache entry.
+ */
+TEST_F(QueryStageCachedPlan, QueryStageCachedPlanHitMaxWorks) {
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
+ Collection* collection = ctx.getCollection();
+ ASSERT(collection);
+
+ // Query can be answered by either index on "a" or index on "b".
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson("{a: {$gte: 8}, b: 1}"));
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr));
+ ASSERT_OK(statusWithCQ.getStatus());
+ const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+
+ // We shouldn't have anything in the plan cache for this shape yet.
+ PlanCache* cache = collection->infoCache()->getPlanCache();
+ ASSERT(cache);
+ ASSERT_EQ(cache->get(*cq).state, PlanCache::CacheEntryState::kNotPresent);
+
+ // Get planner params.
+ QueryPlannerParams plannerParams;
+ fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
+
+ // Set up queued data stage to take a long time before returning EOF. Should be long
+ // enough to trigger a replan.
+ const size_t decisionWorks = 10;
+ const size_t mockWorks =
+ 1U + static_cast<size_t>(internalQueryCacheEvictionRatio * decisionWorks);
+ auto mockChild = stdx::make_unique<QueuedDataStage>(&_opCtx, &_ws);
+ for (size_t i = 0; i < mockWorks; i++) {
+ mockChild->pushBack(PlanStage::NEED_TIME);
+ }
- ASSERT_EQ(numResults, 2U);
+ CachedPlanStage cachedPlanStage(
+ &_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release());
- // This time we expect to find something in the plan cache. Replans after hitting the
- // works threshold result in a cache entry.
- ASSERT_OK(cache->get(*cq, &rawCachedSolution));
- const std::unique_ptr<CachedSolution> cachedSolution(rawCachedSolution);
- }
-};
+ // This should succeed after triggering a replan.
+ PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
+ _opCtx.getServiceContext()->getFastClockSource());
+ ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy));
-class All : public Suite {
-public:
- All() : Suite("query_stage_cached_plan") {}
+ ASSERT_EQ(getNumResultsForStage(_ws, &cachedPlanStage, cq.get()), 2U);
- void setupTests() {
- add<QueryStageCachedPlanFailure>();
- add<QueryStageCachedPlanHitMaxWorks>();
+ // This time we expect to find something in the plan cache. Replans after hitting the
+ // works threshold result in a cache entry.
+ ASSERT_EQ(cache->get(*cq).state, PlanCache::CacheEntryState::kPresentInactive);
+}
+
+/**
+ * Test the way cache entries are added (either "active" or "inactive") to the plan cache.
+ */
+TEST_F(QueryStageCachedPlan, QueryStageCachedPlanAddsActiveCacheEntries) {
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
+ Collection* collection = ctx.getCollection();
+ ASSERT(collection);
+
+ // Never run - just used as a key for the cache's get() functions, since all of the other
+ // CanonicalQueries created in this test will have this shape.
+ const auto shapeCq =
+ canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 123}, b: {$gte: 123}}"));
+
+ // Query can be answered by either index on "a" or index on "b".
+ const auto noResultsCq =
+ canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 11}, b: {$gte: 11}}"));
+
+ // We shouldn't have anything in the plan cache for this shape yet.
+ PlanCache* cache = collection->infoCache()->getPlanCache();
+ ASSERT(cache);
+ ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kNotPresent);
+
+ // Run the CachedPlanStage with a long-running child plan. Replanning should be
+ // triggered and an inactive entry will be added.
+ forceReplanning(collection, noResultsCq.get());
+
+ // Check for an inactive cache entry.
+ ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentInactive);
+
+ // The works should be 1 for the entry since the query we ran should not have any results.
+ auto entry = assertGet(cache->getEntry(*shapeCq));
+ size_t works = 1U;
+ ASSERT_EQ(entry->works, works);
+
+ const size_t kExpectedNumWorks = 10;
+ for (int i = 0; i < std::ceil(std::log(kExpectedNumWorks) / std::log(2)); ++i) {
+ works *= 2;
+        // Run another query of the same shape, which is less selective, and therefore takes
+        // longer.
+ auto someResultsCq =
+ canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 1}, b: {$gte: 0}}"));
+ forceReplanning(collection, someResultsCq.get());
+
+ ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentInactive);
+ // The works on the cache entry should have doubled.
+ entry = assertGet(cache->getEntry(*shapeCq));
+ ASSERT_EQ(entry->works, works);
}
-};
-SuiteInstance<All> all;
+ // Run another query which takes less time, and be sure an active entry is created.
+ auto fewResultsCq =
+ canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 6}, b: {$gte: 0}}"));
+ forceReplanning(collection, fewResultsCq.get());
+
+ // Now there should be an active cache entry.
+ ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentActive);
+ entry = assertGet(cache->getEntry(*shapeCq));
+    // This query will match {a: 6} through {a: 9} (4 works), plus one for EOF = 5 works.
+ ASSERT_EQ(entry->works, 5U);
+}
+
+
+TEST_F(QueryStageCachedPlan, DeactivatesEntriesOnReplan) {
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
+ Collection* collection = ctx.getCollection();
+ ASSERT(collection);
+
+ // Never run - just used as a key for the cache's get() functions, since all of the other
+ // CanonicalQueries created in this test will have this shape.
+ const auto shapeCq =
+ canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 123}, b: {$gte: 123}}"));
+
+ // Query can be answered by either index on "a" or index on "b".
+ const auto noResultsCq =
+ canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 11}, b: {$gte: 11}}"));
+
+ // We shouldn't have anything in the plan cache for this shape yet.
+ PlanCache* cache = collection->infoCache()->getPlanCache();
+ ASSERT(cache);
+ ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kNotPresent);
+
+ // Run the CachedPlanStage with a long-running child plan. Replanning should be
+ // triggered and an inactive entry will be added.
+ forceReplanning(collection, noResultsCq.get());
+
+ // Check for an inactive cache entry.
+ ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentInactive);
+
+ // Run the plan again, to create an active entry.
+ forceReplanning(collection, noResultsCq.get());
+
+ // The works should be 1 for the entry since the query we ran should not have any results.
+ ASSERT_EQ(cache->get(*noResultsCq.get()).state, PlanCache::CacheEntryState::kPresentActive);
+ auto entry = assertGet(cache->getEntry(*shapeCq));
+ size_t works = 1U;
+ ASSERT_EQ(entry->works, works);
+
+ // Run another query which takes long enough to evict the active cache entry. The current
+ // cache entry's works value is a very low number. When replanning is triggered, the cache
+ // entry will be deactivated, but the new plan will not overwrite it, since the new plan will
+ // have a higher works. Therefore, we will be left in an inactive entry which has had its works
+ // value doubled from 1 to 2.
+ auto highWorksCq =
+ canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 0}, b: {$gte:0}}"));
+ forceReplanning(collection, highWorksCq.get());
+ ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentInactive);
+ ASSERT_EQ(assertGet(cache->getEntry(*shapeCq))->works, 2U);
+
+ // Again, force replanning. This time run the initial query which finds no results. The multi
+ // planner will choose a plan with works value lower than the existing inactive
+ // entry. Replanning will thus deactivate the existing entry (it's already
+ // inactive so this is a noop), then create a new entry with a works value of 1.
+ forceReplanning(collection, noResultsCq.get());
+ ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentActive);
+ ASSERT_EQ(assertGet(cache->getEntry(*shapeCq))->works, 1U);
+}
+
+TEST_F(QueryStageCachedPlan, EntriesAreNotDeactivatedWhenInactiveEntriesDisabled) {
+ // Set the global flag for disabling active entries.
+ internalQueryCacheDisableInactiveEntries.store(true);
+ ON_BLOCK_EXIT([] { internalQueryCacheDisableInactiveEntries.store(false); });
+
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
+ Collection* collection = ctx.getCollection();
+ ASSERT(collection);
+
+ // Never run - just used as a key for the cache's get() functions, since all of the other
+ // CanonicalQueries created in this test will have this shape.
+ const auto shapeCq =
+ canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 123}, b: {$gte: 123}}"));
+
+ // Query can be answered by either index on "a" or index on "b".
+ const auto noResultsCq =
+ canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 11}, b: {$gte: 11}}"));
+
+ // We shouldn't have anything in the plan cache for this shape yet.
+ PlanCache* cache = collection->infoCache()->getPlanCache();
+ ASSERT(cache);
+ ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kNotPresent);
+
+ // Run the CachedPlanStage with a long-running child plan. Replanning should be
+ // triggered and an _active_ entry will be added (since the disableInactiveEntries flag is on).
+ forceReplanning(collection, noResultsCq.get());
+
+    // Check for an active cache entry.
+ ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentActive);
+
+ // Run the plan again. The entry should still be active.
+ forceReplanning(collection, noResultsCq.get());
+ ASSERT_EQ(cache->get(*noResultsCq.get()).state, PlanCache::CacheEntryState::kPresentActive);
+
+ // Run another query which takes long enough to evict the active cache entry. After replanning
+    // is triggered, be sure that the cache entry is still active.
+ auto highWorksCq =
+ canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 0}, b: {$gte:0}}"));
+ forceReplanning(collection, highWorksCq.get());
+ ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentActive);
+}
} // namespace QueryStageCachedPlan
diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp
index 7a5629b7117..bc12573b8aa 100644
--- a/src/mongo/dbtests/query_stage_multiplan.cpp
+++ b/src/mongo/dbtests/query_stage_multiplan.cpp
@@ -119,6 +119,101 @@ protected:
DBDirectClient _client;
};
+std::unique_ptr<CanonicalQuery> makeCanonicalQuery(OperationContext* opCtx,
+ NamespaceString nss,
+ BSONObj filter) {
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(filter);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr));
+ ASSERT_OK(statusWithCQ.getStatus());
+ unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+ ASSERT(cq);
+ return cq;
+}
+
+unique_ptr<PlanStage> getIxScanPlan(OperationContext* opCtx,
+ const Collection* coll,
+ WorkingSet* sharedWs,
+ int desiredFooValue) {
+ std::vector<IndexDescriptor*> indexes;
+ coll->getIndexCatalog()->findIndexesByKeyPattern(opCtx, BSON("foo" << 1), false, &indexes);
+ ASSERT_EQ(indexes.size(), 1U);
+
+ IndexScanParams ixparams;
+ ixparams.descriptor = indexes[0];
+ ixparams.bounds.isSimpleRange = true;
+ ixparams.bounds.startKey = BSON("" << desiredFooValue);
+ ixparams.bounds.endKey = BSON("" << desiredFooValue);
+ ixparams.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
+ ixparams.direction = 1;
+
+ IndexScan* ix = new IndexScan(opCtx, ixparams, sharedWs, nullptr);
+ unique_ptr<PlanStage> root(new FetchStage(opCtx, sharedWs, ix, nullptr, coll));
+
+ return root;
+}
+
+unique_ptr<MatchExpression> makeMatchExpressionFromFilter(OperationContext* opCtx,
+ BSONObj filterObj) {
+ const CollatorInterface* collator = nullptr;
+ const boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx, collator));
+ StatusWithMatchExpression statusWithMatcher = MatchExpressionParser::parse(filterObj, expCtx);
+ ASSERT_OK(statusWithMatcher.getStatus());
+ unique_ptr<MatchExpression> filter = std::move(statusWithMatcher.getValue());
+ ASSERT(filter);
+ return filter;
+}
+
+
+unique_ptr<PlanStage> getCollScanPlan(OperationContext* opCtx,
+ const Collection* coll,
+ WorkingSet* sharedWs,
+ MatchExpression* matchExpr) {
+ CollectionScanParams csparams;
+ csparams.collection = coll;
+ csparams.direction = CollectionScanParams::FORWARD;
+
+ unique_ptr<PlanStage> root(new CollectionScan(opCtx, csparams, sharedWs, matchExpr));
+
+ return root;
+}
+
+std::unique_ptr<MultiPlanStage> runMultiPlanner(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const Collection* coll,
+ int desiredFooValue) {
+ // Plan 0: IXScan over foo == desiredFooValue
+ // Every call to work() returns something so this should clearly win (by current scoring
+ // at least).
+ unique_ptr<WorkingSet> sharedWs(new WorkingSet());
+ unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(opCtx, coll, sharedWs.get(), desiredFooValue);
+
+ // Plan 1: CollScan.
+ BSONObj filterObj = BSON("foo" << desiredFooValue);
+ unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(opCtx, filterObj);
+ unique_ptr<PlanStage> collScanRoot = getCollScanPlan(opCtx, coll, sharedWs.get(), filter.get());
+
+ // Hand the plans off to the MPS.
+ auto cq = makeCanonicalQuery(opCtx, nss, BSON("foo" << desiredFooValue));
+
+ unique_ptr<MultiPlanStage> mps = make_unique<MultiPlanStage>(opCtx, coll, cq.get());
+ mps->addPlan(createQuerySolution(), ixScanRoot.release(), sharedWs.get());
+ mps->addPlan(createQuerySolution(), collScanRoot.release(), sharedWs.get());
+
+ // Plan 0 aka the first plan aka the index scan should be the best.
+ PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
+ opCtx->getServiceContext()->getFastClockSource());
+ ASSERT_OK(mps->pickBestPlan(&yieldPolicy));
+ ASSERT(mps->bestPlanChosen());
+ ASSERT_EQUALS(0, mps->bestPlanIdx());
+
+ return mps;
+}
+
+size_t getBestPlanWorks(MultiPlanStage* mps) {
+ return mps->getChildren()[mps->bestPlanIdx()]->getStats()->common.works;
+}
+
// Basic ranking test: collection scan vs. highly selective index scan. Make sure we also get
// all expected results out as well.
@@ -136,52 +231,22 @@ TEST_F(QueryStageMultiPlanTest, MPSCollectionScanVsHighlySelectiveIXScan) {
// Plan 0: IXScan over foo == 7
// Every call to work() returns something so this should clearly win (by current scoring
// at least).
- std::vector<IndexDescriptor*> indexes;
- coll->getIndexCatalog()->findIndexesByKeyPattern(
- _opCtx.get(), BSON("foo" << 1), false, &indexes);
- ASSERT_EQ(indexes.size(), 1U);
-
- IndexScanParams ixparams;
- ixparams.descriptor = indexes[0];
- ixparams.bounds.isSimpleRange = true;
- ixparams.bounds.startKey = BSON("" << 7);
- ixparams.bounds.endKey = BSON("" << 7);
- ixparams.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
- ixparams.direction = 1;
-
unique_ptr<WorkingSet> sharedWs(new WorkingSet());
- IndexScan* ix = new IndexScan(_opCtx.get(), ixparams, sharedWs.get(), NULL);
- unique_ptr<PlanStage> firstRoot(new FetchStage(_opCtx.get(), sharedWs.get(), ix, NULL, coll));
+ unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_opCtx.get(), coll, sharedWs.get(), 7);
// Plan 1: CollScan with matcher.
- CollectionScanParams csparams;
- csparams.collection = coll;
- csparams.direction = CollectionScanParams::FORWARD;
-
- // Make the filter.
BSONObj filterObj = BSON("foo" << 7);
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(_opCtx.get(), collator));
- StatusWithMatchExpression statusWithMatcher = MatchExpressionParser::parse(filterObj, expCtx);
- verify(statusWithMatcher.isOK());
- unique_ptr<MatchExpression> filter = std::move(statusWithMatcher.getValue());
- // Make the stage.
- unique_ptr<PlanStage> secondRoot(
- new CollectionScan(_opCtx.get(), csparams, sharedWs.get(), filter.get()));
+ unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_opCtx.get(), filterObj);
+ unique_ptr<PlanStage> collScanRoot =
+ getCollScanPlan(_opCtx.get(), coll, sharedWs.get(), filter.get());
// Hand the plans off to the MPS.
- auto qr = stdx::make_unique<QueryRequest>(nss);
- qr->setFilter(BSON("foo" << 7));
- auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr));
- verify(statusWithCQ.isOK());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- verify(NULL != cq.get());
+ auto cq = makeCanonicalQuery(_opCtx.get(), nss, filterObj);
unique_ptr<MultiPlanStage> mps =
make_unique<MultiPlanStage>(_opCtx.get(), ctx.getCollection(), cq.get());
- mps->addPlan(createQuerySolution(), firstRoot.release(), sharedWs.get());
- mps->addPlan(createQuerySolution(), secondRoot.release(), sharedWs.get());
+ mps->addPlan(createQuerySolution(), ixScanRoot.release(), sharedWs.get());
+ mps->addPlan(createQuerySolution(), collScanRoot.release(), sharedWs.get());
// Plan 0 aka the first plan aka the index scan should be the best.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
@@ -211,6 +276,89 @@ TEST_F(QueryStageMultiPlanTest, MPSCollectionScanVsHighlySelectiveIXScan) {
ASSERT_EQUALS(results, N / 10);
}
+TEST_F(QueryStageMultiPlanTest, MPSDoesNotCreateActiveCacheEntryImmediately) {
+ const int N = 100;
+ for (int i = 0; i < N; ++i) {
+ // Have a larger proportion of 5's than anything else.
+ int toInsert = i % 10 >= 8 ? 5 : i % 10;
+ insert(BSON("foo" << toInsert));
+ }
+
+ addIndex(BSON("foo" << 1));
+
+ AutoGetCollectionForReadCommand ctx(_opCtx.get(), nss);
+ const Collection* coll = ctx.getCollection();
+
+ const auto cq = makeCanonicalQuery(_opCtx.get(), nss, BSON("foo" << 7));
+
+ // Run an index scan and collection scan, searching for {foo: 7}.
+ auto mps = runMultiPlanner(_opCtx.get(), nss, coll, 7);
+
+ // Be sure that an inactive cache entry was added.
+ PlanCache* cache = coll->infoCache()->getPlanCache();
+ ASSERT_EQ(cache->size(), 1U);
+ auto entry = assertGet(cache->getEntry(*cq));
+ ASSERT_FALSE(entry->isActive);
+ const size_t firstQueryWorks = getBestPlanWorks(mps.get());
+ ASSERT_EQ(firstQueryWorks, entry->works);
+
+ // Run the multi-planner again. The index scan will again win, but the number of works
+ // will be greater, since {foo: 5} appears more frequently in the collection.
+ mps = runMultiPlanner(_opCtx.get(), nss, coll, 5);
+
+ // The last plan run should have required far more works than the previous plan. This means
+ // that the 'works' in the cache entry should have doubled.
+ ASSERT_EQ(cache->size(), 1U);
+ entry = assertGet(cache->getEntry(*cq));
+ ASSERT_FALSE(entry->isActive);
+ ASSERT_EQ(firstQueryWorks * 2, entry->works);
+
+ // Run the exact same query again. This will still take more works than 'works', and
+ // should cause the cache entry's 'works' to be doubled again.
+ mps = runMultiPlanner(_opCtx.get(), nss, coll, 5);
+ ASSERT_EQ(cache->size(), 1U);
+ entry = assertGet(cache->getEntry(*cq));
+ ASSERT_FALSE(entry->isActive);
+ ASSERT_EQ(firstQueryWorks * 2 * 2, entry->works);
+
+ // Run the query yet again. This time, an active cache entry should be created.
+ mps = runMultiPlanner(_opCtx.get(), nss, coll, 5);
+ ASSERT_EQ(cache->size(), 1U);
+ entry = assertGet(cache->getEntry(*cq));
+ ASSERT_TRUE(entry->isActive);
+ ASSERT_EQ(getBestPlanWorks(mps.get()), entry->works);
+}
+
+TEST_F(QueryStageMultiPlanTest, MPSDoesCreatesActiveEntryWhenInactiveEntriesDisabled) {
+ // Set the global flag for disabling active entries.
+ internalQueryCacheDisableInactiveEntries.store(true);
+ ON_BLOCK_EXIT([] { internalQueryCacheDisableInactiveEntries.store(false); });
+
+ const int N = 100;
+ for (int i = 0; i < N; ++i) {
+ insert(BSON("foo" << i));
+ }
+
+ addIndex(BSON("foo" << 1));
+
+ AutoGetCollectionForReadCommand ctx(_opCtx.get(), nss);
+ const Collection* coll = ctx.getCollection();
+
+ const auto cq = makeCanonicalQuery(_opCtx.get(), nss, BSON("foo" << 7));
+
+ // Run an index scan and collection scan, searching for {foo: 7}.
+ auto mps = runMultiPlanner(_opCtx.get(), nss, coll, 7);
+
+ // Be sure that an _active_ cache entry was added.
+ PlanCache* cache = coll->infoCache()->getPlanCache();
+ ASSERT_EQ(cache->get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
+
+ // Run the multi-planner again. The entry should still be active.
+ mps = runMultiPlanner(_opCtx.get(), nss, coll, 5);
+
+ ASSERT_EQ(cache->get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
+}
+
// Case in which we select a blocking plan as the winner, and a non-blocking plan
// is available as a backup.
TEST_F(QueryStageMultiPlanTest, MPSBackupPlan) {
@@ -431,22 +579,8 @@ TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfExceedsTimeLimitDuringPlannin
// Plan 0: IXScan over foo == 7
// Every call to work() returns something so this should clearly win (by current scoring
// at least).
- std::vector<IndexDescriptor*> indexes;
- coll->getIndexCatalog()->findIndexesByKeyPattern(
- _opCtx.get(), BSON("foo" << 1), false, &indexes);
- ASSERT_EQ(indexes.size(), 1U);
-
- IndexScanParams ixparams;
- ixparams.descriptor = indexes[0];
- ixparams.bounds.isSimpleRange = true;
- ixparams.bounds.startKey = BSON("" << 7);
- ixparams.bounds.endKey = BSON("" << 7);
- ixparams.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
- ixparams.direction = 1;
-
unique_ptr<WorkingSet> sharedWs(new WorkingSet());
- IndexScan* ix = new IndexScan(_opCtx.get(), ixparams, sharedWs.get(), NULL);
- unique_ptr<PlanStage> firstRoot(new FetchStage(_opCtx.get(), sharedWs.get(), ix, NULL, coll));
+ unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_opCtx.get(), coll, sharedWs.get(), 7);
// Plan 1: CollScan with matcher.
CollectionScanParams csparams;
@@ -455,26 +589,21 @@ TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfExceedsTimeLimitDuringPlannin
// Make the filter.
BSONObj filterObj = BSON("foo" << 7);
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(_opCtx.get(), collator));
- StatusWithMatchExpression statusWithMatcher = MatchExpressionParser::parse(filterObj, expCtx);
- verify(statusWithMatcher.isOK());
- unique_ptr<MatchExpression> filter = std::move(statusWithMatcher.getValue());
- // Make the stage.
- unique_ptr<PlanStage> secondRoot(
- new CollectionScan(_opCtx.get(), csparams, sharedWs.get(), filter.get()));
+ unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_opCtx.get(), filterObj);
+ unique_ptr<PlanStage> collScanRoot =
+ getCollScanPlan(_opCtx.get(), coll, sharedWs.get(), filter.get());
+
auto queryRequest = stdx::make_unique<QueryRequest>(nss);
- queryRequest->setFilter(BSON("foo" << 7));
+ queryRequest->setFilter(filterObj);
auto canonicalQuery =
uassertStatusOK(CanonicalQuery::canonicalize(opCtx(), std::move(queryRequest)));
MultiPlanStage multiPlanStage(opCtx(),
ctx.getCollection(),
canonicalQuery.get(),
MultiPlanStage::CachingMode::NeverCache);
- multiPlanStage.addPlan(createQuerySolution(), firstRoot.release(), sharedWs.get());
- multiPlanStage.addPlan(createQuerySolution(), secondRoot.release(), sharedWs.get());
+ multiPlanStage.addPlan(createQuerySolution(), ixScanRoot.release(), sharedWs.get());
+ multiPlanStage.addPlan(createQuerySolution(), collScanRoot.release(), sharedWs.get());
AlwaysTimeOutYieldPolicy alwaysTimeOutPolicy(serviceContext()->getFastClockSource());
ASSERT_EQ(ErrorCodes::ExceededTimeLimit, multiPlanStage.pickBestPlan(&alwaysTimeOutPolicy));
@@ -496,39 +625,14 @@ TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfKilledDuringPlanning) {
// Plan 0: IXScan over foo == 7
// Every call to work() returns something so this should clearly win (by current scoring
// at least).
- std::vector<IndexDescriptor*> indexes;
- coll->getIndexCatalog()->findIndexesByKeyPattern(
- _opCtx.get(), BSON("foo" << 1), false, &indexes);
- ASSERT_EQ(indexes.size(), 1U);
-
- IndexScanParams ixparams;
- ixparams.descriptor = indexes[0];
- ixparams.bounds.isSimpleRange = true;
- ixparams.bounds.startKey = BSON("" << 7);
- ixparams.bounds.endKey = BSON("" << 7);
- ixparams.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
- ixparams.direction = 1;
-
unique_ptr<WorkingSet> sharedWs(new WorkingSet());
- IndexScan* ix = new IndexScan(_opCtx.get(), ixparams, sharedWs.get(), NULL);
- unique_ptr<PlanStage> firstRoot(new FetchStage(_opCtx.get(), sharedWs.get(), ix, NULL, coll));
+ unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_opCtx.get(), coll, sharedWs.get(), 7);
- // Plan 1: CollScan with matcher.
- CollectionScanParams csparams;
- csparams.collection = coll;
- csparams.direction = CollectionScanParams::FORWARD;
-
- // Make the filter.
+ // Plan 1: CollScan.
BSONObj filterObj = BSON("foo" << 7);
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(_opCtx.get(), collator));
- StatusWithMatchExpression statusWithMatcher = MatchExpressionParser::parse(filterObj, expCtx);
- verify(statusWithMatcher.isOK());
- unique_ptr<MatchExpression> filter = std::move(statusWithMatcher.getValue());
- // Make the stage.
- unique_ptr<PlanStage> secondRoot(
- new CollectionScan(_opCtx.get(), csparams, sharedWs.get(), filter.get()));
+ unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_opCtx.get(), filterObj);
+ unique_ptr<PlanStage> collScanRoot =
+ getCollScanPlan(_opCtx.get(), coll, sharedWs.get(), filter.get());
auto queryRequest = stdx::make_unique<QueryRequest>(nss);
queryRequest->setFilter(BSON("foo" << BSON("$gte" << 0)));
@@ -538,8 +642,8 @@ TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfKilledDuringPlanning) {
ctx.getCollection(),
canonicalQuery.get(),
MultiPlanStage::CachingMode::NeverCache);
- multiPlanStage.addPlan(createQuerySolution(), firstRoot.release(), sharedWs.get());
- multiPlanStage.addPlan(createQuerySolution(), secondRoot.release(), sharedWs.get());
+ multiPlanStage.addPlan(createQuerySolution(), ixScanRoot.release(), sharedWs.get());
+ multiPlanStage.addPlan(createQuerySolution(), collScanRoot.release(), sharedWs.get());
AlwaysPlanKilledYieldPolicy alwaysPlanKilledYieldPolicy(serviceContext()->getFastClockSource());
ASSERT_EQ(ErrorCodes::QueryPlanKilled,
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index bbee92fe89c..891493e99c1 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -176,6 +176,10 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanPlanFromCache) {
QueryPlannerParams plannerParams;
fillOutPlannerParams(opCtx(), collection, cq.get(), &plannerParams);
+ // For the remainder of this test, ensure that cache entries are available immediately, and
+ // don't need go through an 'inactive' state before being usable.
+ internalQueryCacheDisableInactiveEntries.store(true);
+
WorkingSet ws;
std::unique_ptr<SubplanStage> subplan(
new SubplanStage(opCtx(), collection, &ws, plannerParams, cq.get()));