summaryrefslogtreecommitdiff
path: root/src/mongo/db/query/plan_cache_test.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/mongo/db/query/plan_cache_test.cpp')
-rw-r--r--src/mongo/db/query/plan_cache_test.cpp2175
1 files changed, 1112 insertions, 1063 deletions
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 78b9bdf959c..da15528d243 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -50,1169 +50,1218 @@ using namespace mongo;
namespace {
- using std::string;
- using std::unique_ptr;
- using std::vector;
+using std::string;
+using std::unique_ptr;
+using std::vector;
- static const char* ns = "somebogusns";
+static const char* ns = "somebogusns";
- /**
- * Utility functions to create a CanonicalQuery
- */
- CanonicalQuery* canonicalize(const BSONObj& queryObj) {
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, &cq);
- ASSERT_OK(result);
- return cq;
- }
-
- CanonicalQuery* canonicalize(const char* queryStr) {
- BSONObj queryObj = fromjson(queryStr);
- return canonicalize(queryObj);
- }
-
- CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr,
- const char* projStr) {
- BSONObj queryObj = fromjson(queryStr);
- BSONObj sortObj = fromjson(sortStr);
- BSONObj projObj = fromjson(projStr);
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj,
- projObj,
- &cq);
- ASSERT_OK(result);
- return cq;
- }
-
- CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr,
- const char* projStr,
- long long skip, long long limit,
- const char* hintStr,
- const char* minStr, const char* maxStr) {
- BSONObj queryObj = fromjson(queryStr);
- BSONObj sortObj = fromjson(sortStr);
- BSONObj projObj = fromjson(projStr);
- BSONObj hintObj = fromjson(hintStr);
- BSONObj minObj = fromjson(minStr);
- BSONObj maxObj = fromjson(maxStr);
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj,
- projObj,
- skip, limit,
- hintObj,
- minObj, maxObj,
- false, // snapshot
- false, // explain
- &cq);
- ASSERT_OK(result);
- return cq;
- }
-
- CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr,
- const char* projStr,
- long long skip, long long limit,
- const char* hintStr,
- const char* minStr, const char* maxStr,
- bool snapshot,
- bool explain) {
- BSONObj queryObj = fromjson(queryStr);
- BSONObj sortObj = fromjson(sortStr);
- BSONObj projObj = fromjson(projStr);
- BSONObj hintObj = fromjson(hintStr);
- BSONObj minObj = fromjson(minStr);
- BSONObj maxObj = fromjson(maxStr);
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj,
- projObj,
- skip, limit,
- hintObj,
- minObj, maxObj,
- snapshot,
- explain,
- &cq);
- ASSERT_OK(result);
- return cq;
- }
-
- /**
- * Utility function to create MatchExpression
- */
- MatchExpression* parseMatchExpression(const BSONObj& obj) {
- StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
- if (!status.isOK()) {
- str::stream ss;
- ss << "failed to parse query: " << obj.toString()
- << ". Reason: " << status.getStatus().toString();
- FAIL(ss);
- }
- MatchExpression* expr(status.getValue());
- return expr;
- }
+/**
+ * Utility functions to create a CanonicalQuery
+ */
+CanonicalQuery* canonicalize(const BSONObj& queryObj) {
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns, queryObj, &cq);
+ ASSERT_OK(result);
+ return cq;
+}
+
+CanonicalQuery* canonicalize(const char* queryStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ return canonicalize(queryObj);
+}
+
+CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr, const char* projStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ BSONObj sortObj = fromjson(sortStr);
+ BSONObj projObj = fromjson(projStr);
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj, projObj, &cq);
+ ASSERT_OK(result);
+ return cq;
+}
+
+CanonicalQuery* canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ long long skip,
+ long long limit,
+ const char* hintStr,
+ const char* minStr,
+ const char* maxStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ BSONObj sortObj = fromjson(sortStr);
+ BSONObj projObj = fromjson(projStr);
+ BSONObj hintObj = fromjson(hintStr);
+ BSONObj minObj = fromjson(minStr);
+ BSONObj maxObj = fromjson(maxStr);
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns,
+ queryObj,
+ sortObj,
+ projObj,
+ skip,
+ limit,
+ hintObj,
+ minObj,
+ maxObj,
+ false, // snapshot
+ false, // explain
+ &cq);
+ ASSERT_OK(result);
+ return cq;
+}
+
+CanonicalQuery* canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ long long skip,
+ long long limit,
+ const char* hintStr,
+ const char* minStr,
+ const char* maxStr,
+ bool snapshot,
+ bool explain) {
+ BSONObj queryObj = fromjson(queryStr);
+ BSONObj sortObj = fromjson(sortStr);
+ BSONObj projObj = fromjson(projStr);
+ BSONObj hintObj = fromjson(hintStr);
+ BSONObj minObj = fromjson(minStr);
+ BSONObj maxObj = fromjson(maxStr);
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns,
+ queryObj,
+ sortObj,
+ projObj,
+ skip,
+ limit,
+ hintObj,
+ minObj,
+ maxObj,
+ snapshot,
+ explain,
+ &cq);
+ ASSERT_OK(result);
+ return cq;
+}
- void assertEquivalent(const char* queryStr, const MatchExpression* expected, const MatchExpression* actual) {
- if (actual->equivalent(expected)) {
- return;
- }
+/**
+ * Utility function to create MatchExpression
+ */
+MatchExpression* parseMatchExpression(const BSONObj& obj) {
+ StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
+ if (!status.isOK()) {
str::stream ss;
- ss << "Match expressions are not equivalent."
- << "\nOriginal query: " << queryStr
- << "\nExpected: " << expected->toString()
- << "\nActual: " << actual->toString();
+ ss << "failed to parse query: " << obj.toString()
+ << ". Reason: " << status.getStatus().toString();
FAIL(ss);
}
-
- //
- // Tests for CachedSolution
- //
-
- /**
- * Generator for vector of QuerySolution shared pointers.
- */
- struct GenerateQuerySolution {
- QuerySolution* operator()() const {
- unique_ptr<QuerySolution> qs(new QuerySolution());
- qs->cacheData.reset(new SolutionCacheData());
- qs->cacheData->solnType = SolutionCacheData::COLLSCAN_SOLN;
- qs->cacheData->tree.reset(new PlanCacheIndexTree());
- return qs.release();
- }
- };
-
- /**
- * Utility function to create a PlanRankingDecision
- */
- PlanRankingDecision* createDecision(size_t numPlans) {
- unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
- for (size_t i = 0; i < numPlans; ++i) {
- CommonStats common("COLLSCAN");
- unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
- stats->specific.reset(new CollectionScanStats());
- why->stats.mutableVector().push_back(stats.release());
- why->scores.push_back(0U);
- why->candidateOrder.push_back(i);
- }
- return why.release();
+ MatchExpression* expr(status.getValue());
+ return expr;
+}
+
+void assertEquivalent(const char* queryStr,
+ const MatchExpression* expected,
+ const MatchExpression* actual) {
+ if (actual->equivalent(expected)) {
+ return;
}
+ str::stream ss;
+ ss << "Match expressions are not equivalent."
+ << "\nOriginal query: " << queryStr << "\nExpected: " << expected->toString()
+ << "\nActual: " << actual->toString();
+ FAIL(ss);
+}
- /**
- * Test functions for shouldCacheQuery
- * Use these functions to assert which categories
- * of canonicalized queries are suitable for inclusion
- * in the planner cache.
- */
- void assertShouldCacheQuery(const CanonicalQuery& query) {
- if (PlanCache::shouldCacheQuery(query)) {
- return;
- }
- str::stream ss;
- ss << "Canonical query should be cacheable: " << query.toString();
- FAIL(ss);
- }
+//
+// Tests for CachedSolution
+//
- void assertShouldNotCacheQuery(const CanonicalQuery& query) {
- if (!PlanCache::shouldCacheQuery(query)) {
- return;
- }
- str::stream ss;
- ss << "Canonical query should not be cacheable: " << query.toString();
- FAIL(ss);
- }
-
- void assertShouldNotCacheQuery(const BSONObj& query) {
- unique_ptr<CanonicalQuery> cq(canonicalize(query));
- assertShouldNotCacheQuery(*cq);
+/**
+ * Generator for vector of QuerySolution shared pointers.
+ */
+struct GenerateQuerySolution {
+ QuerySolution* operator()() const {
+ unique_ptr<QuerySolution> qs(new QuerySolution());
+ qs->cacheData.reset(new SolutionCacheData());
+ qs->cacheData->solnType = SolutionCacheData::COLLSCAN_SOLN;
+ qs->cacheData->tree.reset(new PlanCacheIndexTree());
+ return qs.release();
}
+};
- void assertShouldNotCacheQuery(const char* queryStr) {
- unique_ptr<CanonicalQuery> cq(canonicalize(queryStr));
- assertShouldNotCacheQuery(*cq);
+/**
+ * Utility function to create a PlanRankingDecision
+ */
+PlanRankingDecision* createDecision(size_t numPlans) {
+ unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
+ for (size_t i = 0; i < numPlans; ++i) {
+ CommonStats common("COLLSCAN");
+ unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
+ stats->specific.reset(new CollectionScanStats());
+ why->stats.mutableVector().push_back(stats.release());
+ why->scores.push_back(0U);
+ why->candidateOrder.push_back(i);
}
+ return why.release();
+}
- /**
- * Cacheable queries
- * These queries will be added to the cache with run-time statistics
- * and can be managed with the cache DB commands.
- */
-
- TEST(PlanCacheTest, ShouldCacheQueryBasic) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
- assertShouldCacheQuery(*cq);
+/**
+ * Test functions for shouldCacheQuery
+ * Use these functions to assert which categories
+ * of canonicalized queries are suitable for inclusion
+ * in the planner cache.
+ */
+void assertShouldCacheQuery(const CanonicalQuery& query) {
+ if (PlanCache::shouldCacheQuery(query)) {
+ return;
}
-
- TEST(PlanCacheTest, ShouldCacheQuerySort) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{}", "{a: -1}", "{_id: 0, a: 1}"));
- assertShouldCacheQuery(*cq);
+ str::stream ss;
+ ss << "Canonical query should be cacheable: " << query.toString();
+ FAIL(ss);
+}
+
+void assertShouldNotCacheQuery(const CanonicalQuery& query) {
+ if (!PlanCache::shouldCacheQuery(query)) {
+ return;
}
+ str::stream ss;
+ ss << "Canonical query should not be cacheable: " << query.toString();
+ FAIL(ss);
+}
- /*
- * Non-cacheable queries.
- * These queries will be sent through the planning process everytime.
- */
-
- /**
- * Collection scan
- * This should normally be handled by the IDHack runner.
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryCollectionScan) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{}"));
- assertShouldNotCacheQuery(*cq);
- }
+void assertShouldNotCacheQuery(const BSONObj& query) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(query));
+ assertShouldNotCacheQuery(*cq);
+}
- /**
- * Hint
- * A hinted query implies strong user preference for a particular index.
- * Therefore, not much point in caching.
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryWithHint) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{a: 1, b: 1}",
- "{}", "{}"));
- assertShouldNotCacheQuery(*cq);
- }
+void assertShouldNotCacheQuery(const char* queryStr) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(queryStr));
+ assertShouldNotCacheQuery(*cq);
+}
- /**
- * Min queries are a specialized case of hinted queries
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryWithMin) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{}",
- "{a: 100}", "{}"));
- assertShouldNotCacheQuery(*cq);
- }
+/**
+ * Cacheable queries
+ * These queries will be added to the cache with run-time statistics
+ * and can be managed with the cache DB commands.
+ */
- /**
- * Max queries are non-cacheable for the same reasons as min queries.
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryWithMax) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{}",
- "{}", "{a: 100}"));
- assertShouldNotCacheQuery(*cq);
- }
+TEST(PlanCacheTest, ShouldCacheQueryBasic) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ assertShouldCacheQuery(*cq);
+}
- /**
- * $geoWithin queries with legacy coordinates are cacheable as long as
- * the planner is able to come up with a cacheable solution.
- */
- TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyCoordinates) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: {$geoWithin: "
- "{$box: [[-180, -90], [180, 90]]}}}"));
- assertShouldCacheQuery(*cq);
- }
+TEST(PlanCacheTest, ShouldCacheQuerySort) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{}", "{a: -1}", "{_id: 0, a: 1}"));
+ assertShouldCacheQuery(*cq);
+}
- /**
- * $geoWithin queries with GeoJSON coordinates are supported by the index bounds builder.
- */
- TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinJSONCoordinates) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: {$geoWithin: "
- "{$geometry: {type: 'Polygon', coordinates: "
- "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
- assertShouldCacheQuery(*cq);
- }
+/*
+ * Non-cacheable queries.
+ * These queries will be sent through the planning process everytime.
+ */
- /**
- * $geoWithin queries with both legacy and GeoJSON coordinates are cacheable.
- */
- TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyAndJSONCoordinates) {
- unique_ptr<CanonicalQuery> cq(canonicalize(
- "{$or: [{a: {$geoWithin: {$geometry: {type: 'Polygon', "
- "coordinates: [[[0, 0], [0, 90], "
- "[90, 0], [0, 0]]]}}}},"
- "{a: {$geoWithin: {$box: [[-180, -90], [180, 90]]}}}]}"));
- assertShouldCacheQuery(*cq);
- }
+/**
+ * Collection scan
+ * This should normally be handled by the IDHack runner.
+ */
+TEST(PlanCacheTest, ShouldNotCacheQueryCollectionScan) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{}"));
+ assertShouldNotCacheQuery(*cq);
+}
- /**
- * $geoIntersects queries are always cacheable because they support GeoJSON coordinates only.
- */
- TEST(PlanCacheTest, ShouldCacheQueryWithGeoIntersects) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: {$geoIntersects: "
- "{$geometry: {type: 'Point', coordinates: "
- "[10.0, 10.0]}}}}"));
- assertShouldCacheQuery(*cq);
- }
+/**
+ * Hint
+ * A hinted query implies strong user preference for a particular index.
+ * Therefore, not much point in caching.
+ */
+TEST(PlanCacheTest, ShouldNotCacheQueryWithHint) {
+ unique_ptr<CanonicalQuery> cq(
+ canonicalize("{a: 1}", "{}", "{}", 0, 0, "{a: 1, b: 1}", "{}", "{}"));
+ assertShouldNotCacheQuery(*cq);
+}
- /**
- * $geoNear queries are cacheable because we are able to distinguish
- * between flat and spherical queries.
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryWithGeoNear) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: {$geoNear: {$geometry: {type: 'Point',"
- "coordinates: [0,0]}, $maxDistance:100}}}"));
- assertShouldCacheQuery(*cq);
- }
+/**
+ * Min queries are a specialized case of hinted queries
+ */
+TEST(PlanCacheTest, ShouldNotCacheQueryWithMin) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{}", "{a: 100}", "{}"));
+ assertShouldNotCacheQuery(*cq);
+}
- /**
- * Explain queries are not-cacheable because of allPlans cannot
- * be accurately generated from stale cached stats in the plan cache for
- * non-winning plans.
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryExplain) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{}",
- "{}", "{}", // min, max
- false, // snapshot
- true // explain
- ));
- const LiteParsedQuery& pq = cq->getParsed();
- ASSERT_TRUE(pq.isExplain());
- assertShouldNotCacheQuery(*cq);
- }
+/**
+ * Max queries are non-cacheable for the same reasons as min queries.
+ */
+TEST(PlanCacheTest, ShouldNotCacheQueryWithMax) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{}", "{}", "{a: 100}"));
+ assertShouldNotCacheQuery(*cq);
+}
- // Adding an empty vector of query solutions should fail.
- TEST(PlanCacheTest, AddEmptySolutions) {
- PlanCache planCache;
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
- std::vector<QuerySolution*> solns;
- std::unique_ptr<PlanRankingDecision> decision(createDecision(1U));
- ASSERT_NOT_OK(planCache.add(*cq, solns, decision.get()));
- }
+/**
+ * $geoWithin queries with legacy coordinates are cacheable as long as
+ * the planner is able to come up with a cacheable solution.
+ */
+TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyCoordinates) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(
+ "{a: {$geoWithin: "
+ "{$box: [[-180, -90], [180, 90]]}}}"));
+ assertShouldCacheQuery(*cq);
+}
- TEST(PlanCacheTest, AddValidSolution) {
- PlanCache planCache;
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
- QuerySolution qs;
- qs.cacheData.reset(new SolutionCacheData());
- qs.cacheData->tree.reset(new PlanCacheIndexTree());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
+/**
+ * $geoWithin queries with GeoJSON coordinates are supported by the index bounds builder.
+ */
+TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinJSONCoordinates) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(
+ "{a: {$geoWithin: "
+ "{$geometry: {type: 'Polygon', coordinates: "
+ "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
+ assertShouldCacheQuery(*cq);
+}
- // Check if key is in cache before and after add().
- ASSERT_FALSE(planCache.contains(*cq));
- ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
+/**
+ * $geoWithin queries with both legacy and GeoJSON coordinates are cacheable.
+ */
+TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyAndJSONCoordinates) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(
+ "{$or: [{a: {$geoWithin: {$geometry: {type: 'Polygon', "
+ "coordinates: [[[0, 0], [0, 90], "
+ "[90, 0], [0, 0]]]}}}},"
+ "{a: {$geoWithin: {$box: [[-180, -90], [180, 90]]}}}]}"));
+ assertShouldCacheQuery(*cq);
+}
- ASSERT_TRUE(planCache.contains(*cq));
- ASSERT_EQUALS(planCache.size(), 1U);
- }
+/**
+ * $geoIntersects queries are always cacheable because they support GeoJSON coordinates only.
+ */
+TEST(PlanCacheTest, ShouldCacheQueryWithGeoIntersects) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(
+ "{a: {$geoIntersects: "
+ "{$geometry: {type: 'Point', coordinates: "
+ "[10.0, 10.0]}}}}"));
+ assertShouldCacheQuery(*cq);
+}
- TEST(PlanCacheTest, NotifyOfWriteOp) {
- PlanCache planCache;
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
- QuerySolution qs;
- qs.cacheData.reset(new SolutionCacheData());
- qs.cacheData->tree.reset(new PlanCacheIndexTree());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
- ASSERT_EQUALS(planCache.size(), 1U);
-
- // First (N - 1) write ops should have no effect on cache contents.
- for (int i = 0; i < (internalQueryCacheWriteOpsBetweenFlush - 1); ++i) {
- planCache.notifyOfWriteOp();
- }
- ASSERT_EQUALS(planCache.size(), 1U);
+/**
+ * $geoNear queries are cacheable because we are able to distinguish
+ * between flat and spherical queries.
+ */
+TEST(PlanCacheTest, ShouldNotCacheQueryWithGeoNear) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(
+ "{a: {$geoNear: {$geometry: {type: 'Point',"
+ "coordinates: [0,0]}, $maxDistance:100}}}"));
+ assertShouldCacheQuery(*cq);
+}
- // N-th notification will cause cache to be cleared.
- planCache.notifyOfWriteOp();
- ASSERT_EQUALS(planCache.size(), 0U);
-
- // Clearing the cache should reset the internal write
- // operation counter.
- // Repopulate cache. Write (N - 1) times.
- // Clear cache.
- // Add cache entry again.
- // After clearing and adding a new entry, the next write operation should not
- // clear the cache.
- ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
- for (int i = 0; i < (internalQueryCacheWriteOpsBetweenFlush - 1); ++i) {
- planCache.notifyOfWriteOp();
- }
- ASSERT_EQUALS(planCache.size(), 1U);
- planCache.clear();
- ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
- // Notification after clearing will not flush cache.
+/**
+ * Explain queries are not-cacheable because of allPlans cannot
+ * be accurately generated from stale cached stats in the plan cache for
+ * non-winning plans.
+ */
+TEST(PlanCacheTest, ShouldNotCacheQueryExplain) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}",
+ "{}",
+ "{}",
+ 0,
+ 0,
+ "{}",
+ "{}",
+ "{}", // min, max
+ false, // snapshot
+ true // explain
+ ));
+ const LiteParsedQuery& pq = cq->getParsed();
+ ASSERT_TRUE(pq.isExplain());
+ assertShouldNotCacheQuery(*cq);
+}
+
+// Adding an empty vector of query solutions should fail.
+TEST(PlanCacheTest, AddEmptySolutions) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ std::vector<QuerySolution*> solns;
+ std::unique_ptr<PlanRankingDecision> decision(createDecision(1U));
+ ASSERT_NOT_OK(planCache.add(*cq, solns, decision.get()));
+}
+
+TEST(PlanCacheTest, AddValidSolution) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ QuerySolution qs;
+ qs.cacheData.reset(new SolutionCacheData());
+ qs.cacheData->tree.reset(new PlanCacheIndexTree());
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+
+ // Check if key is in cache before and after add().
+ ASSERT_FALSE(planCache.contains(*cq));
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
+
+ ASSERT_TRUE(planCache.contains(*cq));
+ ASSERT_EQUALS(planCache.size(), 1U);
+}
+
+TEST(PlanCacheTest, NotifyOfWriteOp) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ QuerySolution qs;
+ qs.cacheData.reset(new SolutionCacheData());
+ qs.cacheData->tree.reset(new PlanCacheIndexTree());
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
+ ASSERT_EQUALS(planCache.size(), 1U);
+
+ // First (N - 1) write ops should have no effect on cache contents.
+ for (int i = 0; i < (internalQueryCacheWriteOpsBetweenFlush - 1); ++i) {
planCache.notifyOfWriteOp();
- ASSERT_EQUALS(planCache.size(), 1U);
}
-
- /**
- * Each test in the CachePlanSelectionTest suite goes through
- * the following flow:
- *
- * 1) Run QueryPlanner::plan on the query, with specified indices
- * available. This simulates the case in which we failed to plan from
- * the plan cache, and fell back on selecting a plan ourselves. The
- * enumerator will run, and cache data will be stashed into each solution
- * that it generates.
- *
- * 2) Use firstMatchingSolution to select one of the solutions generated
- * by QueryPlanner::plan. This simulates the multi plan runner picking
- * the "best solution".
- *
- * 3) The cache data stashed inside the "best solution" is used to
- * make a CachedSolution which looks exactly like the data structure that
- * would be returned from the cache. This simulates a plan cache hit.
- *
- * 4) Call QueryPlanner::planFromCache, passing it the CachedSolution.
- * This exercises the code which is able to map from a CachedSolution to
- * a full-blown QuerySolution. Finally, assert that the query solution
- * recovered from the cache is identical to the original "best solution".
- */
- class CachePlanSelectionTest : public mongo::unittest::Test {
- protected:
- void setUp() {
- cq = NULL;
- params.options = QueryPlannerParams::INCLUDE_COLLSCAN;
- addIndex(BSON("_id" << 1));
- }
-
- void tearDown() {
- delete cq;
-
- for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
- delete *it;
- }
- }
-
- void addIndex(BSONObj keyPattern, bool multikey = false) {
- // The first false means not multikey.
- // The second false means not sparse.
- // The third arg is the index name and I am egotistical.
- // The NULL means no filter expression.
- params.indices.push_back(IndexEntry(keyPattern,
- multikey,
- false,
- false,
- "hari_king_of_the_stove",
- NULL,
- BSONObj()));
- }
-
- void addIndex(BSONObj keyPattern, bool multikey, bool sparse) {
- params.indices.push_back(IndexEntry(keyPattern,
- multikey,
- sparse,
- false,
- "note_to_self_dont_break_build",
- NULL,
- BSONObj()));
- }
-
- //
- // Execute planner.
- //
-
- void runQuery(BSONObj query) {
- runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), 0, 0);
- }
-
- void runQuerySortProj(const BSONObj& query, const BSONObj& sort, const BSONObj& proj) {
- runQuerySortProjSkipLimit(query, sort, proj, 0, 0);
- }
-
- void runQuerySkipLimit(const BSONObj& query, long long skip, long long limit) {
- runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), skip, limit);
- }
-
- void runQueryHint(const BSONObj& query, const BSONObj& hint) {
- runQuerySortProjSkipLimitHint(query, BSONObj(), BSONObj(), 0, 0, hint);
- }
-
- void runQuerySortProjSkipLimit(const BSONObj& query,
- const BSONObj& sort, const BSONObj& proj,
- long long skip, long long limit) {
- runQuerySortProjSkipLimitHint(query, sort, proj, skip, limit, BSONObj());
- }
-
- void runQuerySortHint(const BSONObj& query, const BSONObj& sort, const BSONObj& hint) {
- runQuerySortProjSkipLimitHint(query, sort, BSONObj(), 0, 0, hint);
- }
-
- void runQueryHintMinMax(const BSONObj& query, const BSONObj& hint,
- const BSONObj& minObj, const BSONObj& maxObj) {
-
- runQueryFull(query, BSONObj(), BSONObj(), 0, 0, hint, minObj, maxObj, false);
- }
-
- void runQuerySortProjSkipLimitHint(const BSONObj& query,
- const BSONObj& sort, const BSONObj& proj,
- long long skip, long long limit,
- const BSONObj& hint) {
- runQueryFull(query, sort, proj, skip, limit, hint, BSONObj(), BSONObj(), false);
- }
-
- void runQuerySnapshot(const BSONObj& query) {
- runQueryFull(query, BSONObj(), BSONObj(), 0, 0, BSONObj(), BSONObj(),
- BSONObj(), true);
- }
-
- void runQueryFull(const BSONObj& query,
- const BSONObj& sort, const BSONObj& proj,
- long long skip, long long limit,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot) {
-
- // Clean up any previous state from a call to runQueryFull
- delete cq;
- cq = NULL;
-
- for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
- delete *it;
- }
-
- solns.clear();
-
-
- Status s = CanonicalQuery::canonicalize(ns, query, sort, proj, skip, limit, hint,
- minObj, maxObj, snapshot,
- false, // explain
- &cq);
- if (!s.isOK()) { cq = NULL; }
- ASSERT_OK(s);
- s = QueryPlanner::plan(*cq, params, &solns);
- ASSERT_OK(s);
- }
-
- //
- // Solution introspection.
- //
-
- void dumpSolutions(str::stream& ost) const {
- for (vector<QuerySolution*>::const_iterator it = solns.begin();
- it != solns.end();
- ++it) {
- ost << (*it)->toString() << '\n';
- }
- }
-
- /**
- * Returns number of generated solutions matching JSON.
- */
- size_t numSolutionMatches(const string& solnJson) const {
- BSONObj testSoln = fromjson(solnJson);
- size_t matches = 0;
- for (vector<QuerySolution*>::const_iterator it = solns.begin();
- it != solns.end();
- ++it) {
- QuerySolutionNode* root = (*it)->root.get();
- if (QueryPlannerTestLib::solutionMatches(testSoln, root)) {
- ++matches;
- }
- }
- return matches;
- }
-
- /**
- * Verifies that the solution tree represented in json by 'solnJson' is
- * one of the solutions generated by QueryPlanner.
- *
- * The number of expected matches, 'numMatches', could be greater than
- * 1 if solutions differ only by the pattern of index tags on a filter.
- */
- void assertSolutionExists(const string& solnJson, size_t numMatches = 1) const {
- size_t matches = numSolutionMatches(solnJson);
- if (numMatches == matches) {
- return;
- }
- str::stream ss;
- ss << "expected " << numMatches << " matches for solution " << solnJson
- << " but got " << matches
- << " instead. all solutions generated: " << '\n';
- dumpSolutions(ss);
- FAIL(ss);
- }
-
- /**
- * Plan 'query' from the cache. A mock cache entry is created using
- * the cacheData stored inside the QuerySolution 'soln'.
- *
- * Does not take ownership of 'soln'.
- */
- QuerySolution* planQueryFromCache(const BSONObj& query, const QuerySolution& soln) const {
- return planQueryFromCache(query, BSONObj(), BSONObj(), soln);
- }
-
- /**
- * Plan 'query' from the cache with sort order 'sort' and
- * projection 'proj'. A mock cache entry is created using
- * the cacheData stored inside the QuerySolution 'soln'.
- *
- * Does not take ownership of 'soln'.
- */
- QuerySolution* planQueryFromCache(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- const QuerySolution& soln) const {
- CanonicalQuery* cq;
- Status s = CanonicalQuery::canonicalize(ns, query, sort, proj, &cq);
- ASSERT_OK(s);
- unique_ptr<CanonicalQuery> scopedCq(cq);
- cq = NULL;
-
- // Create a CachedSolution the long way..
- // QuerySolution -> PlanCacheEntry -> CachedSolution
- QuerySolution qs;
- qs.cacheData.reset(soln.cacheData->clone());
- std::vector<QuerySolution*> solutions;
- solutions.push_back(&qs);
- PlanCacheEntry entry(solutions, createDecision(1U));
- CachedSolution cachedSoln(ck, entry);
-
- QuerySolution *out;
- s = QueryPlanner::planFromCache(*scopedCq.get(), params, cachedSoln, &out);
- ASSERT_OK(s);
-
- return out;
- }
-
- /**
- * @param solnJson -- a json representation of a query solution.
- *
- * Returns the first solution matching 'solnJson', or fails if
- * no match is found.
- */
- QuerySolution* firstMatchingSolution(const string& solnJson) const {
- BSONObj testSoln = fromjson(solnJson);
- for (vector<QuerySolution*>::const_iterator it = solns.begin();
- it != solns.end();
- ++it) {
- QuerySolutionNode* root = (*it)->root.get();
- if (QueryPlannerTestLib::solutionMatches(testSoln, root)) {
- return *it;
- }
- }
-
- str::stream ss;
- ss << "Could not find a match for solution " << solnJson
- << " All solutions generated: " << '\n';
- dumpSolutions(ss);
- FAIL(ss);
-
- return NULL;
- }
-
- /**
- * Assert that the QuerySolution 'trueSoln' matches the JSON-based representation
- * of the solution in 'solnJson'.
- *
- * Relies on solutionMatches() -- see query_planner_test_lib.h
- */
- void assertSolutionMatches(QuerySolution* trueSoln, const string& solnJson) const {
- BSONObj testSoln = fromjson(solnJson);
- if (!QueryPlannerTestLib::solutionMatches(testSoln, trueSoln->root.get())) {
- str::stream ss;
- ss << "Expected solution " << solnJson << " did not match true solution: "
- << trueSoln->toString() << '\n';
- FAIL(ss);
- }
- }
-
- /**
- * Overloaded so that it is not necessary to specificy sort and project.
- */
- void assertPlanCacheRecoversSolution(const BSONObj& query, const string& solnJson) {
- assertPlanCacheRecoversSolution(query, BSONObj(), BSONObj(), solnJson);
- }
-
- /**
- * First, the solution matching 'solnJson' is retrieved from the vector
- * of solutions generated by QueryPlanner::plan. This solution is
- * then passed into planQueryFromCache(). Asserts that the solution
- * generated by QueryPlanner::planFromCache matches 'solnJson'.
- *
- * Must be called after calling one of the runQuery* methods.
- *
- * Together, 'query', 'sort', and 'proj' should specify the query which
- * was previously run using one of the runQuery* methods.
- */
- void assertPlanCacheRecoversSolution(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- const string& solnJson) {
- QuerySolution* bestSoln = firstMatchingSolution(solnJson);
- QuerySolution* planSoln = planQueryFromCache(query, sort, proj, *bestSoln);
- assertSolutionMatches(planSoln, solnJson);
- delete planSoln;
- }
-
- /**
- * Check that the solution will not be cached. The planner will store
- * cache data inside non-cachable solutions, but will not do so for
- * non-cachable solutions. Therefore, we just have to check that
- * cache data is NULL.
- */
- void assertNotCached(const string& solnJson) {
- QuerySolution* bestSoln = firstMatchingSolution(solnJson);
- ASSERT(NULL != bestSoln);
- ASSERT(NULL == bestSoln->cacheData.get());
- }
-
- static const PlanCacheKey ck;
-
- BSONObj queryObj;
- CanonicalQuery* cq;
- QueryPlannerParams params;
- vector<QuerySolution*> solns;
- };
-
- const PlanCacheKey CachePlanSelectionTest::ck = "mock_cache_key";
-
- //
- // Equality
- //
-
- TEST_F(CachePlanSelectionTest, EqualityIndexScan) {
- addIndex(BSON("x" << 1));
- runQuery(BSON("x" << 5));
-
- assertPlanCacheRecoversSolution(BSON("x" << 5),
- "{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(CachePlanSelectionTest, EqualityIndexScanWithTrailingFields) {
- addIndex(BSON("x" << 1 << "y" << 1));
- runQuery(BSON("x" << 5));
-
- assertPlanCacheRecoversSolution(BSON("x" << 5),
- "{fetch: {filter: null, node: {ixscan: {pattern: {x: 1, y: 1}}}}}");
+ ASSERT_EQUALS(planCache.size(), 1U);
+
+ // N-th notification will cause cache to be cleared.
+ planCache.notifyOfWriteOp();
+ ASSERT_EQUALS(planCache.size(), 0U);
+
+ // Clearing the cache should reset the internal write
+ // operation counter.
+ // Repopulate cache. Write (N - 1) times.
+ // Clear cache.
+ // Add cache entry again.
+ // After clearing and adding a new entry, the next write operation should not
+ // clear the cache.
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
+ for (int i = 0; i < (internalQueryCacheWriteOpsBetweenFlush - 1); ++i) {
+ planCache.notifyOfWriteOp();
}
+ ASSERT_EQUALS(planCache.size(), 1U);
+ planCache.clear();
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
+ // Notification after clearing will not flush cache.
+ planCache.notifyOfWriteOp();
+ ASSERT_EQUALS(planCache.size(), 1U);
+}
- //
- // Geo
- //
-
- TEST_F(CachePlanSelectionTest, Basic2DSphereNonNear) {
- addIndex(BSON("a" << "2dsphere"));
- BSONObj query;
-
- query = fromjson("{a: {$geoIntersects: {$geometry: {type: 'Point',"
- "coordinates: [10.0, 10.0]}}}}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
-
- query = fromjson("{a : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+/**
+ * Each test in the CachePlanSelectionTest suite goes through
+ * the following flow:
+ *
+ * 1) Run QueryPlanner::plan on the query, with specified indices
+ * available. This simulates the case in which we failed to plan from
+ * the plan cache, and fell back on selecting a plan ourselves. The
+ * enumerator will run, and cache data will be stashed into each solution
+ * that it generates.
+ *
+ * 2) Use firstMatchingSolution to select one of the solutions generated
+ * by QueryPlanner::plan. This simulates the multi plan runner picking
+ * the "best solution".
+ *
+ * 3) The cache data stashed inside the "best solution" is used to
+ * make a CachedSolution which looks exactly like the data structure that
+ * would be returned from the cache. This simulates a plan cache hit.
+ *
+ * 4) Call QueryPlanner::planFromCache, passing it the CachedSolution.
+ * This exercises the code which is able to map from a CachedSolution to
+ * a full-blown QuerySolution. Finally, assert that the query solution
+ * recovered from the cache is identical to the original "best solution".
+ */
+class CachePlanSelectionTest : public mongo::unittest::Test {
+protected:
+ void setUp() {
+ cq = NULL;
+ params.options = QueryPlannerParams::INCLUDE_COLLSCAN;
+ addIndex(BSON("_id" << 1));
}
- TEST_F(CachePlanSelectionTest, Basic2DSphereGeoNear) {
- addIndex(BSON("a" << "2dsphere"));
- BSONObj query;
+ void tearDown() {
+ delete cq;
- query = fromjson("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {a: '2dsphere'}}");
-
- query = fromjson("{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
- "$maxDistance:100}}}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {a: '2dsphere'}}");
- }
-
- TEST_F(CachePlanSelectionTest, Basic2DSphereGeoNearReverseCompound) {
- addIndex(BSON("x" << 1));
- addIndex(BSON("x" << 1 << "a" << "2dsphere"));
- BSONObj query = fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {x: 1, a: '2dsphere'}}");
+ for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
+ delete *it;
+ }
}
- TEST_F(CachePlanSelectionTest, TwoDSphereNoGeoPred) {
- addIndex(BSON("x" << 1 << "a" << "2dsphere"));
- runQuery(BSON("x" << 1));
- assertPlanCacheRecoversSolution(BSON("x" << 1),
- "{fetch: {node: {ixscan: {pattern: {x: 1, a: '2dsphere'}}}}}");
+ void addIndex(BSONObj keyPattern, bool multikey = false) {
+        // The first false means not sparse.
+        // The second false means not unique.
+ // The third arg is the index name and I am egotistical.
+ // The NULL means no filter expression.
+ params.indices.push_back(IndexEntry(
+ keyPattern, multikey, false, false, "hari_king_of_the_stove", NULL, BSONObj()));
}
- TEST_F(CachePlanSelectionTest, Or2DSphereNonNear) {
- addIndex(BSON("a" << "2dsphere"));
- addIndex(BSON("b" << "2dsphere"));
- BSONObj query = fromjson("{$or: [ {a: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [10.0, 10.0]}}}},"
- " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}");
-
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}},"
- "{fetch: {node: {ixscan: {pattern: {b: '2dsphere'}}}}}]}}");
+ void addIndex(BSONObj keyPattern, bool multikey, bool sparse) {
+ params.indices.push_back(IndexEntry(
+ keyPattern, multikey, sparse, false, "note_to_self_dont_break_build", NULL, BSONObj()));
}
//
- // tree operations
+ // Execute planner.
//
- TEST_F(CachePlanSelectionTest, TwoPredicatesAnding) {
- addIndex(BSON("x" << 1));
- BSONObj query = fromjson("{$and: [ {x: {$gt: 1}}, {x: {$lt: 3}} ] }");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {x: 1}}}}}");
+ void runQuery(BSONObj query) {
+ runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), 0, 0);
}
- TEST_F(CachePlanSelectionTest, SimpleOr) {
- addIndex(BSON("a" << 1));
- BSONObj query = fromjson("{$or: [{a: 20}, {a: 21}]}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {a:1}}}}}");
+ void runQuerySortProj(const BSONObj& query, const BSONObj& sort, const BSONObj& proj) {
+ runQuerySortProjSkipLimit(query, sort, proj, 0, 0);
}
- TEST_F(CachePlanSelectionTest, OrWithAndChild) {
- addIndex(BSON("a" << 1));
- BSONObj query = fromjson("{$or: [{a: 20}, {$and: [{a:1}, {b:7}]}]}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a: 1}}}, "
- "{fetch: {filter: {b: 7}, node: {ixscan: "
- "{filter: null, pattern: {a: 1}}}}}]}}}}");
+ void runQuerySkipLimit(const BSONObj& query, long long skip, long long limit) {
+ runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), skip, limit);
}
- TEST_F(CachePlanSelectionTest, AndWithUnindexedOrChild) {
- addIndex(BSON("a" << 1));
- BSONObj query = fromjson("{a:20, $or: [{b:1}, {c:7}]}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+ void runQueryHint(const BSONObj& query, const BSONObj& hint) {
+ runQuerySortProjSkipLimitHint(query, BSONObj(), BSONObj(), 0, 0, hint);
}
-
- TEST_F(CachePlanSelectionTest, AndWithOrWithOneIndex) {
- addIndex(BSON("b" << 1));
- addIndex(BSON("a" << 1));
- BSONObj query = fromjson("{$or: [{b:1}, {c:7}], a:20}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+ void runQuerySortProjSkipLimit(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit) {
+ runQuerySortProjSkipLimitHint(query, sort, proj, skip, limit, BSONObj());
}
- //
- // Sort orders
- //
-
- // SERVER-1205.
- TEST_F(CachePlanSelectionTest, MergeSort) {
- addIndex(BSON("a" << 1 << "c" << 1));
- addIndex(BSON("b" << 1 << "c" << 1));
-
- BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
- BSONObj sort = BSON("c" << 1);
- runQuerySortProj(query, sort, BSONObj());
-
- assertPlanCacheRecoversSolution(query, sort, BSONObj(),
- "{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a: 1, c: 1}}}, {ixscan: {pattern: {b: 1, c: 1}}}]}}}}");
+ void runQuerySortHint(const BSONObj& query, const BSONObj& sort, const BSONObj& hint) {
+ runQuerySortProjSkipLimitHint(query, sort, BSONObj(), 0, 0, hint);
}
- // SERVER-1205 as well.
- TEST_F(CachePlanSelectionTest, NoMergeSortIfNoSortWanted) {
- addIndex(BSON("a" << 1 << "c" << 1));
- addIndex(BSON("b" << 1 << "c" << 1));
-
- BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
- runQuerySortProj(query, BSONObj(), BSONObj());
-
- assertPlanCacheRecoversSolution(query, BSONObj(), BSONObj(),
- "{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a: 1, c: 1}}}, "
- "{ixscan: {filter: null, pattern: {b: 1, c: 1}}}]}}}}");
- }
-
- // Disabled: SERVER-10801.
- /*
- TEST_F(CachePlanSelectionTest, SortOnGeoQuery) {
- addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"));
- BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", "
- "coordinates: [[[1, 1], [1, 90], [180, 90], "
- "[180, 1], [1, 1]]]}}}}");
- BSONObj sort = fromjson("{timestamp: -1}");
- runQuerySortProj(query, sort, BSONObj());
-
- assertPlanCacheRecoversSolution(query, sort, BSONObj(),
- "{fetch: {node: {ixscan: {pattern: {timestamp: -1, position: '2dsphere'}}}}}");
- }
- */
-
- // SERVER-9257
- TEST_F(CachePlanSelectionTest, CompoundGeoNoGeoPredicate) {
- addIndex(BSON("creationDate" << 1 << "foo.bar" << "2dsphere"));
- BSONObj query = fromjson("{creationDate: {$gt: 7}}");
- BSONObj sort = fromjson("{creationDate: 1}");
- runQuerySortProj(query, sort, BSONObj());
-
- assertPlanCacheRecoversSolution(query, sort, BSONObj(),
- "{fetch: {node: {ixscan: {pattern: {creationDate: 1, 'foo.bar': '2dsphere'}}}}}");
- }
-
- TEST_F(CachePlanSelectionTest, ReverseScanForSort) {
- addIndex(BSON("_id" << 1));
- runQuerySortProj(BSONObj(), fromjson("{_id: -1}"), BSONObj());
- assertPlanCacheRecoversSolution(BSONObj(), fromjson("{_id: -1}"), BSONObj(),
- "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {_id: 1}}}}}");
+ void runQueryHintMinMax(const BSONObj& query,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj) {
+ runQueryFull(query, BSONObj(), BSONObj(), 0, 0, hint, minObj, maxObj, false);
}
- //
- // Caching collection scans.
- //
-
- TEST_F(CachePlanSelectionTest, CollscanNoUsefulIndices) {
- addIndex(BSON("a" << 1 << "b" << 1));
- addIndex(BSON("c" << 1));
- runQuery(BSON("b" << 4));
- assertPlanCacheRecoversSolution(BSON("b" << 4),
- "{cscan: {filter: {b: 4}, dir: 1}}");
+ void runQuerySortProjSkipLimitHint(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint) {
+ runQueryFull(query, sort, proj, skip, limit, hint, BSONObj(), BSONObj(), false);
}
- TEST_F(CachePlanSelectionTest, CollscanOrWithoutEnoughIndices) {
- addIndex(BSON("a" << 1));
- BSONObj query =fromjson("{$or: [{a: 20}, {b: 21}]}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{cscan: {filter: {$or:[{a:20},{b:21}]}, dir: 1}}");
+ void runQuerySnapshot(const BSONObj& query) {
+ runQueryFull(query, BSONObj(), BSONObj(), 0, 0, BSONObj(), BSONObj(), BSONObj(), true);
}
- TEST_F(CachePlanSelectionTest, CollscanMergeSort) {
- addIndex(BSON("a" << 1 << "c" << 1));
- addIndex(BSON("b" << 1 << "c" << 1));
-
- BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
- BSONObj sort = BSON("c" << 1);
- runQuerySortProj(query, sort, BSONObj());
+ void runQueryFull(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot) {
+ // Clean up any previous state from a call to runQueryFull
+ delete cq;
+ cq = NULL;
+
+ for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
+ delete *it;
+ }
- assertPlanCacheRecoversSolution(query, sort, BSONObj(),
- "{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ solns.clear();
+
+
+ Status s = CanonicalQuery::canonicalize(ns,
+ query,
+ sort,
+ proj,
+ skip,
+ limit,
+ hint,
+ minObj,
+ maxObj,
+ snapshot,
+ false, // explain
+ &cq);
+ if (!s.isOK()) {
+ cq = NULL;
+ }
+ ASSERT_OK(s);
+ s = QueryPlanner::plan(*cq, params, &solns);
+ ASSERT_OK(s);
}
//
- // Check queries that, at least for now, are not cached.
+ // Solution introspection.
//
- TEST_F(CachePlanSelectionTest, GeoNear2DNotCached) {
- addIndex(BSON("a" << "2d"));
- runQuery(fromjson("{a: {$near: [0,0], $maxDistance:0.3 }}"));
- assertNotCached("{geoNear2d: {a: '2d'}}");
- }
-
- TEST_F(CachePlanSelectionTest, MinNotCached) {
- addIndex(BSON("a" << 1));
- runQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), BSONObj());
- assertNotCached("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+ void dumpSolutions(str::stream& ost) const {
+ for (vector<QuerySolution*>::const_iterator it = solns.begin(); it != solns.end(); ++it) {
+ ost << (*it)->toString() << '\n';
+ }
}
- TEST_F(CachePlanSelectionTest, MaxNotCached) {
- addIndex(BSON("a" << 1));
- runQueryHintMinMax(BSONObj(), BSONObj(), BSONObj(), fromjson("{a: 1}"));
- assertNotCached("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+ /**
+ * Returns number of generated solutions matching JSON.
+ */
+ size_t numSolutionMatches(const string& solnJson) const {
+ BSONObj testSoln = fromjson(solnJson);
+ size_t matches = 0;
+ for (vector<QuerySolution*>::const_iterator it = solns.begin(); it != solns.end(); ++it) {
+ QuerySolutionNode* root = (*it)->root.get();
+ if (QueryPlannerTestLib::solutionMatches(testSoln, root)) {
+ ++matches;
+ }
+ }
+ return matches;
}
- TEST_F(CachePlanSelectionTest, NaturalHintNotCached) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuerySortHint(BSON("a" << 1), BSON("b" << 1), BSON("$natural" << 1));
- assertNotCached("{sort: {pattern: {b: 1}, limit: 0, node: "
- "{cscan: {filter: {a: 1}, dir: 1}}}}");
+ /**
+ * Verifies that the solution tree represented in json by 'solnJson' is
+ * one of the solutions generated by QueryPlanner.
+ *
+ * The number of expected matches, 'numMatches', could be greater than
+ * 1 if solutions differ only by the pattern of index tags on a filter.
+ */
+ void assertSolutionExists(const string& solnJson, size_t numMatches = 1) const {
+ size_t matches = numSolutionMatches(solnJson);
+ if (numMatches == matches) {
+ return;
+ }
+ str::stream ss;
+ ss << "expected " << numMatches << " matches for solution " << solnJson << " but got "
+ << matches << " instead. all solutions generated: " << '\n';
+ dumpSolutions(ss);
+ FAIL(ss);
}
- TEST_F(CachePlanSelectionTest, HintValidNotCached) {
- addIndex(BSON("a" << 1));
- runQueryHint(BSONObj(), fromjson("{a: 1}"));
- assertNotCached("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+ /**
+ * Plan 'query' from the cache. A mock cache entry is created using
+ * the cacheData stored inside the QuerySolution 'soln'.
+ *
+ * Does not take ownership of 'soln'.
+ */
+ QuerySolution* planQueryFromCache(const BSONObj& query, const QuerySolution& soln) const {
+ return planQueryFromCache(query, BSONObj(), BSONObj(), soln);
}
- //
- // Queries using '2d' indices are not cached.
- //
+ /**
+ * Plan 'query' from the cache with sort order 'sort' and
+ * projection 'proj'. A mock cache entry is created using
+ * the cacheData stored inside the QuerySolution 'soln'.
+ *
+ * Does not take ownership of 'soln'.
+ */
+ QuerySolution* planQueryFromCache(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ const QuerySolution& soln) const {
+ CanonicalQuery* cq;
+ Status s = CanonicalQuery::canonicalize(ns, query, sort, proj, &cq);
+ ASSERT_OK(s);
+ unique_ptr<CanonicalQuery> scopedCq(cq);
+ cq = NULL;
- TEST_F(CachePlanSelectionTest, Basic2DNonNearNotCached) {
- addIndex(BSON("a" << "2d"));
- BSONObj query;
-
- // Polygon
- query = fromjson("{a : { $within: { $polygon : [[0,0], [2,0], [4,0]] } }}");
- runQuery(query);
- assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
-
- // Center
- query = fromjson("{a : { $within : { $center : [[ 5, 5 ], 7 ] } }}");
- runQuery(query);
- assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
-
- // Centersphere
- query = fromjson("{a : { $within : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}");
- runQuery(query);
- assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
-
- // Within box.
- query = fromjson("{a : {$within: {$box : [[0,0],[9,9]]}}}");
- runQuery(query);
- assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
- }
+ // Create a CachedSolution the long way..
+ // QuerySolution -> PlanCacheEntry -> CachedSolution
+ QuerySolution qs;
+ qs.cacheData.reset(soln.cacheData->clone());
+ std::vector<QuerySolution*> solutions;
+ solutions.push_back(&qs);
+ PlanCacheEntry entry(solutions, createDecision(1U));
+ CachedSolution cachedSoln(ck, entry);
- TEST_F(CachePlanSelectionTest, Or2DNonNearNotCached) {
- addIndex(BSON("a" << "2d"));
- addIndex(BSON("b" << "2d"));
- BSONObj query = fromjson("{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {b : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}");
+ QuerySolution* out;
+ s = QueryPlanner::planFromCache(*scopedCq.get(), params, cachedSoln, &out);
+ ASSERT_OK(s);
- runQuery(query);
- assertNotCached("{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}},"
- "{fetch: {node: {ixscan: {pattern: {b: '2d'}}}}}]}}");
+ return out;
}
/**
- * Test functions for computeKey. Cache keys are intentionally obfuscated and are
- * meaningful only within the current lifetime of the server process. Users should treat plan
- * cache keys as opaque.
+ * @param solnJson -- a json representation of a query solution.
+ *
+ * Returns the first solution matching 'solnJson', or fails if
+ * no match is found.
*/
- void testComputeKey(const char* queryStr,
- const char* sortStr,
- const char* projStr,
- const char *expectedStr) {
- PlanCache planCache;
- unique_ptr<CanonicalQuery> cq(canonicalize(queryStr, sortStr, projStr));
- PlanCacheKey key = planCache.computeKey(*cq);
- PlanCacheKey expectedKey(expectedStr);
- if (key == expectedKey) {
- return;
+ QuerySolution* firstMatchingSolution(const string& solnJson) const {
+ BSONObj testSoln = fromjson(solnJson);
+ for (vector<QuerySolution*>::const_iterator it = solns.begin(); it != solns.end(); ++it) {
+ QuerySolutionNode* root = (*it)->root.get();
+ if (QueryPlannerTestLib::solutionMatches(testSoln, root)) {
+ return *it;
+ }
}
+
str::stream ss;
- ss << "Unexpected plan cache key. Expected: " << expectedKey << ". Actual: " << key
- << ". Query: " << cq->toString();
+ ss << "Could not find a match for solution " << solnJson
+ << " All solutions generated: " << '\n';
+ dumpSolutions(ss);
FAIL(ss);
- }
- TEST(PlanCacheTest, ComputeKey) {
- // Generated cache keys should be treated as opaque to the user.
-
- // No sorts
- testComputeKey("{}", "{}", "{}", "an");
- testComputeKey("{$or: [{a: 1}, {b: 2}]}", "{}", "{}", "or[eqa,eqb]");
- testComputeKey("{$or: [{a: 1}, {b: 1}, {c: 1}], d: 1}", "{}", "{}",
- "an[or[eqa,eqb,eqc],eqd]");
- testComputeKey("{$or: [{a: 1}, {b: 1}], c: 1, d: 1}", "{}", "{}",
- "an[or[eqa,eqb],eqc,eqd]");
- testComputeKey("{a: 1, b: 1, c: 1}", "{}", "{}", "an[eqa,eqb,eqc]");
- testComputeKey("{a: 1, beqc: 1}", "{}", "{}", "an[eqa,eqbeqc]");
- testComputeKey("{ap1a: 1}", "{}", "{}", "eqap1a");
- testComputeKey("{aab: 1}", "{}", "{}", "eqaab");
-
- // With sort
- testComputeKey("{}", "{a: 1}", "{}", "an~aa");
- testComputeKey("{}", "{a: -1}", "{}", "an~da");
- testComputeKey("{}", "{a: {$meta: 'textScore'}}", "{a: {$meta: 'textScore'}}",
- "an~ta|{ $meta: \"textScore\" }a");
- testComputeKey("{a: 1}", "{b: 1}", "{}", "eqa~ab");
-
- // With projection
- testComputeKey("{}", "{}", "{a: 1}", "an|ia");
- testComputeKey("{}", "{}", "{a: -1}", "an|ia");
- testComputeKey("{}", "{}", "{a: -1.0}", "an|ia");
- testComputeKey("{}", "{}", "{a: true}", "an|ia");
- testComputeKey("{}", "{}", "{a: 0}", "an|ea");
- testComputeKey("{}", "{}", "{a: false}", "an|ea");
- testComputeKey("{}", "{}", "{a: 99}", "an|ia");
- testComputeKey("{}", "{}", "{a: 'foo'}", "an|ia");
- testComputeKey("{}", "{}", "{a: {$slice: [3, 5]}}", "an|{ $slice: \\[ 3\\, 5 \\] }a");
- testComputeKey("{}", "{}", "{a: {$elemMatch: {x: 2}}}",
- "an|{ $elemMatch: { x: 2 } }a");
- testComputeKey("{}", "{}", "{a: ObjectId('507f191e810c19729de860ea')}",
- "an|ia");
- testComputeKey("{a: 1}", "{}", "{'a.$': 1}", "eqa|ia.$");
- testComputeKey("{a: 1}", "{}", "{a: 1}", "eqa|ia");
-
- // Projection should be order-insensitive
- testComputeKey("{}", "{}", "{a: 1, b: 1}", "an|iaib");
- testComputeKey("{}", "{}", "{b: 1, a: 1}", "an|iaib");
-
- // With or-elimination and projection
- testComputeKey("{$or: [{a: 1}]}", "{}", "{_id: 0, a: 1}", "eqa|e_idia");
- testComputeKey("{$or: [{a: 1}]}", "{}", "{'a.$': 1}", "eqa|ia.$");
+ return NULL;
}
- // Delimiters found in user field names or non-standard projection field values
- // must be escaped.
- TEST(PlanCacheTest, ComputeKeyEscaped) {
- // Field name in query.
- testComputeKey("{'a,[]~|<>': 1}", "{}", "{}", "eqa\\,\\[\\]\\~\\|\\<\\>");
-
- // Field name in sort.
- testComputeKey("{}", "{'a,[]~|<>': 1}", "{}", "an~aa\\,\\[\\]\\~\\|\\<\\>");
-
- // Field name in projection.
- testComputeKey("{}", "{}", "{'a,[]~|<>': 1}", "an|ia\\,\\[\\]\\~\\|\\<\\>");
-
- // Value in projection.
- testComputeKey("{}", "{}", "{a: 'foo,[]~|<>'}", "an|ia");
+ /**
+ * Assert that the QuerySolution 'trueSoln' matches the JSON-based representation
+ * of the solution in 'solnJson'.
+ *
+ * Relies on solutionMatches() -- see query_planner_test_lib.h
+ */
+ void assertSolutionMatches(QuerySolution* trueSoln, const string& solnJson) const {
+ BSONObj testSoln = fromjson(solnJson);
+ if (!QueryPlannerTestLib::solutionMatches(testSoln, trueSoln->root.get())) {
+ str::stream ss;
+ ss << "Expected solution " << solnJson
+ << " did not match true solution: " << trueSoln->toString() << '\n';
+ FAIL(ss);
+ }
}
- // Cache keys for $geoWithin queries with legacy and GeoJSON coordinates should
- // not be the same.
- TEST(PlanCacheTest, ComputeKeyGeoWithin) {
- PlanCache planCache;
-
- // Legacy coordinates.
- unique_ptr<CanonicalQuery> cqLegacy(canonicalize("{a: {$geoWithin: "
- "{$box: [[-180, -90], [180, 90]]}}}"));
- // GeoJSON coordinates.
- unique_ptr<CanonicalQuery> cqNew(canonicalize("{a: {$geoWithin: "
- "{$geometry: {type: 'Polygon', coordinates: "
- "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
- ASSERT_NOT_EQUALS(planCache.computeKey(*cqLegacy),
- planCache.computeKey(*cqNew));
+ /**
+     * Overloaded so that it is not necessary to specify sort and project.
+ */
+ void assertPlanCacheRecoversSolution(const BSONObj& query, const string& solnJson) {
+ assertPlanCacheRecoversSolution(query, BSONObj(), BSONObj(), solnJson);
}
- // GEO_NEAR cache keys should include information on geometry and CRS in addition
- // to the match type and field name.
- TEST(PlanCacheTest, ComputeKeyGeoNear) {
- testComputeKey("{a: {$near: [0,0], $maxDistance:0.3 }}", "{}", "{}", "gnanrfl");
- testComputeKey("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}", "{}", "{}", "gnanssp");
- testComputeKey("{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
- "$maxDistance:100}}}", "{}", "{}", "gnanrsp");
+ /**
+ * First, the solution matching 'solnJson' is retrieved from the vector
+ * of solutions generated by QueryPlanner::plan. This solution is
+ * then passed into planQueryFromCache(). Asserts that the solution
+ * generated by QueryPlanner::planFromCache matches 'solnJson'.
+ *
+ * Must be called after calling one of the runQuery* methods.
+ *
+ * Together, 'query', 'sort', and 'proj' should specify the query which
+ * was previously run using one of the runQuery* methods.
+ */
+ void assertPlanCacheRecoversSolution(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ const string& solnJson) {
+ QuerySolution* bestSoln = firstMatchingSolution(solnJson);
+ QuerySolution* planSoln = planQueryFromCache(query, sort, proj, *bestSoln);
+ assertSolutionMatches(planSoln, solnJson);
+ delete planSoln;
}
- // When a sparse index is present, computeKey() should generate different keys depending on
- // whether or not the predicates in the given query can use the index.
- TEST(PlanCacheTest, ComputeKeySparseIndex) {
- PlanCache planCache;
- planCache.notifyOfIndexEntries({IndexEntry(BSON("a" << 1),
- false, // multikey
- true, // sparse
- false, // unique
- "", // name
- nullptr, // filterExpr
- BSONObj())});
-
- unique_ptr<CanonicalQuery> cqEqNumber(canonicalize("{a: 0}}"));
- unique_ptr<CanonicalQuery> cqEqString(canonicalize("{a: 'x'}}"));
- unique_ptr<CanonicalQuery> cqEqNull(canonicalize("{a: null}}"));
-
- // 'cqEqNumber' and 'cqEqString' get the same key, since both are compatible with this
- // index.
- ASSERT_EQ(planCache.computeKey(*cqEqNumber), planCache.computeKey(*cqEqString));
-
- // 'cqEqNull' gets a different key, since it is not compatible with this index.
- ASSERT_NOT_EQUALS(planCache.computeKey(*cqEqNull), planCache.computeKey(*cqEqNumber));
+ /**
+ * Check that the solution will not be cached. The planner will store
+     * cache data inside cachable solutions, but will not do so for
+     * non-cachable solutions. Therefore, we just have to check that
+ * cache data is NULL.
+ */
+ void assertNotCached(const string& solnJson) {
+ QuerySolution* bestSoln = firstMatchingSolution(solnJson);
+ ASSERT(NULL != bestSoln);
+ ASSERT(NULL == bestSoln->cacheData.get());
}
- // When a partial index is present, computeKey() should generate different keys depending on
- // whether or not the predicates in the given query "match" the predicates in the partial index
- // filter.
- TEST(PlanCacheTest, ComputeKeyPartialIndex) {
- BSONObj filterObj = BSON("f" << BSON("$gt" << 0));
- unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
-
- PlanCache planCache;
- planCache.notifyOfIndexEntries({IndexEntry(BSON("a" << 1),
- false, // multikey
- false, // sparse
- false, // unique
- "", // name
- filterExpr.get(),
- BSONObj())});
-
- unique_ptr<CanonicalQuery> cqGtNegativeFive(canonicalize("{f: {$gt: -5}}"));
- unique_ptr<CanonicalQuery> cqGtZero(canonicalize("{f: {$gt: 0}}"));
- unique_ptr<CanonicalQuery> cqGtFive(canonicalize("{f: {$gt: 5}}"));
-
- // 'cqGtZero' and 'cqGtFive' get the same key, since both are compatible with this index.
- ASSERT_EQ(planCache.computeKey(*cqGtZero), planCache.computeKey(*cqGtFive));
-
- // 'cqGtNegativeFive' gets a different key, since it is not compatible with this index.
- ASSERT_NOT_EQUALS(planCache.computeKey(*cqGtNegativeFive), planCache.computeKey(*cqGtZero));
+ static const PlanCacheKey ck;
+
+ BSONObj queryObj;
+ CanonicalQuery* cq;
+ QueryPlannerParams params;
+ vector<QuerySolution*> solns;
+};
+
+const PlanCacheKey CachePlanSelectionTest::ck = "mock_cache_key";
+
+//
+// Equality
+//
+
+TEST_F(CachePlanSelectionTest, EqualityIndexScan) {
+ addIndex(BSON("x" << 1));
+ runQuery(BSON("x" << 5));
+
+ assertPlanCacheRecoversSolution(BSON("x" << 5),
+ "{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, EqualityIndexScanWithTrailingFields) {
+ addIndex(BSON("x" << 1 << "y" << 1));
+ runQuery(BSON("x" << 5));
+
+ assertPlanCacheRecoversSolution(
+ BSON("x" << 5), "{fetch: {filter: null, node: {ixscan: {pattern: {x: 1, y: 1}}}}}");
+}
+
+//
+// Geo
+//
+
+TEST_F(CachePlanSelectionTest, Basic2DSphereNonNear) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ BSONObj query;
+
+ query = fromjson(
+ "{a: {$geoIntersects: {$geometry: {type: 'Point',"
+ "coordinates: [10.0, 10.0]}}}}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+
+ query = fromjson("{a : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, Basic2DSphereGeoNear) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ BSONObj query;
+
+ query = fromjson("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {a: '2dsphere'}}");
+
+ query = fromjson(
+ "{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
+ "$maxDistance:100}}}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {a: '2dsphere'}}");
+}
+
+TEST_F(CachePlanSelectionTest, Basic2DSphereGeoNearReverseCompound) {
+ addIndex(BSON("x" << 1));
+ addIndex(BSON("x" << 1 << "a"
+ << "2dsphere"));
+ BSONObj query = fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {x: 1, a: '2dsphere'}}");
+}
+
+TEST_F(CachePlanSelectionTest, TwoDSphereNoGeoPred) {
+ addIndex(BSON("x" << 1 << "a"
+ << "2dsphere"));
+ runQuery(BSON("x" << 1));
+ assertPlanCacheRecoversSolution(BSON("x" << 1),
+ "{fetch: {node: {ixscan: {pattern: {x: 1, a: '2dsphere'}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, Or2DSphereNonNear) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ addIndex(BSON("b"
+ << "2dsphere"));
+ BSONObj query = fromjson(
+ "{$or: [ {a: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [10.0, 10.0]}}}},"
+ " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}");
+
+ runQuery(query);
+ assertPlanCacheRecoversSolution(
+ query,
+ "{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {b: '2dsphere'}}}}}]}}");
+}
+
+//
+// tree operations
+//
+
+TEST_F(CachePlanSelectionTest, TwoPredicatesAnding) {
+ addIndex(BSON("x" << 1));
+ BSONObj query = fromjson("{$and: [ {x: {$gt: 1}}, {x: {$lt: 3}} ] }");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(
+ query, "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {x: 1}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, SimpleOr) {
+ addIndex(BSON("a" << 1));
+ BSONObj query = fromjson("{$or: [{a: 20}, {a: 21}]}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(
+ query, "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {a:1}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, OrWithAndChild) {
+ addIndex(BSON("a" << 1));
+ BSONObj query = fromjson("{$or: [{a: 20}, {$and: [{a:1}, {b:7}]}]}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query,
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a: 1}}}, "
+ "{fetch: {filter: {b: 7}, node: {ixscan: "
+ "{filter: null, pattern: {a: 1}}}}}]}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, AndWithUnindexedOrChild) {
+ addIndex(BSON("a" << 1));
+ BSONObj query = fromjson("{a:20, $or: [{b:1}, {c:7}]}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query,
+ "{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+
+TEST_F(CachePlanSelectionTest, AndWithOrWithOneIndex) {
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("a" << 1));
+ BSONObj query = fromjson("{$or: [{b:1}, {c:7}], a:20}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query,
+ "{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+//
+// Sort orders
+//
+
+// SERVER-1205.
+TEST_F(CachePlanSelectionTest, MergeSort) {
+ addIndex(BSON("a" << 1 << "c" << 1));
+ addIndex(BSON("b" << 1 << "c" << 1));
+
+ BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
+ BSONObj sort = BSON("c" << 1);
+ runQuerySortProj(query, sort, BSONObj());
+
+ assertPlanCacheRecoversSolution(
+ query,
+ sort,
+ BSONObj(),
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a: 1, c: 1}}}, {ixscan: {pattern: {b: 1, c: 1}}}]}}}}");
+}
+
+// SERVER-1205 as well.
+TEST_F(CachePlanSelectionTest, NoMergeSortIfNoSortWanted) {
+ addIndex(BSON("a" << 1 << "c" << 1));
+ addIndex(BSON("b" << 1 << "c" << 1));
+
+ BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
+ runQuerySortProj(query, BSONObj(), BSONObj());
+
+ assertPlanCacheRecoversSolution(query,
+ BSONObj(),
+ BSONObj(),
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a: 1, c: 1}}}, "
+ "{ixscan: {filter: null, pattern: {b: 1, c: 1}}}]}}}}");
+}
+
+// Disabled: SERVER-10801.
+/*
+TEST_F(CachePlanSelectionTest, SortOnGeoQuery) {
+ addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"));
+ BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", "
+ "coordinates: [[[1, 1], [1, 90], [180, 90], "
+ "[180, 1], [1, 1]]]}}}}");
+ BSONObj sort = fromjson("{timestamp: -1}");
+ runQuerySortProj(query, sort, BSONObj());
+
+ assertPlanCacheRecoversSolution(query, sort, BSONObj(),
+ "{fetch: {node: {ixscan: {pattern: {timestamp: -1, position: '2dsphere'}}}}}");
+}
+*/
+
+// SERVER-9257
+TEST_F(CachePlanSelectionTest, CompoundGeoNoGeoPredicate) {
+ addIndex(BSON("creationDate" << 1 << "foo.bar"
+ << "2dsphere"));
+ BSONObj query = fromjson("{creationDate: {$gt: 7}}");
+ BSONObj sort = fromjson("{creationDate: 1}");
+ runQuerySortProj(query, sort, BSONObj());
+
+ assertPlanCacheRecoversSolution(
+ query,
+ sort,
+ BSONObj(),
+ "{fetch: {node: {ixscan: {pattern: {creationDate: 1, 'foo.bar': '2dsphere'}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, ReverseScanForSort) {
+ addIndex(BSON("_id" << 1));
+ runQuerySortProj(BSONObj(), fromjson("{_id: -1}"), BSONObj());
+ assertPlanCacheRecoversSolution(
+ BSONObj(),
+ fromjson("{_id: -1}"),
+ BSONObj(),
+ "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {_id: 1}}}}}");
+}
+
+//
+// Caching collection scans.
+//
+
+TEST_F(CachePlanSelectionTest, CollscanNoUsefulIndices) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ addIndex(BSON("c" << 1));
+ runQuery(BSON("b" << 4));
+ assertPlanCacheRecoversSolution(BSON("b" << 4), "{cscan: {filter: {b: 4}, dir: 1}}");
+}
+
+TEST_F(CachePlanSelectionTest, CollscanOrWithoutEnoughIndices) {
+ addIndex(BSON("a" << 1));
+ BSONObj query = fromjson("{$or: [{a: 20}, {b: 21}]}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{cscan: {filter: {$or:[{a:20},{b:21}]}, dir: 1}}");
+}
+
+TEST_F(CachePlanSelectionTest, CollscanMergeSort) {
+ addIndex(BSON("a" << 1 << "c" << 1));
+ addIndex(BSON("b" << 1 << "c" << 1));
+
+ BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
+ BSONObj sort = BSON("c" << 1);
+ runQuerySortProj(query, sort, BSONObj());
+
+ assertPlanCacheRecoversSolution(
+ query, sort, BSONObj(), "{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+}
+
+//
+// Check queries that, at least for now, are not cached.
+//
+
+TEST_F(CachePlanSelectionTest, GeoNear2DNotCached) {
+ addIndex(BSON("a"
+ << "2d"));
+ runQuery(fromjson("{a: {$near: [0,0], $maxDistance:0.3 }}"));
+ assertNotCached("{geoNear2d: {a: '2d'}}");
+}
+
+TEST_F(CachePlanSelectionTest, MinNotCached) {
+ addIndex(BSON("a" << 1));
+ runQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), BSONObj());
+ assertNotCached(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, MaxNotCached) {
+ addIndex(BSON("a" << 1));
+ runQueryHintMinMax(BSONObj(), BSONObj(), BSONObj(), fromjson("{a: 1}"));
+ assertNotCached(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, NaturalHintNotCached) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuerySortHint(BSON("a" << 1), BSON("b" << 1), BSON("$natural" << 1));
+ assertNotCached(
+ "{sort: {pattern: {b: 1}, limit: 0, node: "
+ "{cscan: {filter: {a: 1}, dir: 1}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, HintValidNotCached) {
+ addIndex(BSON("a" << 1));
+ runQueryHint(BSONObj(), fromjson("{a: 1}"));
+ assertNotCached(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+//
+// Queries using '2d' indices are not cached.
+//
+
+TEST_F(CachePlanSelectionTest, Basic2DNonNearNotCached) {
+ addIndex(BSON("a"
+ << "2d"));
+ BSONObj query;
+
+ // Polygon
+ query = fromjson("{a : { $within: { $polygon : [[0,0], [2,0], [4,0]] } }}");
+ runQuery(query);
+ assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+
+ // Center
+ query = fromjson("{a : { $within : { $center : [[ 5, 5 ], 7 ] } }}");
+ runQuery(query);
+ assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+
+ // Centersphere
+ query = fromjson("{a : { $within : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}");
+ runQuery(query);
+ assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+
+ // Within box.
+ query = fromjson("{a : {$within: {$box : [[0,0],[9,9]]}}}");
+ runQuery(query);
+ assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, Or2DNonNearNotCached) {
+ addIndex(BSON("a"
+ << "2d"));
+ addIndex(BSON("b"
+ << "2d"));
+ BSONObj query = fromjson(
+ "{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {b : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}");
+
+ runQuery(query);
+ assertNotCached(
+ "{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {b: '2d'}}}}}]}}");
+}
+
+/**
+ * Test functions for computeKey. Cache keys are intentionally obfuscated and are
+ * meaningful only within the current lifetime of the server process. Users should treat plan
+ * cache keys as opaque.
+ */
+void testComputeKey(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ const char* expectedStr) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize(queryStr, sortStr, projStr));
+ PlanCacheKey key = planCache.computeKey(*cq);
+ PlanCacheKey expectedKey(expectedStr);
+ if (key == expectedKey) {
+ return;
}
+ str::stream ss;
+ ss << "Unexpected plan cache key. Expected: " << expectedKey << ". Actual: " << key
+ << ". Query: " << cq->toString();
+ FAIL(ss);
+}
+
TEST(PlanCacheTest, ComputeKey) {
    // Generated cache keys should be treated as opaque to the user. The expected
    // strings below are regression pins for the current encoding, not a documented
    // format. From these cases the encoding appears to be: one token per predicate
    // ("eq" + field name for an equality, "an"/"or" prefixes for $and/$or subtrees),
    // then "~" + sort fields, then "|" + projection fields -- TODO confirm against
    // the key-encoding implementation if the format matters to you.

    // No sorts
    testComputeKey("{}", "{}", "{}", "an");
    testComputeKey("{$or: [{a: 1}, {b: 2}]}", "{}", "{}", "or[eqa,eqb]");
    testComputeKey("{$or: [{a: 1}, {b: 1}, {c: 1}], d: 1}", "{}", "{}", "an[or[eqa,eqb,eqc],eqd]");
    testComputeKey("{$or: [{a: 1}, {b: 1}], c: 1, d: 1}", "{}", "{}", "an[or[eqa,eqb],eqc,eqd]");
    testComputeKey("{a: 1, b: 1, c: 1}", "{}", "{}", "an[eqa,eqb,eqc]");
    // Field names that happen to contain encoding tokens ("eqc", "p1", "ab") must
    // still round-trip unambiguously.
    testComputeKey("{a: 1, beqc: 1}", "{}", "{}", "an[eqa,eqbeqc]");
    testComputeKey("{ap1a: 1}", "{}", "{}", "eqap1a");
    testComputeKey("{aab: 1}", "{}", "{}", "eqaab");

    // With sort
    testComputeKey("{}", "{a: 1}", "{}", "an~aa");
    testComputeKey("{}", "{a: -1}", "{}", "an~da");
    testComputeKey("{}",
                   "{a: {$meta: 'textScore'}}",
                   "{a: {$meta: 'textScore'}}",
                   "an~ta|{ $meta: \"textScore\" }a");
    testComputeKey("{a: 1}", "{b: 1}", "{}", "eqa~ab");

    // With projection
    // All truthy inclusion values normalize to the same "i" token; falsy exclusion
    // values normalize to "e".
    testComputeKey("{}", "{}", "{a: 1}", "an|ia");
    testComputeKey("{}", "{}", "{a: -1}", "an|ia");
    testComputeKey("{}", "{}", "{a: -1.0}", "an|ia");
    testComputeKey("{}", "{}", "{a: true}", "an|ia");
    testComputeKey("{}", "{}", "{a: 0}", "an|ea");
    testComputeKey("{}", "{}", "{a: false}", "an|ea");
    testComputeKey("{}", "{}", "{a: 99}", "an|ia");
    testComputeKey("{}", "{}", "{a: 'foo'}", "an|ia");
    // Non-scalar projection operators are embedded verbatim (with escaping).
    testComputeKey("{}", "{}", "{a: {$slice: [3, 5]}}", "an|{ $slice: \\[ 3\\, 5 \\] }a");
    testComputeKey("{}", "{}", "{a: {$elemMatch: {x: 2}}}", "an|{ $elemMatch: { x: 2 } }a");
    testComputeKey("{}", "{}", "{a: ObjectId('507f191e810c19729de860ea')}", "an|ia");
    testComputeKey("{a: 1}", "{}", "{'a.$': 1}", "eqa|ia.$");
    testComputeKey("{a: 1}", "{}", "{a: 1}", "eqa|ia");

    // Projection should be order-insensitive
    testComputeKey("{}", "{}", "{a: 1, b: 1}", "an|iaib");
    testComputeKey("{}", "{}", "{b: 1, a: 1}", "an|iaib");

    // With or-elimination and projection
    // A single-branch $or is rewritten to its child before the key is computed.
    testComputeKey("{$or: [{a: 1}]}", "{}", "{_id: 0, a: 1}", "eqa|e_idia");
    testComputeKey("{$or: [{a: 1}]}", "{}", "{'a.$': 1}", "eqa|ia.$");
}
+
// Delimiters found in user field names or non-standard projection field values
// must be escaped.
TEST(PlanCacheTest, ComputeKeyEscaped) {
    // Field name in query.
    testComputeKey("{'a,[]~|<>': 1}", "{}", "{}", "eqa\\,\\[\\]\\~\\|\\<\\>");

    // Field name in sort.
    testComputeKey("{}", "{'a,[]~|<>': 1}", "{}", "an~aa\\,\\[\\]\\~\\|\\<\\>");

    // Field name in projection.
    testComputeKey("{}", "{}", "{'a,[]~|<>': 1}", "an|ia\\,\\[\\]\\~\\|\\<\\>");

    // Value in projection.
    // A scalar inclusion value is normalized to the "i" token (see ComputeKey above),
    // so its content never reaches the key and needs no escaping.
    testComputeKey("{}", "{}", "{a: 'foo,[]~|<>'}", "an|ia");
}
+
+// Cache keys for $geoWithin queries with legacy and GeoJSON coordinates should
+// not be the same.
+TEST(PlanCacheTest, ComputeKeyGeoWithin) {
+ PlanCache planCache;
+
+ // Legacy coordinates.
+ unique_ptr<CanonicalQuery> cqLegacy(canonicalize(
+ "{a: {$geoWithin: "
+ "{$box: [[-180, -90], [180, 90]]}}}"));
+ // GeoJSON coordinates.
+ unique_ptr<CanonicalQuery> cqNew(canonicalize(
+ "{a: {$geoWithin: "
+ "{$geometry: {type: 'Polygon', coordinates: "
+ "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
+ ASSERT_NOT_EQUALS(planCache.computeKey(*cqLegacy), planCache.computeKey(*cqNew));
+}
+
// GEO_NEAR cache keys should include information on geometry and CRS in addition
// to the match type and field name.
TEST(PlanCacheTest, ComputeKeyGeoNear) {
    // "gn" + field appears to mark the GEO_NEAR predicate; the trailing characters
    // seem to encode geometry representation and CRS (flat vs spherical), which is
    // why the three otherwise-similar queries below get distinct keys -- TODO
    // confirm suffix meanings against the key-encoding implementation.
    testComputeKey("{a: {$near: [0,0], $maxDistance:0.3 }}", "{}", "{}", "gnanrfl");
    testComputeKey("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}", "{}", "{}", "gnanssp");
    testComputeKey(
        "{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
        "$maxDistance:100}}}",
        "{}",
        "{}",
        "gnanrsp");
}
+
+// When a sparse index is present, computeKey() should generate different keys depending on
+// whether or not the predicates in the given query can use the index.
+TEST(PlanCacheTest, ComputeKeySparseIndex) {
+ PlanCache planCache;
+ planCache.notifyOfIndexEntries({IndexEntry(BSON("a" << 1),
+ false, // multikey
+ true, // sparse
+ false, // unique
+ "", // name
+ nullptr, // filterExpr
+ BSONObj())});
+
+ unique_ptr<CanonicalQuery> cqEqNumber(canonicalize("{a: 0}}"));
+ unique_ptr<CanonicalQuery> cqEqString(canonicalize("{a: 'x'}}"));
+ unique_ptr<CanonicalQuery> cqEqNull(canonicalize("{a: null}}"));
+
+ // 'cqEqNumber' and 'cqEqString' get the same key, since both are compatible with this
+ // index.
+ ASSERT_EQ(planCache.computeKey(*cqEqNumber), planCache.computeKey(*cqEqString));
+
+ // 'cqEqNull' gets a different key, since it is not compatible with this index.
+ ASSERT_NOT_EQUALS(planCache.computeKey(*cqEqNull), planCache.computeKey(*cqEqNumber));
+}
+
+// When a partial index is present, computeKey() should generate different keys depending on
+// whether or not the predicates in the given query "match" the predicates in the partial index
+// filter.
+TEST(PlanCacheTest, ComputeKeyPartialIndex) {
+ BSONObj filterObj = BSON("f" << BSON("$gt" << 0));
+ unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
+
+ PlanCache planCache;
+ planCache.notifyOfIndexEntries({IndexEntry(BSON("a" << 1),
+ false, // multikey
+ false, // sparse
+ false, // unique
+ "", // name
+ filterExpr.get(),
+ BSONObj())});
+
+ unique_ptr<CanonicalQuery> cqGtNegativeFive(canonicalize("{f: {$gt: -5}}"));
+ unique_ptr<CanonicalQuery> cqGtZero(canonicalize("{f: {$gt: 0}}"));
+ unique_ptr<CanonicalQuery> cqGtFive(canonicalize("{f: {$gt: 5}}"));
+
+ // 'cqGtZero' and 'cqGtFive' get the same key, since both are compatible with this index.
+ ASSERT_EQ(planCache.computeKey(*cqGtZero), planCache.computeKey(*cqGtFive));
+
+ // 'cqGtNegativeFive' gets a different key, since it is not compatible with this index.
+ ASSERT_NOT_EQUALS(planCache.computeKey(*cqGtNegativeFive), planCache.computeKey(*cqGtZero));
+}
} // namespace