author     Scott Hernandez <scotthernandez@gmail.com>  2016-07-07 12:13:07 -0400
committer  Scott Hernandez <scotthernandez@gmail.com>  2016-07-07 12:13:13 -0400
commit     0b797de3ce2eb461e82cac647067dfaaa4d36988 (patch)
tree       27f4e2126c4771fefb5f64f0ca5c2468a4158f02 /src/mongo
parent     e7cc7ebf50c2883776cb4cc2423d687bcafa35b6 (diff)
download   mongo-0b797de3ce2eb461e82cac647067dfaaa4d36988.tar.gz

Revert "SERVER-23882 Collation should be considered part of a query's shape"
This reverts commit 582818dac41ac01975820c09f79d3b86dc3782cc.
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/commands/index_filter_commands.cpp       |  11
-rw-r--r--  src/mongo/db/commands/index_filter_commands_test.cpp  | 147
-rw-r--r--  src/mongo/db/commands/plan_cache_commands.cpp         |  30
-rw-r--r--  src/mongo/db/commands/plan_cache_commands_test.cpp    | 178
-rw-r--r--  src/mongo/db/matcher/expression_algo.cpp              |  19
-rw-r--r--  src/mongo/db/matcher/expression_leaf.h                |  16
-rw-r--r--  src/mongo/db/query/SConscript                         |   1
-rw-r--r--  src/mongo/db/query/canonical_query.cpp                |   7
-rw-r--r--  src/mongo/db/query/plan_cache.cpp                     |  15
-rw-r--r--  src/mongo/db/query/plan_cache.h                       |   2
-rw-r--r--  src/mongo/db/query/plan_cache_indexability.cpp        |  58
-rw-r--r--  src/mongo/db/query/plan_cache_indexability.h          |  59
-rw-r--r--  src/mongo/db/query/plan_cache_indexability_test.cpp   | 296
-rw-r--r--  src/mongo/db/query/plan_cache_test.cpp                | 140
-rw-r--r--  src/mongo/db/query/query_settings.cpp                 |  13
-rw-r--r--  src/mongo/db/query/query_settings.h                   |   8
-rw-r--r--  src/mongo/shell/collection.js                         |  41
17 files changed, 176 insertions, 865 deletions
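
For context on what this revert does: collation drops back out of a query's shape, so the plan-cache key is derived only from the canonicalized filter, the sort, and the projection, and two queries that differ solely in collation once again share one cache entry. Below is a minimal, self-contained sketch of that idea. The '~' and '|' delimiters mirror kEncodeSortSection and kEncodeProjectionSection in plan_cache.cpp; the names QueryShape and makeCacheKey and the "eqa" shape string are illustrative stand-ins, not MongoDB's real API.

// Illustrative sketch only: models why two queries that differ solely by
// collation map to the same plan-cache entry after this revert.
#include <iostream>
#include <string>

struct QueryShape {
    std::string filterShape;  // stand-in for the encoded match expression, e.g. "eqa"
    std::string sort;         // e.g. "{a: -1}"
    std::string projection;   // e.g. "{_id: 0, a: 1}"
};

// After the revert the key has no collation section: only the '~' (sort) and
// '|' (projection) sections follow the filter shape.
std::string makeCacheKey(const QueryShape& s) {
    return s.filterShape + "~" + s.sort + "|" + s.projection;
}

int main() {
    QueryShape simple{"eqa", "{a: -1}", "{_id: 0, a: 1}"};
    QueryShape reverseCollation = simple;  // collation no longer contributes to the shape
    std::cout << makeCacheKey(simple) << "\n";  // eqa~{a: -1}|{_id: 0, a: 1}
    std::cout << (makeCacheKey(simple) == makeCacheKey(reverseCollation)) << "\n";  // 1: same entry
    return 0;
}

Before the revert, a '#'-delimited collation section (kEncodeCollationSection, removed below in plan_cache.cpp) kept those two keys distinct.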
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index f07115c9705..627a55cead2 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -224,9 +224,6 @@ Status ListFilters::list(const QuerySettings& querySettings, BSONObjBuilder* bob
hintBob.append("query", entry->query);
hintBob.append("sort", entry->sort);
hintBob.append("projection", entry->projection);
- if (!entry->collation.isEmpty()) {
- hintBob.append("collation", entry->collation);
- }
BSONArrayBuilder indexesBuilder(hintBob.subarrayStart("indexes"));
for (vector<BSONObj>::const_iterator j = entry->indexKeyPatterns.begin();
j != entry->indexKeyPatterns.end();
@@ -292,12 +289,11 @@ Status ClearFilters::clear(OperationContext* txn,
return Status::OK();
}
- // If query is not provided, make sure sort, projection, and collation are not in arguments.
+ // If query is not provided, make sure sort and projection are not in arguments.
// We do not want to clear the entire cache inadvertently when the user
// forgot to provide a value for "query".
- if (cmdObj.hasField("sort") || cmdObj.hasField("projection") || cmdObj.hasField("collation")) {
- return Status(ErrorCodes::BadValue,
- "sort, projection, or collation provided without query");
+ if (cmdObj.hasField("sort") || cmdObj.hasField("projection")) {
+ return Status(ErrorCodes::BadValue, "sort or projection provided without query");
}
// Get entries from query settings. We need to remove corresponding entries from the plan
@@ -330,7 +326,6 @@ Status ClearFilters::clear(OperationContext* txn,
qr->setFilter(entry->query);
qr->setSort(entry->sort);
qr->setProj(entry->projection);
- qr->setCollation(entry->collation);
auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
invariantOK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index 0960a4434be..72c40a07083 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -83,12 +83,6 @@ vector<BSONObj> getFilters(const QuerySettings& querySettings) {
BSONElement projectionElt = obj.getField("projection");
ASSERT_TRUE(projectionElt.isABSONObj());
- // collation (optional)
- BSONElement collationElt = obj.getField("collation");
- if (!collationElt.eoo()) {
- ASSERT_TRUE(collationElt.isABSONObj());
- }
-
// indexes
BSONElement indexesElt = obj.getField("indexes");
ASSERT_EQUALS(indexesElt.type(), mongo::Array);
@@ -123,14 +117,12 @@ void addQueryShapeToPlanCache(OperationContext* txn,
PlanCache* planCache,
const char* queryStr,
const char* sortStr,
- const char* projectionStr,
- const char* collationStr) {
+ const char* projectionStr) {
// Create canonical query.
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson(queryStr));
qr->setSort(fromjson(sortStr));
qr->setProj(fromjson(projectionStr));
- qr->setCollation(fromjson(collationStr));
auto statusWithCQ =
CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
@@ -150,8 +142,7 @@ void addQueryShapeToPlanCache(OperationContext* txn,
bool planCacheContains(const PlanCache& planCache,
const char* queryStr,
const char* sortStr,
- const char* projectionStr,
- const char* collationStr) {
+ const char* projectionStr) {
QueryTestServiceContext serviceContext;
auto txn = serviceContext.makeOperationContext();
@@ -160,7 +151,6 @@ bool planCacheContains(const PlanCache& planCache,
qr->setFilter(fromjson(queryStr));
qr->setSort(fromjson(sortStr));
qr->setProj(fromjson(projectionStr));
- qr->setCollation(fromjson(collationStr));
auto statusWithInputQuery = CanonicalQuery::canonicalize(
txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithInputQuery.getStatus());
@@ -181,7 +171,6 @@ bool planCacheContains(const PlanCache& planCache,
qr->setFilter(entry->query);
qr->setSort(entry->sort);
qr->setProj(entry->projection);
- qr->setCollation(entry->collation);
auto statusWithCurrentQuery = CanonicalQuery::canonicalize(
txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCurrentQuery.getStatus());
@@ -300,13 +289,6 @@ TEST(IndexFilterCommandsTest, SetFilterInvalidParameter) {
&planCache,
nss.ns(),
fromjson("{query: {a: 1}, projection: 1234, indexes: [{a: 1}, {b: 1}]}")));
- // If present, collation must be an object.
- ASSERT_NOT_OK(
- SetFilter::set(&txn,
- &empty,
- &planCache,
- nss.ns(),
- fromjson("{query: {a: 1}, collation: 1234, indexes: [{a: 1}, {b: 1}]}")));
// Query must pass canonicalization.
ASSERT_NOT_OK(
SetFilter::set(&txn,
@@ -323,45 +305,36 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
auto txn = serviceContext.makeOperationContext();
// Inject query shape into plan cache.
- addQueryShapeToPlanCache(txn.get(),
- &planCache,
- "{a: 1, b: 1}",
- "{a: -1}",
- "{_id: 0, a: 1}",
- "{locale: 'mock_reverse_string'}");
- ASSERT_TRUE(planCacheContains(
- planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}", "{locale: 'mock_reverse_string'}"));
+ addQueryShapeToPlanCache(txn.get(), &planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}");
+ ASSERT_TRUE(planCacheContains(planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}"));
- ASSERT_OK(SetFilter::set(txn.get(),
- &querySettings,
- &planCache,
- nss.ns(),
- fromjson("{query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 0, "
- "a: 1}, collation: {locale: 'mock_reverse_string'}, "
- "indexes: [{a: 1}]}")));
+ ASSERT_OK(
+ SetFilter::set(txn.get(),
+ &querySettings,
+ &planCache,
+ nss.ns(),
+ fromjson("{query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
+ "indexes: [{a: 1}]}")));
vector<BSONObj> filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 1U);
// Query shape should not exist in plan cache after hint is updated.
- ASSERT_FALSE(planCacheContains(
- planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}", "{locale: 'mock_reverse_string'}"));
+ ASSERT_FALSE(planCacheContains(planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}"));
// Fields in filter should match criteria in most recent query settings update.
ASSERT_EQUALS(filters[0].getObjectField("query"), fromjson("{a: 1, b: 1}"));
ASSERT_EQUALS(filters[0].getObjectField("sort"), fromjson("{a: -1}"));
ASSERT_EQUALS(filters[0].getObjectField("projection"), fromjson("{_id: 0, a: 1}"));
- ASSERT_EQUALS(StringData(filters[0].getObjectField("collation").getStringField("locale")),
- "mock_reverse_string");
// Replacing the hint for the same query shape ({a: 1, b: 1} and {b: 2, a: 3}
// share same shape) should not change the query settings size.
- ASSERT_OK(SetFilter::set(txn.get(),
- &querySettings,
- &planCache,
- nss.ns(),
- fromjson("{query: {b: 2, a: 3}, sort: {a: -1}, projection: {_id: 0, "
- "a: 1}, collation: {locale: 'mock_reverse_string'}, "
- "indexes: [{a: 1, b: 1}]}")));
+ ASSERT_OK(
+ SetFilter::set(txn.get(),
+ &querySettings,
+ &planCache,
+ nss.ns(),
+ fromjson("{query: {b: 2, a: 3}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
+ "indexes: [{a: 1, b: 1}]}")));
filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 1U);
@@ -384,8 +357,8 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
ASSERT_EQUALS(filters.size(), 3U);
// Add 2 entries to plan cache and check plan cache after clearing one/all filters.
- addQueryShapeToPlanCache(txn.get(), &planCache, "{a: 1}", "{}", "{}", "{}");
- addQueryShapeToPlanCache(txn.get(), &planCache, "{b: 1}", "{}", "{}", "{}");
+ addQueryShapeToPlanCache(txn.get(), &planCache, "{a: 1}", "{}", "{}");
+ addQueryShapeToPlanCache(txn.get(), &planCache, "{b: 1}", "{}", "{}");
// Clear single hint.
ASSERT_OK(ClearFilters::clear(
@@ -394,8 +367,8 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
ASSERT_EQUALS(filters.size(), 2U);
// Query shape should not exist in plan cache after cleaing 1 hint.
- ASSERT_FALSE(planCacheContains(planCache, "{a: 1}", "{}", "{}", "{}"));
- ASSERT_TRUE(planCacheContains(planCache, "{b: 1}", "{}", "{}", "{}"));
+ ASSERT_FALSE(planCacheContains(planCache, "{a: 1}", "{}", "{}"));
+ ASSERT_TRUE(planCacheContains(planCache, "{b: 1}", "{}", "{}"));
// Clear all filters
ASSERT_OK(ClearFilters::clear(txn.get(), &querySettings, &planCache, nss.ns(), fromjson("{}")));
@@ -403,79 +376,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
ASSERT_TRUE(filters.empty());
// {b: 1} should be gone from plan cache after flushing query settings.
- ASSERT_FALSE(planCacheContains(planCache, "{b: 1}", "{}", "{}", "{}"));
-}
-
-TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
- QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
- QuerySettings querySettings;
-
- // Create a plan cache. Add an index so that indexability is included in the plan cache keys.
- PlanCache planCache;
- planCache.notifyOfIndexEntries(
- {IndexEntry(fromjson("{a: 1}"), false, false, false, "index_name", NULL, BSONObj())});
-
- // Inject query shapes with and without collation into plan cache.
- addQueryShapeToPlanCache(
- txn.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{locale: 'mock_reverse_string'}");
- addQueryShapeToPlanCache(txn.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{}");
- ASSERT_TRUE(
- planCacheContains(planCache, "{a: 'foo'}", "{}", "{}", "{locale: 'mock_reverse_string'}"));
- ASSERT_TRUE(planCacheContains(planCache, "{a: 'foo'}", "{}", "{}", "{}"));
-
- ASSERT_OK(SetFilter::set(txn.get(),
- &querySettings,
- &planCache,
- nss.ns(),
- fromjson("{query: {a: 'foo'}, sort: {}, projection: {}, collation: "
- "{locale: 'mock_reverse_string'}, "
- "indexes: [{a: 1}]}")));
- vector<BSONObj> filters = getFilters(querySettings);
- ASSERT_EQUALS(filters.size(), 1U);
- ASSERT_EQUALS(filters[0].getObjectField("query"), fromjson("{a: 'foo'}"));
- ASSERT_EQUALS(filters[0].getObjectField("sort"), fromjson("{}"));
- ASSERT_EQUALS(filters[0].getObjectField("projection"), fromjson("{}"));
- ASSERT_EQUALS(StringData(filters[0].getObjectField("collation").getStringField("locale")),
- "mock_reverse_string");
-
- // Plan cache should only contain entry for query without collation.
- ASSERT_FALSE(
- planCacheContains(planCache, "{a: 'foo'}", "{}", "{}", "{locale: 'mock_reverse_string'}"));
- ASSERT_TRUE(planCacheContains(planCache, "{a: 'foo'}", "{}", "{}", "{}"));
-
- // Add filter for query shape without collation.
- ASSERT_OK(SetFilter::set(txn.get(),
- &querySettings,
- &planCache,
- nss.ns(),
- fromjson("{query: {a: 'foo'}, indexes: [{b: 1}]}")));
- filters = getFilters(querySettings);
- ASSERT_EQUALS(filters.size(), 2U);
-
- // Add plan cache entries for both queries.
- addQueryShapeToPlanCache(
- txn.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{locale: 'mock_reverse_string'}");
- addQueryShapeToPlanCache(txn.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{}");
-
- // Clear filter for query with collation.
- ASSERT_OK(ClearFilters::clear(
- txn.get(),
- &querySettings,
- &planCache,
- nss.ns(),
- fromjson("{query: {a: 'foo'}, collation: {locale: 'mock_reverse_string'}}")));
- filters = getFilters(querySettings);
- ASSERT_EQUALS(filters.size(), 1U);
- ASSERT_EQUALS(filters[0].getObjectField("query"), fromjson("{a: 'foo'}"));
- ASSERT_EQUALS(filters[0].getObjectField("sort"), fromjson("{}"));
- ASSERT_EQUALS(filters[0].getObjectField("projection"), fromjson("{}"));
- ASSERT_EQUALS(filters[0].getObjectField("collation"), fromjson("{}"));
-
- // Plan cache should only contain entry for query without collation.
- ASSERT_FALSE(
- planCacheContains(planCache, "{a: 'foo'}", "{}", "{}", "{locale: 'mock_reverse_string'}"));
- ASSERT_TRUE(planCacheContains(planCache, "{a: 'foo'}", "{}", "{}", "{}"));
+ ASSERT_FALSE(planCacheContains(planCache, "{b: 1}", "{}", "{}"));
}
} // namespace
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index 9d347b7b6a4..7fda9ad233f 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -207,26 +207,12 @@ StatusWith<unique_ptr<CanonicalQuery>> PlanCacheCommand::canonicalize(OperationC
projObj = projElt.Obj();
}
- // collation - optional
- BSONObj collationObj;
- if (auto collationElt = cmdObj["collation"]) {
- if (!collationElt.isABSONObj()) {
- return Status(ErrorCodes::BadValue, "optional field collation must be an object");
- }
- collationObj = collationElt.Obj();
- if (collationObj.isEmpty()) {
- return Status(ErrorCodes::BadValue,
- "optional field collation cannot be an empty object");
- }
- }
-
// Create canonical query
const NamespaceString nss(ns);
auto qr = stdx::make_unique<QueryRequest>(std::move(nss));
qr->setFilter(queryObj);
qr->setSort(sortObj);
qr->setProj(projObj);
- qr->setCollation(collationObj);
const ExtensionsCallbackReal extensionsCallback(txn, &nss);
auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
@@ -275,9 +261,6 @@ Status PlanCacheListQueryShapes::list(const PlanCache& planCache, BSONObjBuilder
shapeBuilder.append("query", entry->query);
shapeBuilder.append("sort", entry->sort);
shapeBuilder.append("projection", entry->projection);
- if (!entry->collation.isEmpty()) {
- shapeBuilder.append("collation", entry->collation);
- }
shapeBuilder.doneFast();
// Release resources for cached solution after extracting query shape.
@@ -332,8 +315,7 @@ Status PlanCacheClear::clear(OperationContext* txn,
// Log if asked to clear non-existent query shape.
LOG(1) << ns << ": query shape doesn't exist in PlanCache - "
<< cq->getQueryObj().toString() << "(sort: " << cq->getQueryRequest().getSort()
- << "; projection: " << cq->getQueryRequest().getProj()
- << "; collation: " << cq->getQueryRequest().getCollation() << ")";
+ << "; projection: " << cq->getQueryRequest().getProj() << ")";
return Status::OK();
}
@@ -344,18 +326,16 @@ Status PlanCacheClear::clear(OperationContext* txn,
LOG(1) << ns << ": removed plan cache entry - " << cq->getQueryObj().toString()
<< "(sort: " << cq->getQueryRequest().getSort()
- << "; projection: " << cq->getQueryRequest().getProj()
- << "; collation: " << cq->getQueryRequest().getCollation() << ")";
+ << "; projection: " << cq->getQueryRequest().getProj() << ")";
return Status::OK();
}
- // If query is not provided, make sure sort, projection, and collation are not in arguments.
+ // If query is not provided, make sure sort and projection are not in arguments.
// We do not want to clear the entire cache inadvertently when the user
// forgets to provide a value for "query".
- if (cmdObj.hasField("sort") || cmdObj.hasField("projection") || cmdObj.hasField("collation")) {
- return Status(ErrorCodes::BadValue,
- "sort, projection, or collation provided without query");
+ if (cmdObj.hasField("sort") || cmdObj.hasField("projection")) {
+ return Status(ErrorCodes::BadValue, "sort or projection provided without query");
}
planCache->clear();
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 195ad13b250..8730290d78a 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -88,12 +88,6 @@ std::vector<BSONObj> getShapes(const PlanCache& planCache) {
BSONElement projectionElt = obj.getField("projection");
ASSERT_TRUE(projectionElt.isABSONObj());
- // collation
- BSONElement collationElt = obj.getField("collation");
- if (!collationElt.eoo()) {
- ASSERT_TRUE(collationElt.isABSONObj());
- }
-
// All fields OK. Append to vector.
shapes.push_back(obj.getOwned());
}
@@ -138,9 +132,6 @@ TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
// Create a canonical query
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{a: 1}"));
- qr->setSort(fromjson("{a: -1}"));
- qr->setProj(fromjson("{_id: 0}"));
- qr->setCollation(fromjson("{locale: 'mock_reverse_string'}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
@@ -159,7 +150,6 @@ TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
ASSERT_EQUALS(shapes[0].getObjectField("query"), cq->getQueryObj());
ASSERT_EQUALS(shapes[0].getObjectField("sort"), cq->getQueryRequest().getSort());
ASSERT_EQUALS(shapes[0].getObjectField("projection"), cq->getQueryRequest().getProj());
- ASSERT_EQUALS(shapes[0].getObjectField("collation"), cq->getCollator()->getSpec().toBSON());
}
/**
@@ -201,69 +191,59 @@ TEST(PlanCacheCommandsTest, planCacheClearAllShapes) {
TEST(PlanCacheCommandsTest, Canonicalize) {
// Invalid parameters
PlanCache planCache;
- QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ OperationContextNoop txn;
// Missing query field
- ASSERT_NOT_OK(PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{}")).getStatus());
+ ASSERT_NOT_OK(PlanCacheCommand::canonicalize(&txn, nss.ns(), fromjson("{}")).getStatus());
// Query needs to be an object
ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: 1}")).getStatus());
+ PlanCacheCommand::canonicalize(&txn, nss.ns(), fromjson("{query: 1}")).getStatus());
// Sort needs to be an object
- ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {}, sort: 1}"))
- .getStatus());
- // Projection needs to be an object.
- ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {}, projection: 1}"))
- .getStatus());
- // Collation needs to be an object.
- ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {}, collation: 1}"))
- .getStatus());
+ ASSERT_NOT_OK(PlanCacheCommand::canonicalize(&txn, nss.ns(), fromjson("{query: {}, sort: 1}"))
+ .getStatus());
// Bad query (invalid sort order)
ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {}, sort: {a: 0}}"))
+ PlanCacheCommand::canonicalize(&txn, nss.ns(), fromjson("{query: {}, sort: {a: 0}}"))
.getStatus());
// Valid parameters
auto statusWithCQ =
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}}"));
+ PlanCacheCommand::canonicalize(&txn, nss.ns(), fromjson("{query: {a: 1, b: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> query = std::move(statusWithCQ.getValue());
// Equivalent query should generate same key.
statusWithCQ =
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {b: 1, a: 1}}"));
+ PlanCacheCommand::canonicalize(&txn, nss.ns(), fromjson("{query: {b: 1, a: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> equivQuery = std::move(statusWithCQ.getValue());
ASSERT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*equivQuery));
// Sort query should generate different key from unsorted query.
statusWithCQ = PlanCacheCommand::canonicalize(
- txn.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}"));
+ &txn, nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> sortQuery1 = std::move(statusWithCQ.getValue());
ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*sortQuery1));
// Confirm sort arguments are properly delimited (SERVER-17158)
statusWithCQ = PlanCacheCommand::canonicalize(
- txn.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {aab: 1}}"));
+ &txn, nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {aab: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> sortQuery2 = std::move(statusWithCQ.getValue());
ASSERT_NOT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery2));
// Changing order and/or value of predicates should not change key
statusWithCQ = PlanCacheCommand::canonicalize(
- txn.get(), nss.ns(), fromjson("{query: {b: 3, a: 3}, sort: {a: 1, b: 1}}"));
+ &txn, nss.ns(), fromjson("{query: {b: 3, a: 3}, sort: {a: 1, b: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> sortQuery3 = std::move(statusWithCQ.getValue());
ASSERT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery3));
// Projected query should generate different key from unprojected query.
statusWithCQ = PlanCacheCommand::canonicalize(
- txn.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, projection: {_id: 0, a: 1}}"));
+ &txn, nss.ns(), fromjson("{query: {a: 1, b: 1}, projection: {_id: 0, a: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> projectionQuery = std::move(statusWithCQ.getValue());
ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*projectionQuery));
@@ -289,9 +269,6 @@ TEST(PlanCacheCommandsTest, planCacheClearInvalidParameter) {
// Projection present without query is an error.
ASSERT_NOT_OK(PlanCacheClear::clear(
&txn, &planCache, nss.ns(), fromjson("{projection: {_id: 0, a: 1}}")));
- // Collation present without query is an error.
- ASSERT_NOT_OK(PlanCacheClear::clear(
- &txn, &planCache, nss.ns(), fromjson("{collation: {locale: 'en_US'}}")));
}
TEST(PlanCacheCommandsTest, planCacheClearUnknownKey) {
@@ -350,62 +327,6 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
ASSERT_EQUALS(shapesAfter[0], shapeA);
}
-TEST(PlanCacheCommandsTest, planCacheClearOneKeyCollation) {
- QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
-
- // Create 2 canonical queries, one with collation.
- auto qr = stdx::make_unique<QueryRequest>(nss);
- qr->setFilter(fromjson("{a: 'foo'}"));
- auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
- ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- auto qrCollation = stdx::make_unique<QueryRequest>(nss);
- qrCollation->setFilter(fromjson("{a: 'foo'}"));
- qrCollation->setCollation(fromjson("{locale: 'mock_reverse_string'}"));
- auto statusWithCQCollation = CanonicalQuery::canonicalize(
- txn.get(), std::move(qrCollation), ExtensionsCallbackDisallowExtensions());
- ASSERT_OK(statusWithCQCollation.getStatus());
- unique_ptr<CanonicalQuery> cqCollation = std::move(statusWithCQCollation.getValue());
-
- // Create plan cache with 2 entries. Add an index so that indexability is included in the plan
- // cache keys.
- PlanCache planCache;
- planCache.notifyOfIndexEntries(
- {IndexEntry(fromjson("{a: 1}"), false, false, false, "index_name", NULL, BSONObj())});
- QuerySolution qs;
- qs.cacheData.reset(createSolutionCacheData());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
- planCache.add(*cqCollation, solns, createDecision(1U));
-
- // Check keys in cache before dropping the query with collation.
- vector<BSONObj> shapesBefore = getShapes(planCache);
- ASSERT_EQUALS(shapesBefore.size(), 2U);
- BSONObj shape = BSON("query" << cq->getQueryObj() << "sort" << cq->getQueryRequest().getSort()
- << "projection"
- << cq->getQueryRequest().getProj());
- BSONObj shapeWithCollation = BSON("query" << cqCollation->getQueryObj() << "sort"
- << cqCollation->getQueryRequest().getSort()
- << "projection"
- << cqCollation->getQueryRequest().getProj()
- << "collation"
- << cqCollation->getCollator()->getSpec().toBSON());
- ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shape) != shapesBefore.end());
- ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeWithCollation) !=
- shapesBefore.end());
-
- // Drop query with collation from cache. Make other query is still in cache afterwards.
- BSONObjBuilder bob;
-
- ASSERT_OK(PlanCacheClear::clear(txn.get(), &planCache, nss.ns(), shapeWithCollation));
- vector<BSONObj> shapesAfter = getShapes(planCache);
- ASSERT_EQUALS(shapesAfter.size(), 1U);
- ASSERT_EQUALS(shapesAfter[0], shape);
-}
-
/**
* Tests for planCacheListPlans
*/
@@ -449,21 +370,12 @@ BSONObj getPlan(const BSONElement& elt) {
vector<BSONObj> getPlans(const PlanCache& planCache,
const BSONObj& query,
const BSONObj& sort,
- const BSONObj& projection,
- const BSONObj& collation) {
- QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ const BSONObj& projection) {
+ OperationContextNoop txn;
BSONObjBuilder bob;
- BSONObjBuilder cmdObjBuilder;
- cmdObjBuilder.append("query", query);
- cmdObjBuilder.append("sort", sort);
- cmdObjBuilder.append("projection", projection);
- if (!collation.isEmpty()) {
- cmdObjBuilder.append("collation", collation);
- }
- BSONObj cmdObj = cmdObjBuilder.obj();
- ASSERT_OK(PlanCacheListPlans::list(txn.get(), planCache, nss.ns(), cmdObj, &bob));
+ BSONObj cmdObj = BSON("query" << query << "sort" << sort << "projection" << projection);
+ ASSERT_OK(PlanCacheListPlans::list(&txn, planCache, nss.ns(), cmdObj, &bob));
BSONObj resultObj = bob.obj();
BSONElement plansElt = resultObj.getField("plans");
ASSERT_EQUALS(plansElt.type(), mongo::Array);
@@ -521,8 +433,7 @@ TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionTrue) {
vector<BSONObj> plans = getPlans(planCache,
cq->getQueryObj(),
cq->getQueryRequest().getSort(),
- cq->getQueryRequest().getProj(),
- cq->getQueryRequest().getCollation());
+ cq->getQueryRequest().getProj());
ASSERT_EQUALS(plans.size(), 1U);
}
@@ -551,61 +462,8 @@ TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionFalse) {
vector<BSONObj> plans = getPlans(planCache,
cq->getQueryObj(),
cq->getQueryRequest().getSort(),
- cq->getQueryRequest().getProj(),
- cq->getQueryRequest().getCollation());
+ cq->getQueryRequest().getProj());
ASSERT_EQUALS(plans.size(), 2U);
}
-
-TEST(PlanCacheCommandsTest, planCacheListPlansCollation) {
- QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
-
- // Create 2 canonical queries, one with collation.
- auto qr = stdx::make_unique<QueryRequest>(nss);
- qr->setFilter(fromjson("{a: 'foo'}"));
- auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
- ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- auto qrCollation = stdx::make_unique<QueryRequest>(nss);
- qrCollation->setFilter(fromjson("{a: 'foo'}"));
- qrCollation->setCollation(fromjson("{locale: 'mock_reverse_string'}"));
- auto statusWithCQCollation = CanonicalQuery::canonicalize(
- txn.get(), std::move(qrCollation), ExtensionsCallbackDisallowExtensions());
- ASSERT_OK(statusWithCQCollation.getStatus());
- unique_ptr<CanonicalQuery> cqCollation = std::move(statusWithCQCollation.getValue());
-
- // Create plan cache with 2 entries. Add an index so that indexability is included in the plan
- // cache keys. Give query with collation two solutions.
- PlanCache planCache;
- planCache.notifyOfIndexEntries(
- {IndexEntry(fromjson("{a: 1}"), false, false, false, "index_name", NULL, BSONObj())});
- QuerySolution qs;
- qs.cacheData.reset(createSolutionCacheData());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
- std::vector<QuerySolution*> twoSolns;
- twoSolns.push_back(&qs);
- twoSolns.push_back(&qs);
- planCache.add(*cqCollation, twoSolns, createDecision(2U));
-
- // Normal query should have one solution.
- vector<BSONObj> plans = getPlans(planCache,
- cq->getQueryObj(),
- cq->getQueryRequest().getSort(),
- cq->getQueryRequest().getProj(),
- cq->getQueryRequest().getCollation());
- ASSERT_EQUALS(plans.size(), 1U);
-
- // Query with collation should have two solutions.
- vector<BSONObj> plansCollation = getPlans(planCache,
- cqCollation->getQueryObj(),
- cqCollation->getQueryRequest().getSort(),
- cqCollation->getQueryRequest().getProj(),
- cqCollation->getQueryRequest().getCollation());
- ASSERT_EQUALS(plansCollation.size(), 2U);
-}
-
} // namespace
diff --git a/src/mongo/db/matcher/expression_algo.cpp b/src/mongo/db/matcher/expression_algo.cpp
index d918ead80f9..6365d3326bb 100644
--- a/src/mongo/db/matcher/expression_algo.cpp
+++ b/src/mongo/db/matcher/expression_algo.cpp
@@ -46,6 +46,19 @@ using std::unique_ptr;
namespace {
+bool isComparisonMatchExpression(const MatchExpression* expr) {
+ switch (expr->matchType()) {
+ case MatchExpression::LT:
+ case MatchExpression::LTE:
+ case MatchExpression::EQ:
+ case MatchExpression::GTE:
+ case MatchExpression::GT:
+ return true;
+ default:
+ return false;
+ }
+}
+
bool supportsEquality(const ComparisonMatchExpression* expr) {
switch (expr->matchType()) {
case MatchExpression::LTE:
@@ -140,7 +153,7 @@ bool _isSubsetOf(const MatchExpression* lhs, const ComparisonMatchExpression* rh
return false;
}
- if (ComparisonMatchExpression::isComparisonMatchExpression(lhs)) {
+ if (isComparisonMatchExpression(lhs)) {
return _isSubsetOf(static_cast<const ComparisonMatchExpression*>(lhs), rhs);
}
@@ -175,7 +188,7 @@ bool _isSubsetOf(const MatchExpression* lhs, const ExistsMatchExpression* rhs) {
return false;
}
- if (ComparisonMatchExpression::isComparisonMatchExpression(lhs)) {
+ if (isComparisonMatchExpression(lhs)) {
const ComparisonMatchExpression* cme = static_cast<const ComparisonMatchExpression*>(lhs);
// CompareMatchExpression::init() prohibits creating a match expression with EOO or
// Undefined types, so only need to ensure that the value is not of type jstNULL.
@@ -325,7 +338,7 @@ bool isSubsetOf(const MatchExpression* lhs, const MatchExpression* rhs) {
return true;
}
- if (ComparisonMatchExpression::isComparisonMatchExpression(rhs)) {
+ if (isComparisonMatchExpression(rhs)) {
return _isSubsetOf(lhs, static_cast<const ComparisonMatchExpression*>(rhs));
}
diff --git a/src/mongo/db/matcher/expression_leaf.h b/src/mongo/db/matcher/expression_leaf.h
index 62a45c81e9c..7ef4dd7c860 100644
--- a/src/mongo/db/matcher/expression_leaf.h
+++ b/src/mongo/db/matcher/expression_leaf.h
@@ -117,22 +117,6 @@ public:
return _collator;
}
- /**
- * Returns true if the MatchExpression is a ComparisonMatchExpression.
- */
- static bool isComparisonMatchExpression(const MatchExpression* expr) {
- switch (expr->matchType()) {
- case MatchExpression::LT:
- case MatchExpression::LTE:
- case MatchExpression::EQ:
- case MatchExpression::GTE:
- case MatchExpression::GT:
- return true;
- default:
- return false;
- }
- }
-
protected:
BSONElement _rhs;
diff --git a/src/mongo/db/query/SConscript b/src/mongo/db/query/SConscript
index 700021391a7..e186ce6aa29 100644
--- a/src/mongo/db/query/SConscript
+++ b/src/mongo/db/query/SConscript
@@ -307,7 +307,6 @@ env.CppUnitTest(
"plan_cache_indexability_test.cpp"
],
LIBDEPS=[
- "collation/collator_interface_mock",
"query_planner",
],
)
diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp
index 9bbb5a169dc..334fbe664cc 100644
--- a/src/mongo/db/query/canonical_query.cpp
+++ b/src/mongo/db/query/canonical_query.cpp
@@ -516,9 +516,6 @@ std::string CanonicalQuery::toString() const {
ss << "Tree: " << _root->toString();
ss << "Sort: " << _qr->getSort().toString() << '\n';
ss << "Proj: " << _qr->getProj().toString() << '\n';
- if (!_qr->getCollation().isEmpty()) {
- ss << "Collation: " << _qr->getCollation().toString() << '\n';
- }
return ss;
}
@@ -527,10 +524,6 @@ std::string CanonicalQuery::toStringShort() const {
ss << "query: " << _qr->getFilter().toString() << " sort: " << _qr->getSort().toString()
<< " projection: " << _qr->getProj().toString();
- if (!_qr->getCollation().isEmpty()) {
- ss << " collation: " << _qr->getCollation().toString();
- }
-
if (_qr->getBatchSize()) {
ss << " batchSize: " << *_qr->getBatchSize();
}
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp
index 5554438ba09..8268555215d 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache.cpp
@@ -36,7 +36,6 @@
#include "mongo/client/dbclientinterface.h" // For QueryOption_foobar
#include "mongo/db/matcher/expression_array.h"
#include "mongo/db/matcher/expression_geo.h"
-#include "mongo/db/query/collation/collator_interface.h"
#include "mongo/db/query/plan_ranker.h"
#include "mongo/db/query/query_knobs.h"
#include "mongo/db/query/query_solution.h"
@@ -58,7 +57,6 @@ const char kEncodeChildrenEnd = ']';
const char kEncodeChildrenSeparator = ',';
const char kEncodeSortSection = '~';
const char kEncodeProjectionSection = '|';
-const char kEncodeCollationSection = '#';
/**
* Encode user-provided string. Cache key delimiters seen in the
@@ -75,7 +73,6 @@ void encodeUserString(StringData s, StringBuilder* keyBuilder) {
case kEncodeChildrenSeparator:
case kEncodeSortSection:
case kEncodeProjectionSection:
- case kEncodeCollationSection:
case '\\':
*keyBuilder << '\\';
// Fall through to default case.
@@ -312,7 +309,6 @@ CachedSolution::CachedSolution(const PlanCacheKey& key, const PlanCacheEntry& en
query(entry.query.getOwned()),
sort(entry.sort.getOwned()),
projection(entry.projection.getOwned()),
- collation(entry.collation.getOwned()),
decisionWorks(entry.decision->stats[0]->common.works) {
// CachedSolution should not having any references into
// cache entry. All relevant data should be cloned/copied.
@@ -373,7 +369,6 @@ PlanCacheEntry* PlanCacheEntry::clone() const {
entry->query = query.getOwned();
entry->sort = sort.getOwned();
entry->projection = projection.getOwned();
- entry->collation = collation.getOwned();
// Copy performance stats.
for (size_t i = 0; i < feedback.size(); ++i) {
@@ -388,7 +383,6 @@ PlanCacheEntry* PlanCacheEntry::clone() const {
std::string PlanCacheEntry::toString() const {
return str::stream() << "(query: " << query.toString() << ";sort: " << sort.toString()
<< ";projection: " << projection.toString()
- << ";collation: " << collation.toString()
<< ";solutions: " << plannerData.size() << ")";
}
@@ -506,13 +500,13 @@ void PlanCache::encodeKeyForMatch(const MatchExpression* tree, StringBuilder* ke
}
// Encode indexability.
- const IndexToDiscriminatorMap& discriminators =
+ const IndexabilityDiscriminators& discriminators =
_indexabilityState.getDiscriminators(tree->path());
if (!discriminators.empty()) {
*keyBuilder << kEncodeDiscriminatorsBegin;
// For each discriminator on this path, append the character '0' or '1'.
- for (auto&& indexAndDiscriminatorPair : discriminators) {
- *keyBuilder << indexAndDiscriminatorPair.second.isMatchCompatibleWithIndex(tree);
+ for (const IndexabilityDiscriminator& discriminator : discriminators) {
+ *keyBuilder << discriminator(tree);
}
*keyBuilder << kEncodeDiscriminatorsEnd;
}
@@ -645,9 +639,6 @@ Status PlanCache::add(const CanonicalQuery& query,
const QueryRequest& qr = query.getQueryRequest();
entry->query = qr.getFilter().getOwned();
entry->sort = qr.getSort().getOwned();
- if (query.getCollator()) {
- entry->collation = query.getCollator()->getSpec().toBSON();
- }
// Strip projections on $-prefixed fields, as these are added by internal callers of the query
// system and are not considered part of the user projection.
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index 16aaff994a2..332e7b79cea 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -201,7 +201,6 @@ public:
BSONObj query;
BSONObj sort;
BSONObj projection;
- BSONObj collation;
// The number of work cycles taken to decide on a winning plan when the plan was first
// cached.
@@ -251,7 +250,6 @@ public:
BSONObj query;
BSONObj sort;
BSONObj projection;
- BSONObj collation;
//
// Performance stats
diff --git a/src/mongo/db/query/plan_cache_indexability.cpp b/src/mongo/db/query/plan_cache_indexability.cpp
index 442cb222001..066d7a2782b 100644
--- a/src/mongo/db/query/plan_cache_indexability.cpp
+++ b/src/mongo/db/query/plan_cache_indexability.cpp
@@ -35,18 +35,15 @@
#include "mongo/db/matcher/expression.h"
#include "mongo/db/matcher/expression_algo.h"
#include "mongo/db/matcher/expression_leaf.h"
-#include "mongo/db/query/collation/collation_index_key.h"
-#include "mongo/db/query/collation/collator_interface.h"
#include "mongo/db/query/index_entry.h"
#include "mongo/stdx/memory.h"
#include <memory>
namespace mongo {
-void PlanCacheIndexabilityState::processSparseIndex(const std::string& indexName,
- const BSONObj& keyPattern) {
+void PlanCacheIndexabilityState::processSparseIndex(const BSONObj& keyPattern) {
for (BSONElement elem : keyPattern) {
- _pathDiscriminatorsMap[elem.fieldNameStringData()][indexName].addDiscriminator(
+ _pathDiscriminatorsMap[elem.fieldNameStringData()].push_back(
[](const MatchExpression* queryExpr) {
if (queryExpr->matchType() == MatchExpression::EQ) {
const auto* queryExprEquality =
@@ -62,60 +59,24 @@ void PlanCacheIndexabilityState::processSparseIndex(const std::string& indexName
}
}
-void PlanCacheIndexabilityState::processPartialIndex(const std::string& indexName,
- const MatchExpression* filterExpr) {
+void PlanCacheIndexabilityState::processPartialIndex(const MatchExpression* filterExpr) {
invariant(filterExpr);
for (size_t i = 0; i < filterExpr->numChildren(); ++i) {
- processPartialIndex(indexName, filterExpr->getChild(i));
+ processPartialIndex(filterExpr->getChild(i));
}
if (!filterExpr->isLogical()) {
- _pathDiscriminatorsMap[filterExpr->path()][indexName].addDiscriminator(
+ _pathDiscriminatorsMap[filterExpr->path()].push_back(
[filterExpr](const MatchExpression* queryExpr) {
return expression::isSubsetOf(queryExpr, filterExpr);
});
}
}
-void PlanCacheIndexabilityState::processIndexCollation(const std::string& indexName,
- const BSONObj& keyPattern,
- const CollatorInterface* collator) {
- for (BSONElement elem : keyPattern) {
- _pathDiscriminatorsMap[elem.fieldNameStringData()][indexName].addDiscriminator([collator](
- const MatchExpression* queryExpr) {
- if (ComparisonMatchExpression::isComparisonMatchExpression(queryExpr)) {
- const auto* queryExprComparison =
- static_cast<const ComparisonMatchExpression*>(queryExpr);
- const bool collatorsMatch =
- CollatorInterface::collatorsMatch(queryExprComparison->getCollator(), collator);
- const bool isCollatableType =
- CollationIndexKey::isCollatableType(queryExprComparison->getData().type());
- return collatorsMatch || !isCollatableType;
- }
-
- if (queryExpr->matchType() == MatchExpression::MATCH_IN) {
- const auto* queryExprIn = static_cast<const InMatchExpression*>(queryExpr);
- if (CollatorInterface::collatorsMatch(queryExprIn->getCollator(), collator)) {
- return true;
- }
- for (const auto& equality : queryExprIn->getEqualities()) {
- if (CollationIndexKey::isCollatableType(equality.type())) {
- return false;
- }
- }
- return true;
- }
-
- // The predicate never compares strings so it is not affected by collation.
- return true;
- });
- }
-}
-
namespace {
-const IndexToDiscriminatorMap emptyDiscriminators;
+const IndexabilityDiscriminators emptyDiscriminators;
} // namespace
-const IndexToDiscriminatorMap& PlanCacheIndexabilityState::getDiscriminators(
+const IndexabilityDiscriminators& PlanCacheIndexabilityState::getDiscriminators(
StringData path) const {
PathDiscriminatorsMap::const_iterator it = _pathDiscriminatorsMap.find(path);
if (it == _pathDiscriminatorsMap.end()) {
@@ -129,12 +90,11 @@ void PlanCacheIndexabilityState::updateDiscriminators(const std::vector<IndexEnt
for (const IndexEntry& idx : indexEntries) {
if (idx.sparse) {
- processSparseIndex(idx.name, idx.keyPattern);
+ processSparseIndex(idx.keyPattern);
}
if (idx.filterExpr) {
- processPartialIndex(idx.name, idx.filterExpr);
+ processPartialIndex(idx.filterExpr);
}
- processIndexCollation(idx.name, idx.keyPattern, idx.collator);
}
}
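
The structural change in this file is easier to see in isolation: the revert returns to keeping, for each path, a flat vector of discriminator callbacks rather than a per-index map of composite discriminators, and key encoding appends one '0'/'1' character per callback. The following standalone sketch mirrors processSparseIndex and the encodeKeyForMatch loop under assumed, simplified names (Expr, registerSparseIndexOnPath, and encodeIndexability are hypothetical, and the real discriminators take a const MatchExpression*); it is not the actual MongoDB code.

#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

// Hypothetical stand-in for a parsed single-path predicate.
struct Expr {
    std::string path;
    bool comparesToNull;  // true for an equality-to-null predicate like {path: null}
};

using IndexabilityDiscriminator = std::function<bool(const Expr&)>;
using IndexabilityDiscriminators = std::vector<IndexabilityDiscriminator>;

// Post-revert shape: path -> flat list of discriminators, with no grouping by index name.
std::unordered_map<std::string, IndexabilityDiscriminators> pathDiscriminators;

void registerSparseIndexOnPath(const std::string& path) {
    // A sparse index cannot satisfy an equality-to-null predicate on its path,
    // which is what the processSparseIndex lambda above checks.
    pathDiscriminators[path].push_back(
        [](const Expr& e) { return !e.comparesToNull; });
}

// Mirrors the encodeKeyForMatch loop: one '0' or '1' per registered discriminator.
// The '<' and '>' delimiters here are illustrative placeholders.
std::string encodeIndexability(const Expr& e) {
    auto it = pathDiscriminators.find(e.path);
    if (it == pathDiscriminators.end() || it->second.empty()) {
        return "";
    }
    std::string bits = "<";
    for (const IndexabilityDiscriminator& d : it->second) {
        bits += d(e) ? '1' : '0';
    }
    return bits + ">";
}

int main() {
    registerSparseIndexOnPath("a");
    std::cout << encodeIndexability({"a", false}) << "\n";  // "<1>": the sparse index applies
    std::cout << encodeIndexability({"a", true}) << "\n";   // "<0>": {a: null} is not covered
    return 0;
}

The flat vector cannot say which index a given bit belongs to; the per-index grouping removed by this revert had been introduced so that each index's collation discriminator could be combined with its other discriminators.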
diff --git a/src/mongo/db/query/plan_cache_indexability.h b/src/mongo/db/query/plan_cache_indexability.h
index 3dd4e83ec3e..03278b06929 100644
--- a/src/mongo/db/query/plan_cache_indexability.h
+++ b/src/mongo/db/query/plan_cache_indexability.h
@@ -37,42 +37,11 @@
namespace mongo {
class BSONObj;
-class CollatorInterface;
-class CompositeIndexabilityDiscriminator;
class MatchExpression;
struct IndexEntry;
using IndexabilityDiscriminator = stdx::function<bool(const MatchExpression* me)>;
using IndexabilityDiscriminators = std::vector<IndexabilityDiscriminator>;
-using IndexToDiscriminatorMap = StringMap<CompositeIndexabilityDiscriminator>;
-
-/**
- * CompositeIndexabilityDiscriminator holds all indexability discriminators for a particular path,
- * for a particular index. For example, a path may have both a collation discriminator and a sparse
- * index discriminator for a particular index.
- */
-class CompositeIndexabilityDiscriminator {
-public:
- /**
- * Considers all discriminators for the path-index pair, and returns a single bit indicating
- * whether the index can be used for that path.
- */
- bool isMatchCompatibleWithIndex(const MatchExpression* me) const {
- for (auto&& discriminator : _discriminators) {
- if (!discriminator(me)) {
- return false;
- }
- }
- return true;
- }
-
- void addDiscriminator(IndexabilityDiscriminator discriminator) {
- _discriminators.push_back(std::move(discriminator));
- }
-
-private:
- IndexabilityDiscriminators _discriminators;
-};
/**
* PlanCacheIndexabilityState holds a set of "indexability discriminators" for certain paths.
@@ -86,13 +55,13 @@ public:
PlanCacheIndexabilityState() = default;
/**
- * Returns a map from index name to discriminator for each index associated with 'path'.
- * Returns an empty set if no discriminators are registered for 'path'.
+ * Gets the set of discriminators associated with 'path'. Returns an empty set if no
+ * discriminators are registered for 'path'.
*
* The object returned by reference is valid until the next call to updateDiscriminators()
* or until destruction of 'this', whichever is first.
*/
- const IndexToDiscriminatorMap& getDiscriminators(StringData path) const;
+ const IndexabilityDiscriminators& getDiscriminators(StringData path) const;
/**
* Clears discriminators for all paths, and regenerate them from 'indexEntries'.
@@ -110,11 +79,11 @@ private:
* element of the key pattern. The former predicate is compatibile with this index, but the
* latter is not compatible.
*/
- void processSparseIndex(const std::string& indexName, const BSONObj& keyPattern);
+ void processSparseIndex(const BSONObj& keyPattern);
/**
* Adds partial index discriminators for the partial index with the given filter expression
- * to the discriminators for that index in '_pathDiscriminatorsMap'.
+ * to '_pathDiscriminatorsMap'.
*
* A partial index discriminator distinguishes expressions that match a given partial index
* predicate from expressions that don't match the partial index predicate. For example,
@@ -122,23 +91,9 @@ private:
* predicate {a: {$gt: -5}}, if there is a partial index defined with document filter {a:
* {$gt: 0}}. The former is compatible with this index, but the latter is not compatible.
*/
- void processPartialIndex(const std::string& indexName, const MatchExpression* filterExpr);
-
- /**
- * Adds collation discriminators for the index with the given key pattern and collator to
- * '_pathDiscriminatorsMap'.
- *
- * The discriminator for a given path returns true if the index collator matches the query
- * collator, or if the query does not contain string comparison at that path.
- */
- void processIndexCollation(const std::string& indexName,
- const BSONObj& keyPattern,
- const CollatorInterface* collator);
+ void processPartialIndex(const MatchExpression* filterExpr);
- /**
- * PathDiscriminatorsMap is a map from field path to index name to IndexabilityDiscriminator.
- */
- using PathDiscriminatorsMap = StringMap<IndexToDiscriminatorMap>;
+ using PathDiscriminatorsMap = StringMap<IndexabilityDiscriminators>;
PathDiscriminatorsMap _pathDiscriminatorsMap;
};
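
The partial-index example kept in the comment above ({a: {$gt: 5}} is compatible with a partial index filtered on {a: {$gt: 0}}, while {a: {$gt: -5}} is not) reduces to a subset test: the query predicate must imply the index's filter expression. A toy version of that test, restricted to single-field $gt bounds and using an assumed helper name (the real check is expression::isSubsetOf over arbitrary MatchExpressions):

#include <iostream>

// {field: {$gt: bound}} reduced to just its numeric bound.
struct GtPredicate {
    double bound;
};

// True when every document matching 'query' also matches 'indexFilter',
// i.e. the query's lower bound is at least as restrictive as the filter's.
bool isSubsetOf(const GtPredicate& query, const GtPredicate& indexFilter) {
    return query.bound >= indexFilter.bound;
}

int main() {
    const GtPredicate indexFilter{0};                    // partial index with filter {a: {$gt: 0}}
    std::cout << isSubsetOf({5}, indexFilter) << "\n";   // 1: {a: {$gt: 5}} can use the index
    std::cout << isSubsetOf({-5}, indexFilter) << "\n";  // 0: {a: {$gt: -5}} cannot
    return 0;
}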
diff --git a/src/mongo/db/query/plan_cache_indexability_test.cpp b/src/mongo/db/query/plan_cache_indexability_test.cpp
index 3cc90e3429a..e5db935d3a3 100644
--- a/src/mongo/db/query/plan_cache_indexability_test.cpp
+++ b/src/mongo/db/query/plan_cache_indexability_test.cpp
@@ -28,10 +28,8 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
-#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/db/query/index_entry.h"
#include "mongo/db/query/plan_cache_indexability.h"
#include "mongo/unittest/unittest.h"
@@ -39,8 +37,8 @@
namespace mongo {
namespace {
-std::unique_ptr<MatchExpression> parseMatchExpression(const BSONObj& obj,
- const CollatorInterface* collator = nullptr) {
+std::unique_ptr<MatchExpression> parseMatchExpression(const BSONObj& obj) {
+ const CollatorInterface* collator = nullptr;
StatusWithMatchExpression status =
MatchExpressionParser::parse(obj, ExtensionsCallbackDisallowExtensions(), collator);
if (!status.isOK()) {
@@ -57,61 +55,48 @@ TEST(PlanCacheIndexabilityTest, SparseIndexSimple) {
false, // multikey
true, // sparse
false, // unique
- "a_1", // name
+ "", // name
nullptr, // filterExpr
BSONObj())});
- auto discriminators = state.getDiscriminators("a");
+ const IndexabilityDiscriminators& discriminators = state.getDiscriminators("a");
ASSERT_EQ(1U, discriminators.size());
- ASSERT(discriminators.find("a_1") != discriminators.end());
- auto disc = discriminators["a_1"];
- ASSERT_EQ(true, disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << 1)).get()));
+ const IndexabilityDiscriminator& disc = discriminators[0];
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("a" << 1)).get()));
+ ASSERT_EQ(false, disc(parseMatchExpression(BSON("a" << BSONNULL)).get()));
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("a" << BSON("$in" << BSON_ARRAY(1)))).get()));
ASSERT_EQ(false,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << BSONNULL)).get()));
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(BSON("a" << BSON("$in" << BSON_ARRAY(1)))).get()));
- ASSERT_EQ(false,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(BSON("a" << BSON("$in" << BSON_ARRAY(BSONNULL)))).get()));
+ disc(parseMatchExpression(BSON("a" << BSON("$in" << BSON_ARRAY(BSONNULL)))).get()));
}
// Test sparse index discriminators for a compound sparse index.
TEST(PlanCacheIndexabilityTest, SparseIndexCompound) {
PlanCacheIndexabilityState state;
state.updateDiscriminators({IndexEntry(BSON("a" << 1 << "b" << 1),
- false, // multikey
- true, // sparse
- false, // unique
- "a_1_b_1", // name
- nullptr, // filterExpr
+ false, // multikey
+ true, // sparse
+ false, // unique
+ "", // name
+ nullptr, // filterExpr
BSONObj())});
{
- auto discriminators = state.getDiscriminators("a");
+ const IndexabilityDiscriminators& discriminators = state.getDiscriminators("a");
ASSERT_EQ(1U, discriminators.size());
- ASSERT(discriminators.find("a_1_b_1") != discriminators.end());
-
- auto disc = discriminators["a_1_b_1"];
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << 1)).get()));
- ASSERT_EQ(
- false,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << BSONNULL)).get()));
+
+ const IndexabilityDiscriminator& disc = discriminators[0];
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("a" << 1)).get()));
+ ASSERT_EQ(false, disc(parseMatchExpression(BSON("a" << BSONNULL)).get()));
}
{
- auto discriminators = state.getDiscriminators("b");
+ const IndexabilityDiscriminators& discriminators = state.getDiscriminators("b");
ASSERT_EQ(1U, discriminators.size());
- ASSERT(discriminators.find("a_1_b_1") != discriminators.end());
-
- auto disc = discriminators["a_1_b_1"];
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("b" << 1)).get()));
- ASSERT_EQ(
- false,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("b" << BSONNULL)).get()));
+
+ const IndexabilityDiscriminator& disc = discriminators[0];
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("b" << 1)).get()));
+ ASSERT_EQ(false, disc(parseMatchExpression(BSON("b" << BSONNULL)).get()));
}
}
@@ -124,37 +109,18 @@ TEST(PlanCacheIndexabilityTest, PartialIndexSimple) {
false, // multikey
false, // sparse
false, // unique
- "a_1", // name
+ "", // name
filterExpr.get(),
BSONObj())});
- {
- auto discriminators = state.getDiscriminators("f");
- ASSERT_EQ(1U, discriminators.size());
- ASSERT(discriminators.find("a_1") != discriminators.end());
-
- auto disc = discriminators["a_1"];
- ASSERT_EQ(false,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(BSON("f" << BSON("$gt" << -5))).get()));
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(BSON("f" << BSON("$gt" << 5))).get()));
- }
+ const IndexabilityDiscriminators& discriminators = state.getDiscriminators("f");
+ ASSERT_EQ(1U, discriminators.size());
- {
- auto discriminators = state.getDiscriminators("a");
- ASSERT_EQ(1U, discriminators.size());
- ASSERT(discriminators.find("a_1") != discriminators.end());
-
- auto disc = discriminators["a_1"];
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(BSON("a" << BSON("$gt" << -5))).get()));
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(BSON("a" << BSON("$gt" << -5))).get()));
- }
+ const IndexabilityDiscriminator& disc = discriminators[0];
+ ASSERT_EQ(false, disc(parseMatchExpression(BSON("f" << BSON("$gt" << -5))).get()));
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("f" << BSON("$gt" << 5))).get()));
+
+ ASSERT(state.getDiscriminators("a").empty());
}
// Test partial index discriminators for an index where the filter expression is an AND.
@@ -166,45 +132,29 @@ TEST(PlanCacheIndexabilityTest, PartialIndexAnd) {
false, // multikey
false, // sparse
false, // unique
- "a_1", // name
+ "", // name
filterExpr.get(),
BSONObj())});
{
- auto discriminators = state.getDiscriminators("f");
+ const IndexabilityDiscriminators& discriminators = state.getDiscriminators("f");
ASSERT_EQ(1U, discriminators.size());
- ASSERT(discriminators.find("a_1") != discriminators.end());
- auto disc = discriminators["a_1"];
- ASSERT_EQ(false,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("f" << 0)).get()));
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("f" << 1)).get()));
+ const IndexabilityDiscriminator& disc = discriminators[0];
+ ASSERT_EQ(false, disc(parseMatchExpression(BSON("f" << 0)).get()));
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("f" << 1)).get()));
}
{
- auto discriminators = state.getDiscriminators("g");
+ const IndexabilityDiscriminators& discriminators = state.getDiscriminators("g");
ASSERT_EQ(1U, discriminators.size());
- ASSERT(discriminators.find("a_1") != discriminators.end());
- auto disc = discriminators["a_1"];
- ASSERT_EQ(false,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("g" << 0)).get()));
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("g" << 1)).get()));
+ const IndexabilityDiscriminator& disc = discriminators[0];
+ ASSERT_EQ(false, disc(parseMatchExpression(BSON("g" << 0)).get()));
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("g" << 1)).get()));
}
- {
- auto discriminators = state.getDiscriminators("a");
- ASSERT_EQ(1U, discriminators.size());
- ASSERT(discriminators.find("a_1") != discriminators.end());
-
- auto disc = discriminators["a_1"];
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << 0)).get()));
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << 1)).get()));
- }
+ ASSERT(state.getDiscriminators("a").empty());
}
// Test partial index discriminators where there are multiple partial indexes.
@@ -220,169 +170,47 @@ TEST(PlanCacheIndexabilityTest, MultiplePartialIndexes) {
false, // multikey
false, // sparse
false, // unique
- "a_1", // name
+ "", // name
filterExpr1.get(),
BSONObj()),
IndexEntry(BSON("b" << 1),
false, // multikey
false, // sparse
false, // unique
- "b_1", // name
+ "", // name
filterExpr2.get(),
BSONObj())});
- {
- auto discriminators = state.getDiscriminators("f");
- ASSERT_EQ(2U, discriminators.size());
- ASSERT(discriminators.find("a_1") != discriminators.end());
- ASSERT(discriminators.find("b_1") != discriminators.end());
-
- auto discA = discriminators["a_1"];
- auto discB = discriminators["b_1"];
-
- ASSERT_EQ(false,
- discA.isMatchCompatibleWithIndex(parseMatchExpression(BSON("f" << 0)).get()));
- ASSERT_EQ(false,
- discB.isMatchCompatibleWithIndex(parseMatchExpression(BSON("f" << 0)).get()));
-
- ASSERT_EQ(true,
- discA.isMatchCompatibleWithIndex(parseMatchExpression(BSON("f" << 1)).get()));
- ASSERT_EQ(false,
- discB.isMatchCompatibleWithIndex(parseMatchExpression(BSON("f" << 1)).get()));
-
- ASSERT_EQ(false,
- discA.isMatchCompatibleWithIndex(parseMatchExpression(BSON("f" << 2)).get()));
- ASSERT_EQ(true,
- discB.isMatchCompatibleWithIndex(parseMatchExpression(BSON("f" << 2)).get()));
- }
+ const IndexabilityDiscriminators& discriminators = state.getDiscriminators("f");
+ ASSERT_EQ(2U, discriminators.size());
- {
- auto discriminators = state.getDiscriminators("a");
- ASSERT_EQ(1U, discriminators.size());
- ASSERT(discriminators.find("a_1") != discriminators.end());
-
- auto disc = discriminators["a_1"];
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << 0)).get()));
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << 1)).get()));
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << 2)).get()));
- }
+ const IndexabilityDiscriminator& disc1 = discriminators[0];
+ const IndexabilityDiscriminator& disc2 = discriminators[1];
- {
- auto discriminators = state.getDiscriminators("b");
- ASSERT_EQ(1U, discriminators.size());
- ASSERT(discriminators.find("b_1") != discriminators.end());
-
- auto disc = discriminators["b_1"];
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("b" << 0)).get()));
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("b" << 1)).get()));
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("b" << 2)).get()));
- }
+ ASSERT_EQ(false, disc1(parseMatchExpression(BSON("f" << 0)).get()));
+ ASSERT_EQ(false, disc1(parseMatchExpression(BSON("f" << 0)).get()));
+
+ ASSERT_NOT_EQUALS(disc1(parseMatchExpression(BSON("f" << 1)).get()),
+ disc2(parseMatchExpression(BSON("f" << 1)).get()));
+
+ ASSERT_NOT_EQUALS(disc1(parseMatchExpression(BSON("f" << 2)).get()),
+ disc2(parseMatchExpression(BSON("f" << 2)).get()));
+
+ ASSERT(state.getDiscriminators("a").empty());
+ ASSERT(state.getDiscriminators("b").empty());
}
-// Test that a discriminator is generated for a regular index (this discriminator will only encode
-// collation indexability).
+// Test that no discriminators are generated for a regular index.
TEST(PlanCacheIndexabilityTest, IndexNeitherSparseNorPartial) {
PlanCacheIndexabilityState state;
state.updateDiscriminators({IndexEntry(BSON("a" << 1),
false, // multikey
false, // sparse
false, // unique
- "a_1", // name
- nullptr,
- BSONObj())});
- auto discriminators = state.getDiscriminators("a");
- ASSERT_EQ(1U, discriminators.size());
- ASSERT(discriminators.find("a_1") != discriminators.end());
-}
-
-// Test discriminator for a simple index with a collation.
-TEST(PlanCacheIndexabilityTest, DiscriminatorForCollationIndicatesWhenCollationsAreCompatible) {
- PlanCacheIndexabilityState state;
- IndexEntry entry(BSON("a" << 1),
- false, // multikey
- false, // sparse
- false, // unique
- "a_1", // name
- nullptr, // filterExpr
- BSONObj());
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
- entry.collator = &collator;
- state.updateDiscriminators({entry});
-
- auto discriminators = state.getDiscriminators("a");
- ASSERT_EQ(1U, discriminators.size());
- ASSERT(discriminators.find("a_1") != discriminators.end());
-
- auto disc = discriminators["a_1"];
-
- // Index collator matches query collator.
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: 'abc'}"), &collator).get()));
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: {$in: ['abc', 'xyz']}}"), &collator).get()));
-
- // Expression is not a ComparisonMatchExpression or InMatchExpression.
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: {$exists: true}}"), nullptr).get()));
-
- // Expression is a ComparisonMatchExpression with non-matching collator.
- ASSERT_EQ(
- true,
- disc.isMatchCompatibleWithIndex(parseMatchExpression(fromjson("{a: 5}"), nullptr).get()));
- ASSERT_EQ(false,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: 'abc'}"), nullptr).get()));
- ASSERT_EQ(false,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: {b: 'abc'}}"), nullptr).get()));
- ASSERT_EQ(false,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: ['abc', 'xyz']}"), nullptr).get()));
-
- // Expression is an InMatchExpression with non-matching collator.
- ASSERT_EQ(true,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: {$in: [1, 2]}}"), nullptr).get()));
- ASSERT_EQ(false,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: {$in: [1, 'abc', 2]}}"), nullptr).get()));
- ASSERT_EQ(false,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: {$in: [1, {b: 'abc'}, 2]}}"), nullptr).get()));
- ASSERT_EQ(
- false,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: {$in: [1, ['abc', 'xyz'], 2]}}"), nullptr).get()));
-}
-
-// Test that a discriminator is produced for each field in a compound index (this discriminator will
-// only encode collation indexability).
-TEST(PlanCacheIndexabilityTest, CompoundIndexCollationDiscriminator) {
- PlanCacheIndexabilityState state;
- state.updateDiscriminators({IndexEntry(BSON("a" << 1 << "b" << 1),
- false, // multikey
- false, // sparse
- false, // unique
- "a_1_b_1", // name
+ "", // name
nullptr,
BSONObj())});
-
- auto discriminatorsA = state.getDiscriminators("a");
- ASSERT_EQ(1U, discriminatorsA.size());
- ASSERT(discriminatorsA.find("a_1_b_1") != discriminatorsA.end());
-
- auto discriminatorsB = state.getDiscriminators("b");
- ASSERT_EQ(1U, discriminatorsB.size());
- ASSERT(discriminatorsB.find("a_1_b_1") != discriminatorsB.end());
+ ASSERT(state.getDiscriminators("a").empty());
}
} // namespace
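
The restored MultiplePartialIndexes test above treats the result of getDiscriminators() as an ordered vector of callables rather than a map keyed by index name. The following is a minimal, standalone sketch of that shape with simplified stand-in types of my own (the real discriminators take a MatchExpression*, not an int, and live in PlanCacheIndexabilityState); it is an illustration, not MongoDB's implementation.

#include <cassert>
#include <functional>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Sketch only: a discriminator answers "could this predicate use the index?".
// An int stands in for the predicate that the real code passes as MatchExpression*.
using IndexabilityDiscriminator = std::function<bool(int)>;
using IndexabilityDiscriminators = std::vector<IndexabilityDiscriminator>;

class PlanCacheIndexabilityStateSketch {
public:
    // Register one discriminator per partial index whose filter mentions 'path'.
    void addDiscriminator(const std::string& path, IndexabilityDiscriminator disc) {
        _map[path].push_back(std::move(disc));
    }

    // Post-revert shape: an unnamed, ordered list of discriminators for the path.
    const IndexabilityDiscriminators& getDiscriminators(const std::string& path) const {
        static const IndexabilityDiscriminators kEmpty;
        auto it = _map.find(path);
        return it == _map.end() ? kEmpty : it->second;
    }

private:
    std::map<std::string, IndexabilityDiscriminators> _map;
};

int main() {
    PlanCacheIndexabilityStateSketch state;
    // Stand-ins for the {f: 1} and {f: 2} partial-index filters in the test above.
    state.addDiscriminator("f", [](int v) { return v == 1; });
    state.addDiscriminator("f", [](int v) { return v == 2; });

    const auto& discs = state.getDiscriminators("f");
    assert(discs.size() == 2U);
    assert(!discs[0](0) && !discs[1](0));  // neither index admits f == 0
    assert(discs[0](1) != discs[1](1));    // exactly one admits f == 1
    return 0;
}
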
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 86dd471d568..23eef65734b 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -39,7 +39,6 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
-#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/db/query/plan_ranker.h"
#include "mongo/db/query/query_knobs.h"
#include "mongo/db/query/query_planner.h"
@@ -52,8 +51,6 @@
using namespace mongo;
-using unittest::assertGet;
-
namespace {
using std::string;
@@ -84,8 +81,7 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr) {
unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
const char* sortStr,
- const char* projStr,
- const char* collationStr) {
+ const char* projStr) {
QueryTestServiceContext serviceContext;
auto txn = serviceContext.makeOperationContext();
@@ -93,7 +89,6 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
qr->setFilter(fromjson(queryStr));
qr->setSort(fromjson(sortStr));
qr->setProj(fromjson(projStr));
- qr->setCollation(fromjson(collationStr));
auto statusWithCQ = CanonicalQuery::canonicalize(
txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
@@ -273,7 +268,7 @@ TEST(PlanCacheTest, ShouldCacheQueryBasic) {
}
TEST(PlanCacheTest, ShouldCacheQuerySort) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{}", "{a: -1}", "{_id: 0, a: 1}", "{}"));
+ unique_ptr<CanonicalQuery> cq(canonicalize("{}", "{a: -1}", "{_id: 0, a: 1}"));
assertShouldCacheQuery(*cq);
}
@@ -472,12 +467,6 @@ protected:
keyPattern, multikey, sparse, false, "note_to_self_dont_break_build", NULL, BSONObj()));
}
- void addIndex(BSONObj keyPattern, CollatorInterface* collator) {
- IndexEntry entry(keyPattern, false, false, false, "index_with_collation", NULL, BSONObj());
- entry.collator = collator;
- params.indices.push_back(entry);
- }
-
//
// Execute planner.
//
@@ -542,7 +531,7 @@ protected:
QueryTestServiceContext serviceContext;
auto txn = serviceContext.makeOperationContext();
- // Clean up any previous state from a call to runQueryFull or runQueryAsCommand.
+        // Clean up any previous state from a call to runQueryFull.
for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
delete *it;
}
@@ -570,28 +559,6 @@ protected:
ASSERT_OK(s);
}
- void runQueryAsCommand(const BSONObj& cmdObj) {
- QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
-
- // Clean up any previous state from a call to runQueryFull or runQueryAsCommand.
- for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
- delete *it;
- }
-
- solns.clear();
-
- const bool isExplain = false;
- std::unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
- ASSERT_OK(statusWithCQ.getStatus());
- Status s = QueryPlanner::plan(*statusWithCQ.getValue(), params, &solns);
- ASSERT_OK(s);
- }
-
//
// Solution introspection.
//
@@ -643,20 +610,19 @@ protected:
* Does not take ownership of 'soln'.
*/
QuerySolution* planQueryFromCache(const BSONObj& query, const QuerySolution& soln) const {
- return planQueryFromCache(query, BSONObj(), BSONObj(), BSONObj(), soln);
+ return planQueryFromCache(query, BSONObj(), BSONObj(), soln);
}
/**
- * Plan 'query' from the cache with sort order 'sort', projection 'proj', and collation
- * 'collation'. A mock cache entry is created using the cacheData stored inside the
- * QuerySolution 'soln'.
+ * Plan 'query' from the cache with sort order 'sort' and
+ * projection 'proj'. A mock cache entry is created using
+ * the cacheData stored inside the QuerySolution 'soln'.
*
* Does not take ownership of 'soln'.
*/
QuerySolution* planQueryFromCache(const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj,
- const BSONObj& collation,
const QuerySolution& soln) const {
QueryTestServiceContext serviceContext;
auto txn = serviceContext.makeOperationContext();
@@ -665,7 +631,6 @@ protected:
qr->setFilter(query);
qr->setSort(sort);
qr->setProj(proj);
- qr->setCollation(collation);
auto statusWithCQ = CanonicalQuery::canonicalize(
txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
@@ -731,7 +696,7 @@ protected:
     * Overloaded so that it is not necessary to specify sort and project.
*/
void assertPlanCacheRecoversSolution(const BSONObj& query, const string& solnJson) {
- assertPlanCacheRecoversSolution(query, BSONObj(), BSONObj(), BSONObj(), solnJson);
+ assertPlanCacheRecoversSolution(query, BSONObj(), BSONObj(), solnJson);
}
/**
@@ -742,16 +707,15 @@ protected:
*
* Must be called after calling one of the runQuery* methods.
*
- * Together, 'query', 'sort', 'proj', and 'collation' should specify the query which was
- * previously run using one of the runQuery* methods.
+ * Together, 'query', 'sort', and 'proj' should specify the query which
+ * was previously run using one of the runQuery* methods.
*/
void assertPlanCacheRecoversSolution(const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj,
- const BSONObj& collation,
const string& solnJson) {
QuerySolution* bestSoln = firstMatchingSolution(solnJson);
- QuerySolution* planSoln = planQueryFromCache(query, sort, proj, collation, *bestSoln);
+ QuerySolution* planSoln = planQueryFromCache(query, sort, proj, *bestSoln);
assertSolutionMatches(planSoln, solnJson);
delete planSoln;
}
@@ -941,7 +905,6 @@ TEST_F(CachePlanSelectionTest, MergeSort) {
query,
sort,
BSONObj(),
- BSONObj(),
"{fetch: {node: {mergeSort: {nodes: "
"[{ixscan: {pattern: {a: 1, c: 1}}}, {ixscan: {pattern: {b: 1, c: 1}}}]}}}}");
}
@@ -957,7 +920,6 @@ TEST_F(CachePlanSelectionTest, NoMergeSortIfNoSortWanted) {
assertPlanCacheRecoversSolution(query,
BSONObj(),
BSONObj(),
- BSONObj(),
"{fetch: {filter: null, node: {or: {nodes: ["
"{ixscan: {filter: null, pattern: {a: 1, c: 1}}}, "
"{ixscan: {filter: null, pattern: {b: 1, c: 1}}}]}}}}");
@@ -990,7 +952,6 @@ TEST_F(CachePlanSelectionTest, CompoundGeoNoGeoPredicate) {
query,
sort,
BSONObj(),
- BSONObj(),
"{fetch: {node: {ixscan: {pattern: {creationDate: 1, 'foo.bar': '2dsphere'}}}}}");
}
@@ -1001,7 +962,6 @@ TEST_F(CachePlanSelectionTest, ReverseScanForSort) {
BSONObj(),
fromjson("{_id: -1}"),
BSONObj(),
- BSONObj(),
"{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {_id: 1}}}}}");
}
@@ -1034,7 +994,6 @@ TEST_F(CachePlanSelectionTest, CollscanMergeSort) {
assertPlanCacheRecoversSolution(query,
sort,
BSONObj(),
- BSONObj(),
"{sort: {pattern: {c: 1}, limit: 0, node: {sortKeyGen: "
"{node: {cscan: {dir: 1}}}}}}");
}
@@ -1230,25 +1189,6 @@ TEST_F(CachePlanSelectionTest, Or2DNonNearNotCached) {
"{fetch: {node: {ixscan: {pattern: {b: '2d'}}}}}]}}");
}
-//
-// Collation.
-//
-
-TEST_F(CachePlanSelectionTest, MatchingCollation) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
- addIndex(BSON("x" << 1), &collator);
- runQueryAsCommand(fromjson(
- "{find: 'testns', filter: {x: 'foo'}, collation: {locale: 'mock_reverse_string'}}"));
-
- assertPlanCacheRecoversSolution(BSON("x"
- << "bar"),
- BSONObj(),
- BSONObj(),
- BSON("locale"
- << "mock_reverse_string"),
- "{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
-}
-
/**
* Test functions for computeKey. Cache keys are intentionally obfuscated and are
* meaningful only within the current lifetime of the server process. Users should treat plan
@@ -1259,8 +1199,7 @@ void testComputeKey(const char* queryStr,
const char* projStr,
const char* expectedStr) {
PlanCache planCache;
- const char* collationStr = "{}";
- unique_ptr<CanonicalQuery> cq(canonicalize(queryStr, sortStr, projStr, collationStr));
+ unique_ptr<CanonicalQuery> cq(canonicalize(queryStr, sortStr, projStr));
PlanCacheKey key = planCache.computeKey(*cq);
PlanCacheKey expectedKey(expectedStr);
if (key == expectedKey) {
@@ -1415,59 +1354,4 @@ TEST(PlanCacheTest, ComputeKeyPartialIndex) {
ASSERT_NOT_EQUALS(planCache.computeKey(*cqGtNegativeFive), planCache.computeKey(*cqGtZero));
}
-// Query shapes should get the same plan cache key if they have the same collation indexability.
-TEST(PlanCacheTest, ComputeKeyCollationIndex) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
-
- PlanCache planCache;
- IndexEntry entry(BSON("a" << 1),
- false, // multikey
- false, // sparse
- false, // unique
- "", // name
- nullptr, // filterExpr
- BSONObj());
- entry.collator = &collator;
- planCache.notifyOfIndexEntries({entry});
-
- unique_ptr<CanonicalQuery> containsString(canonicalize("{a: 'abc'}"));
- unique_ptr<CanonicalQuery> containsObject(canonicalize("{a: {b: 'abc'}}"));
- unique_ptr<CanonicalQuery> containsArray(canonicalize("{a: ['abc', 'xyz']}"));
- unique_ptr<CanonicalQuery> noStrings(canonicalize("{a: 5}"));
- unique_ptr<CanonicalQuery> containsStringHasCollation(
- canonicalize("{a: 'abc'}", "{}", "{}", "{locale: 'mock_reverse_string'}"));
-
- // 'containsString', 'containsObject', and 'containsArray' have the same key, since none are
- // compatible with the index.
- ASSERT_EQ(planCache.computeKey(*containsString), planCache.computeKey(*containsObject));
- ASSERT_EQ(planCache.computeKey(*containsString), planCache.computeKey(*containsArray));
-
- // 'noStrings' gets a different key since it is compatible with the index.
- ASSERT_NOT_EQUALS(planCache.computeKey(*containsString), planCache.computeKey(*noStrings));
-
- // 'noStrings' and 'containsStringHasCollation' get the same key since they compatible with the
- // index.
- ASSERT_EQ(planCache.computeKey(*noStrings), planCache.computeKey(*containsStringHasCollation));
-
- unique_ptr<CanonicalQuery> inContainsString(canonicalize("{a: {$in: [1, 'abc', 2]}}"));
- unique_ptr<CanonicalQuery> inContainsObject(canonicalize("{a: {$in: [1, {b: 'abc'}, 2]}}"));
- unique_ptr<CanonicalQuery> inContainsArray(canonicalize("{a: {$in: [1, ['abc', 'xyz'], 2]}}"));
- unique_ptr<CanonicalQuery> inNoStrings(canonicalize("{a: {$in: [1, 2]}}"));
- unique_ptr<CanonicalQuery> inContainsStringHasCollation(
- canonicalize("{a: {$in: [1, 'abc', 2]}}", "{}", "{}", "{locale: 'mock_reverse_string'}"));
-
- // 'inContainsString', 'inContainsObject', and 'inContainsArray' have the same key, since none
- // are compatible with the index.
- ASSERT_EQ(planCache.computeKey(*inContainsString), planCache.computeKey(*inContainsObject));
- ASSERT_EQ(planCache.computeKey(*inContainsString), planCache.computeKey(*inContainsArray));
-
- // 'inNoStrings' gets a different key since it is compatible with the index.
- ASSERT_NOT_EQUALS(planCache.computeKey(*inContainsString), planCache.computeKey(*inNoStrings));
-
- // 'inNoStrings' and 'inContainsStringHasCollation' get the same key since they compatible with
- // the index.
- ASSERT_EQ(planCache.computeKey(*inNoStrings),
- planCache.computeKey(*inContainsStringHasCollation));
-}
-
} // namespace
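
The computeKey() tests above only ever compare keys for equality or inequality, because, as the restored comment notes, the keys themselves are obfuscated and meaningful only within one server process. Below is a toy, self-contained illustration of the underlying idea (an encoding of my own, not MongoDB's actual key format): constants are reduced to their types, so filters that differ only in constants share a shape and therefore a key.

#include <cassert>
#include <map>
#include <sstream>
#include <string>

// Toy "query shape" key: field names and operators are kept, constants are
// reduced to a type tag. This mimics the intent of PlanCache::computeKey,
// not its real encoding.
std::string shapeKey(const std::map<std::string, int>& equalityFilter,
                     const std::string& sortField) {
    std::ostringstream out;
    for (const auto& kv : equalityFilter) {
        out << "eq_" << kv.first << "_int|";  // value dropped, only its type kept
    }
    out << "sort_" << sortField;
    return out.str();
}

int main() {
    // {a: 1} and {a: 5} differ only in the constant, so they share a shape.
    assert(shapeKey({{"a", 1}}, "") == shapeKey({{"a", 5}}, ""));
    // A different field or an added sort changes the shape, hence the key.
    assert(shapeKey({{"b", 1}}, "") != shapeKey({{"a", 1}}, ""));
    assert(shapeKey({{"a", 1}}, "a") != shapeKey({{"a", 1}}, ""));
    return 0;
}
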
diff --git a/src/mongo/db/query/query_settings.cpp b/src/mongo/db/query/query_settings.cpp
index f2c192b2ab9..385a4eec4c8 100644
--- a/src/mongo/db/query/query_settings.cpp
+++ b/src/mongo/db/query/query_settings.cpp
@@ -56,12 +56,8 @@ AllowedIndices::~AllowedIndices() {}
AllowedIndexEntry::AllowedIndexEntry(const BSONObj& query,
const BSONObj& sort,
const BSONObj& projection,
- const BSONObj& collation,
const std::vector<BSONObj>& indexKeyPatterns)
- : query(query.getOwned()),
- sort(sort.getOwned()),
- projection(projection.getOwned()),
- collation(collation.getOwned()) {
+ : query(query.getOwned()), sort(sort.getOwned()), projection(projection.getOwned()) {
for (std::vector<BSONObj>::const_iterator i = indexKeyPatterns.begin();
i != indexKeyPatterns.end();
++i) {
@@ -73,8 +69,7 @@ AllowedIndexEntry::AllowedIndexEntry(const BSONObj& query,
AllowedIndexEntry::~AllowedIndexEntry() {}
AllowedIndexEntry* AllowedIndexEntry::clone() const {
- AllowedIndexEntry* entry =
- new AllowedIndexEntry(query, sort, projection, collation, indexKeyPatterns);
+ AllowedIndexEntry* entry = new AllowedIndexEntry(query, sort, projection, indexKeyPatterns);
return entry;
}
@@ -128,9 +123,7 @@ void QuerySettings::setAllowedIndices(const CanonicalQuery& canonicalQuery,
const BSONObj& query = qr.getFilter();
const BSONObj& sort = qr.getSort();
const BSONObj& projection = qr.getProj();
- const BSONObj collation =
- canonicalQuery.getCollator() ? canonicalQuery.getCollator()->getSpec().toBSON() : BSONObj();
- AllowedIndexEntry* entry = new AllowedIndexEntry(query, sort, projection, collation, indexes);
+ AllowedIndexEntry* entry = new AllowedIndexEntry(query, sort, projection, indexes);
stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
AllowedIndexEntryMap::iterator i = _allowedIndexEntryMap.find(key);
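
With the revert applied, setAllowedIndices() stores each index filter against a shape made up of query, sort and projection only. A simplified standalone sketch of that lookup pattern follows, using plain strings in place of BSONObj and of the real canonical-query key encoding; the type names are placeholders, not MongoDB's.

#include <cassert>
#include <map>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

// Sketch only: the post-revert query shape is (query, sort, projection).
using QueryShape = std::tuple<std::string, std::string, std::string>;

struct AllowedIndexEntrySketch {
    QueryShape shape;
    std::vector<std::string> indexKeyPatterns;  // e.g. "{a: 1}"
};

class QuerySettingsSketch {
public:
    void setAllowedIndices(const QueryShape& shape, std::vector<std::string> indexes) {
        _entries[shape] = AllowedIndexEntrySketch{shape, std::move(indexes)};
    }

    // Returns nullptr when no index filter is set for this shape.
    const AllowedIndexEntrySketch* getAllowedIndices(const QueryShape& shape) const {
        auto it = _entries.find(shape);
        return it == _entries.end() ? nullptr : &it->second;
    }

private:
    std::map<QueryShape, AllowedIndexEntrySketch> _entries;
};

int main() {
    QuerySettingsSketch settings;
    settings.setAllowedIndices({"{a: 1}", "{}", "{}"}, {"{a: 1}"});

    // The same (query, sort, projection) shape finds the filter; a different sort does not.
    assert(settings.getAllowedIndices({"{a: 1}", "{}", "{}"}) != nullptr);
    assert(settings.getAllowedIndices({"{a: 1}", "{b: -1}", "{}"}) == nullptr);
    return 0;
}
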
diff --git a/src/mongo/db/query/query_settings.h b/src/mongo/db/query/query_settings.h
index dbf3f1a19bc..e1125320471 100644
--- a/src/mongo/db/query/query_settings.h
+++ b/src/mongo/db/query/query_settings.h
@@ -61,7 +61,7 @@ public:
/**
* Value type for query settings.
* Holds:
- * query shape (query, sort, projection, collation)
+ * query shape (query, sort, projection)
* vector of index specs
*/
class AllowedIndexEntry {
@@ -72,17 +72,15 @@ public:
AllowedIndexEntry(const BSONObj& query,
const BSONObj& sort,
const BSONObj& projection,
- const BSONObj& collation,
const std::vector<BSONObj>& indexKeyPatterns);
~AllowedIndexEntry();
AllowedIndexEntry* clone() const;
- // query, sort, projection, and collation collectively represent the query shape that we are
- // storing hint overrides for.
+    // query, sort and projection collectively
+    // represent the query shape that we are storing hint overrides for.
BSONObj query;
BSONObj sort;
BSONObj projection;
- BSONObj collation;
// These are the index key patterns that
// we will use to override the indexes retrieved from
diff --git a/src/mongo/shell/collection.js b/src/mongo/shell/collection.js
index 022b152a705..7d7c79871c9 100644
--- a/src/mongo/shell/collection.js
+++ b/src/mongo/shell/collection.js
@@ -1797,11 +1797,9 @@ PlanCache.prototype.help = function() {
"displays all query shapes in a collection");
print("\tdb." + shortName + ".getPlanCache().clear() - " +
"drops all cached queries in a collection");
- print("\tdb." + shortName +
- ".getPlanCache().clearPlansByQuery(query[, projection, sort, collation]) - " +
+ print("\tdb." + shortName + ".getPlanCache().clearPlansByQuery(query[, projection, sort]) - " +
"drops query shape from plan cache");
- print("\tdb." + shortName +
- ".getPlanCache().getPlansByQuery(query[, projection, sort, collation]) - " +
+ print("\tdb." + shortName + ".getPlanCache().getPlansByQuery(query[, projection, sort]) - " +
"displays the cached plans for a query shape");
return __magicNoPrint;
};
@@ -1809,30 +1807,23 @@ PlanCache.prototype.help = function() {
/**
* Internal function to parse query shape.
*/
-PlanCache.prototype._parseQueryShape = function(query, projection, sort, collation) {
+PlanCache.prototype._parseQueryShape = function(query, projection, sort) {
if (query == undefined) {
throw new Error("required parameter query missing");
}
// Accept query shape object as only argument.
- // Query shape must contain 'query', 'projection', and 'sort', and may optionally contain
- // 'collation'. 'collation' must be non-empty if present.
- if (typeof(query) == 'object' && projection == undefined && sort == undefined &&
- collation == undefined) {
+ // Query shape contains exactly 3 fields (query, projection and sort)
+ // as generated in the listQueryShapes() result.
+ if (typeof(query) == 'object' && projection == undefined && sort == undefined) {
var keysSorted = Object.keys(query).sort();
// Expected keys must be sorted for the comparison to work.
if (bsonWoCompare(keysSorted, ['projection', 'query', 'sort']) == 0) {
return query;
}
- if (bsonWoCompare(keysSorted, ['collation', 'projection', 'query', 'sort']) == 0) {
- if (Object.keys(query.collation).length === 0) {
- throw new Error("collation object must not be empty");
- }
- return query;
- }
}
- // Extract query shape, projection, sort and collation from DBQuery if it is the first
+ // Extract query shape, projection and sort from DBQuery if it is the first
// argument. If a sort or projection is provided in addition to DBQuery, do not
// overwrite with the DBQuery value.
if (query instanceof DBQuery) {
@@ -1842,14 +1833,10 @@ PlanCache.prototype._parseQueryShape = function(query, projection, sort, collati
if (sort != undefined) {
throw new Error("cannot pass DBQuery with sort");
}
- if (collation != undefined) {
- throw new Error("cannot pass DBQuery with collation");
- }
var queryObj = query._query["query"] || {};
projection = query._fields || {};
sort = query._query["orderby"] || {};
- collation = query._query["collation"] || undefined;
// Overwrite DBQuery with the BSON query.
query = queryObj;
}
@@ -1859,11 +1846,6 @@ PlanCache.prototype._parseQueryShape = function(query, projection, sort, collati
projection: projection == undefined ? {} : projection,
sort: sort == undefined ? {} : sort,
};
-
- if (collation !== undefined) {
- shape.collation = collation;
- }
-
return shape;
};
@@ -1896,18 +1878,17 @@ PlanCache.prototype.clear = function() {
/**
* List plans for a query shape.
*/
-PlanCache.prototype.getPlansByQuery = function(query, projection, sort, collation) {
+PlanCache.prototype.getPlansByQuery = function(query, projection, sort) {
return this
._runCommandThrowOnError("planCacheListPlans",
- this._parseQueryShape(query, projection, sort, collation))
+ this._parseQueryShape(query, projection, sort))
.plans;
};
/**
* Drop query shape from the plan cache.
*/
-PlanCache.prototype.clearPlansByQuery = function(query, projection, sort, collation) {
- this._runCommandThrowOnError("planCacheClear",
- this._parseQueryShape(query, projection, sort, collation));
+PlanCache.prototype.clearPlansByQuery = function(query, projection, sort) {
+ this._runCommandThrowOnError("planCacheClear", this._parseQueryShape(query, projection, sort));
return;
};