author      Scott Hernandez <scotthernandez@gmail.com>    2016-07-07 12:13:07 -0400
committer   Scott Hernandez <scotthernandez@gmail.com>    2016-07-07 12:13:13 -0400
commit      0b797de3ce2eb461e82cac647067dfaaa4d36988 (patch)
tree        27f4e2126c4771fefb5f64f0ca5c2468a4158f02 /src/mongo/db/query
parent      e7cc7ebf50c2883776cb4cc2423d687bcafa35b6 (diff)
download    mongo-0b797de3ce2eb461e82cac647067dfaaa4d36988.tar.gz
Revert "SERVER-23882 Collation should be considered part of a query's shape"
This reverts commit 582818dac41ac01975820c09f79d3b86dc3782cc.
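For context, a condensed sketch of the pre-revert behavior this commit removes, modeled on the ComputeKeyCollationIndex test deleted from plan_cache_test.cpp below. The CollatorInterfaceMock collator and the four-argument canonicalize() test helper exist only in the pre-revert tree, so this illustrates what goes away rather than code that compiles after the revert:

// Pre-revert only: collation indexability fed the plan cache key, so the same filter
// hashed differently depending on whether its collation matched the index collator.
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);

PlanCache planCache;
IndexEntry entry(BSON("a" << 1),
                 false,    // multikey
                 false,    // sparse
                 false,    // unique
                 "",       // name
                 nullptr,  // filterExpr
                 BSONObj());
entry.collator = &collator;
planCache.notifyOfIndexEntries({entry});

// Same filter; only the query collation differs.
unique_ptr<CanonicalQuery> simple(canonicalize("{a: 'abc'}"));
unique_ptr<CanonicalQuery> withCollation(
    canonicalize("{a: 'abc'}", "{}", "{}", "{locale: 'mock_reverse_string'}"));

// Before the revert these keys differ; after it, collation no longer participates
// in the query shape, so both queries map to the same cache entry.
ASSERT_NOT_EQUALS(planCache.computeKey(*simple), planCache.computeKey(*withCollation));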
Diffstat (limited to 'src/mongo/db/query')
-rw-r--r--  src/mongo/db/query/SConscript                        |   1
-rw-r--r--  src/mongo/db/query/canonical_query.cpp               |   7
-rw-r--r--  src/mongo/db/query/plan_cache.cpp                    |  15
-rw-r--r--  src/mongo/db/query/plan_cache.h                      |   2
-rw-r--r--  src/mongo/db/query/plan_cache_indexability.cpp       |  58
-rw-r--r--  src/mongo/db/query/plan_cache_indexability.h         |  59
-rw-r--r--  src/mongo/db/query/plan_cache_indexability_test.cpp  | 296
-rw-r--r--  src/mongo/db/query/plan_cache_test.cpp               | 140
-rw-r--r--  src/mongo/db/query/query_settings.cpp                |  13
-rw-r--r--  src/mongo/db/query/query_settings.h                  |   8
10 files changed, 99 insertions, 500 deletions
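Before the per-file hunks, a condensed sketch of the shape this revert restores, using only identifiers that appear in the plan_cache_indexability.h and plan_cache.cpp hunks below (not a complete listing): each path maps back to a flat vector of discriminator callables, with no per-index CompositeIndexabilityDiscriminator map and no collation discriminator, and the cache-key encoder consumes the callables directly.

// Restored type aliases (plan_cache_indexability.h): one callable per discriminator,
// keyed only by path.
using IndexabilityDiscriminator = stdx::function<bool(const MatchExpression* me)>;
using IndexabilityDiscriminators = std::vector<IndexabilityDiscriminator>;
using PathDiscriminatorsMap = StringMap<IndexabilityDiscriminators>;

// Restored encoding loop (PlanCache::encodeKeyForMatch): each discriminator appends a
// single '0' or '1' to the cache key for its path.
const IndexabilityDiscriminators& discriminators =
    _indexabilityState.getDiscriminators(tree->path());
if (!discriminators.empty()) {
    *keyBuilder << kEncodeDiscriminatorsBegin;
    for (const IndexabilityDiscriminator& discriminator : discriminators) {
        *keyBuilder << discriminator(tree);
    }
    *keyBuilder << kEncodeDiscriminatorsEnd;
}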
diff --git a/src/mongo/db/query/SConscript b/src/mongo/db/query/SConscript index 700021391a7..e186ce6aa29 100644 --- a/src/mongo/db/query/SConscript +++ b/src/mongo/db/query/SConscript @@ -307,7 +307,6 @@ env.CppUnitTest( "plan_cache_indexability_test.cpp" ], LIBDEPS=[ - "collation/collator_interface_mock", "query_planner", ], ) diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp index 9bbb5a169dc..334fbe664cc 100644 --- a/src/mongo/db/query/canonical_query.cpp +++ b/src/mongo/db/query/canonical_query.cpp @@ -516,9 +516,6 @@ std::string CanonicalQuery::toString() const { ss << "Tree: " << _root->toString(); ss << "Sort: " << _qr->getSort().toString() << '\n'; ss << "Proj: " << _qr->getProj().toString() << '\n'; - if (!_qr->getCollation().isEmpty()) { - ss << "Collation: " << _qr->getCollation().toString() << '\n'; - } return ss; } @@ -527,10 +524,6 @@ std::string CanonicalQuery::toStringShort() const { ss << "query: " << _qr->getFilter().toString() << " sort: " << _qr->getSort().toString() << " projection: " << _qr->getProj().toString(); - if (!_qr->getCollation().isEmpty()) { - ss << " collation: " << _qr->getCollation().toString(); - } - if (_qr->getBatchSize()) { ss << " batchSize: " << *_qr->getBatchSize(); } diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp index 5554438ba09..8268555215d 100644 --- a/src/mongo/db/query/plan_cache.cpp +++ b/src/mongo/db/query/plan_cache.cpp @@ -36,7 +36,6 @@ #include "mongo/client/dbclientinterface.h" // For QueryOption_foobar #include "mongo/db/matcher/expression_array.h" #include "mongo/db/matcher/expression_geo.h" -#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/plan_ranker.h" #include "mongo/db/query/query_knobs.h" #include "mongo/db/query/query_solution.h" @@ -58,7 +57,6 @@ const char kEncodeChildrenEnd = ']'; const char kEncodeChildrenSeparator = ','; const char kEncodeSortSection = '~'; const char kEncodeProjectionSection = '|'; -const char kEncodeCollationSection = '#'; /** * Encode user-provided string. Cache key delimiters seen in the @@ -75,7 +73,6 @@ void encodeUserString(StringData s, StringBuilder* keyBuilder) { case kEncodeChildrenSeparator: case kEncodeSortSection: case kEncodeProjectionSection: - case kEncodeCollationSection: case '\\': *keyBuilder << '\\'; // Fall through to default case. @@ -312,7 +309,6 @@ CachedSolution::CachedSolution(const PlanCacheKey& key, const PlanCacheEntry& en query(entry.query.getOwned()), sort(entry.sort.getOwned()), projection(entry.projection.getOwned()), - collation(entry.collation.getOwned()), decisionWorks(entry.decision->stats[0]->common.works) { // CachedSolution should not having any references into // cache entry. All relevant data should be cloned/copied. @@ -373,7 +369,6 @@ PlanCacheEntry* PlanCacheEntry::clone() const { entry->query = query.getOwned(); entry->sort = sort.getOwned(); entry->projection = projection.getOwned(); - entry->collation = collation.getOwned(); // Copy performance stats. 
for (size_t i = 0; i < feedback.size(); ++i) { @@ -388,7 +383,6 @@ PlanCacheEntry* PlanCacheEntry::clone() const { std::string PlanCacheEntry::toString() const { return str::stream() << "(query: " << query.toString() << ";sort: " << sort.toString() << ";projection: " << projection.toString() - << ";collation: " << collation.toString() << ";solutions: " << plannerData.size() << ")"; } @@ -506,13 +500,13 @@ void PlanCache::encodeKeyForMatch(const MatchExpression* tree, StringBuilder* ke } // Encode indexability. - const IndexToDiscriminatorMap& discriminators = + const IndexabilityDiscriminators& discriminators = _indexabilityState.getDiscriminators(tree->path()); if (!discriminators.empty()) { *keyBuilder << kEncodeDiscriminatorsBegin; // For each discriminator on this path, append the character '0' or '1'. - for (auto&& indexAndDiscriminatorPair : discriminators) { - *keyBuilder << indexAndDiscriminatorPair.second.isMatchCompatibleWithIndex(tree); + for (const IndexabilityDiscriminator& discriminator : discriminators) { + *keyBuilder << discriminator(tree); } *keyBuilder << kEncodeDiscriminatorsEnd; } @@ -645,9 +639,6 @@ Status PlanCache::add(const CanonicalQuery& query, const QueryRequest& qr = query.getQueryRequest(); entry->query = qr.getFilter().getOwned(); entry->sort = qr.getSort().getOwned(); - if (query.getCollator()) { - entry->collation = query.getCollator()->getSpec().toBSON(); - } // Strip projections on $-prefixed fields, as these are added by internal callers of the query // system and are not considered part of the user projection. diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h index 16aaff994a2..332e7b79cea 100644 --- a/src/mongo/db/query/plan_cache.h +++ b/src/mongo/db/query/plan_cache.h @@ -201,7 +201,6 @@ public: BSONObj query; BSONObj sort; BSONObj projection; - BSONObj collation; // The number of work cycles taken to decide on a winning plan when the plan was first // cached. 
@@ -251,7 +250,6 @@ public: BSONObj query; BSONObj sort; BSONObj projection; - BSONObj collation; // // Performance stats diff --git a/src/mongo/db/query/plan_cache_indexability.cpp b/src/mongo/db/query/plan_cache_indexability.cpp index 442cb222001..066d7a2782b 100644 --- a/src/mongo/db/query/plan_cache_indexability.cpp +++ b/src/mongo/db/query/plan_cache_indexability.cpp @@ -35,18 +35,15 @@ #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_algo.h" #include "mongo/db/matcher/expression_leaf.h" -#include "mongo/db/query/collation/collation_index_key.h" -#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/index_entry.h" #include "mongo/stdx/memory.h" #include <memory> namespace mongo { -void PlanCacheIndexabilityState::processSparseIndex(const std::string& indexName, - const BSONObj& keyPattern) { +void PlanCacheIndexabilityState::processSparseIndex(const BSONObj& keyPattern) { for (BSONElement elem : keyPattern) { - _pathDiscriminatorsMap[elem.fieldNameStringData()][indexName].addDiscriminator( + _pathDiscriminatorsMap[elem.fieldNameStringData()].push_back( [](const MatchExpression* queryExpr) { if (queryExpr->matchType() == MatchExpression::EQ) { const auto* queryExprEquality = @@ -62,60 +59,24 @@ void PlanCacheIndexabilityState::processSparseIndex(const std::string& indexName } } -void PlanCacheIndexabilityState::processPartialIndex(const std::string& indexName, - const MatchExpression* filterExpr) { +void PlanCacheIndexabilityState::processPartialIndex(const MatchExpression* filterExpr) { invariant(filterExpr); for (size_t i = 0; i < filterExpr->numChildren(); ++i) { - processPartialIndex(indexName, filterExpr->getChild(i)); + processPartialIndex(filterExpr->getChild(i)); } if (!filterExpr->isLogical()) { - _pathDiscriminatorsMap[filterExpr->path()][indexName].addDiscriminator( + _pathDiscriminatorsMap[filterExpr->path()].push_back( [filterExpr](const MatchExpression* queryExpr) { return expression::isSubsetOf(queryExpr, filterExpr); }); } } -void PlanCacheIndexabilityState::processIndexCollation(const std::string& indexName, - const BSONObj& keyPattern, - const CollatorInterface* collator) { - for (BSONElement elem : keyPattern) { - _pathDiscriminatorsMap[elem.fieldNameStringData()][indexName].addDiscriminator([collator]( - const MatchExpression* queryExpr) { - if (ComparisonMatchExpression::isComparisonMatchExpression(queryExpr)) { - const auto* queryExprComparison = - static_cast<const ComparisonMatchExpression*>(queryExpr); - const bool collatorsMatch = - CollatorInterface::collatorsMatch(queryExprComparison->getCollator(), collator); - const bool isCollatableType = - CollationIndexKey::isCollatableType(queryExprComparison->getData().type()); - return collatorsMatch || !isCollatableType; - } - - if (queryExpr->matchType() == MatchExpression::MATCH_IN) { - const auto* queryExprIn = static_cast<const InMatchExpression*>(queryExpr); - if (CollatorInterface::collatorsMatch(queryExprIn->getCollator(), collator)) { - return true; - } - for (const auto& equality : queryExprIn->getEqualities()) { - if (CollationIndexKey::isCollatableType(equality.type())) { - return false; - } - } - return true; - } - - // The predicate never compares strings so it is not affected by collation. 
- return true; - }); - } -} - namespace { -const IndexToDiscriminatorMap emptyDiscriminators; +const IndexabilityDiscriminators emptyDiscriminators; } // namespace -const IndexToDiscriminatorMap& PlanCacheIndexabilityState::getDiscriminators( +const IndexabilityDiscriminators& PlanCacheIndexabilityState::getDiscriminators( StringData path) const { PathDiscriminatorsMap::const_iterator it = _pathDiscriminatorsMap.find(path); if (it == _pathDiscriminatorsMap.end()) { @@ -129,12 +90,11 @@ void PlanCacheIndexabilityState::updateDiscriminators(const std::vector<IndexEnt for (const IndexEntry& idx : indexEntries) { if (idx.sparse) { - processSparseIndex(idx.name, idx.keyPattern); + processSparseIndex(idx.keyPattern); } if (idx.filterExpr) { - processPartialIndex(idx.name, idx.filterExpr); + processPartialIndex(idx.filterExpr); } - processIndexCollation(idx.name, idx.keyPattern, idx.collator); } } diff --git a/src/mongo/db/query/plan_cache_indexability.h b/src/mongo/db/query/plan_cache_indexability.h index 3dd4e83ec3e..03278b06929 100644 --- a/src/mongo/db/query/plan_cache_indexability.h +++ b/src/mongo/db/query/plan_cache_indexability.h @@ -37,42 +37,11 @@ namespace mongo { class BSONObj; -class CollatorInterface; -class CompositeIndexabilityDiscriminator; class MatchExpression; struct IndexEntry; using IndexabilityDiscriminator = stdx::function<bool(const MatchExpression* me)>; using IndexabilityDiscriminators = std::vector<IndexabilityDiscriminator>; -using IndexToDiscriminatorMap = StringMap<CompositeIndexabilityDiscriminator>; - -/** - * CompositeIndexabilityDiscriminator holds all indexability discriminators for a particular path, - * for a particular index. For example, a path may have both a collation discriminator and a sparse - * index discriminator for a particular index. - */ -class CompositeIndexabilityDiscriminator { -public: - /** - * Considers all discriminators for the path-index pair, and returns a single bit indicating - * whether the index can be used for that path. - */ - bool isMatchCompatibleWithIndex(const MatchExpression* me) const { - for (auto&& discriminator : _discriminators) { - if (!discriminator(me)) { - return false; - } - } - return true; - } - - void addDiscriminator(IndexabilityDiscriminator discriminator) { - _discriminators.push_back(std::move(discriminator)); - } - -private: - IndexabilityDiscriminators _discriminators; -}; /** * PlanCacheIndexabilityState holds a set of "indexability discriminators" for certain paths. @@ -86,13 +55,13 @@ public: PlanCacheIndexabilityState() = default; /** - * Returns a map from index name to discriminator for each index associated with 'path'. - * Returns an empty set if no discriminators are registered for 'path'. + * Gets the set of discriminators associated with 'path'. Returns an empty set if no + * discriminators are registered for 'path'. * * The object returned by reference is valid until the next call to updateDiscriminators() * or until destruction of 'this', whichever is first. */ - const IndexToDiscriminatorMap& getDiscriminators(StringData path) const; + const IndexabilityDiscriminators& getDiscriminators(StringData path) const; /** * Clears discriminators for all paths, and regenerate them from 'indexEntries'. @@ -110,11 +79,11 @@ private: * element of the key pattern. The former predicate is compatibile with this index, but the * latter is not compatible. 
*/ - void processSparseIndex(const std::string& indexName, const BSONObj& keyPattern); + void processSparseIndex(const BSONObj& keyPattern); /** * Adds partial index discriminators for the partial index with the given filter expression - * to the discriminators for that index in '_pathDiscriminatorsMap'. + * to '_pathDiscriminatorsMap'. * * A partial index discriminator distinguishes expressions that match a given partial index * predicate from expressions that don't match the partial index predicate. For example, @@ -122,23 +91,9 @@ private: * predicate {a: {$gt: -5}}, if there is a partial index defined with document filter {a: * {$gt: 0}}. The former is compatible with this index, but the latter is not compatible. */ - void processPartialIndex(const std::string& indexName, const MatchExpression* filterExpr); - - /** - * Adds collation discriminators for the index with the given key pattern and collator to - * '_pathDiscriminatorsMap'. - * - * The discriminator for a given path returns true if the index collator matches the query - * collator, or if the query does not contain string comparison at that path. - */ - void processIndexCollation(const std::string& indexName, - const BSONObj& keyPattern, - const CollatorInterface* collator); + void processPartialIndex(const MatchExpression* filterExpr); - /** - * PathDiscriminatorsMap is a map from field path to index name to IndexabilityDiscriminator. - */ - using PathDiscriminatorsMap = StringMap<IndexToDiscriminatorMap>; + using PathDiscriminatorsMap = StringMap<IndexabilityDiscriminators>; PathDiscriminatorsMap _pathDiscriminatorsMap; }; diff --git a/src/mongo/db/query/plan_cache_indexability_test.cpp b/src/mongo/db/query/plan_cache_indexability_test.cpp index 3cc90e3429a..e5db935d3a3 100644 --- a/src/mongo/db/query/plan_cache_indexability_test.cpp +++ b/src/mongo/db/query/plan_cache_indexability_test.cpp @@ -28,10 +28,8 @@ #include "mongo/platform/basic.h" -#include "mongo/db/json.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/extensions_callback_disallow_extensions.h" -#include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/db/query/index_entry.h" #include "mongo/db/query/plan_cache_indexability.h" #include "mongo/unittest/unittest.h" @@ -39,8 +37,8 @@ namespace mongo { namespace { -std::unique_ptr<MatchExpression> parseMatchExpression(const BSONObj& obj, - const CollatorInterface* collator = nullptr) { +std::unique_ptr<MatchExpression> parseMatchExpression(const BSONObj& obj) { + const CollatorInterface* collator = nullptr; StatusWithMatchExpression status = MatchExpressionParser::parse(obj, ExtensionsCallbackDisallowExtensions(), collator); if (!status.isOK()) { @@ -57,61 +55,48 @@ TEST(PlanCacheIndexabilityTest, SparseIndexSimple) { false, // multikey true, // sparse false, // unique - "a_1", // name + "", // name nullptr, // filterExpr BSONObj())}); - auto discriminators = state.getDiscriminators("a"); + const IndexabilityDiscriminators& discriminators = state.getDiscriminators("a"); ASSERT_EQ(1U, discriminators.size()); - ASSERT(discriminators.find("a_1") != discriminators.end()); - auto disc = discriminators["a_1"]; - ASSERT_EQ(true, disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << 1)).get())); + const IndexabilityDiscriminator& disc = discriminators[0]; + ASSERT_EQ(true, disc(parseMatchExpression(BSON("a" << 1)).get())); + ASSERT_EQ(false, disc(parseMatchExpression(BSON("a" << BSONNULL)).get())); + ASSERT_EQ(true, disc(parseMatchExpression(BSON("a" << BSON("$in" 
<< BSON_ARRAY(1)))).get())); ASSERT_EQ(false, - disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << BSONNULL)).get())); - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex( - parseMatchExpression(BSON("a" << BSON("$in" << BSON_ARRAY(1)))).get())); - ASSERT_EQ(false, - disc.isMatchCompatibleWithIndex( - parseMatchExpression(BSON("a" << BSON("$in" << BSON_ARRAY(BSONNULL)))).get())); + disc(parseMatchExpression(BSON("a" << BSON("$in" << BSON_ARRAY(BSONNULL)))).get())); } // Test sparse index discriminators for a compound sparse index. TEST(PlanCacheIndexabilityTest, SparseIndexCompound) { PlanCacheIndexabilityState state; state.updateDiscriminators({IndexEntry(BSON("a" << 1 << "b" << 1), - false, // multikey - true, // sparse - false, // unique - "a_1_b_1", // name - nullptr, // filterExpr + false, // multikey + true, // sparse + false, // unique + "", // name + nullptr, // filterExpr BSONObj())}); { - auto discriminators = state.getDiscriminators("a"); + const IndexabilityDiscriminators& discriminators = state.getDiscriminators("a"); ASSERT_EQ(1U, discriminators.size()); - ASSERT(discriminators.find("a_1_b_1") != discriminators.end()); - - auto disc = discriminators["a_1_b_1"]; - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << 1)).get())); - ASSERT_EQ( - false, - disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << BSONNULL)).get())); + + const IndexabilityDiscriminator& disc = discriminators[0]; + ASSERT_EQ(true, disc(parseMatchExpression(BSON("a" << 1)).get())); + ASSERT_EQ(false, disc(parseMatchExpression(BSON("a" << BSONNULL)).get())); } { - auto discriminators = state.getDiscriminators("b"); + const IndexabilityDiscriminators& discriminators = state.getDiscriminators("b"); ASSERT_EQ(1U, discriminators.size()); - ASSERT(discriminators.find("a_1_b_1") != discriminators.end()); - - auto disc = discriminators["a_1_b_1"]; - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("b" << 1)).get())); - ASSERT_EQ( - false, - disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("b" << BSONNULL)).get())); + + const IndexabilityDiscriminator& disc = discriminators[0]; + ASSERT_EQ(true, disc(parseMatchExpression(BSON("b" << 1)).get())); + ASSERT_EQ(false, disc(parseMatchExpression(BSON("b" << BSONNULL)).get())); } } @@ -124,37 +109,18 @@ TEST(PlanCacheIndexabilityTest, PartialIndexSimple) { false, // multikey false, // sparse false, // unique - "a_1", // name + "", // name filterExpr.get(), BSONObj())}); - { - auto discriminators = state.getDiscriminators("f"); - ASSERT_EQ(1U, discriminators.size()); - ASSERT(discriminators.find("a_1") != discriminators.end()); - - auto disc = discriminators["a_1"]; - ASSERT_EQ(false, - disc.isMatchCompatibleWithIndex( - parseMatchExpression(BSON("f" << BSON("$gt" << -5))).get())); - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex( - parseMatchExpression(BSON("f" << BSON("$gt" << 5))).get())); - } + const IndexabilityDiscriminators& discriminators = state.getDiscriminators("f"); + ASSERT_EQ(1U, discriminators.size()); - { - auto discriminators = state.getDiscriminators("a"); - ASSERT_EQ(1U, discriminators.size()); - ASSERT(discriminators.find("a_1") != discriminators.end()); - - auto disc = discriminators["a_1"]; - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex( - parseMatchExpression(BSON("a" << BSON("$gt" << -5))).get())); - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex( - parseMatchExpression(BSON("a" << BSON("$gt" << -5))).get())); - } + const 
IndexabilityDiscriminator& disc = discriminators[0]; + ASSERT_EQ(false, disc(parseMatchExpression(BSON("f" << BSON("$gt" << -5))).get())); + ASSERT_EQ(true, disc(parseMatchExpression(BSON("f" << BSON("$gt" << 5))).get())); + + ASSERT(state.getDiscriminators("a").empty()); } // Test partial index discriminators for an index where the filter expression is an AND. @@ -166,45 +132,29 @@ TEST(PlanCacheIndexabilityTest, PartialIndexAnd) { false, // multikey false, // sparse false, // unique - "a_1", // name + "", // name filterExpr.get(), BSONObj())}); { - auto discriminators = state.getDiscriminators("f"); + const IndexabilityDiscriminators& discriminators = state.getDiscriminators("f"); ASSERT_EQ(1U, discriminators.size()); - ASSERT(discriminators.find("a_1") != discriminators.end()); - auto disc = discriminators["a_1"]; - ASSERT_EQ(false, - disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("f" << 0)).get())); - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("f" << 1)).get())); + const IndexabilityDiscriminator& disc = discriminators[0]; + ASSERT_EQ(false, disc(parseMatchExpression(BSON("f" << 0)).get())); + ASSERT_EQ(true, disc(parseMatchExpression(BSON("f" << 1)).get())); } { - auto discriminators = state.getDiscriminators("g"); + const IndexabilityDiscriminators& discriminators = state.getDiscriminators("g"); ASSERT_EQ(1U, discriminators.size()); - ASSERT(discriminators.find("a_1") != discriminators.end()); - auto disc = discriminators["a_1"]; - ASSERT_EQ(false, - disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("g" << 0)).get())); - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("g" << 1)).get())); + const IndexabilityDiscriminator& disc = discriminators[0]; + ASSERT_EQ(false, disc(parseMatchExpression(BSON("g" << 0)).get())); + ASSERT_EQ(true, disc(parseMatchExpression(BSON("g" << 1)).get())); } - { - auto discriminators = state.getDiscriminators("a"); - ASSERT_EQ(1U, discriminators.size()); - ASSERT(discriminators.find("a_1") != discriminators.end()); - - auto disc = discriminators["a_1"]; - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << 0)).get())); - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << 1)).get())); - } + ASSERT(state.getDiscriminators("a").empty()); } // Test partial index discriminators where there are multiple partial indexes. 
@@ -220,169 +170,47 @@ TEST(PlanCacheIndexabilityTest, MultiplePartialIndexes) { false, // multikey false, // sparse false, // unique - "a_1", // name + "", // name filterExpr1.get(), BSONObj()), IndexEntry(BSON("b" << 1), false, // multikey false, // sparse false, // unique - "b_1", // name + "", // name filterExpr2.get(), BSONObj())}); - { - auto discriminators = state.getDiscriminators("f"); - ASSERT_EQ(2U, discriminators.size()); - ASSERT(discriminators.find("a_1") != discriminators.end()); - ASSERT(discriminators.find("b_1") != discriminators.end()); - - auto discA = discriminators["a_1"]; - auto discB = discriminators["b_1"]; - - ASSERT_EQ(false, - discA.isMatchCompatibleWithIndex(parseMatchExpression(BSON("f" << 0)).get())); - ASSERT_EQ(false, - discB.isMatchCompatibleWithIndex(parseMatchExpression(BSON("f" << 0)).get())); - - ASSERT_EQ(true, - discA.isMatchCompatibleWithIndex(parseMatchExpression(BSON("f" << 1)).get())); - ASSERT_EQ(false, - discB.isMatchCompatibleWithIndex(parseMatchExpression(BSON("f" << 1)).get())); - - ASSERT_EQ(false, - discA.isMatchCompatibleWithIndex(parseMatchExpression(BSON("f" << 2)).get())); - ASSERT_EQ(true, - discB.isMatchCompatibleWithIndex(parseMatchExpression(BSON("f" << 2)).get())); - } + const IndexabilityDiscriminators& discriminators = state.getDiscriminators("f"); + ASSERT_EQ(2U, discriminators.size()); - { - auto discriminators = state.getDiscriminators("a"); - ASSERT_EQ(1U, discriminators.size()); - ASSERT(discriminators.find("a_1") != discriminators.end()); - - auto disc = discriminators["a_1"]; - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << 0)).get())); - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << 1)).get())); - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("a" << 2)).get())); - } + const IndexabilityDiscriminator& disc1 = discriminators[0]; + const IndexabilityDiscriminator& disc2 = discriminators[1]; - { - auto discriminators = state.getDiscriminators("b"); - ASSERT_EQ(1U, discriminators.size()); - ASSERT(discriminators.find("b_1") != discriminators.end()); - - auto disc = discriminators["b_1"]; - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("b" << 0)).get())); - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("b" << 1)).get())); - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex(parseMatchExpression(BSON("b" << 2)).get())); - } + ASSERT_EQ(false, disc1(parseMatchExpression(BSON("f" << 0)).get())); + ASSERT_EQ(false, disc1(parseMatchExpression(BSON("f" << 0)).get())); + + ASSERT_NOT_EQUALS(disc1(parseMatchExpression(BSON("f" << 1)).get()), + disc2(parseMatchExpression(BSON("f" << 1)).get())); + + ASSERT_NOT_EQUALS(disc1(parseMatchExpression(BSON("f" << 2)).get()), + disc2(parseMatchExpression(BSON("f" << 2)).get())); + + ASSERT(state.getDiscriminators("a").empty()); + ASSERT(state.getDiscriminators("b").empty()); } -// Test that a discriminator is generated for a regular index (this discriminator will only encode -// collation indexability). +// Test that no discriminators are generated for a regular index. 
TEST(PlanCacheIndexabilityTest, IndexNeitherSparseNorPartial) { PlanCacheIndexabilityState state; state.updateDiscriminators({IndexEntry(BSON("a" << 1), false, // multikey false, // sparse false, // unique - "a_1", // name - nullptr, - BSONObj())}); - auto discriminators = state.getDiscriminators("a"); - ASSERT_EQ(1U, discriminators.size()); - ASSERT(discriminators.find("a_1") != discriminators.end()); -} - -// Test discriminator for a simple index with a collation. -TEST(PlanCacheIndexabilityTest, DiscriminatorForCollationIndicatesWhenCollationsAreCompatible) { - PlanCacheIndexabilityState state; - IndexEntry entry(BSON("a" << 1), - false, // multikey - false, // sparse - false, // unique - "a_1", // name - nullptr, // filterExpr - BSONObj()); - CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString); - entry.collator = &collator; - state.updateDiscriminators({entry}); - - auto discriminators = state.getDiscriminators("a"); - ASSERT_EQ(1U, discriminators.size()); - ASSERT(discriminators.find("a_1") != discriminators.end()); - - auto disc = discriminators["a_1"]; - - // Index collator matches query collator. - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex( - parseMatchExpression(fromjson("{a: 'abc'}"), &collator).get())); - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex( - parseMatchExpression(fromjson("{a: {$in: ['abc', 'xyz']}}"), &collator).get())); - - // Expression is not a ComparisonMatchExpression or InMatchExpression. - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex( - parseMatchExpression(fromjson("{a: {$exists: true}}"), nullptr).get())); - - // Expression is a ComparisonMatchExpression with non-matching collator. - ASSERT_EQ( - true, - disc.isMatchCompatibleWithIndex(parseMatchExpression(fromjson("{a: 5}"), nullptr).get())); - ASSERT_EQ(false, - disc.isMatchCompatibleWithIndex( - parseMatchExpression(fromjson("{a: 'abc'}"), nullptr).get())); - ASSERT_EQ(false, - disc.isMatchCompatibleWithIndex( - parseMatchExpression(fromjson("{a: {b: 'abc'}}"), nullptr).get())); - ASSERT_EQ(false, - disc.isMatchCompatibleWithIndex( - parseMatchExpression(fromjson("{a: ['abc', 'xyz']}"), nullptr).get())); - - // Expression is an InMatchExpression with non-matching collator. - ASSERT_EQ(true, - disc.isMatchCompatibleWithIndex( - parseMatchExpression(fromjson("{a: {$in: [1, 2]}}"), nullptr).get())); - ASSERT_EQ(false, - disc.isMatchCompatibleWithIndex( - parseMatchExpression(fromjson("{a: {$in: [1, 'abc', 2]}}"), nullptr).get())); - ASSERT_EQ(false, - disc.isMatchCompatibleWithIndex( - parseMatchExpression(fromjson("{a: {$in: [1, {b: 'abc'}, 2]}}"), nullptr).get())); - ASSERT_EQ( - false, - disc.isMatchCompatibleWithIndex( - parseMatchExpression(fromjson("{a: {$in: [1, ['abc', 'xyz'], 2]}}"), nullptr).get())); -} - -// Test that a discriminator is produced for each field in a compound index (this discriminator will -// only encode collation indexability). 
-TEST(PlanCacheIndexabilityTest, CompoundIndexCollationDiscriminator) { - PlanCacheIndexabilityState state; - state.updateDiscriminators({IndexEntry(BSON("a" << 1 << "b" << 1), - false, // multikey - false, // sparse - false, // unique - "a_1_b_1", // name + "", // name nullptr, BSONObj())}); - - auto discriminatorsA = state.getDiscriminators("a"); - ASSERT_EQ(1U, discriminatorsA.size()); - ASSERT(discriminatorsA.find("a_1_b_1") != discriminatorsA.end()); - - auto discriminatorsB = state.getDiscriminators("b"); - ASSERT_EQ(1U, discriminatorsB.size()); - ASSERT(discriminatorsB.find("a_1_b_1") != discriminatorsB.end()); + ASSERT(state.getDiscriminators("a").empty()); } } // namespace diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp index 86dd471d568..23eef65734b 100644 --- a/src/mongo/db/query/plan_cache_test.cpp +++ b/src/mongo/db/query/plan_cache_test.cpp @@ -39,7 +39,6 @@ #include "mongo/db/jsobj.h" #include "mongo/db/json.h" #include "mongo/db/matcher/extensions_callback_disallow_extensions.h" -#include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/db/query/plan_ranker.h" #include "mongo/db/query/query_knobs.h" #include "mongo/db/query/query_planner.h" @@ -52,8 +51,6 @@ using namespace mongo; -using unittest::assertGet; - namespace { using std::string; @@ -84,8 +81,7 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr) { unique_ptr<CanonicalQuery> canonicalize(const char* queryStr, const char* sortStr, - const char* projStr, - const char* collationStr) { + const char* projStr) { QueryTestServiceContext serviceContext; auto txn = serviceContext.makeOperationContext(); @@ -93,7 +89,6 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr, qr->setFilter(fromjson(queryStr)); qr->setSort(fromjson(sortStr)); qr->setProj(fromjson(projStr)); - qr->setCollation(fromjson(collationStr)); auto statusWithCQ = CanonicalQuery::canonicalize( txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); @@ -273,7 +268,7 @@ TEST(PlanCacheTest, ShouldCacheQueryBasic) { } TEST(PlanCacheTest, ShouldCacheQuerySort) { - unique_ptr<CanonicalQuery> cq(canonicalize("{}", "{a: -1}", "{_id: 0, a: 1}", "{}")); + unique_ptr<CanonicalQuery> cq(canonicalize("{}", "{a: -1}", "{_id: 0, a: 1}")); assertShouldCacheQuery(*cq); } @@ -472,12 +467,6 @@ protected: keyPattern, multikey, sparse, false, "note_to_self_dont_break_build", NULL, BSONObj())); } - void addIndex(BSONObj keyPattern, CollatorInterface* collator) { - IndexEntry entry(keyPattern, false, false, false, "index_with_collation", NULL, BSONObj()); - entry.collator = collator; - params.indices.push_back(entry); - } - // // Execute planner. // @@ -542,7 +531,7 @@ protected: QueryTestServiceContext serviceContext; auto txn = serviceContext.makeOperationContext(); - // Clean up any previous state from a call to runQueryFull or runQueryAsCommand. + // Clean up any previous state from a call to runQueryFull for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) { delete *it; } @@ -570,28 +559,6 @@ protected: ASSERT_OK(s); } - void runQueryAsCommand(const BSONObj& cmdObj) { - QueryTestServiceContext serviceContext; - auto txn = serviceContext.makeOperationContext(); - - // Clean up any previous state from a call to runQueryFull or runQueryAsCommand. 
- for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) { - delete *it; - } - - solns.clear(); - - const bool isExplain = false; - std::unique_ptr<QueryRequest> qr( - assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain))); - - auto statusWithCQ = CanonicalQuery::canonicalize( - txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()); - ASSERT_OK(statusWithCQ.getStatus()); - Status s = QueryPlanner::plan(*statusWithCQ.getValue(), params, &solns); - ASSERT_OK(s); - } - // // Solution introspection. // @@ -643,20 +610,19 @@ protected: * Does not take ownership of 'soln'. */ QuerySolution* planQueryFromCache(const BSONObj& query, const QuerySolution& soln) const { - return planQueryFromCache(query, BSONObj(), BSONObj(), BSONObj(), soln); + return planQueryFromCache(query, BSONObj(), BSONObj(), soln); } /** - * Plan 'query' from the cache with sort order 'sort', projection 'proj', and collation - * 'collation'. A mock cache entry is created using the cacheData stored inside the - * QuerySolution 'soln'. + * Plan 'query' from the cache with sort order 'sort' and + * projection 'proj'. A mock cache entry is created using + * the cacheData stored inside the QuerySolution 'soln'. * * Does not take ownership of 'soln'. */ QuerySolution* planQueryFromCache(const BSONObj& query, const BSONObj& sort, const BSONObj& proj, - const BSONObj& collation, const QuerySolution& soln) const { QueryTestServiceContext serviceContext; auto txn = serviceContext.makeOperationContext(); @@ -665,7 +631,6 @@ protected: qr->setFilter(query); qr->setSort(sort); qr->setProj(proj); - qr->setCollation(collation); auto statusWithCQ = CanonicalQuery::canonicalize( txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); @@ -731,7 +696,7 @@ protected: * Overloaded so that it is not necessary to specificy sort and project. */ void assertPlanCacheRecoversSolution(const BSONObj& query, const string& solnJson) { - assertPlanCacheRecoversSolution(query, BSONObj(), BSONObj(), BSONObj(), solnJson); + assertPlanCacheRecoversSolution(query, BSONObj(), BSONObj(), solnJson); } /** @@ -742,16 +707,15 @@ protected: * * Must be called after calling one of the runQuery* methods. * - * Together, 'query', 'sort', 'proj', and 'collation' should specify the query which was - * previously run using one of the runQuery* methods. + * Together, 'query', 'sort', and 'proj' should specify the query which + * was previously run using one of the runQuery* methods. 
*/ void assertPlanCacheRecoversSolution(const BSONObj& query, const BSONObj& sort, const BSONObj& proj, - const BSONObj& collation, const string& solnJson) { QuerySolution* bestSoln = firstMatchingSolution(solnJson); - QuerySolution* planSoln = planQueryFromCache(query, sort, proj, collation, *bestSoln); + QuerySolution* planSoln = planQueryFromCache(query, sort, proj, *bestSoln); assertSolutionMatches(planSoln, solnJson); delete planSoln; } @@ -941,7 +905,6 @@ TEST_F(CachePlanSelectionTest, MergeSort) { query, sort, BSONObj(), - BSONObj(), "{fetch: {node: {mergeSort: {nodes: " "[{ixscan: {pattern: {a: 1, c: 1}}}, {ixscan: {pattern: {b: 1, c: 1}}}]}}}}"); } @@ -957,7 +920,6 @@ TEST_F(CachePlanSelectionTest, NoMergeSortIfNoSortWanted) { assertPlanCacheRecoversSolution(query, BSONObj(), BSONObj(), - BSONObj(), "{fetch: {filter: null, node: {or: {nodes: [" "{ixscan: {filter: null, pattern: {a: 1, c: 1}}}, " "{ixscan: {filter: null, pattern: {b: 1, c: 1}}}]}}}}"); @@ -990,7 +952,6 @@ TEST_F(CachePlanSelectionTest, CompoundGeoNoGeoPredicate) { query, sort, BSONObj(), - BSONObj(), "{fetch: {node: {ixscan: {pattern: {creationDate: 1, 'foo.bar': '2dsphere'}}}}}"); } @@ -1001,7 +962,6 @@ TEST_F(CachePlanSelectionTest, ReverseScanForSort) { BSONObj(), fromjson("{_id: -1}"), BSONObj(), - BSONObj(), "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {_id: 1}}}}}"); } @@ -1034,7 +994,6 @@ TEST_F(CachePlanSelectionTest, CollscanMergeSort) { assertPlanCacheRecoversSolution(query, sort, BSONObj(), - BSONObj(), "{sort: {pattern: {c: 1}, limit: 0, node: {sortKeyGen: " "{node: {cscan: {dir: 1}}}}}}"); } @@ -1230,25 +1189,6 @@ TEST_F(CachePlanSelectionTest, Or2DNonNearNotCached) { "{fetch: {node: {ixscan: {pattern: {b: '2d'}}}}}]}}"); } -// -// Collation. -// - -TEST_F(CachePlanSelectionTest, MatchingCollation) { - CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString); - addIndex(BSON("x" << 1), &collator); - runQueryAsCommand(fromjson( - "{find: 'testns', filter: {x: 'foo'}, collation: {locale: 'mock_reverse_string'}}")); - - assertPlanCacheRecoversSolution(BSON("x" - << "bar"), - BSONObj(), - BSONObj(), - BSON("locale" - << "mock_reverse_string"), - "{fetch: {node: {ixscan: {pattern: {x: 1}}}}}"); -} - /** * Test functions for computeKey. Cache keys are intentionally obfuscated and are * meaningful only within the current lifetime of the server process. Users should treat plan @@ -1259,8 +1199,7 @@ void testComputeKey(const char* queryStr, const char* projStr, const char* expectedStr) { PlanCache planCache; - const char* collationStr = "{}"; - unique_ptr<CanonicalQuery> cq(canonicalize(queryStr, sortStr, projStr, collationStr)); + unique_ptr<CanonicalQuery> cq(canonicalize(queryStr, sortStr, projStr)); PlanCacheKey key = planCache.computeKey(*cq); PlanCacheKey expectedKey(expectedStr); if (key == expectedKey) { @@ -1415,59 +1354,4 @@ TEST(PlanCacheTest, ComputeKeyPartialIndex) { ASSERT_NOT_EQUALS(planCache.computeKey(*cqGtNegativeFive), planCache.computeKey(*cqGtZero)); } -// Query shapes should get the same plan cache key if they have the same collation indexability. 
-TEST(PlanCacheTest, ComputeKeyCollationIndex) { - CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString); - - PlanCache planCache; - IndexEntry entry(BSON("a" << 1), - false, // multikey - false, // sparse - false, // unique - "", // name - nullptr, // filterExpr - BSONObj()); - entry.collator = &collator; - planCache.notifyOfIndexEntries({entry}); - - unique_ptr<CanonicalQuery> containsString(canonicalize("{a: 'abc'}")); - unique_ptr<CanonicalQuery> containsObject(canonicalize("{a: {b: 'abc'}}")); - unique_ptr<CanonicalQuery> containsArray(canonicalize("{a: ['abc', 'xyz']}")); - unique_ptr<CanonicalQuery> noStrings(canonicalize("{a: 5}")); - unique_ptr<CanonicalQuery> containsStringHasCollation( - canonicalize("{a: 'abc'}", "{}", "{}", "{locale: 'mock_reverse_string'}")); - - // 'containsString', 'containsObject', and 'containsArray' have the same key, since none are - // compatible with the index. - ASSERT_EQ(planCache.computeKey(*containsString), planCache.computeKey(*containsObject)); - ASSERT_EQ(planCache.computeKey(*containsString), planCache.computeKey(*containsArray)); - - // 'noStrings' gets a different key since it is compatible with the index. - ASSERT_NOT_EQUALS(planCache.computeKey(*containsString), planCache.computeKey(*noStrings)); - - // 'noStrings' and 'containsStringHasCollation' get the same key since they compatible with the - // index. - ASSERT_EQ(planCache.computeKey(*noStrings), planCache.computeKey(*containsStringHasCollation)); - - unique_ptr<CanonicalQuery> inContainsString(canonicalize("{a: {$in: [1, 'abc', 2]}}")); - unique_ptr<CanonicalQuery> inContainsObject(canonicalize("{a: {$in: [1, {b: 'abc'}, 2]}}")); - unique_ptr<CanonicalQuery> inContainsArray(canonicalize("{a: {$in: [1, ['abc', 'xyz'], 2]}}")); - unique_ptr<CanonicalQuery> inNoStrings(canonicalize("{a: {$in: [1, 2]}}")); - unique_ptr<CanonicalQuery> inContainsStringHasCollation( - canonicalize("{a: {$in: [1, 'abc', 2]}}", "{}", "{}", "{locale: 'mock_reverse_string'}")); - - // 'inContainsString', 'inContainsObject', and 'inContainsArray' have the same key, since none - // are compatible with the index. - ASSERT_EQ(planCache.computeKey(*inContainsString), planCache.computeKey(*inContainsObject)); - ASSERT_EQ(planCache.computeKey(*inContainsString), planCache.computeKey(*inContainsArray)); - - // 'inNoStrings' gets a different key since it is compatible with the index. - ASSERT_NOT_EQUALS(planCache.computeKey(*inContainsString), planCache.computeKey(*inNoStrings)); - - // 'inNoStrings' and 'inContainsStringHasCollation' get the same key since they compatible with - // the index. 
- ASSERT_EQ(planCache.computeKey(*inNoStrings), - planCache.computeKey(*inContainsStringHasCollation)); -} - } // namespace diff --git a/src/mongo/db/query/query_settings.cpp b/src/mongo/db/query/query_settings.cpp index f2c192b2ab9..385a4eec4c8 100644 --- a/src/mongo/db/query/query_settings.cpp +++ b/src/mongo/db/query/query_settings.cpp @@ -56,12 +56,8 @@ AllowedIndices::~AllowedIndices() {} AllowedIndexEntry::AllowedIndexEntry(const BSONObj& query, const BSONObj& sort, const BSONObj& projection, - const BSONObj& collation, const std::vector<BSONObj>& indexKeyPatterns) - : query(query.getOwned()), - sort(sort.getOwned()), - projection(projection.getOwned()), - collation(collation.getOwned()) { + : query(query.getOwned()), sort(sort.getOwned()), projection(projection.getOwned()) { for (std::vector<BSONObj>::const_iterator i = indexKeyPatterns.begin(); i != indexKeyPatterns.end(); ++i) { @@ -73,8 +69,7 @@ AllowedIndexEntry::AllowedIndexEntry(const BSONObj& query, AllowedIndexEntry::~AllowedIndexEntry() {} AllowedIndexEntry* AllowedIndexEntry::clone() const { - AllowedIndexEntry* entry = - new AllowedIndexEntry(query, sort, projection, collation, indexKeyPatterns); + AllowedIndexEntry* entry = new AllowedIndexEntry(query, sort, projection, indexKeyPatterns); return entry; } @@ -128,9 +123,7 @@ void QuerySettings::setAllowedIndices(const CanonicalQuery& canonicalQuery, const BSONObj& query = qr.getFilter(); const BSONObj& sort = qr.getSort(); const BSONObj& projection = qr.getProj(); - const BSONObj collation = - canonicalQuery.getCollator() ? canonicalQuery.getCollator()->getSpec().toBSON() : BSONObj(); - AllowedIndexEntry* entry = new AllowedIndexEntry(query, sort, projection, collation, indexes); + AllowedIndexEntry* entry = new AllowedIndexEntry(query, sort, projection, indexes); stdx::lock_guard<stdx::mutex> cacheLock(_mutex); AllowedIndexEntryMap::iterator i = _allowedIndexEntryMap.find(key); diff --git a/src/mongo/db/query/query_settings.h b/src/mongo/db/query/query_settings.h index dbf3f1a19bc..e1125320471 100644 --- a/src/mongo/db/query/query_settings.h +++ b/src/mongo/db/query/query_settings.h @@ -61,7 +61,7 @@ public: /** * Value type for query settings. * Holds: - * query shape (query, sort, projection, collation) + * query shape (query, sort, projection) * vector of index specs */ class AllowedIndexEntry { @@ -72,17 +72,15 @@ public: AllowedIndexEntry(const BSONObj& query, const BSONObj& sort, const BSONObj& projection, - const BSONObj& collation, const std::vector<BSONObj>& indexKeyPatterns); ~AllowedIndexEntry(); AllowedIndexEntry* clone() const; - // query, sort, projection, and collation collectively represent the query shape that we are - // storing hint overrides for. + // _query, _sort and _projection collectively + // represent the query shape that we are storing hint overrides for. BSONObj query; BSONObj sort; BSONObj projection; - BSONObj collation; // These are the index key patterns that // we will use to override the indexes retrieved from |