Diffstat (limited to 'src/mongo/db/query')
-rw-r--r--  src/mongo/db/query/canonical_query.cpp | 1068
-rw-r--r--  src/mongo/db/query/canonical_query.h | 377
-rw-r--r--  src/mongo/db/query/canonical_query_test.cpp | 1003
-rw-r--r--  src/mongo/db/query/count_request.cpp | 134
-rw-r--r--  src/mongo/db/query/count_request.h | 103
-rw-r--r--  src/mongo/db/query/count_request_test.cpp | 190
-rw-r--r--  src/mongo/db/query/cursor_responses.cpp | 42
-rw-r--r--  src/mongo/db/query/cursor_responses.h | 60
-rw-r--r--  src/mongo/db/query/explain.cpp | 1147
-rw-r--r--  src/mongo/db/query/explain.h | 289
-rw-r--r--  src/mongo/db/query/explain_common.cpp | 49
-rw-r--r--  src/mongo/db/query/explain_common.h | 69
-rw-r--r--  src/mongo/db/query/expression_index.cpp | 300
-rw-r--r--  src/mongo/db/query/expression_index.h | 41
-rw-r--r--  src/mongo/db/query/expression_index_knobs.cpp | 4
-rw-r--r--  src/mongo/db/query/expression_index_knobs.h | 22
-rw-r--r--  src/mongo/db/query/find.cpp | 1178
-rw-r--r--  src/mongo/db/query/find.h | 222
-rw-r--r--  src/mongo/db/query/find_and_modify_request.cpp | 259
-rw-r--r--  src/mongo/db/query/find_and_modify_request.h | 233
-rw-r--r--  src/mongo/db/query/find_and_modify_request_test.cpp | 444
-rw-r--r--  src/mongo/db/query/find_constants.h | 6
-rw-r--r--  src/mongo/db/query/get_executor.cpp | 2408
-rw-r--r--  src/mongo/db/query/get_executor.h | 318
-rw-r--r--  src/mongo/db/query/get_executor_test.cpp | 187
-rw-r--r--  src/mongo/db/query/getmore_request.cpp | 165
-rw-r--r--  src/mongo/db/query/getmore_request.h | 56
-rw-r--r--  src/mongo/db/query/getmore_request_test.cpp | 284
-rw-r--r--  src/mongo/db/query/index_bounds.cpp | 853
-rw-r--r--  src/mongo/db/query/index_bounds.h | 375
-rw-r--r--  src/mongo/db/query/index_bounds_builder.cpp | 1673
-rw-r--r--  src/mongo/db/query/index_bounds_builder.h | 283
-rw-r--r--  src/mongo/db/query/index_bounds_builder_test.cpp | 2686
-rw-r--r--  src/mongo/db/query/index_bounds_test.cpp | 1282
-rw-r--r--  src/mongo/db/query/index_entry.cpp | 42
-rw-r--r--  src/mongo/db/query/index_entry.h | 161
-rw-r--r--  src/mongo/db/query/index_tag.cpp | 126
-rw-r--r--  src/mongo/db/query/index_tag.h | 182
-rw-r--r--  src/mongo/db/query/indexability.h | 204
-rw-r--r--  src/mongo/db/query/internal_plans.cpp | 147
-rw-r--r--  src/mongo/db/query/internal_plans.h | 85
-rw-r--r--  src/mongo/db/query/interval.cpp | 356
-rw-r--r--  src/mongo/db/query/interval.h | 275
-rw-r--r--  src/mongo/db/query/interval_test.cpp | 448
-rw-r--r--  src/mongo/db/query/lite_parsed_query.cpp | 1490
-rw-r--r--  src/mongo/db/query/lite_parsed_query.h | 537
-rw-r--r--  src/mongo/db/query/lite_parsed_query_test.cpp | 1914
-rw-r--r--  src/mongo/db/query/lru_key_value.h | 318
-rw-r--r--  src/mongo/db/query/lru_key_value_test.cpp | 280
-rw-r--r--  src/mongo/db/query/parsed_projection.cpp | 477
-rw-r--r--  src/mongo/db/query/parsed_projection.h | 214
-rw-r--r--  src/mongo/db/query/parsed_projection_test.cpp | 354
-rw-r--r--  src/mongo/db/query/plan_cache.cpp | 1099
-rw-r--r--  src/mongo/db/query/plan_cache.h | 678
-rw-r--r--  src/mongo/db/query/plan_cache_indexability.cpp | 92
-rw-r--r--  src/mongo/db/query/plan_cache_indexability.h | 103
-rw-r--r--  src/mongo/db/query/plan_cache_indexability_test.cpp | 310
-rw-r--r--  src/mongo/db/query/plan_cache_test.cpp | 2175
-rw-r--r--  src/mongo/db/query/plan_enumerator.cpp | 2083
-rw-r--r--  src/mongo/db/query/plan_enumerator.h | 780
-rw-r--r--  src/mongo/db/query/plan_executor.cpp | 837
-rw-r--r--  src/mongo/db/query/plan_executor.h | 781
-rw-r--r--  src/mongo/db/query/plan_ranker.cpp | 368
-rw-r--r--  src/mongo/db/query/plan_ranker.h | 135
-rw-r--r--  src/mongo/db/query/plan_yield_policy.cpp | 126
-rw-r--r--  src/mongo/db/query/plan_yield_policy.h | 125
-rw-r--r--  src/mongo/db/query/planner_access.cpp | 2233
-rw-r--r--  src/mongo/db/query/planner_access.h | 675
-rw-r--r--  src/mongo/db/query/planner_analysis.cpp | 1226
-rw-r--r--  src/mongo/db/query/planner_analysis.h | 136
-rw-r--r--  src/mongo/db/query/planner_analysis_test.cpp | 238
-rw-r--r--  src/mongo/db/query/planner_ixselect.cpp | 1079
-rw-r--r--  src/mongo/db/query/planner_ixselect.h | 272
-rw-r--r--  src/mongo/db/query/planner_ixselect_test.cpp | 391
-rw-r--r--  src/mongo/db/query/query_knobs.cpp | 38
-rw-r--r--  src/mongo/db/query/query_knobs.h | 104
-rw-r--r--  src/mongo/db/query/query_planner.cpp | 1457
-rw-r--r--  src/mongo/db/query/query_planner.h | 146
-rw-r--r--  src/mongo/db/query/query_planner_array_test.cpp | 1902
-rw-r--r--  src/mongo/db/query/query_planner_common.cpp | 75
-rw-r--r--  src/mongo/db/query/query_planner_common.h | 78
-rw-r--r--  src/mongo/db/query/query_planner_geo_test.cpp | 1536
-rw-r--r--  src/mongo/db/query/query_planner_params.h | 140
-rw-r--r--  src/mongo/db/query/query_planner_test.cpp | 7109
-rw-r--r--  src/mongo/db/query/query_planner_test_fixture.cpp | 523
-rw-r--r--  src/mongo/db/query/query_planner_test_fixture.h | 250
-rw-r--r--  src/mongo/db/query/query_planner_test_lib.cpp | 803
-rw-r--r--  src/mongo/db/query/query_planner_test_lib.h | 28
-rw-r--r--  src/mongo/db/query/query_planner_text_test.cpp | 670
-rw-r--r--  src/mongo/db/query/query_settings.cpp | 215
-rw-r--r--  src/mongo/db/query/query_settings.h | 193
-rw-r--r--  src/mongo/db/query/query_solution.cpp | 1456
-rw-r--r--  src/mongo/db/query/query_solution.h | 1353
-rw-r--r--  src/mongo/db/query/query_yield.cpp | 56
-rw-r--r--  src/mongo/db/query/query_yield.h | 33
-rw-r--r--  src/mongo/db/query/stage_builder.cpp | 505
-rw-r--r--  src/mongo/db/query/stage_builder.h | 36
-rw-r--r--  src/mongo/db/query/stage_types.h | 102
98 files changed, 30341 insertions, 29829 deletions
diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp
index 67f753e0591..ac6ba1627d1 100644
--- a/src/mongo/db/query/canonical_query.cpp
+++ b/src/mongo/db/query/canonical_query.cpp
@@ -39,635 +39,615 @@
namespace mongo {
namespace {
- /**
- * Comparator for MatchExpression nodes. Returns an integer less than, equal to, or greater
- * than zero if 'lhs' is less than, equal to, or greater than 'rhs', respectively.
- *
- * Sorts by:
- * 1) operator type (MatchExpression::MatchType)
- * 2) path name (MatchExpression::path())
- * 3) sort order of children
- * 4) number of children (MatchExpression::numChildren())
- *
- * The third item is needed to ensure that match expression trees which should have the same
- * cache key always sort the same way. If you're wondering when the tuple (operator type, path
- * name) could ever be equal, consider this query:
- *
- * {$and:[{$or:[{a:1},{a:2}]},{$or:[{a:1},{b:2}]}]}
- *
- * The two OR nodes would compare as equal in this case were it not for tuple item #3 (sort
- * order of children).
- */
- int matchExpressionComparator(const MatchExpression* lhs, const MatchExpression* rhs) {
- MatchExpression::MatchType lhsMatchType = lhs->matchType();
- MatchExpression::MatchType rhsMatchType = rhs->matchType();
- if (lhsMatchType != rhsMatchType) {
- return lhsMatchType < rhsMatchType ? -1 : 1;
- }
-
- StringData lhsPath = lhs->path();
- StringData rhsPath = rhs->path();
- int pathsCompare = lhsPath.compare(rhsPath);
- if (pathsCompare != 0) {
- return pathsCompare;
- }
+/**
+ * Comparator for MatchExpression nodes. Returns an integer less than, equal to, or greater
+ * than zero if 'lhs' is less than, equal to, or greater than 'rhs', respectively.
+ *
+ * Sorts by:
+ * 1) operator type (MatchExpression::MatchType)
+ * 2) path name (MatchExpression::path())
+ * 3) sort order of children
+ * 4) number of children (MatchExpression::numChildren())
+ *
+ * The third item is needed to ensure that match expression trees which should have the same
+ * cache key always sort the same way. If you're wondering when the tuple (operator type, path
+ * name) could ever be equal, consider this query:
+ *
+ * {$and:[{$or:[{a:1},{a:2}]},{$or:[{a:1},{b:2}]}]}
+ *
+ * The two OR nodes would compare as equal in this case were it not for tuple item #3 (sort
+ * order of children).
+ */
+int matchExpressionComparator(const MatchExpression* lhs, const MatchExpression* rhs) {
+ MatchExpression::MatchType lhsMatchType = lhs->matchType();
+ MatchExpression::MatchType rhsMatchType = rhs->matchType();
+ if (lhsMatchType != rhsMatchType) {
+ return lhsMatchType < rhsMatchType ? -1 : 1;
+ }
- const size_t numChildren = std::min(lhs->numChildren(), rhs->numChildren());
- for (size_t childIdx = 0; childIdx < numChildren; ++childIdx) {
- int childCompare = matchExpressionComparator(lhs->getChild(childIdx),
- rhs->getChild(childIdx));
- if (childCompare != 0) {
- return childCompare;
- }
- }
+ StringData lhsPath = lhs->path();
+ StringData rhsPath = rhs->path();
+ int pathsCompare = lhsPath.compare(rhsPath);
+ if (pathsCompare != 0) {
+ return pathsCompare;
+ }
- if (lhs->numChildren() != rhs->numChildren()) {
- return lhs->numChildren() < rhs->numChildren() ? -1 : 1;
+ const size_t numChildren = std::min(lhs->numChildren(), rhs->numChildren());
+ for (size_t childIdx = 0; childIdx < numChildren; ++childIdx) {
+ int childCompare =
+ matchExpressionComparator(lhs->getChild(childIdx), rhs->getChild(childIdx));
+ if (childCompare != 0) {
+ return childCompare;
}
-
- // They're equal!
- return 0;
}
- bool matchExpressionLessThan(const MatchExpression* lhs, const MatchExpression* rhs) {
- return matchExpressionComparator(lhs, rhs) < 0;
+ if (lhs->numChildren() != rhs->numChildren()) {
+ return lhs->numChildren() < rhs->numChildren() ? -1 : 1;
}
-} // namespace
+ // They're equal!
+ return 0;
+}
- //
- // These all punt to the many-argumented canonicalize below.
- //
+bool matchExpressionLessThan(const MatchExpression* lhs, const MatchExpression* rhs) {
+ return matchExpressionComparator(lhs, rhs) < 0;
+}
- // static
- Status CanonicalQuery::canonicalize(const std::string& ns,
- const BSONObj& query,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- const BSONObj emptyObj;
- return CanonicalQuery::canonicalize(
- ns, query, emptyObj, emptyObj, 0, 0, out, whereCallback);
- }
-
- // static
- Status CanonicalQuery::canonicalize(const std::string& ns,
- const BSONObj& query,
- bool explain,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- const BSONObj emptyObj;
- return CanonicalQuery::canonicalize(ns,
- query,
- emptyObj, // sort
- emptyObj, // projection
- 0, // skip
- 0, // limit
- emptyObj, // hint
- emptyObj, // min
- emptyObj, // max
- false, // snapshot
- explain,
- out,
- whereCallback);
- }
-
- // static
- Status CanonicalQuery::canonicalize(const std::string& ns,
- const BSONObj& query,
- long long skip,
- long long limit,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- const BSONObj emptyObj;
- return CanonicalQuery::canonicalize(ns,
- query,
- emptyObj,
- emptyObj,
- skip,
- limit,
- out,
- whereCallback);
- }
-
- // static
- Status CanonicalQuery::canonicalize(const std::string& ns,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- return CanonicalQuery::canonicalize(ns, query, sort, proj, 0, 0, out, whereCallback);
- }
-
- // static
- Status CanonicalQuery::canonicalize(const std::string& ns,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- const BSONObj emptyObj;
- return CanonicalQuery::canonicalize(
- ns, query, sort, proj, skip, limit, emptyObj, out, whereCallback);
- }
-
- // static
- Status CanonicalQuery::canonicalize(const std::string& ns,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- const BSONObj emptyObj;
- return CanonicalQuery::canonicalize(ns, query, sort, proj, skip, limit, hint,
- emptyObj, emptyObj,
- false, // snapshot
- false, // explain
- out,
- whereCallback);
+} // namespace
+
+//
+// These all punt to the many-argumented canonicalize below.
+//
+
+// static
+Status CanonicalQuery::canonicalize(const std::string& ns,
+ const BSONObj& query,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ const BSONObj emptyObj;
+ return CanonicalQuery::canonicalize(ns, query, emptyObj, emptyObj, 0, 0, out, whereCallback);
+}
+
+// static
+Status CanonicalQuery::canonicalize(const std::string& ns,
+ const BSONObj& query,
+ bool explain,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ const BSONObj emptyObj;
+ return CanonicalQuery::canonicalize(ns,
+ query,
+ emptyObj, // sort
+ emptyObj, // projection
+ 0, // skip
+ 0, // limit
+ emptyObj, // hint
+ emptyObj, // min
+ emptyObj, // max
+ false, // snapshot
+ explain,
+ out,
+ whereCallback);
+}
+
+// static
+Status CanonicalQuery::canonicalize(const std::string& ns,
+ const BSONObj& query,
+ long long skip,
+ long long limit,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ const BSONObj emptyObj;
+ return CanonicalQuery::canonicalize(
+ ns, query, emptyObj, emptyObj, skip, limit, out, whereCallback);
+}
+
+// static
+Status CanonicalQuery::canonicalize(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ return CanonicalQuery::canonicalize(ns, query, sort, proj, 0, 0, out, whereCallback);
+}
+
+// static
+Status CanonicalQuery::canonicalize(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ const BSONObj emptyObj;
+ return CanonicalQuery::canonicalize(
+ ns, query, sort, proj, skip, limit, emptyObj, out, whereCallback);
+}
+
+// static
+Status CanonicalQuery::canonicalize(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ const BSONObj emptyObj;
+ return CanonicalQuery::canonicalize(ns,
+ query,
+ sort,
+ proj,
+ skip,
+ limit,
+ hint,
+ emptyObj,
+ emptyObj,
+ false, // snapshot
+ false, // explain
+ out,
+ whereCallback);
+}
+
+//
+// These actually call init() on the CQ.
+//
+
+// static
+Status CanonicalQuery::canonicalize(const QueryMessage& qm,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ // Make LiteParsedQuery.
+ auto lpqStatus = LiteParsedQuery::fromLegacyQueryMessage(qm);
+ if (!lpqStatus.isOK()) {
+ return lpqStatus.getStatus();
}
- //
- // These actually call init() on the CQ.
- //
+ return CanonicalQuery::canonicalize(lpqStatus.getValue().release(), out, whereCallback);
+}
- // static
- Status CanonicalQuery::canonicalize(const QueryMessage& qm,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- // Make LiteParsedQuery.
- auto lpqStatus = LiteParsedQuery::fromLegacyQueryMessage(qm);
- if (!lpqStatus.isOK()) {
- return lpqStatus.getStatus();
- }
+// static
+Status CanonicalQuery::canonicalize(LiteParsedQuery* lpq,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ std::unique_ptr<LiteParsedQuery> autoLpq(lpq);
- return CanonicalQuery::canonicalize(lpqStatus.getValue().release(), out, whereCallback);
+ // Make MatchExpression.
+ StatusWithMatchExpression swme =
+ MatchExpressionParser::parse(autoLpq->getFilter(), whereCallback);
+ if (!swme.isOK()) {
+ return swme.getStatus();
}
- // static
- Status CanonicalQuery::canonicalize(LiteParsedQuery* lpq,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- std::unique_ptr<LiteParsedQuery> autoLpq(lpq);
-
- // Make MatchExpression.
- StatusWithMatchExpression swme = MatchExpressionParser::parse(autoLpq->getFilter(),
- whereCallback);
- if (!swme.isOK()) {
- return swme.getStatus();
- }
+ // Make the CQ we'll hopefully return.
+ std::unique_ptr<CanonicalQuery> cq(new CanonicalQuery());
- // Make the CQ we'll hopefully return.
- std::unique_ptr<CanonicalQuery> cq(new CanonicalQuery());
-
- // Takes ownership of lpq and the MatchExpression* in swme.
- Status initStatus = cq->init(autoLpq.release(), whereCallback, swme.getValue());
-
- if (!initStatus.isOK()) { return initStatus; }
- *out = cq.release();
- return Status::OK();
- }
-
- // static
- Status CanonicalQuery::canonicalize(const CanonicalQuery& baseQuery,
- MatchExpression* root,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
-
- // Pass empty sort and projection.
- BSONObj emptyObj;
-
- // 0, 0, 0 is 'ntoskip', 'ntoreturn', and 'queryoptions'
- // false, false is 'snapshot' and 'explain'
- auto lpqStatus = LiteParsedQuery::makeAsOpQuery(baseQuery.ns(),
- 0,
- 0,
- 0,
- baseQuery.getParsed().getFilter(),
- baseQuery.getParsed().getProj(),
- baseQuery.getParsed().getSort(),
- emptyObj,
- emptyObj,
- emptyObj,
- false,
- false);
- if (!lpqStatus.isOK()) {
- return lpqStatus.getStatus();
- }
+ // Takes ownership of lpq and the MatchExpression* in swme.
+ Status initStatus = cq->init(autoLpq.release(), whereCallback, swme.getValue());
- // Make the CQ we'll hopefully return.
- std::unique_ptr<CanonicalQuery> cq(new CanonicalQuery());
- Status initStatus = cq->init(lpqStatus.getValue().release(), whereCallback, root->shallowClone());
-
- if (!initStatus.isOK()) { return initStatus; }
- *out = cq.release();
- return Status::OK();
- }
-
- // static
- Status CanonicalQuery::canonicalize(const std::string& ns,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot,
- bool explain,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
-
- // Pass empty sort and projection.
- BSONObj emptyObj;
-
- auto lpqStatus = LiteParsedQuery::makeAsOpQuery(ns,
- skip,
- limit,
- 0,
- query,
- proj,
- sort,
- hint,
- minObj,
- maxObj,
- snapshot,
- explain);
- if (!lpqStatus.isOK()) {
- return lpqStatus.getStatus();
- }
+ if (!initStatus.isOK()) {
+ return initStatus;
+ }
+ *out = cq.release();
+ return Status::OK();
+}
+
+// static
+Status CanonicalQuery::canonicalize(const CanonicalQuery& baseQuery,
+ MatchExpression* root,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ // Pass empty sort and projection.
+ BSONObj emptyObj;
+
+ // 0, 0, 0 is 'ntoskip', 'ntoreturn', and 'queryoptions'
+ // false, false is 'snapshot' and 'explain'
+ auto lpqStatus = LiteParsedQuery::makeAsOpQuery(baseQuery.ns(),
+ 0,
+ 0,
+ 0,
+ baseQuery.getParsed().getFilter(),
+ baseQuery.getParsed().getProj(),
+ baseQuery.getParsed().getSort(),
+ emptyObj,
+ emptyObj,
+ emptyObj,
+ false,
+ false);
+ if (!lpqStatus.isOK()) {
+ return lpqStatus.getStatus();
+ }
- auto& lpq = lpqStatus.getValue();
+ // Make the CQ we'll hopefully return.
+ std::unique_ptr<CanonicalQuery> cq(new CanonicalQuery());
+ Status initStatus =
+ cq->init(lpqStatus.getValue().release(), whereCallback, root->shallowClone());
- // Build a parse tree from the BSONObj in the parsed query.
- StatusWithMatchExpression swme =
- MatchExpressionParser::parse(lpq->getFilter(), whereCallback);
- if (!swme.isOK()) {
- return swme.getStatus();
- }
+ if (!initStatus.isOK()) {
+ return initStatus;
+ }
+ *out = cq.release();
+ return Status::OK();
+}
+
+// static
+Status CanonicalQuery::canonicalize(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot,
+ bool explain,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ // Pass empty sort and projection.
+ BSONObj emptyObj;
+
+ auto lpqStatus = LiteParsedQuery::makeAsOpQuery(
+ ns, skip, limit, 0, query, proj, sort, hint, minObj, maxObj, snapshot, explain);
+ if (!lpqStatus.isOK()) {
+ return lpqStatus.getStatus();
+ }
- // Make the CQ we'll hopefully return.
- std::unique_ptr<CanonicalQuery> cq(new CanonicalQuery());
- // Takes ownership of lpq and the MatchExpression* in swme.
- Status initStatus = cq->init(lpq.release(), whereCallback, swme.getValue());
+ auto& lpq = lpqStatus.getValue();
- if (!initStatus.isOK()) { return initStatus; }
- *out = cq.release();
- return Status::OK();
+ // Build a parse tree from the BSONObj in the parsed query.
+ StatusWithMatchExpression swme = MatchExpressionParser::parse(lpq->getFilter(), whereCallback);
+ if (!swme.isOK()) {
+ return swme.getStatus();
}
- Status CanonicalQuery::init(LiteParsedQuery* lpq,
- const MatchExpressionParser::WhereCallback& whereCallback,
- MatchExpression* root) {
- _pq.reset(lpq);
+ // Make the CQ we'll hopefully return.
+ std::unique_ptr<CanonicalQuery> cq(new CanonicalQuery());
+ // Takes ownership of lpq and the MatchExpression* in swme.
+ Status initStatus = cq->init(lpq.release(), whereCallback, swme.getValue());
- // Normalize, sort and validate tree.
- root = normalizeTree(root);
+ if (!initStatus.isOK()) {
+ return initStatus;
+ }
+ *out = cq.release();
+ return Status::OK();
+}
+
+Status CanonicalQuery::init(LiteParsedQuery* lpq,
+ const MatchExpressionParser::WhereCallback& whereCallback,
+ MatchExpression* root) {
+ _pq.reset(lpq);
+
+ // Normalize, sort and validate tree.
+ root = normalizeTree(root);
+
+ sortTree(root);
+ _root.reset(root);
+ Status validStatus = isValid(root, *_pq);
+ if (!validStatus.isOK()) {
+ return validStatus;
+ }
- sortTree(root);
- _root.reset(root);
- Status validStatus = isValid(root, *_pq);
- if (!validStatus.isOK()) {
- return validStatus;
+ // Validate the projection if there is one.
+ if (!_pq->getProj().isEmpty()) {
+ ParsedProjection* pp;
+ Status projStatus = ParsedProjection::make(_pq->getProj(), _root.get(), &pp, whereCallback);
+ if (!projStatus.isOK()) {
+ return projStatus;
}
+ _proj.reset(pp);
+ }
- // Validate the projection if there is one.
- if (!_pq->getProj().isEmpty()) {
- ParsedProjection* pp;
- Status projStatus =
- ParsedProjection::make(_pq->getProj(), _root.get(), &pp, whereCallback);
- if (!projStatus.isOK()) {
- return projStatus;
- }
- _proj.reset(pp);
- }
+ return Status::OK();
+}
- return Status::OK();
- }
+// static
+bool CanonicalQuery::isSimpleIdQuery(const BSONObj& query) {
+ bool hasID = false;
- // static
- bool CanonicalQuery::isSimpleIdQuery(const BSONObj& query) {
- bool hasID = false;
+ BSONObjIterator it(query);
+ while (it.more()) {
+ BSONElement elt = it.next();
+ if (str::equals("_id", elt.fieldName())) {
+ // Verify that the query on _id is a simple equality.
+ hasID = true;
- BSONObjIterator it(query);
- while (it.more()) {
- BSONElement elt = it.next();
- if (str::equals("_id", elt.fieldName() ) ) {
- // Verify that the query on _id is a simple equality.
- hasID = true;
-
- if (elt.type() == Object) {
- // If the value is an object, it can't have a query operator
- // (must be a literal object match).
- if (elt.Obj().firstElementFieldName()[0] == '$') {
- return false;
- }
- }
- else if (!elt.isSimpleType() && BinData != elt.type()) {
-                    // The _id field cannot be something like { _id : { $gt : ...
- // But it can be BinData.
+ if (elt.type() == Object) {
+ // If the value is an object, it can't have a query operator
+ // (must be a literal object match).
+ if (elt.Obj().firstElementFieldName()[0] == '$') {
return false;
}
- }
- else if (elt.fieldName()[0] == '$' &&
- (str::equals("$isolated", elt.fieldName())||
- str::equals("$atomic", elt.fieldName()))) {
- // ok, passthrough
- }
- else {
- // If the field is not _id, it must be $isolated/$atomic.
+ } else if (!elt.isSimpleType() && BinData != elt.type()) {
+                // The _id field cannot be something like { _id : { $gt : ...
+ // But it can be BinData.
return false;
}
+ } else if (elt.fieldName()[0] == '$' && (str::equals("$isolated", elt.fieldName()) ||
+ str::equals("$atomic", elt.fieldName()))) {
+ // ok, passthrough
+ } else {
+ // If the field is not _id, it must be $isolated/$atomic.
+ return false;
}
-
- return hasID;
}
- // static
- MatchExpression* CanonicalQuery::normalizeTree(MatchExpression* root) {
- // root->isLogical() is true now. We care about AND, OR, and NOT. NOR currently scares us.
- if (MatchExpression::AND == root->matchType() || MatchExpression::OR == root->matchType()) {
- // We could have AND of AND of AND. Make sure we clean up our children before merging
- // them.
- // UNITTEST 11738048
- for (size_t i = 0; i < root->getChildVector()->size(); ++i) {
- (*root->getChildVector())[i] = normalizeTree(root->getChild(i));
- }
-
- // If any of our children are of the same logical operator that we are, we remove the
- // child's children and append them to ourselves after we examine all children.
- std::vector<MatchExpression*> absorbedChildren;
-
- for (size_t i = 0; i < root->numChildren();) {
- MatchExpression* child = root->getChild(i);
- if (child->matchType() == root->matchType()) {
- // AND of an AND or OR of an OR. Absorb child's children into ourself.
- for (size_t j = 0; j < child->numChildren(); ++j) {
- absorbedChildren.push_back(child->getChild(j));
- }
- // TODO(opt): this is possibly n^2-ish
- root->getChildVector()->erase(root->getChildVector()->begin() + i);
- child->getChildVector()->clear();
- // Note that this only works because we cleared the child's children
- delete child;
- // Don't increment 'i' as the current child 'i' used to be child 'i+1'
- }
- else {
- ++i;
+ return hasID;
+}
+
+// static
+MatchExpression* CanonicalQuery::normalizeTree(MatchExpression* root) {
+ // root->isLogical() is true now. We care about AND, OR, and NOT. NOR currently scares us.
+ if (MatchExpression::AND == root->matchType() || MatchExpression::OR == root->matchType()) {
+ // We could have AND of AND of AND. Make sure we clean up our children before merging
+ // them.
+ // UNITTEST 11738048
+ for (size_t i = 0; i < root->getChildVector()->size(); ++i) {
+ (*root->getChildVector())[i] = normalizeTree(root->getChild(i));
+ }
+
+ // If any of our children are of the same logical operator that we are, we remove the
+ // child's children and append them to ourselves after we examine all children.
+ std::vector<MatchExpression*> absorbedChildren;
+
+ for (size_t i = 0; i < root->numChildren();) {
+ MatchExpression* child = root->getChild(i);
+ if (child->matchType() == root->matchType()) {
+ // AND of an AND or OR of an OR. Absorb child's children into ourself.
+ for (size_t j = 0; j < child->numChildren(); ++j) {
+ absorbedChildren.push_back(child->getChild(j));
}
- }
-
- root->getChildVector()->insert(root->getChildVector()->end(),
- absorbedChildren.begin(),
- absorbedChildren.end());
-
- // AND of 1 thing is the thing, OR of 1 thing is the thing.
- if (1 == root->numChildren()) {
- MatchExpression* ret = root->getChild(0);
- root->getChildVector()->clear();
- delete root;
- return ret;
- }
- }
- else if (MatchExpression::NOT == root->matchType()) {
- // Normalize the rest of the tree hanging off this NOT node.
- NotMatchExpression* nme = static_cast<NotMatchExpression*>(root);
- MatchExpression* child = nme->releaseChild();
- // normalizeTree(...) takes ownership of 'child', and then
- // transfers ownership of its return value to 'nme'.
- nme->resetChild(normalizeTree(child));
- }
- else if (MatchExpression::ELEM_MATCH_VALUE == root->matchType()) {
- // Just normalize our children.
- for (size_t i = 0; i < root->getChildVector()->size(); ++i) {
- (*root->getChildVector())[i] = normalizeTree(root->getChild(i));
+ // TODO(opt): this is possibly n^2-ish
+ root->getChildVector()->erase(root->getChildVector()->begin() + i);
+ child->getChildVector()->clear();
+ // Note that this only works because we cleared the child's children
+ delete child;
+ // Don't increment 'i' as the current child 'i' used to be child 'i+1'
+ } else {
+ ++i;
}
}
- return root;
+ root->getChildVector()->insert(
+ root->getChildVector()->end(), absorbedChildren.begin(), absorbedChildren.end());
+
+ // AND of 1 thing is the thing, OR of 1 thing is the thing.
+ if (1 == root->numChildren()) {
+ MatchExpression* ret = root->getChild(0);
+ root->getChildVector()->clear();
+ delete root;
+ return ret;
+ }
+ } else if (MatchExpression::NOT == root->matchType()) {
+ // Normalize the rest of the tree hanging off this NOT node.
+ NotMatchExpression* nme = static_cast<NotMatchExpression*>(root);
+ MatchExpression* child = nme->releaseChild();
+ // normalizeTree(...) takes ownership of 'child', and then
+ // transfers ownership of its return value to 'nme'.
+ nme->resetChild(normalizeTree(child));
+ } else if (MatchExpression::ELEM_MATCH_VALUE == root->matchType()) {
+ // Just normalize our children.
+ for (size_t i = 0; i < root->getChildVector()->size(); ++i) {
+ (*root->getChildVector())[i] = normalizeTree(root->getChild(i));
+ }
}
- // static
- void CanonicalQuery::sortTree(MatchExpression* tree) {
- for (size_t i = 0; i < tree->numChildren(); ++i) {
- sortTree(tree->getChild(i));
- }
- std::vector<MatchExpression*>* children = tree->getChildVector();
- if (NULL != children) {
- std::sort(children->begin(), children->end(), matchExpressionLessThan);
- }
+ return root;
+}
+
+// static
+void CanonicalQuery::sortTree(MatchExpression* tree) {
+ for (size_t i = 0; i < tree->numChildren(); ++i) {
+ sortTree(tree->getChild(i));
+ }
+ std::vector<MatchExpression*>* children = tree->getChildVector();
+ if (NULL != children) {
+ std::sort(children->begin(), children->end(), matchExpressionLessThan);
}
+}
- // static
- size_t CanonicalQuery::countNodes(const MatchExpression* root,
- MatchExpression::MatchType type) {
- size_t sum = 0;
- if (type == root->matchType()) {
- sum = 1;
- }
- for (size_t i = 0; i < root->numChildren(); ++i) {
- sum += countNodes(root->getChild(i), type);
- }
- return sum;
+// static
+size_t CanonicalQuery::countNodes(const MatchExpression* root, MatchExpression::MatchType type) {
+ size_t sum = 0;
+ if (type == root->matchType()) {
+ sum = 1;
}
+ for (size_t i = 0; i < root->numChildren(); ++i) {
+ sum += countNodes(root->getChild(i), type);
+ }
+ return sum;
+}
- /**
- * Does 'root' have a subtree of type 'subtreeType' with a node of type 'childType' inside?
- */
- bool hasNodeInSubtree(MatchExpression* root, MatchExpression::MatchType childType,
- MatchExpression::MatchType subtreeType) {
- if (subtreeType == root->matchType()) {
- return QueryPlannerCommon::hasNode(root, childType);
- }
- for (size_t i = 0; i < root->numChildren(); ++i) {
- if (hasNodeInSubtree(root->getChild(i), childType, subtreeType)) {
- return true;
- }
+/**
+ * Does 'root' have a subtree of type 'subtreeType' with a node of type 'childType' inside?
+ */
+bool hasNodeInSubtree(MatchExpression* root,
+ MatchExpression::MatchType childType,
+ MatchExpression::MatchType subtreeType) {
+ if (subtreeType == root->matchType()) {
+ return QueryPlannerCommon::hasNode(root, childType);
+ }
+ for (size_t i = 0; i < root->numChildren(); ++i) {
+ if (hasNodeInSubtree(root->getChild(i), childType, subtreeType)) {
+ return true;
}
- return false;
}
+ return false;
+}
- // static
- Status CanonicalQuery::isValid(MatchExpression* root, const LiteParsedQuery& parsed) {
- // Analysis below should be done after squashing the tree to make it clearer.
+// static
+Status CanonicalQuery::isValid(MatchExpression* root, const LiteParsedQuery& parsed) {
+ // Analysis below should be done after squashing the tree to make it clearer.
- // There can only be one TEXT. If there is a TEXT, it cannot appear inside a NOR.
- //
- // Note that the query grammar (as enforced by the MatchExpression parser) forbids TEXT
- // inside of value-expression clauses like NOT, so we don't check those here.
- size_t numText = countNodes(root, MatchExpression::TEXT);
- if (numText > 1) {
- return Status(ErrorCodes::BadValue, "Too many text expressions");
- }
- else if (1 == numText) {
- if (hasNodeInSubtree(root, MatchExpression::TEXT, MatchExpression::NOR)) {
- return Status(ErrorCodes::BadValue, "text expression not allowed in nor");
- }
+ // There can only be one TEXT. If there is a TEXT, it cannot appear inside a NOR.
+ //
+ // Note that the query grammar (as enforced by the MatchExpression parser) forbids TEXT
+ // inside of value-expression clauses like NOT, so we don't check those here.
+ size_t numText = countNodes(root, MatchExpression::TEXT);
+ if (numText > 1) {
+ return Status(ErrorCodes::BadValue, "Too many text expressions");
+ } else if (1 == numText) {
+ if (hasNodeInSubtree(root, MatchExpression::TEXT, MatchExpression::NOR)) {
+ return Status(ErrorCodes::BadValue, "text expression not allowed in nor");
}
+ }
- // There can only be one NEAR. If there is a NEAR, it must be either the root or the root
- // must be an AND and its child must be a NEAR.
- size_t numGeoNear = countNodes(root, MatchExpression::GEO_NEAR);
- if (numGeoNear > 1) {
- return Status(ErrorCodes::BadValue, "Too many geoNear expressions");
- }
- else if (1 == numGeoNear) {
- bool topLevel = false;
- if (MatchExpression::GEO_NEAR == root->matchType()) {
- topLevel = true;
- }
- else if (MatchExpression::AND == root->matchType()) {
- for (size_t i = 0; i < root->numChildren(); ++i) {
- if (MatchExpression::GEO_NEAR == root->getChild(i)->matchType()) {
- topLevel = true;
- break;
- }
+ // There can only be one NEAR. If there is a NEAR, it must be either the root or the root
+ // must be an AND and its child must be a NEAR.
+ size_t numGeoNear = countNodes(root, MatchExpression::GEO_NEAR);
+ if (numGeoNear > 1) {
+ return Status(ErrorCodes::BadValue, "Too many geoNear expressions");
+ } else if (1 == numGeoNear) {
+ bool topLevel = false;
+ if (MatchExpression::GEO_NEAR == root->matchType()) {
+ topLevel = true;
+ } else if (MatchExpression::AND == root->matchType()) {
+ for (size_t i = 0; i < root->numChildren(); ++i) {
+ if (MatchExpression::GEO_NEAR == root->getChild(i)->matchType()) {
+ topLevel = true;
+ break;
}
}
- if (!topLevel) {
- return Status(ErrorCodes::BadValue, "geoNear must be top-level expr");
- }
}
-
- // NEAR cannot have a $natural sort or $natural hint.
- if (numGeoNear > 0) {
- BSONObj sortObj = parsed.getSort();
- if (!sortObj["$natural"].eoo()) {
- return Status(ErrorCodes::BadValue,
- "geoNear expression not allowed with $natural sort order");
- }
-
- BSONObj hintObj = parsed.getHint();
- if (!hintObj["$natural"].eoo()) {
- return Status(ErrorCodes::BadValue,
- "geoNear expression not allowed with $natural hint");
- }
+ if (!topLevel) {
+ return Status(ErrorCodes::BadValue, "geoNear must be top-level expr");
}
+ }
- // TEXT and NEAR cannot both be in the query.
- if (numText > 0 && numGeoNear > 0) {
- return Status(ErrorCodes::BadValue, "text and geoNear not allowed in same query");
+ // NEAR cannot have a $natural sort or $natural hint.
+ if (numGeoNear > 0) {
+ BSONObj sortObj = parsed.getSort();
+ if (!sortObj["$natural"].eoo()) {
+ return Status(ErrorCodes::BadValue,
+ "geoNear expression not allowed with $natural sort order");
}
- // TEXT and {$natural: ...} sort order cannot both be in the query.
- if (numText > 0) {
- const BSONObj& sortObj = parsed.getSort();
- BSONObjIterator it(sortObj);
- while (it.more()) {
- BSONElement elt = it.next();
- if (str::equals("$natural", elt.fieldName())) {
- return Status(ErrorCodes::BadValue,
- "text expression not allowed with $natural sort order");
- }
- }
+ BSONObj hintObj = parsed.getHint();
+ if (!hintObj["$natural"].eoo()) {
+ return Status(ErrorCodes::BadValue,
+ "geoNear expression not allowed with $natural hint");
}
+ }
- // TEXT and hint cannot both be in the query.
- if (numText > 0 && !parsed.getHint().isEmpty()) {
- return Status(ErrorCodes::BadValue, "text and hint not allowed in same query");
- }
+ // TEXT and NEAR cannot both be in the query.
+ if (numText > 0 && numGeoNear > 0) {
+ return Status(ErrorCodes::BadValue, "text and geoNear not allowed in same query");
+ }
- // TEXT and snapshot cannot both be in the query.
- if (numText > 0 && parsed.isSnapshot()) {
- return Status(ErrorCodes::BadValue, "text and snapshot not allowed in same query");
+ // TEXT and {$natural: ...} sort order cannot both be in the query.
+ if (numText > 0) {
+ const BSONObj& sortObj = parsed.getSort();
+ BSONObjIterator it(sortObj);
+ while (it.more()) {
+ BSONElement elt = it.next();
+ if (str::equals("$natural", elt.fieldName())) {
+ return Status(ErrorCodes::BadValue,
+ "text expression not allowed with $natural sort order");
+ }
}
+ }
- return Status::OK();
+ // TEXT and hint cannot both be in the query.
+ if (numText > 0 && !parsed.getHint().isEmpty()) {
+ return Status(ErrorCodes::BadValue, "text and hint not allowed in same query");
}
- // static
- // XXX TODO: This does not belong here at all.
- MatchExpression* CanonicalQuery::logicalRewrite(MatchExpression* tree) {
- // Only thing we do is pull an OR up at the root.
- if (MatchExpression::AND != tree->matchType()) {
- return tree;
- }
+ // TEXT and snapshot cannot both be in the query.
+ if (numText > 0 && parsed.isSnapshot()) {
+ return Status(ErrorCodes::BadValue, "text and snapshot not allowed in same query");
+ }
- // We want to bail out ASAP if we have nothing to do here.
- size_t numOrs = 0;
- for (size_t i = 0; i < tree->numChildren(); ++i) {
- if (MatchExpression::OR == tree->getChild(i)->matchType()) {
- ++numOrs;
- }
- }
+ return Status::OK();
+}
- // Only do this for one OR right now.
- if (1 != numOrs) {
- return tree;
- }
+// static
+// XXX TODO: This does not belong here at all.
+MatchExpression* CanonicalQuery::logicalRewrite(MatchExpression* tree) {
+ // Only thing we do is pull an OR up at the root.
+ if (MatchExpression::AND != tree->matchType()) {
+ return tree;
+ }
- // Detach the OR from the root.
- invariant(NULL != tree->getChildVector());
- std::vector<MatchExpression*>& rootChildren = *tree->getChildVector();
- MatchExpression* orChild = NULL;
- for (size_t i = 0; i < rootChildren.size(); ++i) {
- if (MatchExpression::OR == rootChildren[i]->matchType()) {
- orChild = rootChildren[i];
- rootChildren.erase(rootChildren.begin() + i);
- break;
- }
+ // We want to bail out ASAP if we have nothing to do here.
+ size_t numOrs = 0;
+ for (size_t i = 0; i < tree->numChildren(); ++i) {
+ if (MatchExpression::OR == tree->getChild(i)->matchType()) {
+ ++numOrs;
}
+ }
- // AND the existing root with each or child.
- invariant(NULL != orChild);
- invariant(NULL != orChild->getChildVector());
- std::vector<MatchExpression*>& orChildren = *orChild->getChildVector();
- for (size_t i = 0; i < orChildren.size(); ++i) {
- AndMatchExpression* ama = new AndMatchExpression();
- ama->add(orChildren[i]);
- ama->add(tree->shallowClone());
- orChildren[i] = ama;
- }
- delete tree;
+ // Only do this for one OR right now.
+ if (1 != numOrs) {
+ return tree;
+ }
- // Clean up any consequences from this tomfoolery.
- return normalizeTree(orChild);
+ // Detach the OR from the root.
+ invariant(NULL != tree->getChildVector());
+ std::vector<MatchExpression*>& rootChildren = *tree->getChildVector();
+ MatchExpression* orChild = NULL;
+ for (size_t i = 0; i < rootChildren.size(); ++i) {
+ if (MatchExpression::OR == rootChildren[i]->matchType()) {
+ orChild = rootChildren[i];
+ rootChildren.erase(rootChildren.begin() + i);
+ break;
+ }
}
- std::string CanonicalQuery::toString() const {
- str::stream ss;
- ss << "ns=" << _pq->ns();
+ // AND the existing root with each or child.
+ invariant(NULL != orChild);
+ invariant(NULL != orChild->getChildVector());
+ std::vector<MatchExpression*>& orChildren = *orChild->getChildVector();
+ for (size_t i = 0; i < orChildren.size(); ++i) {
+ AndMatchExpression* ama = new AndMatchExpression();
+ ama->add(orChildren[i]);
+ ama->add(tree->shallowClone());
+ orChildren[i] = ama;
+ }
+ delete tree;
- if (_pq->getBatchSize()) {
- ss << " batchSize=" << *_pq->getBatchSize();
- }
+ // Clean up any consequences from this tomfoolery.
+ return normalizeTree(orChild);
+}
- if (_pq->getLimit()) {
- ss << " limit=" << *_pq->getLimit();
- }
+std::string CanonicalQuery::toString() const {
+ str::stream ss;
+ ss << "ns=" << _pq->ns();
- ss << " skip=" << _pq->getSkip() << "\n";
+ if (_pq->getBatchSize()) {
+ ss << " batchSize=" << *_pq->getBatchSize();
+ }
- // The expression tree puts an endl on for us.
- ss << "Tree: " << _root->toString();
- ss << "Sort: " << _pq->getSort().toString() << '\n';
- ss << "Proj: " << _pq->getProj().toString() << '\n';
- return ss;
+ if (_pq->getLimit()) {
+ ss << " limit=" << *_pq->getLimit();
}
- std::string CanonicalQuery::toStringShort() const {
- str::stream ss;
- ss << "query: " << _pq->getFilter().toString()
- << " sort: " << _pq->getSort().toString()
- << " projection: " << _pq->getProj().toString()
- << " skip: " << _pq->getSkip();
+ ss << " skip=" << _pq->getSkip() << "\n";
- if (_pq->getBatchSize()) {
- ss << " batchSize: " << *_pq->getBatchSize();
- }
+ // The expression tree puts an endl on for us.
+ ss << "Tree: " << _root->toString();
+ ss << "Sort: " << _pq->getSort().toString() << '\n';
+ ss << "Proj: " << _pq->getProj().toString() << '\n';
+ return ss;
+}
- if (_pq->getLimit()) {
- ss << " limit: " << *_pq->getLimit();
- }
+std::string CanonicalQuery::toStringShort() const {
+ str::stream ss;
+ ss << "query: " << _pq->getFilter().toString() << " sort: " << _pq->getSort().toString()
+ << " projection: " << _pq->getProj().toString() << " skip: " << _pq->getSkip();
- return ss;
+ if (_pq->getBatchSize()) {
+ ss << " batchSize: " << *_pq->getBatchSize();
}
+ if (_pq->getLimit()) {
+ ss << " limit: " << *_pq->getLimit();
+ }
+
+ return ss;
+}
+
} // namespace mongo
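The canonicalize() overloads reformatted above all funnel into the many-argument version and, per the comments in the file, hand ownership of the resulting CanonicalQuery to the caller on Status::OK(). The following caller-side sketch is illustrative only (the namespace string, filter, and function name are hypothetical and not part of this change); it uses the three-argument overload and the debugging helpers declared in canonical_query.h:

    #include <memory>

    #include "mongo/db/jsobj.h"
    #include "mongo/db/query/canonical_query.h"

    mongo::Status canonicalizeExample() {
        mongo::CanonicalQuery* rawCq = NULL;
        // Hypothetical namespace and filter, for illustration only.
        mongo::Status status = mongo::CanonicalQuery::canonicalize(
            "test.widgets", BSON("x" << 1 << "y" << BSON("$gt" << 5)), &rawCq);
        if (!status.isOK()) {
            // 'out' is only set on success, so there is nothing to free here.
            return status;
        }
        // The caller owns the pointer once canonicalize() returns Status::OK().
        std::unique_ptr<mongo::CanonicalQuery> cq(rawCq);
        // cq->root() is the normalized, sorted MatchExpression tree;
        // cq->toString() dumps ns, tree, sort, and projection for debugging.
        return mongo::Status::OK();
    }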
diff --git a/src/mongo/db/query/canonical_query.h b/src/mongo/db/query/canonical_query.h
index 365a06e4802..58a2c46f3a8 100644
--- a/src/mongo/db/query/canonical_query.h
+++ b/src/mongo/db/query/canonical_query.h
@@ -38,188 +38,199 @@
namespace mongo {
- class CanonicalQuery {
- public:
- /**
- * Caller owns the pointer in 'out' if any call to canonicalize returns Status::OK().
- *
- * Used for legacy find through the OP_QUERY message.
- */
- static Status canonicalize(const QueryMessage& qm,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- /**
- * Takes ownership of 'lpq'.
- *
- * Caller owns the pointer in 'out' if any call to canonicalize returns Status::OK().
- *
- * Used for finds using the find command path.
- */
- static Status canonicalize(LiteParsedQuery* lpq,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- /**
- * For testing or for internal clients to use.
- */
-
- /**
- * Used for creating sub-queries from an existing CanonicalQuery.
- *
- * 'root' must be an expression in baseQuery.root().
- *
- * Does not take ownership of 'root'.
- */
- static Status canonicalize(const CanonicalQuery& baseQuery,
- MatchExpression* root,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- static Status canonicalize(const std::string& ns,
- const BSONObj& query,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- static Status canonicalize(const std::string& ns,
- const BSONObj& query,
- bool explain,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- static Status canonicalize(const std::string& ns,
- const BSONObj& query,
- long long skip,
- long long limit,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- static Status canonicalize(const std::string& ns,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- static Status canonicalize(const std::string& ns,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- static Status canonicalize(const std::string& ns,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- static Status canonicalize(const std::string& ns,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot,
- bool explain,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- /**
- * Returns true if "query" describes an exact-match query on _id, possibly with
- * the $isolated/$atomic modifier.
- */
- static bool isSimpleIdQuery(const BSONObj& query);
-
- // What namespace is this query over?
- const std::string& ns() const { return _pq->ns(); }
-
- //
- // Accessors for the query
- //
- MatchExpression* root() const { return _root.get(); }
- BSONObj getQueryObj() const { return _pq->getFilter(); }
- const LiteParsedQuery& getParsed() const { return *_pq; }
- const ParsedProjection* getProj() const { return _proj.get(); }
-
- // Debugging
- std::string toString() const;
- std::string toStringShort() const;
-
- /**
- * Validates match expression, checking for certain
- * combinations of operators in match expression and
- * query options in LiteParsedQuery.
- * Since 'root' is derived from 'filter' in LiteParsedQuery,
- * 'filter' is not validated.
- *
- * TODO: Move this to query_validator.cpp
- */
- static Status isValid(MatchExpression* root, const LiteParsedQuery& parsed);
-
- /**
- * Returns the normalized version of the subtree rooted at 'root'.
- *
- * Takes ownership of 'root'.
- */
- static MatchExpression* normalizeTree(MatchExpression* root);
-
- /**
- * Traverses expression tree post-order.
- * Sorts children at each non-leaf node by (MatchType, path(), children, number of children)
- */
- static void sortTree(MatchExpression* tree);
-
- /**
- * Returns a count of 'type' nodes in expression tree.
- */
- static size_t countNodes(const MatchExpression* root, MatchExpression::MatchType type);
-
- /**
- * Takes ownership of 'tree'. Performs some rewriting of the query to a logically
- * equivalent but more digestible form.
- *
- * TODO: This doesn't entirely belong here. Really we'd do this while exploring
- * solutions in an enumeration setting but given the current lack of pruning
- * while exploring the enumeration space we do it here.
- */
- static MatchExpression* logicalRewrite(MatchExpression* tree);
- private:
- // You must go through canonicalize to create a CanonicalQuery.
- CanonicalQuery() { }
-
- /**
- * Takes ownership of 'root' and 'lpq'.
- */
- Status init(LiteParsedQuery* lpq,
- const MatchExpressionParser::WhereCallback& whereCallback,
- MatchExpression* root);
-
- std::unique_ptr<LiteParsedQuery> _pq;
-
- // _root points into _pq->getFilter()
- std::unique_ptr<MatchExpression> _root;
-
- std::unique_ptr<ParsedProjection> _proj;
- };
+class CanonicalQuery {
+public:
+ /**
+ * Caller owns the pointer in 'out' if any call to canonicalize returns Status::OK().
+ *
+ * Used for legacy find through the OP_QUERY message.
+ */
+ static Status canonicalize(const QueryMessage& qm,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ /**
+ * Takes ownership of 'lpq'.
+ *
+ * Caller owns the pointer in 'out' if any call to canonicalize returns Status::OK().
+ *
+ * Used for finds using the find command path.
+ */
+ static Status canonicalize(LiteParsedQuery* lpq,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ /**
+ * For testing or for internal clients to use.
+ */
+
+ /**
+ * Used for creating sub-queries from an existing CanonicalQuery.
+ *
+ * 'root' must be an expression in baseQuery.root().
+ *
+ * Does not take ownership of 'root'.
+ */
+ static Status canonicalize(const CanonicalQuery& baseQuery,
+ MatchExpression* root,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ static Status canonicalize(const std::string& ns,
+ const BSONObj& query,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ static Status canonicalize(const std::string& ns,
+ const BSONObj& query,
+ bool explain,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ static Status canonicalize(const std::string& ns,
+ const BSONObj& query,
+ long long skip,
+ long long limit,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ static Status canonicalize(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ static Status canonicalize(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ static Status canonicalize(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ static Status canonicalize(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot,
+ bool explain,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ /**
+ * Returns true if "query" describes an exact-match query on _id, possibly with
+ * the $isolated/$atomic modifier.
+ */
+ static bool isSimpleIdQuery(const BSONObj& query);
+
+ // What namespace is this query over?
+ const std::string& ns() const {
+ return _pq->ns();
+ }
+
+ //
+ // Accessors for the query
+ //
+ MatchExpression* root() const {
+ return _root.get();
+ }
+ BSONObj getQueryObj() const {
+ return _pq->getFilter();
+ }
+ const LiteParsedQuery& getParsed() const {
+ return *_pq;
+ }
+ const ParsedProjection* getProj() const {
+ return _proj.get();
+ }
+
+ // Debugging
+ std::string toString() const;
+ std::string toStringShort() const;
+
+ /**
+ * Validates match expression, checking for certain
+ * combinations of operators in match expression and
+ * query options in LiteParsedQuery.
+ * Since 'root' is derived from 'filter' in LiteParsedQuery,
+ * 'filter' is not validated.
+ *
+ * TODO: Move this to query_validator.cpp
+ */
+ static Status isValid(MatchExpression* root, const LiteParsedQuery& parsed);
+
+ /**
+ * Returns the normalized version of the subtree rooted at 'root'.
+ *
+ * Takes ownership of 'root'.
+ */
+ static MatchExpression* normalizeTree(MatchExpression* root);
+
+ /**
+ * Traverses expression tree post-order.
+ * Sorts children at each non-leaf node by (MatchType, path(), children, number of children)
+ */
+ static void sortTree(MatchExpression* tree);
+
+ /**
+ * Returns a count of 'type' nodes in expression tree.
+ */
+ static size_t countNodes(const MatchExpression* root, MatchExpression::MatchType type);
+
+ /**
+ * Takes ownership of 'tree'. Performs some rewriting of the query to a logically
+ * equivalent but more digestible form.
+ *
+ * TODO: This doesn't entirely belong here. Really we'd do this while exploring
+ * solutions in an enumeration setting but given the current lack of pruning
+ * while exploring the enumeration space we do it here.
+ */
+ static MatchExpression* logicalRewrite(MatchExpression* tree);
+
+private:
+ // You must go through canonicalize to create a CanonicalQuery.
+ CanonicalQuery() {}
+
+ /**
+ * Takes ownership of 'root' and 'lpq'.
+ */
+ Status init(LiteParsedQuery* lpq,
+ const MatchExpressionParser::WhereCallback& whereCallback,
+ MatchExpression* root);
+
+ std::unique_ptr<LiteParsedQuery> _pq;
+
+ // _root points into _pq->getFilter()
+ std::unique_ptr<MatchExpression> _root;
+
+ std::unique_ptr<ParsedProjection> _proj;
+};
} // namespace mongo
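The header above documents the ownership contract of normalizeTree() (takes ownership of 'root' and returns the new root) and the post-order child sort performed by sortTree(). A minimal sketch of that contract follows, assuming a hypothetical filter and the single-argument MatchExpressionParser::parse() overload that also appears in the test file below; it is an illustration, not code from this commit:

    #include <memory>

    #include "mongo/db/json.h"
    #include "mongo/db/matcher/expression_parser.h"
    #include "mongo/db/query/canonical_query.h"

    void normalizeExample() {
        // Hypothetical filter: a nested $and that normalization should collapse.
        mongo::BSONObj filter = mongo::fromjson("{$and: [{$and: [{b: 2}, {a: 1}]}]}");

        mongo::StatusWithMatchExpression parsed = mongo::MatchExpressionParser::parse(filter);
        if (!parsed.isOK()) {
            return;  // parse error; nothing was allocated for us
        }

        // normalizeTree() takes ownership of the parsed tree and may return a
        // different node as the new root; sortTree() then orders children in
        // place using the comparator shown in canonical_query.cpp.
        mongo::MatchExpression* root = mongo::CanonicalQuery::normalizeTree(parsed.getValue());
        mongo::CanonicalQuery::sortTree(root);
        std::unique_ptr<mongo::MatchExpression> owned(root);

        // 'root' is now a single AND whose children sort as {a: 1}, {b: 2}, so
        // logically equivalent filters normalize to the same tree shape.
    }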
diff --git a/src/mongo/db/query/canonical_query_test.cpp b/src/mongo/db/query/canonical_query_test.cpp
index d125cb692ab..d3fb9a55fd4 100644
--- a/src/mongo/db/query/canonical_query_test.cpp
+++ b/src/mongo/db/query/canonical_query_test.cpp
@@ -34,534 +34,507 @@
namespace mongo {
namespace {
- using std::string;
- using std::unique_ptr;
- using unittest::assertGet;
-
- static const char* ns = "somebogusns";
-
- /**
- * Helper function to parse the given BSON object as a MatchExpression, checks the status,
- * and return the MatchExpression*.
- */
- MatchExpression* parseMatchExpression(const BSONObj& obj) {
- StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
- if (!status.isOK()) {
- mongoutils::str::stream ss;
- ss << "failed to parse query: " << obj.toString()
- << ". Reason: " << status.getStatus().toString();
- FAIL(ss);
- }
- return status.getValue();
- }
+using std::string;
+using std::unique_ptr;
+using unittest::assertGet;
- /**
- * Helper function which parses and normalizes 'queryStr', and returns whether the given
- * (expression tree, lite parsed query) tuple passes CanonicalQuery::isValid().
- * Returns Status::OK() if the tuple is valid, else returns an error Status.
- */
- Status isValid(const std::string& queryStr, const LiteParsedQuery& lpqRaw) {
- BSONObj queryObj = fromjson(queryStr);
- std::unique_ptr<MatchExpression> me(
- CanonicalQuery::normalizeTree(parseMatchExpression(queryObj)));
- return CanonicalQuery::isValid(me.get(), lpqRaw);
- }
+static const char* ns = "somebogusns";
- void assertEquivalent(const char* queryStr,
- const MatchExpression* expected,
- const MatchExpression* actual) {
- if (actual->equivalent(expected)) {
- return;
- }
- mongoutils::str::stream ss;
- ss << "Match expressions are not equivalent."
- << "\nOriginal query: " << queryStr
- << "\nExpected: " << expected->toString()
- << "\nActual: " << actual->toString();
- FAIL(ss);
- }
-
- void assertNotEquivalent(const char* queryStr,
- const MatchExpression* expected,
- const MatchExpression* actual) {
- if (!actual->equivalent(expected)) {
- return;
- }
+/**
+ * Helper function to parse the given BSON object as a MatchExpression, checks the status,
+ * and return the MatchExpression*.
+ */
+MatchExpression* parseMatchExpression(const BSONObj& obj) {
+ StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
+ if (!status.isOK()) {
mongoutils::str::stream ss;
- ss << "Match expressions are equivalent."
- << "\nOriginal query: " << queryStr
- << "\nExpected: " << expected->toString()
- << "\nActual: " << actual->toString();
+ ss << "failed to parse query: " << obj.toString()
+ << ". Reason: " << status.getStatus().toString();
FAIL(ss);
}
+ return status.getValue();
+}
-
- TEST(CanonicalQueryTest, IsValidText) {
- // Passes in default values for LiteParsedQuery.
- // Filter inside LiteParsedQuery is not used.
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- // Valid: regular TEXT.
- ASSERT_OK(isValid("{$text: {$search: 's'}}", *lpq));
-
- // Valid: TEXT inside OR.
- ASSERT_OK(isValid(
- "{$or: ["
- " {$text: {$search: 's'}},"
- " {a: 1}"
- "]}",
- *lpq
- ));
-
- // Valid: TEXT outside NOR.
- ASSERT_OK(isValid("{$text: {$search: 's'}, $nor: [{a: 1}, {b: 1}]}", *lpq));
-
- // Invalid: TEXT inside NOR.
- ASSERT_NOT_OK(isValid("{$nor: [{$text: {$search: 's'}}, {a: 1}]}", *lpq));
-
- // Invalid: TEXT inside NOR.
- ASSERT_NOT_OK(isValid(
- "{$nor: ["
- " {$or: ["
- " {$text: {$search: 's'}},"
- " {a: 1}"
- " ]},"
- " {a: 2}"
- "]}",
- *lpq
- ));
-
- // Invalid: >1 TEXT.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {$text: {$search: 's'}},"
- " {$text: {$search: 't'}}"
- "]}",
- *lpq
- ));
-
- // Invalid: >1 TEXT.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {$or: ["
- " {$text: {$search: 's'}},"
- " {a: 1}"
- " ]},"
- " {$or: ["
- " {$text: {$search: 't'}},"
- " {b: 1}"
- " ]}"
- "]}",
- *lpq
- ));
- }
-
- TEST(CanonicalQueryTest, IsValidGeo) {
- // Passes in default values for LiteParsedQuery.
- // Filter inside LiteParsedQuery is not used.
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- // Valid: regular GEO_NEAR.
- ASSERT_OK(isValid("{a: {$near: [0, 0]}}", *lpq));
-
- // Valid: GEO_NEAR inside nested AND.
- ASSERT_OK(isValid(
- "{$and: ["
- " {$and: ["
- " {a: {$near: [0, 0]}},"
- " {b: 1}"
- " ]},"
- " {c: 1}"
- "]}",
- *lpq
- ));
-
- // Invalid: >1 GEO_NEAR.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {a: {$near: [0, 0]}},"
- " {b: {$near: [0, 0]}}"
- "]}",
- *lpq
- ));
-
- // Invalid: >1 GEO_NEAR.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {a: {$geoNear: [0, 0]}},"
- " {b: {$near: [0, 0]}}"
- "]}",
- *lpq
- ));
-
- // Invalid: >1 GEO_NEAR.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {$and: ["
- " {a: {$near: [0, 0]}},"
- " {b: 1}"
- " ]},"
- " {$and: ["
- " {c: {$near: [0, 0]}},"
- " {d: 1}"
- " ]}"
- "]}",
- *lpq
- ));
-
- // Invalid: GEO_NEAR inside NOR.
- ASSERT_NOT_OK(isValid(
- "{$nor: ["
- " {a: {$near: [0, 0]}},"
- " {b: 1}"
- "]}",
- *lpq
- ));
-
- // Invalid: GEO_NEAR inside OR.
- ASSERT_NOT_OK(isValid(
- "{$or: ["
- " {a: {$near: [0, 0]}},"
- " {b: 1}"
- "]}",
- *lpq
- ));
- }
-
- TEST(CanonicalQueryTest, IsValidTextAndGeo) {
- // Passes in default values for LiteParsedQuery.
- // Filter inside LiteParsedQuery is not used.
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- // Invalid: TEXT and GEO_NEAR.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}, a: {$near: [0, 0]}}", *lpq));
-
- // Invalid: TEXT and GEO_NEAR.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}, a: {$geoNear: [0, 0]}}", *lpq));
-
- // Invalid: TEXT and GEO_NEAR.
- ASSERT_NOT_OK(isValid(
- "{$or: ["
- " {$text: {$search: 's'}},"
- " {a: 1}"
- " ],"
- " b: {$near: [0, 0]}}",
- *lpq
- ));
- }
-
- TEST(CanonicalQueryTest, IsValidTextAndNaturalAscending) {
- // Passes in default values for LiteParsedQuery except for sort order.
- // Filter inside LiteParsedQuery is not used.
- BSONObj sort = fromjson("{$natural: 1}");
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- sort,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- // Invalid: TEXT and {$natural: 1} sort order.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
- }
-
- TEST(CanonicalQueryTest, IsValidTextAndNaturalDescending) {
- // Passes in default values for LiteParsedQuery except for sort order.
- // Filter inside LiteParsedQuery is not used.
- BSONObj sort = fromjson("{$natural: -1}");
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- sort,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- // Invalid: TEXT and {$natural: -1} sort order.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
- }
-
- TEST(CanonicalQueryTest, IsValidTextAndHint) {
- // Passes in default values for LiteParsedQuery except for hint.
- // Filter inside LiteParsedQuery is not used.
- BSONObj hint = fromjson("{a: 1}");
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- hint,
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- // Invalid: TEXT and {$natural: -1} sort order.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
- }
-
- // SERVER-14366
- TEST(CanonicalQueryTest, IsValidGeoNearNaturalSort) {
- // Passes in default values for LiteParsedQuery except for sort order.
- // Filter inside LiteParsedQuery is not used.
- BSONObj sort = fromjson("{$natural: 1}");
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- sort,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- // Invalid: GEO_NEAR and {$natural: 1} sort order.
- ASSERT_NOT_OK(isValid("{a: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}",
- *lpq));
- }
-
- // SERVER-14366
- TEST(CanonicalQueryTest, IsValidGeoNearNaturalHint) {
- // Passes in default values for LiteParsedQuery except for the hint.
- // Filter inside LiteParsedQuery is not used.
- BSONObj hint = fromjson("{$natural: 1}");
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- hint,
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- // Invalid: GEO_NEAR and {$natural: 1} hint.
- ASSERT_NOT_OK(isValid("{a: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}",
- *lpq));
- }
-
- TEST(CanonicalQueryTest, IsValidTextAndSnapshot) {
- // Passes in default values for LiteParsedQuery except for snapshot.
- // Filter inside LiteParsedQuery is not used.
- bool snapshot = true;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- snapshot,
- false))); // explain
-
- // Invalid: TEXT and snapshot.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
- }
-
- //
- // Tests for CanonicalQuery::sortTree
- //
-
- /**
- * Helper function for testing CanonicalQuery::sortTree().
- *
- * Verifies that sorting the expression 'unsortedQueryStr' yields an expression equivalent to
- * the expression 'sortedQueryStr'.
- */
- void testSortTree(const char* unsortedQueryStr, const char* sortedQueryStr) {
- BSONObj unsortedQueryObj = fromjson(unsortedQueryStr);
- unique_ptr<MatchExpression> unsortedQueryExpr(parseMatchExpression(unsortedQueryObj));
-
- BSONObj sortedQueryObj = fromjson(sortedQueryStr);
- unique_ptr<MatchExpression> sortedQueryExpr(parseMatchExpression(sortedQueryObj));
-
- // Sanity check that the unsorted expression is not equivalent to the sorted expression.
- assertNotEquivalent(unsortedQueryStr, unsortedQueryExpr.get(), sortedQueryExpr.get());
-
- // Sanity check that sorting the sorted expression is a no-op.
- {
- unique_ptr<MatchExpression> sortedQueryExprClone(parseMatchExpression(sortedQueryObj));
- CanonicalQuery::sortTree(sortedQueryExprClone.get());
- assertEquivalent(unsortedQueryStr, sortedQueryExpr.get(), sortedQueryExprClone.get());
- }
-
- // Test that sorting the unsorted expression yields the sorted expression.
- CanonicalQuery::sortTree(unsortedQueryExpr.get());
- assertEquivalent(unsortedQueryStr, unsortedQueryExpr.get(), sortedQueryExpr.get());
- }
-
- // Test that an EQ expression sorts before a GT expression.
- TEST(CanonicalQueryTest, SortTreeMatchTypeComparison) {
- testSortTree("{a: {$gt: 1}, a: 1}", "{a: 1, a: {$gt: 1}}");
- }
-
- // Test that an EQ expression on path "a" sorts before an EQ expression on path "b".
- TEST(CanonicalQueryTest, SortTreePathComparison) {
- testSortTree("{b: 1, a: 1}", "{a: 1, b: 1}");
- testSortTree("{'a.b': 1, a: 1}", "{a: 1, 'a.b': 1}");
- testSortTree("{'a.c': 1, 'a.b': 1}", "{'a.b': 1, 'a.c': 1}");
- }
-
- // Test that AND expressions sort according to their first differing child.
- TEST(CanonicalQueryTest, SortTreeChildComparison) {
- testSortTree("{$or: [{a: 1, c: 1}, {a: 1, b: 1}]}", "{$or: [{a: 1, b: 1}, {a: 1, c: 1}]}");
- }
-
- // Test that an AND with 2 children sorts before an AND with 3 children, if the first 2 children
- // are equivalent in both.
- TEST(CanonicalQueryTest, SortTreeNumChildrenComparison) {
- testSortTree("{$or: [{a: 1, b: 1, c: 1}, {a: 1, b: 1}]}",
- "{$or: [{a: 1, b: 1}, {a: 1, b: 1, c: 1}]}");
+/**
+ * Helper function which parses and normalizes 'queryStr', and returns whether the given
+ * (expression tree, lite parsed query) tuple passes CanonicalQuery::isValid().
+ * Returns Status::OK() if the tuple is valid, else returns an error Status.
+ */
+Status isValid(const std::string& queryStr, const LiteParsedQuery& lpqRaw) {
+ BSONObj queryObj = fromjson(queryStr);
+ std::unique_ptr<MatchExpression> me(
+ CanonicalQuery::normalizeTree(parseMatchExpression(queryObj)));
+ return CanonicalQuery::isValid(me.get(), lpqRaw);
+}
+
+void assertEquivalent(const char* queryStr,
+ const MatchExpression* expected,
+ const MatchExpression* actual) {
+ if (actual->equivalent(expected)) {
+ return;
}
-
- //
- // Tests for CanonicalQuery::logicalRewrite
- //
-
- /**
- * Utility function to create a CanonicalQuery
- */
- CanonicalQuery* canonicalize(const char* queryStr) {
- BSONObj queryObj = fromjson(queryStr);
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, &cq);
- ASSERT_OK(result);
- return cq;
+ mongoutils::str::stream ss;
+ ss << "Match expressions are not equivalent."
+ << "\nOriginal query: " << queryStr << "\nExpected: " << expected->toString()
+ << "\nActual: " << actual->toString();
+ FAIL(ss);
+}
+
+void assertNotEquivalent(const char* queryStr,
+ const MatchExpression* expected,
+ const MatchExpression* actual) {
+ if (!actual->equivalent(expected)) {
+ return;
}
+ mongoutils::str::stream ss;
+ ss << "Match expressions are equivalent."
+ << "\nOriginal query: " << queryStr << "\nExpected: " << expected->toString()
+ << "\nActual: " << actual->toString();
+ FAIL(ss);
+}
+
+
+TEST(CanonicalQueryTest, IsValidText) {
+ // Passes in default values for LiteParsedQuery.
+ // Filter inside LiteParsedQuery is not used.
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ // Valid: regular TEXT.
+ ASSERT_OK(isValid("{$text: {$search: 's'}}", *lpq));
+
+ // Valid: TEXT inside OR.
+ ASSERT_OK(isValid(
+ "{$or: ["
+ " {$text: {$search: 's'}},"
+ " {a: 1}"
+ "]}",
+ *lpq));
+
+ // Valid: TEXT outside NOR.
+ ASSERT_OK(isValid("{$text: {$search: 's'}, $nor: [{a: 1}, {b: 1}]}", *lpq));
+
+ // Invalid: TEXT inside NOR.
+ ASSERT_NOT_OK(isValid("{$nor: [{$text: {$search: 's'}}, {a: 1}]}", *lpq));
+
+ // Invalid: TEXT inside NOR.
+ ASSERT_NOT_OK(isValid(
+ "{$nor: ["
+ " {$or: ["
+ " {$text: {$search: 's'}},"
+ " {a: 1}"
+ " ]},"
+ " {a: 2}"
+ "]}",
+ *lpq));
+
+ // Invalid: >1 TEXT.
+ ASSERT_NOT_OK(isValid(
+ "{$and: ["
+ " {$text: {$search: 's'}},"
+ " {$text: {$search: 't'}}"
+ "]}",
+ *lpq));
+
+ // Invalid: >1 TEXT.
+ ASSERT_NOT_OK(isValid(
+ "{$and: ["
+ " {$or: ["
+ " {$text: {$search: 's'}},"
+ " {a: 1}"
+ " ]},"
+ " {$or: ["
+ " {$text: {$search: 't'}},"
+ " {b: 1}"
+ " ]}"
+ "]}",
+ *lpq));
+}
+
+TEST(CanonicalQueryTest, IsValidGeo) {
+ // Passes in default values for LiteParsedQuery.
+ // Filter inside LiteParsedQuery is not used.
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ // Valid: regular GEO_NEAR.
+ ASSERT_OK(isValid("{a: {$near: [0, 0]}}", *lpq));
+
+ // Valid: GEO_NEAR inside nested AND.
+ ASSERT_OK(isValid(
+ "{$and: ["
+ " {$and: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: 1}"
+ " ]},"
+ " {c: 1}"
+ "]}",
+ *lpq));
+
+ // Invalid: >1 GEO_NEAR.
+ ASSERT_NOT_OK(isValid(
+ "{$and: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: {$near: [0, 0]}}"
+ "]}",
+ *lpq));
+
+ // Invalid: >1 GEO_NEAR.
+ ASSERT_NOT_OK(isValid(
+ "{$and: ["
+ " {a: {$geoNear: [0, 0]}},"
+ " {b: {$near: [0, 0]}}"
+ "]}",
+ *lpq));
+
+ // Invalid: >1 GEO_NEAR.
+ ASSERT_NOT_OK(isValid(
+ "{$and: ["
+ " {$and: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: 1}"
+ " ]},"
+ " {$and: ["
+ " {c: {$near: [0, 0]}},"
+ " {d: 1}"
+ " ]}"
+ "]}",
+ *lpq));
+
+ // Invalid: GEO_NEAR inside NOR.
+ ASSERT_NOT_OK(isValid(
+ "{$nor: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: 1}"
+ "]}",
+ *lpq));
+
+ // Invalid: GEO_NEAR inside OR.
+ ASSERT_NOT_OK(isValid(
+ "{$or: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: 1}"
+ "]}",
+ *lpq));
+}
+
+TEST(CanonicalQueryTest, IsValidTextAndGeo) {
+ // Passes in default values for LiteParsedQuery.
+ // Filter inside LiteParsedQuery is not used.
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ // Invalid: TEXT and GEO_NEAR.
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}, a: {$near: [0, 0]}}", *lpq));
+
+ // Invalid: TEXT and GEO_NEAR.
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}, a: {$geoNear: [0, 0]}}", *lpq));
+
+ // Invalid: TEXT and GEO_NEAR.
+ ASSERT_NOT_OK(isValid(
+ "{$or: ["
+ " {$text: {$search: 's'}},"
+ " {a: 1}"
+ " ],"
+ " b: {$near: [0, 0]}}",
+ *lpq));
+}
+
+TEST(CanonicalQueryTest, IsValidTextAndNaturalAscending) {
+ // Passes in default values for LiteParsedQuery except for sort order.
+ // Filter inside LiteParsedQuery is not used.
+ BSONObj sort = fromjson("{$natural: 1}");
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ sort,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ // Invalid: TEXT and {$natural: 1} sort order.
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
+}
+
+TEST(CanonicalQueryTest, IsValidTextAndNaturalDescending) {
+ // Passes in default values for LiteParsedQuery except for sort order.
+ // Filter inside LiteParsedQuery is not used.
+ BSONObj sort = fromjson("{$natural: -1}");
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ sort,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ // Invalid: TEXT and {$natural: -1} sort order.
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
+}
+
+TEST(CanonicalQueryTest, IsValidTextAndHint) {
+ // Passes in default values for LiteParsedQuery except for hint.
+ // Filter inside LiteParsedQuery is not used.
+ BSONObj hint = fromjson("{a: 1}");
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ hint,
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+    // Invalid: TEXT and hint.
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
+}
+
+// SERVER-14366
+TEST(CanonicalQueryTest, IsValidGeoNearNaturalSort) {
+ // Passes in default values for LiteParsedQuery except for sort order.
+ // Filter inside LiteParsedQuery is not used.
+ BSONObj sort = fromjson("{$natural: 1}");
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ sort,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ // Invalid: GEO_NEAR and {$natural: 1} sort order.
+ ASSERT_NOT_OK(isValid("{a: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}", *lpq));
+}
+
+// SERVER-14366
+TEST(CanonicalQueryTest, IsValidGeoNearNaturalHint) {
+ // Passes in default values for LiteParsedQuery except for the hint.
+ // Filter inside LiteParsedQuery is not used.
+ BSONObj hint = fromjson("{$natural: 1}");
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ hint,
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ // Invalid: GEO_NEAR and {$natural: 1} hint.
+ ASSERT_NOT_OK(isValid("{a: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}", *lpq));
+}
+
+TEST(CanonicalQueryTest, IsValidTextAndSnapshot) {
+ // Passes in default values for LiteParsedQuery except for snapshot.
+ // Filter inside LiteParsedQuery is not used.
+ bool snapshot = true;
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ snapshot,
+ false))); // explain
+
+ // Invalid: TEXT and snapshot.
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
+}
+
+//
+// Tests for CanonicalQuery::sortTree
+//
- CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr,
- const char* projStr) {
- BSONObj queryObj = fromjson(queryStr);
- BSONObj sortObj = fromjson(sortStr);
- BSONObj projObj = fromjson(projStr);
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj,
- projObj,
- &cq);
- ASSERT_OK(result);
- return cq;
- }
+/**
+ * Helper function for testing CanonicalQuery::sortTree().
+ *
+ * Verifies that sorting the expression 'unsortedQueryStr' yields an expression equivalent to
+ * the expression 'sortedQueryStr'.
+ */
+void testSortTree(const char* unsortedQueryStr, const char* sortedQueryStr) {
+ BSONObj unsortedQueryObj = fromjson(unsortedQueryStr);
+ unique_ptr<MatchExpression> unsortedQueryExpr(parseMatchExpression(unsortedQueryObj));
- // Don't do anything with a double OR.
- TEST(CanonicalQueryTest, RewriteNoDoubleOr) {
- string queryStr = "{$or:[{a:1}, {b:1}], $or:[{c:1}, {d:1}], e:1}";
- BSONObj queryObj = fromjson(queryStr);
- unique_ptr<MatchExpression> base(parseMatchExpression(queryObj));
- unique_ptr<MatchExpression> rewrite(CanonicalQuery::logicalRewrite(base->shallowClone()));
- assertEquivalent(queryStr.c_str(), base.get(), rewrite.get());
- }
+ BSONObj sortedQueryObj = fromjson(sortedQueryStr);
+ unique_ptr<MatchExpression> sortedQueryExpr(parseMatchExpression(sortedQueryObj));
- // Do something with a single or.
- TEST(CanonicalQueryTest, RewriteSingleOr) {
- // Rewrite of this...
- string queryStr = "{$or:[{a:1}, {b:1}], e:1}";
- BSONObj queryObj = fromjson(queryStr);
- unique_ptr<MatchExpression> rewrite(CanonicalQuery::logicalRewrite(parseMatchExpression(queryObj)));
-
- // Should look like this.
- string rewriteStr = "{$or:[{a:1, e:1}, {b:1, e:1}]}";
- BSONObj rewriteObj = fromjson(rewriteStr);
- unique_ptr<MatchExpression> base(parseMatchExpression(rewriteObj));
- assertEquivalent(queryStr.c_str(), base.get(), rewrite.get());
- }
+ // Sanity check that the unsorted expression is not equivalent to the sorted expression.
+ assertNotEquivalent(unsortedQueryStr, unsortedQueryExpr.get(), sortedQueryExpr.get());
- /**
- * Test function for CanonicalQuery::normalize.
- */
- void testNormalizeQuery(const char* queryStr, const char* expectedExprStr) {
- unique_ptr<CanonicalQuery> cq(canonicalize(queryStr));
- MatchExpression* me = cq->root();
- BSONObj expectedExprObj = fromjson(expectedExprStr);
- unique_ptr<MatchExpression> expectedExpr(parseMatchExpression(expectedExprObj));
- assertEquivalent(queryStr, expectedExpr.get(), me);
+ // Sanity check that sorting the sorted expression is a no-op.
+ {
+ unique_ptr<MatchExpression> sortedQueryExprClone(parseMatchExpression(sortedQueryObj));
+ CanonicalQuery::sortTree(sortedQueryExprClone.get());
+ assertEquivalent(unsortedQueryStr, sortedQueryExpr.get(), sortedQueryExprClone.get());
}
- TEST(CanonicalQueryTest, NormalizeQuerySort) {
- // Field names
- testNormalizeQuery("{b: 1, a: 1}", "{a: 1, b: 1}");
- // Operator types
- testNormalizeQuery("{a: {$gt: 5}, a: {$lt: 10}}}", "{a: {$lt: 10}, a: {$gt: 5}}");
- // Nested queries
- testNormalizeQuery("{a: {$elemMatch: {c: 1, b:1}}}",
- "{a: {$elemMatch: {b: 1, c:1}}}");
- }
+ // Test that sorting the unsorted expression yields the sorted expression.
+ CanonicalQuery::sortTree(unsortedQueryExpr.get());
+ assertEquivalent(unsortedQueryStr, unsortedQueryExpr.get(), sortedQueryExpr.get());
+}
+
+// Test that an EQ expression sorts before a GT expression.
+TEST(CanonicalQueryTest, SortTreeMatchTypeComparison) {
+ testSortTree("{a: {$gt: 1}, a: 1}", "{a: 1, a: {$gt: 1}}");
+}
+
+// Test that an EQ expression on path "a" sorts before an EQ expression on path "b".
+TEST(CanonicalQueryTest, SortTreePathComparison) {
+ testSortTree("{b: 1, a: 1}", "{a: 1, b: 1}");
+ testSortTree("{'a.b': 1, a: 1}", "{a: 1, 'a.b': 1}");
+ testSortTree("{'a.c': 1, 'a.b': 1}", "{'a.b': 1, 'a.c': 1}");
+}
+
+// Test that AND expressions sort according to their first differing child.
+TEST(CanonicalQueryTest, SortTreeChildComparison) {
+ testSortTree("{$or: [{a: 1, c: 1}, {a: 1, b: 1}]}", "{$or: [{a: 1, b: 1}, {a: 1, c: 1}]}");
+}
+
+// Test that an AND with 2 children sorts before an AND with 3 children, if the first 2 children
+// are equivalent in both.
+TEST(CanonicalQueryTest, SortTreeNumChildrenComparison) {
+ testSortTree("{$or: [{a: 1, b: 1, c: 1}, {a: 1, b: 1}]}",
+ "{$or: [{a: 1, b: 1}, {a: 1, b: 1, c: 1}]}");
+}
+
+//
+// Tests for CanonicalQuery::logicalRewrite
+//
- TEST(CanonicalQueryTest, NormalizeQueryTree) {
- // Single-child $or elimination.
- testNormalizeQuery("{$or: [{b: 1}]}", "{b: 1}");
- // Single-child $and elimination.
- testNormalizeQuery("{$or: [{$and: [{a: 1}]}, {b: 1}]}", "{$or: [{a: 1}, {b: 1}]}");
- // $or absorbs $or children.
- testNormalizeQuery("{$or: [{a: 1}, {$or: [{b: 1}, {$or: [{c: 1}]}]}, {d: 1}]}",
- "{$or: [{a: 1}, {b: 1}, {c: 1}, {d: 1}]}");
- // $and absorbs $and children.
- testNormalizeQuery("{$and: [{$and: [{a: 1}, {b: 1}]}, {c: 1}]}",
- "{$and: [{a: 1}, {b: 1}, {c: 1}]}");
- }
+/**
+ * Utility function to create a CanonicalQuery
+ */
+CanonicalQuery* canonicalize(const char* queryStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns, queryObj, &cq);
+ ASSERT_OK(result);
+ return cq;
+}
+
+CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr, const char* projStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ BSONObj sortObj = fromjson(sortStr);
+ BSONObj projObj = fromjson(projStr);
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj, projObj, &cq);
+ ASSERT_OK(result);
+ return cq;
+}
+
+// Don't do anything with a double OR.
+TEST(CanonicalQueryTest, RewriteNoDoubleOr) {
+ string queryStr = "{$or:[{a:1}, {b:1}], $or:[{c:1}, {d:1}], e:1}";
+ BSONObj queryObj = fromjson(queryStr);
+ unique_ptr<MatchExpression> base(parseMatchExpression(queryObj));
+ unique_ptr<MatchExpression> rewrite(CanonicalQuery::logicalRewrite(base->shallowClone()));
+ assertEquivalent(queryStr.c_str(), base.get(), rewrite.get());
+}
+
+// Do something with a single OR.
+TEST(CanonicalQueryTest, RewriteSingleOr) {
+ // Rewrite of this...
+ string queryStr = "{$or:[{a:1}, {b:1}], e:1}";
+ BSONObj queryObj = fromjson(queryStr);
+ unique_ptr<MatchExpression> rewrite(
+ CanonicalQuery::logicalRewrite(parseMatchExpression(queryObj)));
+
+ // Should look like this.
+ string rewriteStr = "{$or:[{a:1, e:1}, {b:1, e:1}]}";
+ BSONObj rewriteObj = fromjson(rewriteStr);
+ unique_ptr<MatchExpression> base(parseMatchExpression(rewriteObj));
+ assertEquivalent(queryStr.c_str(), base.get(), rewrite.get());
+}
-} // namespace
-} // namespace mongo
+/**
+ * Test function for CanonicalQuery::normalize.
+ */
+void testNormalizeQuery(const char* queryStr, const char* expectedExprStr) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(queryStr));
+ MatchExpression* me = cq->root();
+ BSONObj expectedExprObj = fromjson(expectedExprStr);
+ unique_ptr<MatchExpression> expectedExpr(parseMatchExpression(expectedExprObj));
+ assertEquivalent(queryStr, expectedExpr.get(), me);
+}
+
+TEST(CanonicalQueryTest, NormalizeQuerySort) {
+ // Field names
+ testNormalizeQuery("{b: 1, a: 1}", "{a: 1, b: 1}");
+ // Operator types
+    testNormalizeQuery("{a: {$gt: 5}, a: {$lt: 10}}", "{a: {$lt: 10}, a: {$gt: 5}}");
+ // Nested queries
+ testNormalizeQuery("{a: {$elemMatch: {c: 1, b:1}}}", "{a: {$elemMatch: {b: 1, c:1}}}");
+}
+
+TEST(CanonicalQueryTest, NormalizeQueryTree) {
+ // Single-child $or elimination.
+ testNormalizeQuery("{$or: [{b: 1}]}", "{b: 1}");
+ // Single-child $and elimination.
+ testNormalizeQuery("{$or: [{$and: [{a: 1}]}, {b: 1}]}", "{$or: [{a: 1}, {b: 1}]}");
+ // $or absorbs $or children.
+ testNormalizeQuery("{$or: [{a: 1}, {$or: [{b: 1}, {$or: [{c: 1}]}]}, {d: 1}]}",
+ "{$or: [{a: 1}, {b: 1}, {c: 1}, {d: 1}]}");
+ // $and absorbs $and children.
+ testNormalizeQuery("{$and: [{$and: [{a: 1}, {b: 1}]}, {c: 1}]}",
+ "{$and: [{a: 1}, {b: 1}, {c: 1}]}");
+}
+
+} // namespace
+} // namespace mongo
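For readers skimming the reformatted test helpers above, they exercise a single parse / normalize /
sort pipeline. A minimal sketch of how the pieces compose (hypothetical illustration only, not part
of this patch; it assumes the parseMatchExpression helper defined in this test file):

    BSONObj obj = fromjson("{$or: [{b: 1}, {a: 1}]}");
    std::unique_ptr<MatchExpression> expr(
        CanonicalQuery::normalizeTree(parseMatchExpression(obj)));
    CanonicalQuery::sortTree(expr.get());
    // 'expr' is now equivalent to the expression for {$or: [{a: 1}, {b: 1}]},
    // per the SortTreePathComparison behavior verified above.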
diff --git a/src/mongo/db/query/count_request.cpp b/src/mongo/db/query/count_request.cpp
index a587586590f..8541a5cc12b 100644
--- a/src/mongo/db/query/count_request.cpp
+++ b/src/mongo/db/query/count_request.cpp
@@ -36,96 +36,90 @@
namespace mongo {
namespace {
- const char kCmdName[] = "count";
- const char kQueryField[] = "query";
- const char kLimitField[] = "limit";
- const char kSkipField[] = "skip";
- const char kHintField[] = "hint";
+const char kCmdName[] = "count";
+const char kQueryField[] = "query";
+const char kLimitField[] = "limit";
+const char kSkipField[] = "skip";
+const char kHintField[] = "hint";
-} // namespace
+} // namespace
- CountRequest::CountRequest(const std::string& fullNs, BSONObj query)
- : _fullNs(fullNs),
- _query(query.getOwned()) {
- }
+CountRequest::CountRequest(const std::string& fullNs, BSONObj query)
+ : _fullNs(fullNs), _query(query.getOwned()) {}
- void CountRequest::setHint(BSONObj hint) {
- _hint = hint.getOwned();
- }
+void CountRequest::setHint(BSONObj hint) {
+ _hint = hint.getOwned();
+}
- BSONObj CountRequest::toBSON() const {
- BSONObjBuilder builder;
+BSONObj CountRequest::toBSON() const {
+ BSONObjBuilder builder;
- builder.append(kCmdName, _fullNs);
- builder.append(kQueryField, _query);
+ builder.append(kCmdName, _fullNs);
+ builder.append(kQueryField, _query);
- if (_limit) {
- builder.append(kLimitField, _limit.get());
- }
-
- if (_skip) {
- builder.append(kSkipField, _skip.get());
- }
-
- if (_hint) {
- builder.append(kHintField, _hint.get());
- }
+ if (_limit) {
+ builder.append(kLimitField, _limit.get());
+ }
- return builder.obj();
+ if (_skip) {
+ builder.append(kSkipField, _skip.get());
}
- StatusWith<CountRequest> CountRequest::parseFromBSON(const std::string& dbname,
- const BSONObj& cmdObj) {
+ if (_hint) {
+ builder.append(kHintField, _hint.get());
+ }
- BSONElement firstElt = cmdObj.firstElement();
- const std::string coll = (firstElt.type() == BSONType::String) ? firstElt.str() : "";
+ return builder.obj();
+}
- const std::string ns = str::stream() << dbname << "." << coll;
- if (!nsIsFull(ns)) {
- return Status(ErrorCodes::BadValue, "invalid collection name");
- }
+StatusWith<CountRequest> CountRequest::parseFromBSON(const std::string& dbname,
+ const BSONObj& cmdObj) {
+ BSONElement firstElt = cmdObj.firstElement();
+ const std::string coll = (firstElt.type() == BSONType::String) ? firstElt.str() : "";
- // We don't validate that "query" is a nested object due to SERVER-15456.
- CountRequest request(ns, cmdObj.getObjectField(kQueryField));
+ const std::string ns = str::stream() << dbname << "." << coll;
+ if (!nsIsFull(ns)) {
+ return Status(ErrorCodes::BadValue, "invalid collection name");
+ }
- // Limit
- if (cmdObj[kLimitField].isNumber()) {
- long long limit = cmdObj[kLimitField].numberLong();
+ // We don't validate that "query" is a nested object due to SERVER-15456.
+ CountRequest request(ns, cmdObj.getObjectField(kQueryField));
- // For counts, limit and -limit mean the same thing.
- if (limit < 0) {
- limit = -limit;
- }
+ // Limit
+ if (cmdObj[kLimitField].isNumber()) {
+ long long limit = cmdObj[kLimitField].numberLong();
- request.setLimit(limit);
- }
- else if (cmdObj[kLimitField].ok()) {
- return Status(ErrorCodes::BadValue, "limit value is not a valid number");
+ // For counts, limit and -limit mean the same thing.
+ if (limit < 0) {
+ limit = -limit;
}
- // Skip
- if (cmdObj[kSkipField].isNumber()) {
- long long skip = cmdObj[kSkipField].numberLong();
- if (skip < 0) {
- return Status(ErrorCodes::BadValue, "skip value is negative in count query");
- }
+ request.setLimit(limit);
+ } else if (cmdObj[kLimitField].ok()) {
+ return Status(ErrorCodes::BadValue, "limit value is not a valid number");
+ }
- request.setSkip(skip);
- }
- else if (cmdObj[kSkipField].ok()) {
- return Status(ErrorCodes::BadValue, "skip value is not a valid number");
+ // Skip
+ if (cmdObj[kSkipField].isNumber()) {
+ long long skip = cmdObj[kSkipField].numberLong();
+ if (skip < 0) {
+ return Status(ErrorCodes::BadValue, "skip value is negative in count query");
}
- // Hint
- if (Object == cmdObj[kHintField].type()) {
- request.setHint(cmdObj[kHintField].Obj());
- }
- else if (String == cmdObj[kHintField].type()) {
- const std::string hint = cmdObj.getStringField(kHintField);
- request.setHint(BSON("$hint" << hint));
- }
+ request.setSkip(skip);
+ } else if (cmdObj[kSkipField].ok()) {
+ return Status(ErrorCodes::BadValue, "skip value is not a valid number");
+ }
- return request;
+ // Hint
+ if (Object == cmdObj[kHintField].type()) {
+ request.setHint(cmdObj[kHintField].Obj());
+ } else if (String == cmdObj[kHintField].type()) {
+ const std::string hint = cmdObj.getStringField(kHintField);
+ request.setHint(BSON("$hint" << hint));
}
-} // namespace mongo
+ return request;
+}
+
+} // namespace mongo
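The reformatted parsing code above is behavior-preserving. The rules it encodes are: a negative
"limit" is negated (for counts, limit and -limit mean the same thing), a negative "skip" is
rejected, and a string "hint" is wrapped as {$hint: &lt;indexName&gt;}. A minimal test-style sketch of
those rules (hypothetical, not part of this patch; it mirrors the API shown in this diff and the
existing tests below):

    TEST(CountRequest, ParseRulesSketch) {
        const auto parsed = CountRequest::parseFromBSON(
            "TestDB",
            BSON("count" << "TestColl"
                         << "query" << BSON("a" << 1)
                         << "limit" << -5
                         << "hint" << "a_1"));
        ASSERT_OK(parsed.getStatus());
        ASSERT_EQUALS(parsed.getValue().getLimit(), 5);                  // -5 is negated
        ASSERT_EQUALS(parsed.getValue().getHint(), BSON("$hint" << "a_1"));
    }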
diff --git a/src/mongo/db/query/count_request.h b/src/mongo/db/query/count_request.h
index 0e9eb25ee30..42b654a7896 100644
--- a/src/mongo/db/query/count_request.h
+++ b/src/mongo/db/query/count_request.h
@@ -36,59 +36,72 @@
namespace mongo {
- template<typename T> class StatusWith;
+template <typename T>
+class StatusWith;
+/**
+ * A description of a request for a count operation. Copyable.
+ */
+class CountRequest {
+public:
/**
- * A description of a request for a count operation. Copyable.
+     * Constructs a request with the given namespace and query.
*/
- class CountRequest {
- public:
-
- /**
- * Construct an empty request.
- */
- CountRequest(const std::string& fullNs, BSONObj query);
-
- const std::string& getNs() const { return _fullNs; }
- const BSONObj getQuery() const { return _query; }
-
- long long getLimit() const { return _limit.value_or(0); }
- void setLimit(long long limit) { _limit = limit; }
-
- long long getSkip() const { return _skip.value_or(0); }
- void setSkip(long long skip) { _skip = skip; }
+ CountRequest(const std::string& fullNs, BSONObj query);
+
+ const std::string& getNs() const {
+ return _fullNs;
+ }
+ const BSONObj getQuery() const {
+ return _query;
+ }
+
+ long long getLimit() const {
+ return _limit.value_or(0);
+ }
+ void setLimit(long long limit) {
+ _limit = limit;
+ }
+
+ long long getSkip() const {
+ return _skip.value_or(0);
+ }
+ void setSkip(long long skip) {
+ _skip = skip;
+ }
+
+ const BSONObj getHint() const {
+ return _hint.value_or(BSONObj());
+ }
+ void setHint(BSONObj hint);
- const BSONObj getHint() const { return _hint.value_or(BSONObj()); }
- void setHint(BSONObj hint);
-
- /**
- * Constructs a BSON representation of this request, which can be used for sending it in
- * commands.
- */
- BSONObj toBSON() const;
+ /**
+ * Constructs a BSON representation of this request, which can be used for sending it in
+ * commands.
+ */
+ BSONObj toBSON() const;
- /**
- * Construct a CountRequest from the command specification and db name.
- */
- static StatusWith<CountRequest> parseFromBSON(const std::string& dbname,
- const BSONObj& cmdObj);
+ /**
+ * Construct a CountRequest from the command specification and db name.
+ */
+ static StatusWith<CountRequest> parseFromBSON(const std::string& dbname, const BSONObj& cmdObj);
- private:
- // Namespace to operate on (e.g. "foo.bar").
- const std::string _fullNs;
+private:
+ // Namespace to operate on (e.g. "foo.bar").
+ const std::string _fullNs;
- // A predicate describing the set of documents to count.
- const BSONObj _query;
+ // A predicate describing the set of documents to count.
+ const BSONObj _query;
- // Optional. An integer limiting the number of documents to count.
- boost::optional<long long> _limit;
+ // Optional. An integer limiting the number of documents to count.
+ boost::optional<long long> _limit;
- // Optional. An integer indicating to not include the first n documents in the count.
- boost::optional<long long> _skip;
+    // Optional. The number of initial matching documents to exclude from the count.
+ boost::optional<long long> _skip;
- // Optional. Indicates to the query planner that it should generate a count plan using a
- // particular index.
- boost::optional<BSONObj> _hint;
- };
+ // Optional. Indicates to the query planner that it should generate a count plan using a
+ // particular index.
+ boost::optional<BSONObj> _hint;
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/query/count_request_test.cpp b/src/mongo/db/query/count_request_test.cpp
index 4f0546b4180..da68a2ce79f 100644
--- a/src/mongo/db/query/count_request_test.cpp
+++ b/src/mongo/db/query/count_request_test.cpp
@@ -36,98 +36,98 @@
namespace mongo {
namespace {
- TEST(CountRequest, ParseDefaults) {
- const auto countRequestStatus =
- CountRequest::parseFromBSON("TestDB",
- BSON("count" << "TestColl" <<
- "query" << BSON("a" << BSON("$lte" << 10))));
-
- ASSERT_OK(countRequestStatus.getStatus());
-
- const CountRequest& countRequest = countRequestStatus.getValue();
-
- ASSERT_EQUALS(countRequest.getNs(), "TestDB.TestColl");
- ASSERT_EQUALS(countRequest.getQuery(), fromjson("{ a : { '$lte' : 10 } }"));
-
- // Defaults
- ASSERT_EQUALS(countRequest.getLimit(), 0);
- ASSERT_EQUALS(countRequest.getSkip(), 0);
- ASSERT(countRequest.getHint().isEmpty());
- }
-
- TEST(CountRequest, ParseComplete) {
- const auto countRequestStatus =
- CountRequest::parseFromBSON("TestDB",
- BSON("count" << "TestColl" <<
- "query" << BSON("a" << BSON("$gte" << 11)) <<
- "limit" << 100 <<
- "skip" << 1000 <<
- "hint" << BSON("b" << 5)));
-
- ASSERT_OK(countRequestStatus.getStatus());
-
- const CountRequest& countRequest = countRequestStatus.getValue();
-
- ASSERT_EQUALS(countRequest.getNs(), "TestDB.TestColl");
- ASSERT_EQUALS(countRequest.getQuery(), fromjson("{ a : { '$gte' : 11 } }"));
- ASSERT_EQUALS(countRequest.getLimit(), 100);
- ASSERT_EQUALS(countRequest.getSkip(), 1000);
- ASSERT_EQUALS(countRequest.getHint(), fromjson("{ b : 5 }"));
- }
-
- TEST(CountRequest, ParseNegativeLimit) {
- const auto countRequestStatus =
- CountRequest::parseFromBSON("TestDB",
- BSON("count" << "TestColl" <<
- "query" << BSON("a" << BSON("$gte" << 11)) <<
- "limit" << -100 <<
- "skip" << 1000 <<
- "hint" << BSON("b" << 5)));
-
- ASSERT_OK(countRequestStatus.getStatus());
-
- const CountRequest& countRequest = countRequestStatus.getValue();
-
- ASSERT_EQUALS(countRequest.getNs(), "TestDB.TestColl");
- ASSERT_EQUALS(countRequest.getQuery(), fromjson("{ a : { '$gte' : 11 } }"));
- ASSERT_EQUALS(countRequest.getLimit(), 100);
- ASSERT_EQUALS(countRequest.getSkip(), 1000);
- ASSERT_EQUALS(countRequest.getHint(), fromjson("{ b : 5 }"));
- }
-
- TEST(CountRequest, FailParseMissingNS) {
- const auto countRequestStatus =
- CountRequest::parseFromBSON("TestDB",
- BSON("query" << BSON("a" << BSON("$gte" << 11))));
-
- ASSERT_EQUALS(countRequestStatus.getStatus(), ErrorCodes::BadValue);
- }
-
- TEST(CountRequest, FailParseBadSkipValue) {
- const auto countRequestStatus =
- CountRequest::parseFromBSON("TestDB",
- BSON("count" << "TestColl" <<
- "query" << BSON("a" << BSON("$gte" << 11)) <<
- "skip" << -1000));
-
- ASSERT_EQUALS(countRequestStatus.getStatus(), ErrorCodes::BadValue);
- }
-
- TEST(CountRequest, ToBSON) {
- CountRequest countRequest("TestDB.TestColl", BSON("a" << BSON("$gte" << 11)));
- countRequest.setLimit(100);
- countRequest.setSkip(1000);
- countRequest.setHint(BSON("b" << 5));
-
- BSONObj actualObj = countRequest.toBSON();
- BSONObj expectedObj(fromjson("{ count : 'TestDB.TestColl',"
- " query : { a : { '$gte' : 11 } },"
- " limit : 100,"
- " skip : 1000,"
- " hint : { b : 5 } }"));
-
- ASSERT_EQUALS(actualObj, expectedObj);
- }
-
-} // namespace
-} // namespace mongo
+TEST(CountRequest, ParseDefaults) {
+ const auto countRequestStatus =
+ CountRequest::parseFromBSON("TestDB",
+ BSON("count"
+ << "TestColl"
+ << "query" << BSON("a" << BSON("$lte" << 10))));
+
+ ASSERT_OK(countRequestStatus.getStatus());
+
+ const CountRequest& countRequest = countRequestStatus.getValue();
+
+ ASSERT_EQUALS(countRequest.getNs(), "TestDB.TestColl");
+ ASSERT_EQUALS(countRequest.getQuery(), fromjson("{ a : { '$lte' : 10 } }"));
+
+ // Defaults
+ ASSERT_EQUALS(countRequest.getLimit(), 0);
+ ASSERT_EQUALS(countRequest.getSkip(), 0);
+ ASSERT(countRequest.getHint().isEmpty());
+}
+
+TEST(CountRequest, ParseComplete) {
+ const auto countRequestStatus =
+ CountRequest::parseFromBSON("TestDB",
+ BSON("count"
+ << "TestColl"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "limit"
+ << 100 << "skip" << 1000 << "hint" << BSON("b" << 5)));
+
+ ASSERT_OK(countRequestStatus.getStatus());
+
+ const CountRequest& countRequest = countRequestStatus.getValue();
+
+ ASSERT_EQUALS(countRequest.getNs(), "TestDB.TestColl");
+ ASSERT_EQUALS(countRequest.getQuery(), fromjson("{ a : { '$gte' : 11 } }"));
+ ASSERT_EQUALS(countRequest.getLimit(), 100);
+ ASSERT_EQUALS(countRequest.getSkip(), 1000);
+ ASSERT_EQUALS(countRequest.getHint(), fromjson("{ b : 5 }"));
+}
+
+TEST(CountRequest, ParseNegativeLimit) {
+ const auto countRequestStatus =
+ CountRequest::parseFromBSON("TestDB",
+ BSON("count"
+ << "TestColl"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "limit"
+ << -100 << "skip" << 1000 << "hint" << BSON("b" << 5)));
+
+ ASSERT_OK(countRequestStatus.getStatus());
+
+ const CountRequest& countRequest = countRequestStatus.getValue();
+
+ ASSERT_EQUALS(countRequest.getNs(), "TestDB.TestColl");
+ ASSERT_EQUALS(countRequest.getQuery(), fromjson("{ a : { '$gte' : 11 } }"));
+ ASSERT_EQUALS(countRequest.getLimit(), 100);
+ ASSERT_EQUALS(countRequest.getSkip(), 1000);
+ ASSERT_EQUALS(countRequest.getHint(), fromjson("{ b : 5 }"));
+}
+
+TEST(CountRequest, FailParseMissingNS) {
+ const auto countRequestStatus =
+ CountRequest::parseFromBSON("TestDB", BSON("query" << BSON("a" << BSON("$gte" << 11))));
+
+ ASSERT_EQUALS(countRequestStatus.getStatus(), ErrorCodes::BadValue);
+}
+
+TEST(CountRequest, FailParseBadSkipValue) {
+ const auto countRequestStatus =
+ CountRequest::parseFromBSON("TestDB",
+ BSON("count"
+ << "TestColl"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "skip"
+ << -1000));
+
+ ASSERT_EQUALS(countRequestStatus.getStatus(), ErrorCodes::BadValue);
+}
+
+TEST(CountRequest, ToBSON) {
+ CountRequest countRequest("TestDB.TestColl", BSON("a" << BSON("$gte" << 11)));
+ countRequest.setLimit(100);
+ countRequest.setSkip(1000);
+ countRequest.setHint(BSON("b" << 5));
+
+ BSONObj actualObj = countRequest.toBSON();
+ BSONObj expectedObj(fromjson(
+ "{ count : 'TestDB.TestColl',"
+ " query : { a : { '$gte' : 11 } },"
+ " limit : 100,"
+ " skip : 1000,"
+ " hint : { b : 5 } }"));
+
+ ASSERT_EQUALS(actualObj, expectedObj);
+}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/query/cursor_responses.cpp b/src/mongo/db/query/cursor_responses.cpp
index 1bd6be298e2..9165c498e37 100644
--- a/src/mongo/db/query/cursor_responses.cpp
+++ b/src/mongo/db/query/cursor_responses.cpp
@@ -34,26 +34,26 @@
namespace mongo {
- void appendCursorResponseObject(long long cursorId,
- StringData cursorNamespace,
- BSONArray firstBatch,
- BSONObjBuilder* builder) {
- BSONObjBuilder cursorObj(builder->subobjStart("cursor"));
- cursorObj.append("id", cursorId);
- cursorObj.append("ns", cursorNamespace);
- cursorObj.append("firstBatch", firstBatch);
- cursorObj.done();
- }
+void appendCursorResponseObject(long long cursorId,
+ StringData cursorNamespace,
+ BSONArray firstBatch,
+ BSONObjBuilder* builder) {
+ BSONObjBuilder cursorObj(builder->subobjStart("cursor"));
+ cursorObj.append("id", cursorId);
+ cursorObj.append("ns", cursorNamespace);
+ cursorObj.append("firstBatch", firstBatch);
+ cursorObj.done();
+}
- void appendGetMoreResponseObject(long long cursorId,
- StringData cursorNamespace,
- BSONArray nextBatch,
- BSONObjBuilder* builder) {
- BSONObjBuilder cursorObj(builder->subobjStart("cursor"));
- cursorObj.append("id", cursorId);
- cursorObj.append("ns", cursorNamespace);
- cursorObj.append("nextBatch", nextBatch);
- cursorObj.done();
- }
+void appendGetMoreResponseObject(long long cursorId,
+ StringData cursorNamespace,
+ BSONArray nextBatch,
+ BSONObjBuilder* builder) {
+ BSONObjBuilder cursorObj(builder->subobjStart("cursor"));
+ cursorObj.append("id", cursorId);
+ cursorObj.append("ns", cursorNamespace);
+ cursorObj.append("nextBatch", nextBatch);
+ cursorObj.done();
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/query/cursor_responses.h b/src/mongo/db/query/cursor_responses.h
index 2c9fed3b610..06f2f268e63 100644
--- a/src/mongo/db/query/cursor_responses.h
+++ b/src/mongo/db/query/cursor_responses.h
@@ -30,36 +30,36 @@
namespace mongo {
- class BSONObjBuilder;
- class StringData;
- struct BSONArray;
+class BSONObjBuilder;
+class StringData;
+struct BSONArray;
- /**
- * Builds a cursor response object from the provided cursor identifiers and "firstBatch",
- * and appends the response object to the provided builder under the field name "cursor".
- * If the node is a member of a replSet, also appends the current term, primary, and
- * lastOp information.
- *
- * The response object has the following format:
- * { id: <NumberLong>, ns: <String>, firstBatch: <Array> }.
- */
- void appendCursorResponseObject(long long cursorId,
- StringData cursorNamespace,
- BSONArray firstBatch,
- BSONObjBuilder* builder);
+/**
+ * Builds a cursor response object from the provided cursor identifiers and "firstBatch",
+ * and appends the response object to the provided builder under the field name "cursor".
+ * If the node is a member of a replSet, also appends the current term, primary, and
+ * lastOp information.
+ *
+ * The response object has the following format:
+ * { id: <NumberLong>, ns: <String>, firstBatch: <Array> }.
+ */
+void appendCursorResponseObject(long long cursorId,
+ StringData cursorNamespace,
+ BSONArray firstBatch,
+ BSONObjBuilder* builder);
- /**
- * Builds a getMore response object from the provided cursor identifiers and "nextBatch",
- * and appends the response object to the provided builder under the field name "cursor".
- * If the node is a member of a replSet, also appends the current term, primary, and
- * lastOp information.
- *
- * The response object has the following format:
- * { id: <NumberLong>, ns: <String>, nextBatch: <Array> }.
- */
- void appendGetMoreResponseObject(long long cursorId,
- StringData cursorNamespace,
- BSONArray nextBatch,
- BSONObjBuilder* builder);
+/**
+ * Builds a getMore response object from the provided cursor identifiers and "nextBatch",
+ * and appends the response object to the provided builder under the field name "cursor".
+ * If the node is a member of a replSet, also appends the current term, primary, and
+ * lastOp information.
+ *
+ * The response object has the following format:
+ * { id: <NumberLong>, ns: <String>, nextBatch: <Array> }.
+ */
+void appendGetMoreResponseObject(long long cursorId,
+ StringData cursorNamespace,
+ BSONArray nextBatch,
+ BSONObjBuilder* builder);
-} // namespace mongo
+} // namespace mongo
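Neither helper changes behavior in this patch. For orientation, a minimal usage sketch
(hypothetical, not part of the patch) showing the response shape described in the comment above,
with only the fields that cursor_responses.cpp actually appends:

    BSONObjBuilder result;
    appendCursorResponseObject(123LL /*cursorId*/, "test.coll", BSONArray(), &result);
    // result.obj() now holds:
    //   { cursor: { id: NumberLong(123), ns: "test.coll", firstBatch: [] } }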
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 709fd07b28b..748e8af3b63 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -46,696 +46,659 @@
namespace {
- using namespace mongo;
- using std::string;
- using std::unique_ptr;
- using std::vector;
-
- /**
- * Traverse the tree rooted at 'root', and add all tree nodes into the list 'flattened'.
- */
- void flattenStatsTree(const PlanStageStats* root, vector<const PlanStageStats*>* flattened) {
- flattened->push_back(root);
- for (size_t i = 0; i < root->children.size(); ++i) {
- flattenStatsTree(root->children[i], flattened);
- }
+using namespace mongo;
+using std::string;
+using std::unique_ptr;
+using std::vector;
+
+/**
+ * Traverse the tree rooted at 'root', and add all tree nodes into the list 'flattened'.
+ */
+void flattenStatsTree(const PlanStageStats* root, vector<const PlanStageStats*>* flattened) {
+ flattened->push_back(root);
+ for (size_t i = 0; i < root->children.size(); ++i) {
+ flattenStatsTree(root->children[i], flattened);
}
+}
- /**
- * Traverse the tree rooted at 'root', and add all nodes into the list 'flattened'.
- */
- void flattenExecTree(const PlanStage* root, vector<const PlanStage*>* flattened) {
- flattened->push_back(root);
- vector<PlanStage*> children = root->getChildren();
- for (size_t i = 0; i < children.size(); ++i) {
- flattenExecTree(children[i], flattened);
- }
+/**
+ * Traverse the tree rooted at 'root', and add all nodes into the list 'flattened'.
+ */
+void flattenExecTree(const PlanStage* root, vector<const PlanStage*>* flattened) {
+ flattened->push_back(root);
+ vector<PlanStage*> children = root->getChildren();
+ for (size_t i = 0; i < children.size(); ++i) {
+ flattenExecTree(children[i], flattened);
}
+}
- /**
- * Get a pointer to the MultiPlanStage inside the stage tree rooted at 'root'.
- * Returns NULL if there is no MPS.
- */
- MultiPlanStage* getMultiPlanStage(PlanStage* root) {
- if (root->stageType() == STAGE_MULTI_PLAN) {
- MultiPlanStage* mps = static_cast<MultiPlanStage*>(root);
+/**
+ * Get a pointer to the MultiPlanStage inside the stage tree rooted at 'root'.
+ * Returns NULL if there is no MPS.
+ */
+MultiPlanStage* getMultiPlanStage(PlanStage* root) {
+ if (root->stageType() == STAGE_MULTI_PLAN) {
+ MultiPlanStage* mps = static_cast<MultiPlanStage*>(root);
+ return mps;
+ }
+
+ vector<PlanStage*> children = root->getChildren();
+ for (size_t i = 0; i < children.size(); i++) {
+ MultiPlanStage* mps = getMultiPlanStage(children[i]);
+ if (mps != NULL) {
return mps;
}
+ }
- vector<PlanStage*> children = root->getChildren();
- for (size_t i = 0; i < children.size(); i++) {
- MultiPlanStage* mps = getMultiPlanStage(children[i]);
- if (mps != NULL) {
- return mps;
- }
- }
+ return NULL;
+}
- return NULL;
+/**
+ * Given the SpecificStats object for a stage and the type of the stage, returns the
+ * number of index keys examined by the stage.
+ *
+ * This is used for getting the total number of keys examined by a plan. We need
+ * to collect a 'totalKeysExamined' metric for a regular explain (in which case this
+ * gets called from Explain::generateExecStats()) or for the slow query log / profiler
+ * (in which case this gets called from Explain::getSummaryStats()).
+ */
+size_t getKeysExamined(StageType type, const SpecificStats* specific) {
+ if (STAGE_IXSCAN == type) {
+ const IndexScanStats* spec = static_cast<const IndexScanStats*>(specific);
+ return spec->keysExamined;
+ } else if (STAGE_IDHACK == type) {
+ const IDHackStats* spec = static_cast<const IDHackStats*>(specific);
+ return spec->keysExamined;
+ } else if (STAGE_TEXT == type) {
+ const TextStats* spec = static_cast<const TextStats*>(specific);
+ return spec->keysExamined;
+ } else if (STAGE_COUNT_SCAN == type) {
+ const CountScanStats* spec = static_cast<const CountScanStats*>(specific);
+ return spec->keysExamined;
+ } else if (STAGE_DISTINCT_SCAN == type) {
+ const DistinctScanStats* spec = static_cast<const DistinctScanStats*>(specific);
+ return spec->keysExamined;
}
- /**
- * Given the SpecificStats object for a stage and the type of the stage, returns the
- * number of index keys examined by the stage.
- *
- * This is used for getting the total number of keys examined by a plan. We need
- * to collect a 'totalKeysExamined' metric for a regular explain (in which case this
- * gets called from Explain::generateExecStats()) or for the slow query log / profiler
- * (in which case this gets called from Explain::getSummaryStats()).
- */
- size_t getKeysExamined(StageType type, const SpecificStats* specific) {
- if (STAGE_IXSCAN == type) {
- const IndexScanStats* spec = static_cast<const IndexScanStats*>(specific);
- return spec->keysExamined;
- }
- else if (STAGE_IDHACK == type) {
- const IDHackStats* spec = static_cast<const IDHackStats*>(specific);
- return spec->keysExamined;
- }
- else if (STAGE_TEXT == type) {
- const TextStats* spec = static_cast<const TextStats*>(specific);
- return spec->keysExamined;
- }
- else if (STAGE_COUNT_SCAN == type) {
- const CountScanStats* spec = static_cast<const CountScanStats*>(specific);
- return spec->keysExamined;
- }
- else if (STAGE_DISTINCT_SCAN == type) {
- const DistinctScanStats* spec = static_cast<const DistinctScanStats*>(specific);
- return spec->keysExamined;
- }
-
- return 0;
- }
-
- /**
- * Given the SpecificStats object for a stage and the type of the stage, returns the
- * number of documents examined by the stage.
- *
- * This is used for getting the total number of documents examined by a plan. We need
- * to collect a 'totalDocsExamined' metric for a regular explain (in which case this
- * gets called from Explain::generateExecStats()) or for the slow query log / profiler
- * (in which case this gets called from Explain::getSummaryStats()).
- */
- size_t getDocsExamined(StageType type, const SpecificStats* specific) {
- if (STAGE_IDHACK == type) {
- const IDHackStats* spec = static_cast<const IDHackStats*>(specific);
- return spec->docsExamined;
- }
- else if (STAGE_TEXT == type) {
- const TextStats* spec = static_cast<const TextStats*>(specific);
- return spec->fetches;
- }
- else if (STAGE_FETCH == type) {
- const FetchStats* spec = static_cast<const FetchStats*>(specific);
- return spec->docsExamined;
- }
- else if (STAGE_COLLSCAN == type) {
- const CollectionScanStats* spec = static_cast<const CollectionScanStats*>(specific);
- return spec->docsTested;
- }
+ return 0;
+}
- return 0;
+/**
+ * Given the SpecificStats object for a stage and the type of the stage, returns the
+ * number of documents examined by the stage.
+ *
+ * This is used for getting the total number of documents examined by a plan. We need
+ * to collect a 'totalDocsExamined' metric for a regular explain (in which case this
+ * gets called from Explain::generateExecStats()) or for the slow query log / profiler
+ * (in which case this gets called from Explain::getSummaryStats()).
+ */
+size_t getDocsExamined(StageType type, const SpecificStats* specific) {
+ if (STAGE_IDHACK == type) {
+ const IDHackStats* spec = static_cast<const IDHackStats*>(specific);
+ return spec->docsExamined;
+ } else if (STAGE_TEXT == type) {
+ const TextStats* spec = static_cast<const TextStats*>(specific);
+ return spec->fetches;
+ } else if (STAGE_FETCH == type) {
+ const FetchStats* spec = static_cast<const FetchStats*>(specific);
+ return spec->docsExamined;
+ } else if (STAGE_COLLSCAN == type) {
+ const CollectionScanStats* spec = static_cast<const CollectionScanStats*>(specific);
+ return spec->docsTested;
}
- /**
- * Adds to the plan summary string being built by 'ss' for the execution stage 'stage'.
- */
- void addStageSummaryStr(const PlanStage* stage, mongoutils::str::stream& ss) {
- // First add the stage type string.
- const CommonStats* common = stage->getCommonStats();
- ss << common->stageTypeStr;
-
- // Some leaf nodes also provide info about the index they used.
- const SpecificStats* specific = stage->getSpecificStats();
- if (STAGE_COUNT_SCAN == stage->stageType()) {
- const CountScanStats* spec = static_cast<const CountScanStats*>(specific);
- ss << " " << spec->keyPattern;
- }
- else if (STAGE_DISTINCT_SCAN == stage->stageType()) {
- const DistinctScanStats* spec = static_cast<const DistinctScanStats*>(specific);
- ss << " " << spec->keyPattern;
- }
- else if (STAGE_GEO_NEAR_2D == stage->stageType()) {
- const NearStats* spec = static_cast<const NearStats*>(specific);
- ss << " " << spec->keyPattern;
- }
- else if (STAGE_GEO_NEAR_2DSPHERE == stage->stageType()) {
- const NearStats* spec = static_cast<const NearStats*>(specific);
- ss << " " << spec->keyPattern;
- }
- else if (STAGE_IXSCAN == stage->stageType()) {
- const IndexScanStats* spec = static_cast<const IndexScanStats*>(specific);
- ss << " " << spec->keyPattern;
- }
- else if (STAGE_TEXT == stage->stageType()) {
- const TextStats* spec = static_cast<const TextStats*>(specific);
- ss << " " << spec->indexPrefix;
- }
+ return 0;
+}
+
+/**
+ * Adds to the plan summary string being built by 'ss' for the execution stage 'stage'.
+ */
+void addStageSummaryStr(const PlanStage* stage, mongoutils::str::stream& ss) {
+ // First add the stage type string.
+ const CommonStats* common = stage->getCommonStats();
+ ss << common->stageTypeStr;
+
+ // Some leaf nodes also provide info about the index they used.
+ const SpecificStats* specific = stage->getSpecificStats();
+ if (STAGE_COUNT_SCAN == stage->stageType()) {
+ const CountScanStats* spec = static_cast<const CountScanStats*>(specific);
+ ss << " " << spec->keyPattern;
+ } else if (STAGE_DISTINCT_SCAN == stage->stageType()) {
+ const DistinctScanStats* spec = static_cast<const DistinctScanStats*>(specific);
+ ss << " " << spec->keyPattern;
+ } else if (STAGE_GEO_NEAR_2D == stage->stageType()) {
+ const NearStats* spec = static_cast<const NearStats*>(specific);
+ ss << " " << spec->keyPattern;
+ } else if (STAGE_GEO_NEAR_2DSPHERE == stage->stageType()) {
+ const NearStats* spec = static_cast<const NearStats*>(specific);
+ ss << " " << spec->keyPattern;
+ } else if (STAGE_IXSCAN == stage->stageType()) {
+ const IndexScanStats* spec = static_cast<const IndexScanStats*>(specific);
+ ss << " " << spec->keyPattern;
+ } else if (STAGE_TEXT == stage->stageType()) {
+ const TextStats* spec = static_cast<const TextStats*>(specific);
+ ss << " " << spec->indexPrefix;
}
+}
-} // namespace
+} // namespace
namespace mongo {
- using mongoutils::str::stream;
-
- // static
- void Explain::statsToBSON(const PlanStageStats& stats,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* bob,
- BSONObjBuilder* topLevelBob) {
- invariant(bob);
- invariant(topLevelBob);
-
- // Stop as soon as the BSON object we're building exceeds 10 MB.
- static const int kMaxStatsBSONSize = 10 * 1024 * 1024;
- if (topLevelBob->len() > kMaxStatsBSONSize) {
- bob->append("warning", "stats tree exceeded 10 MB");
- return;
- }
-
- // Stage name.
- bob->append("stage", stats.common.stageTypeStr);
+using mongoutils::str::stream;
+
+// static
+void Explain::statsToBSON(const PlanStageStats& stats,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* bob,
+ BSONObjBuilder* topLevelBob) {
+ invariant(bob);
+ invariant(topLevelBob);
+
+ // Stop as soon as the BSON object we're building exceeds 10 MB.
+ static const int kMaxStatsBSONSize = 10 * 1024 * 1024;
+ if (topLevelBob->len() > kMaxStatsBSONSize) {
+ bob->append("warning", "stats tree exceeded 10 MB");
+ return;
+ }
- // Display the BSON representation of the filter, if there is one.
- if (!stats.common.filter.isEmpty()) {
- bob->append("filter", stats.common.filter);
- }
+ // Stage name.
+ bob->append("stage", stats.common.stageTypeStr);
- // Some top-level exec stats get pulled out of the root stage.
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("nReturned", stats.common.advanced);
- bob->appendNumber("executionTimeMillisEstimate", stats.common.executionTimeMillis);
- bob->appendNumber("works", stats.common.works);
- bob->appendNumber("advanced", stats.common.advanced);
- bob->appendNumber("needTime", stats.common.needTime);
- bob->appendNumber("needYield", stats.common.needYield);
- bob->appendNumber("saveState", stats.common.yields);
- bob->appendNumber("restoreState", stats.common.unyields);
- bob->appendNumber("isEOF", stats.common.isEOF);
- bob->appendNumber("invalidates", stats.common.invalidates);
- }
+ // Display the BSON representation of the filter, if there is one.
+ if (!stats.common.filter.isEmpty()) {
+ bob->append("filter", stats.common.filter);
+ }
- // Stage-specific stats
- if (STAGE_AND_HASH == stats.stageType) {
- AndHashStats* spec = static_cast<AndHashStats*>(stats.specific.get());
+ // Some top-level exec stats get pulled out of the root stage.
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("nReturned", stats.common.advanced);
+ bob->appendNumber("executionTimeMillisEstimate", stats.common.executionTimeMillis);
+ bob->appendNumber("works", stats.common.works);
+ bob->appendNumber("advanced", stats.common.advanced);
+ bob->appendNumber("needTime", stats.common.needTime);
+ bob->appendNumber("needYield", stats.common.needYield);
+ bob->appendNumber("saveState", stats.common.yields);
+ bob->appendNumber("restoreState", stats.common.unyields);
+ bob->appendNumber("isEOF", stats.common.isEOF);
+ bob->appendNumber("invalidates", stats.common.invalidates);
+ }
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("memUsage", spec->memUsage);
- bob->appendNumber("memLimit", spec->memLimit);
+ // Stage-specific stats
+ if (STAGE_AND_HASH == stats.stageType) {
+ AndHashStats* spec = static_cast<AndHashStats*>(stats.specific.get());
- bob->appendNumber("flaggedButPassed", spec->flaggedButPassed);
- bob->appendNumber("flaggedInProgress", spec->flaggedInProgress);
- for (size_t i = 0; i < spec->mapAfterChild.size(); ++i) {
- bob->appendNumber(string(stream() << "mapAfterChild_" << i),
- spec->mapAfterChild[i]);
- }
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("memUsage", spec->memUsage);
+ bob->appendNumber("memLimit", spec->memLimit);
+
+ bob->appendNumber("flaggedButPassed", spec->flaggedButPassed);
+ bob->appendNumber("flaggedInProgress", spec->flaggedInProgress);
+ for (size_t i = 0; i < spec->mapAfterChild.size(); ++i) {
+ bob->appendNumber(string(stream() << "mapAfterChild_" << i),
+ spec->mapAfterChild[i]);
}
}
- else if (STAGE_AND_SORTED == stats.stageType) {
- AndSortedStats* spec = static_cast<AndSortedStats*>(stats.specific.get());
-
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("flagged", spec->flagged);
- for (size_t i = 0; i < spec->failedAnd.size(); ++i) {
- bob->appendNumber(string(stream() << "failedAnd_" << i),
- spec->failedAnd[i]);
- }
+ } else if (STAGE_AND_SORTED == stats.stageType) {
+ AndSortedStats* spec = static_cast<AndSortedStats*>(stats.specific.get());
+
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("flagged", spec->flagged);
+ for (size_t i = 0; i < spec->failedAnd.size(); ++i) {
+ bob->appendNumber(string(stream() << "failedAnd_" << i), spec->failedAnd[i]);
}
}
- else if (STAGE_COLLSCAN == stats.stageType) {
- CollectionScanStats* spec = static_cast<CollectionScanStats*>(stats.specific.get());
- bob->append("direction", spec->direction > 0 ? "forward" : "backward");
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("docsExamined", spec->docsTested);
- }
+ } else if (STAGE_COLLSCAN == stats.stageType) {
+ CollectionScanStats* spec = static_cast<CollectionScanStats*>(stats.specific.get());
+ bob->append("direction", spec->direction > 0 ? "forward" : "backward");
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("docsExamined", spec->docsTested);
}
- else if (STAGE_COUNT == stats.stageType) {
- CountStats* spec = static_cast<CountStats*>(stats.specific.get());
+ } else if (STAGE_COUNT == stats.stageType) {
+ CountStats* spec = static_cast<CountStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("nCounted", spec->nCounted);
- bob->appendNumber("nSkipped", spec->nSkipped);
- }
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("nCounted", spec->nCounted);
+ bob->appendNumber("nSkipped", spec->nSkipped);
}
- else if (STAGE_COUNT_SCAN == stats.stageType) {
- CountScanStats* spec = static_cast<CountScanStats*>(stats.specific.get());
-
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("keysExamined", spec->keysExamined);
- }
+ } else if (STAGE_COUNT_SCAN == stats.stageType) {
+ CountScanStats* spec = static_cast<CountScanStats*>(stats.specific.get());
- bob->append("keyPattern", spec->keyPattern);
- bob->append("indexName", spec->indexName);
- bob->appendBool("isMultiKey", spec->isMultiKey);
- bob->appendBool("isUnique", spec->isUnique);
- bob->appendBool("isSparse", spec->isSparse);
- bob->appendBool("isPartial", spec->isPartial);
- bob->append("indexVersion", spec->indexVersion);
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("keysExamined", spec->keysExamined);
}
- else if (STAGE_DELETE == stats.stageType) {
- DeleteStats* spec = static_cast<DeleteStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("nWouldDelete", spec->docsDeleted);
- bob->appendNumber("nInvalidateSkips", spec->nInvalidateSkips);
- }
- }
- else if (STAGE_FETCH == stats.stageType) {
- FetchStats* spec = static_cast<FetchStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("docsExamined", spec->docsExamined);
- bob->appendNumber("alreadyHasObj", spec->alreadyHasObj);
- }
- }
- else if (STAGE_GEO_NEAR_2D == stats.stageType
- || STAGE_GEO_NEAR_2DSPHERE == stats.stageType) {
- NearStats* spec = static_cast<NearStats*>(stats.specific.get());
-
- bob->append("keyPattern", spec->keyPattern);
- bob->append("indexName", spec->indexName);
-
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- BSONArrayBuilder intervalsBob(bob->subarrayStart("searchIntervals"));
- for (vector<IntervalStats>::const_iterator it = spec->intervalStats.begin();
- it != spec->intervalStats.end(); ++it) {
- BSONObjBuilder intervalBob(intervalsBob.subobjStart());
- intervalBob.append("minDistance", it->minDistanceAllowed);
- intervalBob.append("maxDistance", it->maxDistanceAllowed);
- intervalBob.append("maxInclusive", it->inclusiveMaxDistanceAllowed);
- }
- intervalsBob.doneFast();
- }
- }
- else if (STAGE_GROUP == stats.stageType) {
- GroupStats* spec = static_cast<GroupStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("nGroups", spec->nGroups);
- }
+ bob->append("keyPattern", spec->keyPattern);
+ bob->append("indexName", spec->indexName);
+ bob->appendBool("isMultiKey", spec->isMultiKey);
+ bob->appendBool("isUnique", spec->isUnique);
+ bob->appendBool("isSparse", spec->isSparse);
+ bob->appendBool("isPartial", spec->isPartial);
+ bob->append("indexVersion", spec->indexVersion);
+ } else if (STAGE_DELETE == stats.stageType) {
+ DeleteStats* spec = static_cast<DeleteStats*>(stats.specific.get());
+
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("nWouldDelete", spec->docsDeleted);
+ bob->appendNumber("nInvalidateSkips", spec->nInvalidateSkips);
}
- else if (STAGE_IDHACK == stats.stageType) {
- IDHackStats* spec = static_cast<IDHackStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("keysExamined", spec->keysExamined);
- bob->appendNumber("docsExamined", spec->docsExamined);
- }
+ } else if (STAGE_FETCH == stats.stageType) {
+ FetchStats* spec = static_cast<FetchStats*>(stats.specific.get());
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("docsExamined", spec->docsExamined);
+ bob->appendNumber("alreadyHasObj", spec->alreadyHasObj);
}
- else if (STAGE_IXSCAN == stats.stageType) {
- IndexScanStats* spec = static_cast<IndexScanStats*>(stats.specific.get());
-
- bob->append("keyPattern", spec->keyPattern);
- bob->append("indexName", spec->indexName);
- bob->appendBool("isMultiKey", spec->isMultiKey);
- bob->appendBool("isUnique", spec->isUnique);
- bob->appendBool("isSparse", spec->isSparse);
- bob->appendBool("isPartial", spec->isPartial);
- bob->append("indexVersion", spec->indexVersion);
- bob->append("direction", spec->direction > 0 ? "forward" : "backward");
-
- if ((topLevelBob->len() + spec->indexBounds.objsize()) > kMaxStatsBSONSize) {
- bob->append("warning", "index bounds omitted due to BSON size limit");
- }
- else {
- bob->append("indexBounds", spec->indexBounds);
- }
+ } else if (STAGE_GEO_NEAR_2D == stats.stageType || STAGE_GEO_NEAR_2DSPHERE == stats.stageType) {
+ NearStats* spec = static_cast<NearStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("keysExamined", spec->keysExamined);
- bob->appendNumber("dupsTested", spec->dupsTested);
- bob->appendNumber("dupsDropped", spec->dupsDropped);
- bob->appendNumber("seenInvalidated", spec->seenInvalidated);
- }
- }
- else if (STAGE_OR == stats.stageType) {
- OrStats* spec = static_cast<OrStats*>(stats.specific.get());
+ bob->append("keyPattern", spec->keyPattern);
+ bob->append("indexName", spec->indexName);
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("dupsTested", spec->dupsTested);
- bob->appendNumber("dupsDropped", spec->dupsDropped);
- bob->appendNumber("locsForgotten", spec->locsForgotten);
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ BSONArrayBuilder intervalsBob(bob->subarrayStart("searchIntervals"));
+ for (vector<IntervalStats>::const_iterator it = spec->intervalStats.begin();
+ it != spec->intervalStats.end();
+ ++it) {
+ BSONObjBuilder intervalBob(intervalsBob.subobjStart());
+ intervalBob.append("minDistance", it->minDistanceAllowed);
+ intervalBob.append("maxDistance", it->maxDistanceAllowed);
+ intervalBob.append("maxInclusive", it->inclusiveMaxDistanceAllowed);
}
+ intervalsBob.doneFast();
}
- else if (STAGE_LIMIT == stats.stageType) {
- LimitStats* spec = static_cast<LimitStats*>(stats.specific.get());
- bob->appendNumber("limitAmount", spec->limit);
+ } else if (STAGE_GROUP == stats.stageType) {
+ GroupStats* spec = static_cast<GroupStats*>(stats.specific.get());
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("nGroups", spec->nGroups);
}
- else if (STAGE_PROJECTION == stats.stageType) {
- ProjectionStats* spec = static_cast<ProjectionStats*>(stats.specific.get());
- bob->append("transformBy", spec->projObj);
+ } else if (STAGE_IDHACK == stats.stageType) {
+ IDHackStats* spec = static_cast<IDHackStats*>(stats.specific.get());
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("keysExamined", spec->keysExamined);
+ bob->appendNumber("docsExamined", spec->docsExamined);
}
- else if (STAGE_SHARDING_FILTER == stats.stageType) {
- ShardingFilterStats* spec = static_cast<ShardingFilterStats*>(stats.specific.get());
+ } else if (STAGE_IXSCAN == stats.stageType) {
+ IndexScanStats* spec = static_cast<IndexScanStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("chunkSkips", spec->chunkSkips);
- }
+ bob->append("keyPattern", spec->keyPattern);
+ bob->append("indexName", spec->indexName);
+ bob->appendBool("isMultiKey", spec->isMultiKey);
+ bob->appendBool("isUnique", spec->isUnique);
+ bob->appendBool("isSparse", spec->isSparse);
+ bob->appendBool("isPartial", spec->isPartial);
+ bob->append("indexVersion", spec->indexVersion);
+ bob->append("direction", spec->direction > 0 ? "forward" : "backward");
+
+ if ((topLevelBob->len() + spec->indexBounds.objsize()) > kMaxStatsBSONSize) {
+ bob->append("warning", "index bounds omitted due to BSON size limit");
+ } else {
+ bob->append("indexBounds", spec->indexBounds);
}
- else if (STAGE_SKIP == stats.stageType) {
- SkipStats* spec = static_cast<SkipStats*>(stats.specific.get());
- bob->appendNumber("skipAmount", spec->skip);
+
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("keysExamined", spec->keysExamined);
+ bob->appendNumber("dupsTested", spec->dupsTested);
+ bob->appendNumber("dupsDropped", spec->dupsDropped);
+ bob->appendNumber("seenInvalidated", spec->seenInvalidated);
}
- else if (STAGE_SORT == stats.stageType) {
- SortStats* spec = static_cast<SortStats*>(stats.specific.get());
- bob->append("sortPattern", spec->sortPattern);
+ } else if (STAGE_OR == stats.stageType) {
+ OrStats* spec = static_cast<OrStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("memUsage", spec->memUsage);
- bob->appendNumber("memLimit", spec->memLimit);
- }
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("dupsTested", spec->dupsTested);
+ bob->appendNumber("dupsDropped", spec->dupsDropped);
+ bob->appendNumber("locsForgotten", spec->locsForgotten);
+ }
+ } else if (STAGE_LIMIT == stats.stageType) {
+ LimitStats* spec = static_cast<LimitStats*>(stats.specific.get());
+ bob->appendNumber("limitAmount", spec->limit);
+ } else if (STAGE_PROJECTION == stats.stageType) {
+ ProjectionStats* spec = static_cast<ProjectionStats*>(stats.specific.get());
+ bob->append("transformBy", spec->projObj);
+ } else if (STAGE_SHARDING_FILTER == stats.stageType) {
+ ShardingFilterStats* spec = static_cast<ShardingFilterStats*>(stats.specific.get());
- if (spec->limit > 0) {
- bob->appendNumber("limitAmount", spec->limit);
- }
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("chunkSkips", spec->chunkSkips);
}
- else if (STAGE_SORT_MERGE == stats.stageType) {
- MergeSortStats* spec = static_cast<MergeSortStats*>(stats.specific.get());
- bob->append("sortPattern", spec->sortPattern);
+ } else if (STAGE_SKIP == stats.stageType) {
+ SkipStats* spec = static_cast<SkipStats*>(stats.specific.get());
+ bob->appendNumber("skipAmount", spec->skip);
+ } else if (STAGE_SORT == stats.stageType) {
+ SortStats* spec = static_cast<SortStats*>(stats.specific.get());
+ bob->append("sortPattern", spec->sortPattern);
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("dupsTested", spec->dupsTested);
- bob->appendNumber("dupsDropped", spec->dupsDropped);
- }
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("memUsage", spec->memUsage);
+ bob->appendNumber("memLimit", spec->memLimit);
}
- else if (STAGE_TEXT == stats.stageType) {
- TextStats* spec = static_cast<TextStats*>(stats.specific.get());
-
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("keysExamined", spec->keysExamined);
- bob->appendNumber("docsExamined", spec->fetches);
- }
- bob->append("indexPrefix", spec->indexPrefix);
- bob->append("indexName", spec->indexName);
- bob->append("parsedTextQuery", spec->parsedTextQuery);
- }
- else if (STAGE_UPDATE == stats.stageType) {
- UpdateStats* spec = static_cast<UpdateStats*>(stats.specific.get());
-
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("nMatched", spec->nMatched);
- bob->appendNumber("nWouldModify", spec->nModified);
- bob->appendNumber("nInvalidateSkips", spec->nInvalidateSkips);
- bob->appendBool("wouldInsert", spec->inserted);
- bob->appendBool("fastmod", spec->fastmod);
- bob->appendBool("fastmodinsert", spec->fastmodinsert);
- }
+ if (spec->limit > 0) {
+ bob->appendNumber("limitAmount", spec->limit);
}
+ } else if (STAGE_SORT_MERGE == stats.stageType) {
+ MergeSortStats* spec = static_cast<MergeSortStats*>(stats.specific.get());
+ bob->append("sortPattern", spec->sortPattern);
- // We're done if there are no children.
- if (stats.children.empty()) {
- return;
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("dupsTested", spec->dupsTested);
+ bob->appendNumber("dupsDropped", spec->dupsDropped);
}
+ } else if (STAGE_TEXT == stats.stageType) {
+ TextStats* spec = static_cast<TextStats*>(stats.specific.get());
- // If there's just one child (a common scenario), avoid making an array. This makes
- // the output more readable by saving a level of nesting. Name the field 'inputStage'
- // rather than 'inputStages'.
- if (1 == stats.children.size()) {
- BSONObjBuilder childBob;
- statsToBSON(*stats.children[0], verbosity, &childBob, topLevelBob);
- bob->append("inputStage", childBob.obj());
- return;
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("keysExamined", spec->keysExamined);
+ bob->appendNumber("docsExamined", spec->fetches);
}
- // There is more than one child. Recursively call statsToBSON(...) on each
- // of them and add them to the 'inputStages' array.
+ bob->append("indexPrefix", spec->indexPrefix);
+ bob->append("indexName", spec->indexName);
+ bob->append("parsedTextQuery", spec->parsedTextQuery);
+ } else if (STAGE_UPDATE == stats.stageType) {
+ UpdateStats* spec = static_cast<UpdateStats*>(stats.specific.get());
- BSONArrayBuilder childrenBob(bob->subarrayStart("inputStages"));
- for (size_t i = 0; i < stats.children.size(); ++i) {
- BSONObjBuilder childBob(childrenBob.subobjStart());
- statsToBSON(*stats.children[i], verbosity, &childBob, topLevelBob);
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("nMatched", spec->nMatched);
+ bob->appendNumber("nWouldModify", spec->nModified);
+ bob->appendNumber("nInvalidateSkips", spec->nInvalidateSkips);
+ bob->appendBool("wouldInsert", spec->inserted);
+ bob->appendBool("fastmod", spec->fastmod);
+ bob->appendBool("fastmodinsert", spec->fastmodinsert);
}
- childrenBob.doneFast();
}
- // static
- BSONObj Explain::statsToBSON(const PlanStageStats& stats,
- ExplainCommon::Verbosity verbosity) {
- BSONObjBuilder bob;
- statsToBSON(stats, &bob, verbosity);
- return bob.obj();
+ // We're done if there are no children.
+ if (stats.children.empty()) {
+ return;
}
- // static
- void Explain::statsToBSON(const PlanStageStats& stats,
- BSONObjBuilder* bob,
- ExplainCommon::Verbosity verbosity) {
- statsToBSON(stats, verbosity, bob, bob);
+ // If there's just one child (a common scenario), avoid making an array. This makes
+ // the output more readable by saving a level of nesting. Name the field 'inputStage'
+ // rather than 'inputStages'.
+ if (1 == stats.children.size()) {
+ BSONObjBuilder childBob;
+ statsToBSON(*stats.children[0], verbosity, &childBob, topLevelBob);
+ bob->append("inputStage", childBob.obj());
+ return;
}
- // static
- void Explain::generatePlannerInfo(PlanExecutor* exec,
- PlanStageStats* winnerStats,
- const vector<PlanStageStats*>& rejectedStats,
- BSONObjBuilder* out) {
- CanonicalQuery* query = exec->getCanonicalQuery();
-
- BSONObjBuilder plannerBob(out->subobjStart("queryPlanner"));;
-
- plannerBob.append("plannerVersion", QueryPlanner::kPlannerVersion);
- plannerBob.append("namespace", exec->ns());
-
- // Find whether there is an index filter set for the query shape. The 'indexFilterSet'
- // field will always be false in the case of EOF or idhack plans.
- bool indexFilterSet = false;
- if (exec->collection() && exec->getCanonicalQuery()) {
- const CollectionInfoCache* infoCache = exec->collection()->infoCache();
- const QuerySettings* querySettings = infoCache->getQuerySettings();
- PlanCacheKey planCacheKey =
- infoCache->getPlanCache()->computeKey(*exec->getCanonicalQuery());
- AllowedIndices* allowedIndicesRaw;
- if (querySettings->getAllowedIndices(planCacheKey, &allowedIndicesRaw)) {
- // Found an index filter set on the query shape.
- std::unique_ptr<AllowedIndices> allowedIndices(allowedIndicesRaw);
- indexFilterSet = true;
- }
- }
- plannerBob.append("indexFilterSet", indexFilterSet);
-
- // In general we should have a canonical query, but sometimes we may avoid
- // creating a canonical query as an optimization (specifically, the update system
- // does not canonicalize for idhack updates). In these cases, 'query' is NULL.
- if (NULL != query) {
- BSONObjBuilder parsedQueryBob(plannerBob.subobjStart("parsedQuery"));
- query->root()->toBSON(&parsedQueryBob);
- parsedQueryBob.doneFast();
- }
-
- BSONObjBuilder winningPlanBob(plannerBob.subobjStart("winningPlan"));
- statsToBSON(*winnerStats, &winningPlanBob, ExplainCommon::QUERY_PLANNER);
- winningPlanBob.doneFast();
-
- // Genenerate array of rejected plans.
- BSONArrayBuilder allPlansBob(plannerBob.subarrayStart("rejectedPlans"));
- for (size_t i = 0; i < rejectedStats.size(); i++) {
- BSONObjBuilder childBob(allPlansBob.subobjStart());
- statsToBSON(*rejectedStats[i], &childBob, ExplainCommon::QUERY_PLANNER);
- }
- allPlansBob.doneFast();
+ // There is more than one child. Recursively call statsToBSON(...) on each
+ // of them and add them to the 'inputStages' array.
- plannerBob.doneFast();
+ BSONArrayBuilder childrenBob(bob->subarrayStart("inputStages"));
+ for (size_t i = 0; i < stats.children.size(); ++i) {
+ BSONObjBuilder childBob(childrenBob.subobjStart());
+ statsToBSON(*stats.children[i], verbosity, &childBob, topLevelBob);
}
-
- // static
- void Explain::generateExecStats(PlanStageStats* stats,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out,
- long long totalTimeMillis) {
-
- out->appendNumber("nReturned", stats->common.advanced);
-
- // Time elapsed could might be either precise or approximate.
- if (totalTimeMillis >= 0) {
- out->appendNumber("executionTimeMillis", totalTimeMillis);
+ childrenBob.doneFast();
+}
+
+// static
+BSONObj Explain::statsToBSON(const PlanStageStats& stats, ExplainCommon::Verbosity verbosity) {
+ BSONObjBuilder bob;
+ statsToBSON(stats, &bob, verbosity);
+ return bob.obj();
+}
+
+// static
+void Explain::statsToBSON(const PlanStageStats& stats,
+ BSONObjBuilder* bob,
+ ExplainCommon::Verbosity verbosity) {
+ statsToBSON(stats, verbosity, bob, bob);
+}
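A usage sketch for the overloads above (not part of this change): 'exec' is assumed to be a valid PlanExecutor, and the surrounding plumbing is illustrative.

    #include <memory>
    #include "mongo/db/query/explain.h"

    // Render a plan's stats tree at planner verbosity; runtime counters are omitted
    // at this level. A stage with one child nests it under "inputStage", while
    // several children are emitted as an "inputStages" array, as implemented above.
    mongo::BSONObj planTreeBSON(mongo::PlanExecutor* exec) {
        std::unique_ptr<mongo::PlanStageStats> stats(exec->getStats());
        return mongo::Explain::statsToBSON(*stats, mongo::ExplainCommon::QUERY_PLANNER);
    }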
+
+// static
+void Explain::generatePlannerInfo(PlanExecutor* exec,
+ PlanStageStats* winnerStats,
+ const vector<PlanStageStats*>& rejectedStats,
+ BSONObjBuilder* out) {
+ CanonicalQuery* query = exec->getCanonicalQuery();
+
+    BSONObjBuilder plannerBob(out->subobjStart("queryPlanner"));
+
+ plannerBob.append("plannerVersion", QueryPlanner::kPlannerVersion);
+ plannerBob.append("namespace", exec->ns());
+
+ // Find whether there is an index filter set for the query shape. The 'indexFilterSet'
+ // field will always be false in the case of EOF or idhack plans.
+ bool indexFilterSet = false;
+ if (exec->collection() && exec->getCanonicalQuery()) {
+ const CollectionInfoCache* infoCache = exec->collection()->infoCache();
+ const QuerySettings* querySettings = infoCache->getQuerySettings();
+ PlanCacheKey planCacheKey =
+ infoCache->getPlanCache()->computeKey(*exec->getCanonicalQuery());
+ AllowedIndices* allowedIndicesRaw;
+ if (querySettings->getAllowedIndices(planCacheKey, &allowedIndicesRaw)) {
+ // Found an index filter set on the query shape.
+ std::unique_ptr<AllowedIndices> allowedIndices(allowedIndicesRaw);
+ indexFilterSet = true;
}
- else {
- out->appendNumber("executionTimeMillisEstimate", stats->common.executionTimeMillis);
- }
-
- // Flatten the stats tree into a list.
- vector<const PlanStageStats*> statsNodes;
- flattenStatsTree(stats, &statsNodes);
+ }
+ plannerBob.append("indexFilterSet", indexFilterSet);
+
+ // In general we should have a canonical query, but sometimes we may avoid
+ // creating a canonical query as an optimization (specifically, the update system
+ // does not canonicalize for idhack updates). In these cases, 'query' is NULL.
+ if (NULL != query) {
+ BSONObjBuilder parsedQueryBob(plannerBob.subobjStart("parsedQuery"));
+ query->root()->toBSON(&parsedQueryBob);
+ parsedQueryBob.doneFast();
+ }
- // Iterate over all stages in the tree and get the total number of keys/docs examined.
- // These are just aggregations of information already available in the stats tree.
- size_t totalKeysExamined = 0;
- size_t totalDocsExamined = 0;
- for (size_t i = 0; i < statsNodes.size(); ++i) {
+ BSONObjBuilder winningPlanBob(plannerBob.subobjStart("winningPlan"));
+ statsToBSON(*winnerStats, &winningPlanBob, ExplainCommon::QUERY_PLANNER);
+ winningPlanBob.doneFast();
- totalKeysExamined += getKeysExamined(statsNodes[i]->stageType,
- statsNodes[i]->specific.get());
- totalDocsExamined += getDocsExamined(statsNodes[i]->stageType,
- statsNodes[i]->specific.get());
- }
+    // Generate array of rejected plans.
+ BSONArrayBuilder allPlansBob(plannerBob.subarrayStart("rejectedPlans"));
+ for (size_t i = 0; i < rejectedStats.size(); i++) {
+ BSONObjBuilder childBob(allPlansBob.subobjStart());
+ statsToBSON(*rejectedStats[i], &childBob, ExplainCommon::QUERY_PLANNER);
+ }
+ allPlansBob.doneFast();
- out->appendNumber("totalKeysExamined", totalKeysExamined);
- out->appendNumber("totalDocsExamined", totalDocsExamined);
+ plannerBob.doneFast();
+}
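For orientation, the 'queryPlanner' section assembled above has roughly this shape (placeholder values, not from the source):

    // "queryPlanner": {
    //     "plannerVersion": <n>,
    //     "namespace": "<db.collection>",
    //     "indexFilterSet": <bool>,
    //     "parsedQuery": { ... },   // omitted when there is no CanonicalQuery (e.g. idhack updates)
    //     "winningPlan": { ... },
    //     "rejectedPlans": [ ... ]
    // }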
- // Add the tree of stages, with individual execution stats for each stage.
- BSONObjBuilder stagesBob(out->subobjStart("executionStages"));
- statsToBSON(*stats, &stagesBob, verbosity);
- stagesBob.doneFast();
+// static
+void Explain::generateExecStats(PlanStageStats* stats,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out,
+ long long totalTimeMillis) {
+ out->appendNumber("nReturned", stats->common.advanced);
+
+    // Time elapsed might be either precise or approximate.
+ if (totalTimeMillis >= 0) {
+ out->appendNumber("executionTimeMillis", totalTimeMillis);
+ } else {
+ out->appendNumber("executionTimeMillisEstimate", stats->common.executionTimeMillis);
}
- // static
- void Explain::generateServerInfo(BSONObjBuilder* out) {
- BSONObjBuilder serverBob(out->subobjStart("serverInfo"));
- out->append("host", getHostNameCached());
- out->appendNumber("port", serverGlobalParams.port);
- out->append("version", versionString);
- out->append("gitVersion", gitVersion());
- serverBob.doneFast();
+ // Flatten the stats tree into a list.
+ vector<const PlanStageStats*> statsNodes;
+ flattenStatsTree(stats, &statsNodes);
+
+ // Iterate over all stages in the tree and get the total number of keys/docs examined.
+ // These are just aggregations of information already available in the stats tree.
+ size_t totalKeysExamined = 0;
+ size_t totalDocsExamined = 0;
+ for (size_t i = 0; i < statsNodes.size(); ++i) {
+ totalKeysExamined +=
+ getKeysExamined(statsNodes[i]->stageType, statsNodes[i]->specific.get());
+ totalDocsExamined +=
+ getDocsExamined(statsNodes[i]->stageType, statsNodes[i]->specific.get());
}
- // static
- void Explain::explainStages(PlanExecutor* exec,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) {
- //
- // Step 1: run the stages as required by the verbosity level.
- //
-
- // Inspect the tree to see if there is a MultiPlanStage.
- MultiPlanStage* mps = getMultiPlanStage(exec->getRootStage());
-
- // Get stats of the winning plan from the trial period, if the verbosity level
- // is high enough and there was a runoff between multiple plans.
- unique_ptr<PlanStageStats> winningStatsTrial;
- if (verbosity >= ExplainCommon::EXEC_ALL_PLANS && NULL != mps) {
- winningStatsTrial.reset(exec->getStats());
- invariant(winningStatsTrial.get());
- }
+ out->appendNumber("totalKeysExamined", totalKeysExamined);
+ out->appendNumber("totalDocsExamined", totalDocsExamined);
+
+ // Add the tree of stages, with individual execution stats for each stage.
+ BSONObjBuilder stagesBob(out->subobjStart("executionStages"));
+ statsToBSON(*stats, &stagesBob, verbosity);
+ stagesBob.doneFast();
+}
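Likewise, the fields generateExecStats contributes look roughly like this (placeholders only):

    // "nReturned": <n>,
    // "executionTimeMillis": <ms>,   // "executionTimeMillisEstimate" instead when no
    //                                // precise total is supplied (totalTimeMillis == -1)
    // "totalKeysExamined": <keys>,
    // "totalDocsExamined": <docs>,
    // "executionStages": { ...per-stage tree from statsToBSON()... }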
+
+// static
+void Explain::generateServerInfo(BSONObjBuilder* out) {
+ BSONObjBuilder serverBob(out->subobjStart("serverInfo"));
+ out->append("host", getHostNameCached());
+ out->appendNumber("port", serverGlobalParams.port);
+ out->append("version", versionString);
+ out->append("gitVersion", gitVersion());
+ serverBob.doneFast();
+}
+
+// static
+void Explain::explainStages(PlanExecutor* exec,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) {
+ //
+ // Step 1: run the stages as required by the verbosity level.
+ //
+
+ // Inspect the tree to see if there is a MultiPlanStage.
+ MultiPlanStage* mps = getMultiPlanStage(exec->getRootStage());
+
+ // Get stats of the winning plan from the trial period, if the verbosity level
+ // is high enough and there was a runoff between multiple plans.
+ unique_ptr<PlanStageStats> winningStatsTrial;
+ if (verbosity >= ExplainCommon::EXEC_ALL_PLANS && NULL != mps) {
+ winningStatsTrial.reset(exec->getStats());
+ invariant(winningStatsTrial.get());
+ }
- // If we need execution stats, then run the plan in order to gather the stats.
- Status executePlanStatus = Status::OK();
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- executePlanStatus = exec->executePlan();
- }
+ // If we need execution stats, then run the plan in order to gather the stats.
+ Status executePlanStatus = Status::OK();
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ executePlanStatus = exec->executePlan();
+ }
- //
- // Step 2: collect plan stats (which also give the structure of the plan tree).
- //
+ //
+ // Step 2: collect plan stats (which also give the structure of the plan tree).
+ //
- // Get stats for the winning plan.
- unique_ptr<PlanStageStats> winningStats(exec->getStats());
+ // Get stats for the winning plan.
+ unique_ptr<PlanStageStats> winningStats(exec->getStats());
- // Get stats for the rejected plans, if more than one plan was considered.
- OwnedPointerVector<PlanStageStats> allPlansStats;
- if (NULL != mps) {
- allPlansStats = mps->generateCandidateStats();
- }
+ // Get stats for the rejected plans, if more than one plan was considered.
+ OwnedPointerVector<PlanStageStats> allPlansStats;
+ if (NULL != mps) {
+ allPlansStats = mps->generateCandidateStats();
+ }
- //
- // Step 3: use the stats trees to produce explain BSON.
- //
+ //
+ // Step 3: use the stats trees to produce explain BSON.
+ //
- if (verbosity >= ExplainCommon::QUERY_PLANNER) {
- generatePlannerInfo(exec, winningStats.get(), allPlansStats.vector(), out);
- }
+ if (verbosity >= ExplainCommon::QUERY_PLANNER) {
+ generatePlannerInfo(exec, winningStats.get(), allPlansStats.vector(), out);
+ }
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- BSONObjBuilder execBob(out->subobjStart("executionStats"));
-
- // If there is an execution error while running the query, the error is reported under
- // the "executionStats" section and the explain as a whole succeeds.
- execBob.append("executionSuccess", executePlanStatus.isOK());
- if (!executePlanStatus.isOK()) {
- execBob.append("errorMessage", executePlanStatus.reason());
- execBob.append("errorCode", executePlanStatus.code());
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ BSONObjBuilder execBob(out->subobjStart("executionStats"));
+
+ // If there is an execution error while running the query, the error is reported under
+ // the "executionStats" section and the explain as a whole succeeds.
+ execBob.append("executionSuccess", executePlanStatus.isOK());
+ if (!executePlanStatus.isOK()) {
+ execBob.append("errorMessage", executePlanStatus.reason());
+ execBob.append("errorCode", executePlanStatus.code());
+ }
+
+ // Generate exec stats BSON for the winning plan.
+ OperationContext* opCtx = exec->getOpCtx();
+ long long totalTimeMillis = CurOp::get(opCtx)->elapsedMillis();
+ generateExecStats(winningStats.get(), verbosity, &execBob, totalTimeMillis);
+
+ // Also generate exec stats for all plans, if the verbosity level is high enough.
+ // These stats reflect what happened during the trial period that ranked the plans.
+ if (verbosity >= ExplainCommon::EXEC_ALL_PLANS) {
+ // If we ranked multiple plans against each other, then add stats collected
+ // from the trial period of the winning plan. The "allPlansExecution" section
+ // will contain an apples-to-apples comparison of the winning plan's stats against
+ // all rejected plans' stats collected during the trial period.
+ if (NULL != mps) {
+ invariant(winningStatsTrial.get());
+ allPlansStats.push_back(winningStatsTrial.release());
}
- // Generate exec stats BSON for the winning plan.
- OperationContext* opCtx = exec->getOpCtx();
- long long totalTimeMillis = CurOp::get(opCtx)->elapsedMillis();
- generateExecStats(winningStats.get(), verbosity, &execBob, totalTimeMillis);
-
- // Also generate exec stats for all plans, if the verbosity level is high enough.
- // These stats reflect what happened during the trial period that ranked the plans.
- if (verbosity >= ExplainCommon::EXEC_ALL_PLANS) {
- // If we ranked multiple plans against each other, then add stats collected
- // from the trial period of the winning plan. The "allPlansExecution" section
- // will contain an apples-to-apples comparison of the winning plan's stats against
- // all rejected plans' stats collected during the trial period.
- if (NULL != mps) {
- invariant(winningStatsTrial.get());
- allPlansStats.push_back(winningStatsTrial.release());
- }
-
- BSONArrayBuilder allPlansBob(execBob.subarrayStart("allPlansExecution"));
- for (size_t i = 0; i < allPlansStats.size(); ++i) {
- BSONObjBuilder planBob(allPlansBob.subobjStart());
- generateExecStats(allPlansStats[i], verbosity, &planBob);
- planBob.doneFast();
- }
- allPlansBob.doneFast();
+ BSONArrayBuilder allPlansBob(execBob.subarrayStart("allPlansExecution"));
+ for (size_t i = 0; i < allPlansStats.size(); ++i) {
+ BSONObjBuilder planBob(allPlansBob.subobjStart());
+ generateExecStats(allPlansStats[i], verbosity, &planBob);
+ planBob.doneFast();
}
-
- execBob.doneFast();
+ allPlansBob.doneFast();
}
- generateServerInfo(out);
- }
-
- // static
- std::string Explain::getPlanSummary(const PlanExecutor* exec) {
- return getPlanSummary(exec->getRootStage());
+ execBob.doneFast();
}
- // static
- std::string Explain::getPlanSummary(const PlanStage* root) {
- std::vector<const PlanStage*> stages;
- flattenExecTree(root, &stages);
-
- // Use this stream to build the plan summary string.
- mongoutils::str::stream ss;
- bool seenLeaf = false;
-
- for (size_t i = 0; i < stages.size(); i++) {
- if (stages[i]->getChildren().empty()) {
- // This is a leaf node. Add to the plan summary string accordingly. Unless
- // this is the first leaf we've seen, add a delimiting string first.
- if (seenLeaf) {
- ss << ", ";
- }
- else {
- seenLeaf = true;
- }
- addStageSummaryStr(stages[i], ss);
+ generateServerInfo(out);
+}
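A caller-side sketch for explainStages (the helper name is hypothetical; only the Explain API comes from this file):

    // Build a complete explain document for an already-constructed executor.
    mongo::BSONObj runExplain(mongo::PlanExecutor* exec,
                              mongo::ExplainCommon::Verbosity verbosity) {
        mongo::BSONObjBuilder out;
        mongo::Explain::explainStages(exec, verbosity, &out);
        // "queryPlanner" and "serverInfo" are always present; "executionStats"
        // (with "allPlansExecution" at the top verbosity) is added for the
        // execution-level verbosities.
        return out.obj();
    }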
+
+// static
+std::string Explain::getPlanSummary(const PlanExecutor* exec) {
+ return getPlanSummary(exec->getRootStage());
+}
+
+// static
+std::string Explain::getPlanSummary(const PlanStage* root) {
+ std::vector<const PlanStage*> stages;
+ flattenExecTree(root, &stages);
+
+ // Use this stream to build the plan summary string.
+ mongoutils::str::stream ss;
+ bool seenLeaf = false;
+
+ for (size_t i = 0; i < stages.size(); i++) {
+ if (stages[i]->getChildren().empty()) {
+ // This is a leaf node. Add to the plan summary string accordingly. Unless
+ // this is the first leaf we've seen, add a delimiting string first.
+ if (seenLeaf) {
+ ss << ", ";
+ } else {
+ seenLeaf = true;
}
+ addStageSummaryStr(stages[i], ss);
}
-
- return ss;
}
- // static
- void Explain::getSummaryStats(const PlanExecutor* exec, PlanSummaryStats* statsOut) {
- invariant(NULL != statsOut);
+ return ss;
+}
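An illustrative use of the summary string built above (plans and output are hypothetical; only leaf stages are listed):

    std::string summary = mongo::Explain::getPlanSummary(exec);
    // e.g. "COLLSCAN" for a plain collection scan, or
    //      "IXSCAN { a: 1 }, IXSCAN { b: 1 }" for an OR over two index scans.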
- PlanStage* root = exec->getRootStage();
+// static
+void Explain::getSummaryStats(const PlanExecutor* exec, PlanSummaryStats* statsOut) {
+ invariant(NULL != statsOut);
- // We can get some of the fields we need from the common stats stored in the
- // root stage of the plan tree.
- const CommonStats* common = root->getCommonStats();
- statsOut->nReturned = common->advanced;
- statsOut->executionTimeMillis = common->executionTimeMillis;
+ PlanStage* root = exec->getRootStage();
- // The other fields are aggregations over the stages in the plan tree. We flatten
- // the tree into a list and then compute these aggregations.
- std::vector<const PlanStage*> stages;
- flattenExecTree(root, &stages);
+ // We can get some of the fields we need from the common stats stored in the
+ // root stage of the plan tree.
+ const CommonStats* common = root->getCommonStats();
+ statsOut->nReturned = common->advanced;
+ statsOut->executionTimeMillis = common->executionTimeMillis;
- for (size_t i = 0; i < stages.size(); i++) {
- statsOut->totalKeysExamined += getKeysExamined(stages[i]->stageType(),
- stages[i]->getSpecificStats());
- statsOut->totalDocsExamined += getDocsExamined(stages[i]->stageType(),
- stages[i]->getSpecificStats());
+ // The other fields are aggregations over the stages in the plan tree. We flatten
+ // the tree into a list and then compute these aggregations.
+ std::vector<const PlanStage*> stages;
+ flattenExecTree(root, &stages);
- if (STAGE_IDHACK == stages[i]->stageType()) {
- statsOut->isIdhack = true;
- }
- if (STAGE_SORT == stages[i]->stageType()) {
- statsOut->hasSortStage = true;
- }
+ for (size_t i = 0; i < stages.size(); i++) {
+ statsOut->totalKeysExamined +=
+ getKeysExamined(stages[i]->stageType(), stages[i]->getSpecificStats());
+ statsOut->totalDocsExamined +=
+ getDocsExamined(stages[i]->stageType(), stages[i]->getSpecificStats());
+
+ if (STAGE_IDHACK == stages[i]->stageType()) {
+ statsOut->isIdhack = true;
+ }
+ if (STAGE_SORT == stages[i]->stageType()) {
+ statsOut->hasSortStage = true;
}
}
+}
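A sketch of this lightweight path, as a profiler or slow-query-log call site might use it ('exec' assumed to be a finished PlanExecutor):

    mongo::PlanSummaryStats summary;
    mongo::Explain::getSummaryStats(exec, &summary);
    // Aggregated counters without building a BSON tree:
    //   summary.totalKeysExamined, summary.totalDocsExamined  -- work performed
    //   summary.isIdhack, summary.hasSortStage                -- plan-shape flags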
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/query/explain.h b/src/mongo/db/query/explain.h
index 14a9d1289f5..7a0013294a0 100644
--- a/src/mongo/db/query/explain.h
+++ b/src/mongo/db/query/explain.h
@@ -38,161 +38,160 @@
namespace mongo {
- class Collection;
- class OperationContext;
+class Collection;
+class OperationContext;
+/**
+ * A container for the summary statistics that the profiler, slow query log, and
+ * other non-explain debug mechanisms may want to collect.
+ */
+struct PlanSummaryStats {
+ PlanSummaryStats()
+ : nReturned(0),
+ totalKeysExamined(0),
+ totalDocsExamined(0),
+ executionTimeMillis(0),
+ isIdhack(false),
+ hasSortStage(false) {}
+
+ // The number of results returned by the plan.
+ size_t nReturned;
+
+ // The total number of index keys examined by the plan.
+ size_t totalKeysExamined;
+
+ // The total number of documents examined by the plan.
+ size_t totalDocsExamined;
+
+ // The number of milliseconds spent inside the root stage's work() method.
+ long long executionTimeMillis;
+
+ // Did this plan use the fast path for key-value retrievals on the _id index?
+ bool isIdhack;
+
+ // Did this plan use an in-memory sort stage?
+ bool hasSortStage;
+};
+
+/**
+ * Namespace for the collection of static methods used to generate explain information.
+ */
+class Explain {
+public:
/**
- * A container for the summary statistics that the profiler, slow query log, and
- * other non-explain debug mechanisms may want to collect.
+ * Get explain BSON for the execution stages contained by 'exec'. Use this function if you
+ * have a PlanExecutor and want to convert it into a human readable explain format. Any
+ * operation which has a query component (e.g. find, update, group) can be explained via
+ * this function.
+ *
+ * The explain information is extracted from 'exec' and added to the out-parameter 'out'.
+ *
+ * The explain information is generated with the level of detail specified by 'verbosity'.
+ *
+ * Does not take ownership of its arguments.
+ *
+ * If there is an error during the execution of the query, the error message and code are
+ * added to the "executionStats" section of the explain.
*/
- struct PlanSummaryStats {
+ static void explainStages(PlanExecutor* exec,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out);
- PlanSummaryStats() : nReturned(0),
- totalKeysExamined(0),
- totalDocsExamined(0),
- executionTimeMillis(0),
- isIdhack(false),
- hasSortStage(false) { }
-
- // The number of results returned by the plan.
- size_t nReturned;
+ /**
+ * Converts the stats tree 'stats' into a corresponding BSON object containing
+ * explain information.
+ *
+ * Generates the BSON stats at a verbosity specified by 'verbosity'. Defaults
+ * to execution stats verbosity.
+ */
+ static BSONObj statsToBSON(const PlanStageStats& stats,
+ ExplainCommon::Verbosity verbosity = ExplainCommon::EXEC_STATS);
- // The total number of index keys examined by the plan.
- size_t totalKeysExamined;
+ /**
+ * This version of stats tree to BSON conversion returns the result through the
+ * out-parameter 'bob' rather than returning a BSONObj.
+ *
+ * Generates the BSON stats at a verbosity specified by 'verbosity'. Defaults
+ * to execution stats verbosity.
+ */
+ static void statsToBSON(const PlanStageStats& stats,
+ BSONObjBuilder* bob,
+ ExplainCommon::Verbosity verbosity = ExplainCommon::EXEC_STATS);
- // The total number of documents examined by the plan.
- size_t totalDocsExamined;
+ /**
+ * Returns a short plan summary std::string describing the leaves of the query plan.
+ */
+ static std::string getPlanSummary(const PlanExecutor* exec);
+ static std::string getPlanSummary(const PlanStage* root);
- // The number of milliseconds spent inside the root stage's work() method.
- long long executionTimeMillis;
+ /**
+ * Fills out 'statsOut' with summary stats using the execution tree contained
+ * in 'exec'.
+ *
+ * The summary stats are consumed by debug mechanisms such as the profiler and
+ * the slow query log.
+ *
+     * This is a lightweight alternative to explainStages(...) above, which is useful
+ * when operations want to request debug information without doing all the work
+ * to generate a full explain.
+ *
+ * Does not take ownership of its arguments.
+ */
+ static void getSummaryStats(const PlanExecutor* exec, PlanSummaryStats* statsOut);
- // Did this plan use the fast path for key-value retrievals on the _id index?
- bool isIdhack;
+private:
+ /**
+ * Private helper that does the heavy-lifting for the public statsToBSON(...) functions
+ * declared above.
+ *
+ * Not used except as a helper to the public statsToBSON(...) functions.
+ */
+ static void statsToBSON(const PlanStageStats& stats,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* bob,
+ BSONObjBuilder* topLevelBob);
- // Did this plan use an in-memory sort stage?
- bool hasSortStage;
- };
+ /**
+ * Adds the 'queryPlanner' explain section to the BSON object being built
+ * by 'out'.
+ *
+ * This is a helper for generating explain BSON. It is used by explainStages(...).
+ *
+ * @param exec -- the stage tree for the operation being explained.
+ * @param winnerStats -- the stats tree for the winning plan.
+ * @param rejectedStats -- an array of stats trees, one per rejected plan
+ */
+ static void generatePlannerInfo(PlanExecutor* exec,
+ PlanStageStats* winnerStats,
+ const std::vector<PlanStageStats*>& rejectedStats,
+ BSONObjBuilder* out);
/**
- * Namespace for the collection of static methods used to generate explain information.
+ * Generates the execution stats section for the stats tree 'stats',
+ * adding the resulting BSON to 'out'.
+ *
+ * The 'totalTimeMillis' value passed here will be added to the top level of
+ * the execution stats section, but will not affect the reporting of timing for
+ * individual stages. If 'totalTimeMillis' is not specified, then the default
+ * value of -1 indicates that we should only use the approximate timing information
+ * collected by the stages.
+ *
+ * Stats are generated at the verbosity specified by 'verbosity'.
+ *
+ * This is a helper for generating explain BSON. It is used by explainStages(...).
*/
- class Explain {
- public:
- /**
- * Get explain BSON for the execution stages contained by 'exec'. Use this function if you
- * have a PlanExecutor and want to convert it into a human readable explain format. Any
- * operation which has a query component (e.g. find, update, group) can be explained via
- * this function.
- *
- * The explain information is extracted from 'exec' and added to the out-parameter 'out'.
- *
- * The explain information is generated with the level of detail specified by 'verbosity'.
- *
- * Does not take ownership of its arguments.
- *
- * If there is an error during the execution of the query, the error message and code are
- * added to the "executionStats" section of the explain.
- */
- static void explainStages(PlanExecutor* exec,
+ static void generateExecStats(PlanStageStats* stats,
ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out);
-
- /**
- * Converts the stats tree 'stats' into a corresponding BSON object containing
- * explain information.
- *
- * Generates the BSON stats at a verbosity specified by 'verbosity'. Defaults
- * to execution stats verbosity.
- */
- static BSONObj statsToBSON(const PlanStageStats& stats,
- ExplainCommon::Verbosity verbosity = ExplainCommon::EXEC_STATS);
-
- /**
- * This version of stats tree to BSON conversion returns the result through the
- * out-parameter 'bob' rather than returning a BSONObj.
- *
- * Generates the BSON stats at a verbosity specified by 'verbosity'. Defaults
- * to execution stats verbosity.
- */
- static void statsToBSON(const PlanStageStats& stats,
- BSONObjBuilder* bob,
- ExplainCommon::Verbosity verbosity = ExplainCommon::EXEC_STATS);
-
- /**
- * Returns a short plan summary std::string describing the leaves of the query plan.
- */
- static std::string getPlanSummary(const PlanExecutor* exec);
- static std::string getPlanSummary(const PlanStage* root);
-
- /**
- * Fills out 'statsOut' with summary stats using the execution tree contained
- * in 'exec'.
- *
- * The summary stats are consumed by debug mechanisms such as the profiler and
- * the slow query log.
- *
- * This is a lightweight alternative for explainStages(...) above which is useful
- * when operations want to request debug information without doing all the work
- * to generate a full explain.
- *
- * Does not take ownership of its arguments.
- */
- static void getSummaryStats(const PlanExecutor* exec, PlanSummaryStats* statsOut);
-
- private:
- /**
- * Private helper that does the heavy-lifting for the public statsToBSON(...) functions
- * declared above.
- *
- * Not used except as a helper to the public statsToBSON(...) functions.
- */
- static void statsToBSON(const PlanStageStats& stats,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* bob,
- BSONObjBuilder* topLevelBob);
-
- /**
- * Adds the 'queryPlanner' explain section to the BSON object being built
- * by 'out'.
- *
- * This is a helper for generating explain BSON. It is used by explainStages(...).
- *
- * @param exec -- the stage tree for the operation being explained.
- * @param winnerStats -- the stats tree for the winning plan.
- * @param rejectedStats -- an array of stats trees, one per rejected plan
- */
- static void generatePlannerInfo(PlanExecutor* exec,
- PlanStageStats* winnerStats,
- const std::vector<PlanStageStats*>& rejectedStats,
- BSONObjBuilder* out);
-
- /**
- * Generates the execution stats section for the stats tree 'stats',
- * adding the resulting BSON to 'out'.
- *
- * The 'totalTimeMillis' value passed here will be added to the top level of
- * the execution stats section, but will not affect the reporting of timing for
- * individual stages. If 'totalTimeMillis' is not specified, then the default
- * value of -1 indicates that we should only use the approximate timing information
- * collected by the stages.
- *
- * Stats are generated at the verbosity specified by 'verbosity'.
- *
- * This is a helper for generating explain BSON. It is used by explainStages(...).
- */
- static void generateExecStats(PlanStageStats* stats,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out,
- long long totalTimeMillis = -1);
-
- /**
- * Adds the 'serverInfo' explain section to the BSON object being build
- * by 'out'.
- *
- * This is a helper for generating explain BSON. It is used by explainStages(...).
- */
- static void generateServerInfo(BSONObjBuilder* out);
-
- };
-
-} // namespace
+ BSONObjBuilder* out,
+ long long totalTimeMillis = -1);
+
+ /**
+     * Adds the 'serverInfo' explain section to the BSON object being built
+ * by 'out'.
+ *
+ * This is a helper for generating explain BSON. It is used by explainStages(...).
+ */
+ static void generateServerInfo(BSONObjBuilder* out);
+};
+
+}  // namespace mongo
diff --git a/src/mongo/db/query/explain_common.cpp b/src/mongo/db/query/explain_common.cpp
index aeef9df95f6..1f049de6cec 100644
--- a/src/mongo/db/query/explain_common.cpp
+++ b/src/mongo/db/query/explain_common.cpp
@@ -34,9 +34,9 @@
namespace mongo {
- // static
- const char* ExplainCommon::verbosityString(ExplainCommon::Verbosity verbosity) {
- switch (verbosity) {
+// static
+const char* ExplainCommon::verbosityString(ExplainCommon::Verbosity verbosity) {
+ switch (verbosity) {
case QUERY_PLANNER:
return "queryPlanner";
case EXEC_STATS:
@@ -46,31 +46,30 @@ namespace mongo {
default:
invariant(0);
return "unknown";
- }
}
+}
- // static
- Status ExplainCommon::parseCmdBSON(const BSONObj& cmdObj, ExplainCommon::Verbosity* verbosity) {
- if (Object != cmdObj.firstElement().type()) {
- return Status(ErrorCodes::BadValue, "explain command requires a nested object");
- }
+// static
+Status ExplainCommon::parseCmdBSON(const BSONObj& cmdObj, ExplainCommon::Verbosity* verbosity) {
+ if (Object != cmdObj.firstElement().type()) {
+ return Status(ErrorCodes::BadValue, "explain command requires a nested object");
+ }
- *verbosity = ExplainCommon::EXEC_ALL_PLANS;
- if (!cmdObj["verbosity"].eoo()) {
- const char* verbStr = cmdObj["verbosity"].valuestrsafe();
- if (mongoutils::str::equals(verbStr, "queryPlanner")) {
- *verbosity = ExplainCommon::QUERY_PLANNER;
- }
- else if (mongoutils::str::equals(verbStr, "executionStats")) {
- *verbosity = ExplainCommon::EXEC_STATS;
- }
- else if (!mongoutils::str::equals(verbStr, "allPlansExecution")) {
- return Status(ErrorCodes::BadValue, "verbosity string must be one of "
- "{'queryPlanner', 'executionStats', 'allPlansExecution'}");
- }
+ *verbosity = ExplainCommon::EXEC_ALL_PLANS;
+ if (!cmdObj["verbosity"].eoo()) {
+ const char* verbStr = cmdObj["verbosity"].valuestrsafe();
+ if (mongoutils::str::equals(verbStr, "queryPlanner")) {
+ *verbosity = ExplainCommon::QUERY_PLANNER;
+ } else if (mongoutils::str::equals(verbStr, "executionStats")) {
+ *verbosity = ExplainCommon::EXEC_STATS;
+ } else if (!mongoutils::str::equals(verbStr, "allPlansExecution")) {
+ return Status(ErrorCodes::BadValue,
+ "verbosity string must be one of "
+ "{'queryPlanner', 'executionStats', 'allPlansExecution'}");
}
-
- return Status::OK();
}
-} // namespace mongo
+ return Status::OK();
+}
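A small sketch of the parsing contract above; the command document is made up for illustration:

    mongo::ExplainCommon::Verbosity verbosity;
    mongo::BSONObj cmd = BSON("explain" << BSON("find" << "test")
                                        << "verbosity" << "executionStats");
    mongo::Status status = mongo::ExplainCommon::parseCmdBSON(cmd, &verbosity);
    // status is OK and verbosity == ExplainCommon::EXEC_STATS. Omitting "verbosity"
    // defaults to EXEC_ALL_PLANS; an unrecognized string yields BadValue.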
+
+} // namespace mongo
diff --git a/src/mongo/db/query/explain_common.h b/src/mongo/db/query/explain_common.h
index 05bd975295f..f76c6d348cf 100644
--- a/src/mongo/db/query/explain_common.h
+++ b/src/mongo/db/query/explain_common.h
@@ -33,45 +33,44 @@
namespace mongo {
+/**
+ * Utilities used for explain implementations on both mongod and mongos.
+ */
+class ExplainCommon {
+public:
/**
- * Utilities used for explain implementations on both mongod and mongos.
+ * The various supported verbosity levels for explain. The order is
+ * significant: the enum values are assigned in order of increasing verbosity.
*/
- class ExplainCommon {
- public:
- /**
- * The various supported verbosity levels for explain. The order is
- * significant: the enum values are assigned in order of increasing verbosity.
- */
- enum Verbosity {
- // At all verbosities greater than or equal to QUERY_PLANNER, we display information
- // about the plan selected and alternate rejected plans. Does not include any execution-
- // related info. String alias is "queryPlanner".
- QUERY_PLANNER = 0,
-
- // At all verbosities greater than or equal to EXEC_STATS, we display a section of
- // output containing both overall execution stats, and stats per stage in the
- // execution tree. String alias is "execStats".
- EXEC_STATS = 1,
+ enum Verbosity {
+ // At all verbosities greater than or equal to QUERY_PLANNER, we display information
+ // about the plan selected and alternate rejected plans. Does not include any execution-
+ // related info. String alias is "queryPlanner".
+ QUERY_PLANNER = 0,
- // At this second-highest verbosity level, we generate the execution stats for each
- // rejected plan as well as the winning plan. String alias is "allPlansExecution".
- EXEC_ALL_PLANS = 2,
- };
+ // At all verbosities greater than or equal to EXEC_STATS, we display a section of
+ // output containing both overall execution stats, and stats per stage in the
+        // execution tree. String alias is "executionStats".
+ EXEC_STATS = 1,
- /**
- * Converts an explain verbosity to its string representation.
- */
- static const char* verbosityString(ExplainCommon::Verbosity verbosity);
+        // At this highest verbosity level, we generate the execution stats for each
+ // rejected plan as well as the winning plan. String alias is "allPlansExecution".
+ EXEC_ALL_PLANS = 2,
+ };
- /**
- * Does some basic validation of the command BSON, and retrieves the explain verbosity.
- *
- * Returns a non-OK status if parsing fails.
- *
- * On success, populates "verbosity".
- */
- static Status parseCmdBSON(const BSONObj& cmdObj, ExplainCommon::Verbosity* verbosity);
+ /**
+ * Converts an explain verbosity to its string representation.
+ */
+ static const char* verbosityString(ExplainCommon::Verbosity verbosity);
- };
+ /**
+ * Does some basic validation of the command BSON, and retrieves the explain verbosity.
+ *
+ * Returns a non-OK status if parsing fails.
+ *
+ * On success, populates "verbosity".
+ */
+ static Status parseCmdBSON(const BSONObj& cmdObj, ExplainCommon::Verbosity* verbosity);
+};
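Because the Verbosity constants above increase with the level of detail, callers can gate work with plain comparisons, as explain.cpp does; a minimal sketch:

    if (verbosity >= mongo::ExplainCommon::EXEC_STATS) {
        // run the plan and collect per-stage counters; skipped at QUERY_PLANNER
    }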
-} // namespace
+}  // namespace mongo
diff --git a/src/mongo/db/query/expression_index.cpp b/src/mongo/db/query/expression_index.cpp
index 51cc439da23..67f06314266 100644
--- a/src/mongo/db/query/expression_index.cpp
+++ b/src/mongo/db/query/expression_index.cpp
@@ -39,174 +39,160 @@
namespace mongo {
- using std::set;
-
- BSONObj ExpressionMapping::hash(const BSONElement& value) {
- BSONObjBuilder bob;
- bob.append("", BSONElementHasher::hash64(value, BSONElementHasher::DEFAULT_HASH_SEED));
- return bob.obj();
+using std::set;
+
+BSONObj ExpressionMapping::hash(const BSONElement& value) {
+ BSONObjBuilder bob;
+ bob.append("", BSONElementHasher::hash64(value, BSONElementHasher::DEFAULT_HASH_SEED));
+ return bob.obj();
+}
+
+// For debugging only
+static std::string toCoveringString(const GeoHashConverter& hashConverter,
+ const set<GeoHash>& covering) {
+ string result = "[";
+ for (set<GeoHash>::const_iterator it = covering.begin(); it != covering.end(); ++it) {
+ if (it != covering.begin())
+ result += ", ";
+
+ const GeoHash& geoHash = *it;
+
+ result += hashConverter.unhashToBoxCovering(geoHash).toString();
+ result += " (" + geoHash.toStringHex1() + ")";
}
- // For debugging only
- static std::string toCoveringString(const GeoHashConverter& hashConverter,
- const set<GeoHash>& covering) {
- string result = "[";
- for (set<GeoHash>::const_iterator it = covering.begin(); it != covering.end();
- ++it) {
-
- if (it != covering.begin()) result += ", ";
-
- const GeoHash& geoHash = *it;
-
- result += hashConverter.unhashToBoxCovering(geoHash).toString();
- result += " (" + geoHash.toStringHex1() + ")";
- }
-
- return result + "]";
+ return result + "]";
+}
+
+void ExpressionMapping::cover2d(const R2Region& region,
+ const BSONObj& indexInfoObj,
+ int maxCoveringCells,
+ OrderedIntervalList* oil) {
+ GeoHashConverter::Parameters hashParams;
+ Status paramStatus = GeoHashConverter::parseParameters(indexInfoObj, &hashParams);
+ verify(paramStatus.isOK()); // We validated the parameters when creating the index
+
+ GeoHashConverter hashConverter(hashParams);
+ R2RegionCoverer coverer(&hashConverter);
+ coverer.setMaxLevel(hashConverter.getBits());
+ coverer.setMaxCells(maxCoveringCells);
+
+ // TODO: Maybe slightly optimize by returning results in order
+ vector<GeoHash> unorderedCovering;
+ coverer.getCovering(region, &unorderedCovering);
+ set<GeoHash> covering(unorderedCovering.begin(), unorderedCovering.end());
+
+ for (set<GeoHash>::const_iterator it = covering.begin(); it != covering.end(); ++it) {
+ const GeoHash& geoHash = *it;
+ BSONObjBuilder builder;
+ geoHash.appendHashMin(&builder, "");
+ geoHash.appendHashMax(&builder, "");
+
+ oil->intervals.push_back(IndexBoundsBuilder::makeRangeInterval(builder.obj(), true, true));
}
-
- void ExpressionMapping::cover2d(const R2Region& region,
- const BSONObj& indexInfoObj,
- int maxCoveringCells,
- OrderedIntervalList* oil) {
-
- GeoHashConverter::Parameters hashParams;
- Status paramStatus = GeoHashConverter::parseParameters(indexInfoObj, &hashParams);
- verify(paramStatus.isOK()); // We validated the parameters when creating the index
-
- GeoHashConverter hashConverter(hashParams);
- R2RegionCoverer coverer(&hashConverter);
- coverer.setMaxLevel(hashConverter.getBits());
- coverer.setMaxCells(maxCoveringCells);
-
- // TODO: Maybe slightly optimize by returning results in order
- vector<GeoHash> unorderedCovering;
- coverer.getCovering(region, &unorderedCovering);
- set<GeoHash> covering(unorderedCovering.begin(), unorderedCovering.end());
-
- for (set<GeoHash>::const_iterator it = covering.begin(); it != covering.end();
- ++it) {
-
- const GeoHash& geoHash = *it;
- BSONObjBuilder builder;
- geoHash.appendHashMin(&builder, "");
- geoHash.appendHashMax(&builder, "");
-
- oil->intervals.push_back(IndexBoundsBuilder::makeRangeInterval(builder.obj(),
- true,
- true));
- }
+}
+
+// TODO: what should we really pass in for indexInfoObj?
+void ExpressionMapping::cover2dsphere(const S2Region& region,
+ const BSONObj& indexInfoObj,
+ OrderedIntervalList* oilOut) {
+ int coarsestIndexedLevel;
+ BSONElement ce = indexInfoObj["coarsestIndexedLevel"];
+ if (ce.isNumber()) {
+ coarsestIndexedLevel = ce.numberInt();
+ } else {
+ coarsestIndexedLevel = S2::kAvgEdge.GetClosestLevel(100 * 1000.0 / kRadiusOfEarthInMeters);
}
- // TODO: what should we really pass in for indexInfoObj?
- void ExpressionMapping::cover2dsphere(const S2Region& region,
- const BSONObj& indexInfoObj,
- OrderedIntervalList* oilOut) {
-
- int coarsestIndexedLevel;
- BSONElement ce = indexInfoObj["coarsestIndexedLevel"];
- if (ce.isNumber()) {
- coarsestIndexedLevel = ce.numberInt();
- }
- else {
- coarsestIndexedLevel =
- S2::kAvgEdge.GetClosestLevel(100 * 1000.0 / kRadiusOfEarthInMeters);
+    // The min level of our covering is the level whose cells are the closest match to the
+    // *area* of the region (or the max indexed level, whichever is smaller). The max level
+    // is four levels larger.
+ double edgeLen = sqrt(region.GetRectBound().Area());
+ S2RegionCoverer coverer;
+ coverer.set_min_level(min(coarsestIndexedLevel, 2 + S2::kAvgEdge.GetClosestLevel(edgeLen)));
+ coverer.set_max_level(4 + coverer.min_level());
+
+ std::vector<S2CellId> cover;
+ coverer.GetCovering(region, &cover);
+
+ // Look at the cells we cover and all cells that are within our covering and finer.
+ // Anything with our cover as a strict prefix is contained within the cover and should
+ // be intersection tested.
+ std::set<string> intervalSet;
+ std::set<string> exactSet;
+ for (size_t i = 0; i < cover.size(); ++i) {
+ S2CellId coveredCell = cover[i];
+ intervalSet.insert(coveredCell.toString());
+
+ // Look at the cells that cover us. We want to look at every cell that contains the
+ // covering we would index on if we were to insert the query geometry. We generate
+ // the would-index-with-this-covering and find all the cells strictly containing the
+ // cells in that set, until we hit the coarsest indexed cell. We use equality, not
+ // a prefix match. Why not prefix? Because we've already looked at everything
+ // finer or as fine as our initial covering.
+ //
+ // Say we have a fine point with cell id 212121, we go up one, get 21212, we don't
+ // want to look at cells 21212[not-1] because we know they're not going to intersect
+ // with 212121, but entries inserted with cell value 21212 (no trailing digits) may.
+ // And we've already looked at points with the cell id 211111 from the regex search
+ // created above, so we only want things where the value of the last digit is not
+ // stored (and therefore could be 1).
+
+ while (coveredCell.level() > coarsestIndexedLevel) {
+ // Add the parent cell of the currently covered cell since we aren't at the
+ // coarsest level yet
+ // NOTE: Be careful not to generate cells strictly less than the
+ // coarsestIndexedLevel - this can result in S2 failures when level < 0.
+
+ coveredCell = coveredCell.parent();
+ exactSet.insert(coveredCell.toString());
}
+ }
- // The min level of our covering is the level whose cells are the closest match to the
- // *area* of the region (or the max indexed level, whichever is smaller) The max level
- // is 4 sizes larger.
- double edgeLen = sqrt(region.GetRectBound().Area());
- S2RegionCoverer coverer;
- coverer.set_min_level(min(coarsestIndexedLevel,
- 2 + S2::kAvgEdge.GetClosestLevel(edgeLen)));
- coverer.set_max_level(4 + coverer.min_level());
-
- std::vector<S2CellId> cover;
- coverer.GetCovering(region, &cover);
-
- // Look at the cells we cover and all cells that are within our covering and finer.
- // Anything with our cover as a strict prefix is contained within the cover and should
- // be intersection tested.
- std::set<string> intervalSet;
- std::set<string> exactSet;
- for (size_t i = 0; i < cover.size(); ++i) {
-
- S2CellId coveredCell = cover[i];
- intervalSet.insert(coveredCell.toString());
-
- // Look at the cells that cover us. We want to look at every cell that contains the
- // covering we would index on if we were to insert the query geometry. We generate
- // the would-index-with-this-covering and find all the cells strictly containing the
- // cells in that set, until we hit the coarsest indexed cell. We use equality, not
- // a prefix match. Why not prefix? Because we've already looked at everything
- // finer or as fine as our initial covering.
- //
- // Say we have a fine point with cell id 212121, we go up one, get 21212, we don't
- // want to look at cells 21212[not-1] because we know they're not going to intersect
- // with 212121, but entries inserted with cell value 21212 (no trailing digits) may.
- // And we've already looked at points with the cell id 211111 from the regex search
- // created above, so we only want things where the value of the last digit is not
- // stored (and therefore could be 1).
-
- while (coveredCell.level() > coarsestIndexedLevel) {
-
- // Add the parent cell of the currently covered cell since we aren't at the
- // coarsest level yet
- // NOTE: Be careful not to generate cells strictly less than the
- // coarsestIndexedLevel - this can result in S2 failures when level < 0.
-
- coveredCell = coveredCell.parent();
- exactSet.insert(coveredCell.toString());
- }
+ // We turned the cell IDs into strings which define point intervals or prefixes of
+ // strings we want to look for.
+ std::set<std::string>::iterator exactIt = exactSet.begin();
+ std::set<std::string>::iterator intervalIt = intervalSet.begin();
+ while (exactSet.end() != exactIt && intervalSet.end() != intervalIt) {
+ const std::string& exact = *exactIt;
+ const std::string& ival = *intervalIt;
+ if (exact < ival) {
+ // add exact
+ oilOut->intervals.push_back(IndexBoundsBuilder::makePointInterval(exact));
+ exactIt++;
+ } else {
+ std::string end = ival;
+ end[end.size() - 1]++;
+ oilOut->intervals.push_back(
+ IndexBoundsBuilder::makeRangeInterval(ival, end, true, false));
+ intervalIt++;
}
+ }
- // We turned the cell IDs into strings which define point intervals or prefixes of
- // strings we want to look for.
- std::set<std::string>::iterator exactIt = exactSet.begin();
- std::set<std::string>::iterator intervalIt = intervalSet.begin();
- while (exactSet.end() != exactIt && intervalSet.end() != intervalIt) {
- const std::string& exact = *exactIt;
+ if (exactSet.end() != exactIt) {
+ verify(intervalSet.end() == intervalIt);
+ do {
+ oilOut->intervals.push_back(IndexBoundsBuilder::makePointInterval(*exactIt));
+ exactIt++;
+ } while (exactSet.end() != exactIt);
+ } else if (intervalSet.end() != intervalIt) {
+ verify(exactSet.end() == exactIt);
+ do {
const std::string& ival = *intervalIt;
- if (exact < ival) {
- // add exact
- oilOut->intervals.push_back(IndexBoundsBuilder::makePointInterval(exact));
- exactIt++;
- }
- else {
- std::string end = ival;
- end[end.size() - 1]++;
- oilOut->intervals.push_back(
- IndexBoundsBuilder::makeRangeInterval(ival, end, true, false));
- intervalIt++;
- }
- }
-
- if (exactSet.end() != exactIt) {
- verify(intervalSet.end() == intervalIt);
- do {
- oilOut->intervals.push_back(IndexBoundsBuilder::makePointInterval(*exactIt));
- exactIt++;
- } while (exactSet.end() != exactIt);
- }
- else if (intervalSet.end() != intervalIt) {
- verify(exactSet.end() == exactIt);
- do {
- const std::string& ival = *intervalIt;
- std::string end = ival;
- end[end.size() - 1]++;
- oilOut->intervals.push_back(
- IndexBoundsBuilder::makeRangeInterval(ival, end, true, false));
- intervalIt++;
- } while (intervalSet.end() != intervalIt);
- }
+ std::string end = ival;
+ end[end.size() - 1]++;
+ oilOut->intervals.push_back(
+ IndexBoundsBuilder::makeRangeInterval(ival, end, true, false));
+ intervalIt++;
+ } while (intervalSet.end() != intervalIt);
+ }
- // Make sure that our intervals don't overlap each other and are ordered correctly.
- // This perhaps should only be done in debug mode.
- if (!oilOut->isValidFor(1)) {
- cout << "check your assumptions! OIL = " << oilOut->toString() << std::endl;
- verify(0);
- }
+ // Make sure that our intervals don't overlap each other and are ordered correctly.
+ // This perhaps should only be done in debug mode.
+ if (!oilOut->isValidFor(1)) {
+ cout << "check your assumptions! OIL = " << oilOut->toString() << std::endl;
+ verify(0);
}
+}
} // namespace mongo
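
The interval construction in cover2dsphere leans on a string property: a covering cell's id, rendered as a string, is a prefix of the ids of every finer cell it contains, so the half-open range [ival, ival with its last character incremented) captures all keys sharing that prefix. A small self-contained demonstration of that step, using plain std::string values like the "212121" example in the comments above (prefixUpperBound is an illustrative helper name, not part of the code in this file):

    #include <cassert>
    #include <string>

    // Given a non-empty cell-id prefix, return the exclusive upper bound of the
    // range of strings sharing that prefix by incrementing the last character.
    // This is the same "end[end.size() - 1]++" step used when building the
    // range intervals above.
    std::string prefixUpperBound(std::string prefix) {
        prefix[prefix.size() - 1]++;
        return prefix;
    }

    int main() {
        const std::string ival = "21212";
        const std::string end = prefixUpperBound(ival);  // "21213"

        // Any key with the covering cell as a prefix falls inside [ival, end).
        assert(ival <= std::string("212121") && std::string("212121") < end);
        // A sibling cell's key does not.
        assert(!(std::string("21213") < end));
        return 0;
    }
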
diff --git a/src/mongo/db/query/expression_index.h b/src/mongo/db/query/expression_index.h
index b50c2037e21..910433924ff 100644
--- a/src/mongo/db/query/expression_index.h
+++ b/src/mongo/db/query/expression_index.h
@@ -32,29 +32,28 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/geo/shapes.h"
-#include "mongo/db/query/index_bounds_builder.h" // For OrderedIntervalList
+#include "mongo/db/query/index_bounds_builder.h" // For OrderedIntervalList
namespace mongo {
- /**
- * Functions that compute expression index mappings.
- *
- * TODO: I think we could structure this more generally with respect to planning.
- */
- class ExpressionMapping {
- public:
-
- static BSONObj hash(const BSONElement& value);
-
- static void cover2d(const R2Region& region,
- const BSONObj& indexInfoObj,
- int maxCoveringCells,
- OrderedIntervalList* oil);
-
- // TODO: what should we really pass in for indexInfoObj?
- static void cover2dsphere(const S2Region& region,
- const BSONObj& indexInfoObj,
- OrderedIntervalList* oilOut);
- };
+/**
+ * Functions that compute expression index mappings.
+ *
+ * TODO: I think we could structure this more generally with respect to planning.
+ */
+class ExpressionMapping {
+public:
+ static BSONObj hash(const BSONElement& value);
+
+ static void cover2d(const R2Region& region,
+ const BSONObj& indexInfoObj,
+ int maxCoveringCells,
+ OrderedIntervalList* oil);
+
+ // TODO: what should we really pass in for indexInfoObj?
+ static void cover2dsphere(const S2Region& region,
+ const BSONObj& indexInfoObj,
+ OrderedIntervalList* oilOut);
+};
} // namespace mongo
diff --git a/src/mongo/db/query/expression_index_knobs.cpp b/src/mongo/db/query/expression_index_knobs.cpp
index 4dd8dfe69c1..94422c3aec1 100644
--- a/src/mongo/db/query/expression_index_knobs.cpp
+++ b/src/mongo/db/query/expression_index_knobs.cpp
@@ -32,8 +32,8 @@
namespace mongo {
- MONGO_EXPORT_SERVER_PARAMETER(internalGeoPredicateQuery2DMaxCoveringCells, int, 16);
+MONGO_EXPORT_SERVER_PARAMETER(internalGeoPredicateQuery2DMaxCoveringCells, int, 16);
- MONGO_EXPORT_SERVER_PARAMETER(internalGeoNearQuery2DMaxCoveringCells, int, 16);
+MONGO_EXPORT_SERVER_PARAMETER(internalGeoNearQuery2DMaxCoveringCells, int, 16);
} // namespace mongo
diff --git a/src/mongo/db/query/expression_index_knobs.h b/src/mongo/db/query/expression_index_knobs.h
index 6dcfbaf2592..c5cfa8169e0 100644
--- a/src/mongo/db/query/expression_index_knobs.h
+++ b/src/mongo/db/query/expression_index_knobs.h
@@ -30,18 +30,18 @@
namespace mongo {
- //
- // Geo Query knobs
- //
+//
+// Geo Query knobs
+//
- /**
- * The maximum number of cells to use for 2D geo query covering for predicate queries
- */
- extern int internalGeoPredicateQuery2DMaxCoveringCells;
+/**
+ * The maximum number of cells to use for 2D geo query covering for predicate queries
+ */
+extern int internalGeoPredicateQuery2DMaxCoveringCells;
- /**
- * The maximum number of cells to use for 2D geo query covering for predicate queries
- */
- extern int internalGeoNearQuery2DMaxCoveringCells;
+/**
+ * The maximum number of cells to use for 2D geo query covering for near queries
+ */
+extern int internalGeoNearQuery2DMaxCoveringCells;
} // namespace mongo
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index ffb57194ebf..f170dc8ea04 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -66,698 +66,666 @@ using std::unique_ptr;
namespace mongo {
- // The .h for this in find_constants.h.
- const int32_t MaxBytesToReturnToClientAtOnce = 4 * 1024 * 1024;
-
- // Failpoint for checking whether we've received a getmore.
- MONGO_FP_DECLARE(failReceivedGetmore);
-
- ScopedRecoveryUnitSwapper::ScopedRecoveryUnitSwapper(ClientCursor* cc, OperationContext* txn)
- : _cc(cc),
- _txn(txn),
- _dismissed(false) {
- // Save this for later. We restore it upon destruction.
- _txn->recoveryUnit()->abandonSnapshot();
- _txnPreviousRecoveryUnit.reset(txn->releaseRecoveryUnit());
-
- // Transfer ownership of the RecoveryUnit from the ClientCursor to the OpCtx.
- RecoveryUnit* ccRecoveryUnit = cc->releaseOwnedRecoveryUnit();
- _txnPreviousRecoveryUnitState = txn->setRecoveryUnit(ccRecoveryUnit,
- OperationContext::kNotInUnitOfWork);
+// The .h for this is find_constants.h.
+const int32_t MaxBytesToReturnToClientAtOnce = 4 * 1024 * 1024;
+
+// Failpoint for checking whether we've received a getmore.
+MONGO_FP_DECLARE(failReceivedGetmore);
+
+ScopedRecoveryUnitSwapper::ScopedRecoveryUnitSwapper(ClientCursor* cc, OperationContext* txn)
+ : _cc(cc), _txn(txn), _dismissed(false) {
+ // Save this for later. We restore it upon destruction.
+ _txn->recoveryUnit()->abandonSnapshot();
+ _txnPreviousRecoveryUnit.reset(txn->releaseRecoveryUnit());
+
+ // Transfer ownership of the RecoveryUnit from the ClientCursor to the OpCtx.
+ RecoveryUnit* ccRecoveryUnit = cc->releaseOwnedRecoveryUnit();
+ _txnPreviousRecoveryUnitState =
+ txn->setRecoveryUnit(ccRecoveryUnit, OperationContext::kNotInUnitOfWork);
+}
+
+void ScopedRecoveryUnitSwapper::dismiss() {
+ _dismissed = true;
+}
+
+ScopedRecoveryUnitSwapper::~ScopedRecoveryUnitSwapper() {
+ _txn->recoveryUnit()->abandonSnapshot();
+
+ if (_dismissed) {
+ // Just clean up the recovery unit which we originally got from the ClientCursor.
+ delete _txn->releaseRecoveryUnit();
+ } else {
+ // Swap the RU back into the ClientCursor for subsequent getMores.
+ _cc->setOwnedRecoveryUnit(_txn->releaseRecoveryUnit());
}
- void ScopedRecoveryUnitSwapper::dismiss() {
- _dismissed = true;
- }
-
- ScopedRecoveryUnitSwapper::~ScopedRecoveryUnitSwapper() {
- _txn->recoveryUnit()->abandonSnapshot();
-
- if (_dismissed) {
- // Just clean up the recovery unit which we originally got from the ClientCursor.
- delete _txn->releaseRecoveryUnit();
- }
- else {
- // Swap the RU back into the ClientCursor for subsequent getMores.
- _cc->setOwnedRecoveryUnit(_txn->releaseRecoveryUnit());
- }
+ _txn->setRecoveryUnit(_txnPreviousRecoveryUnit.release(), _txnPreviousRecoveryUnitState);
+}
- _txn->setRecoveryUnit(_txnPreviousRecoveryUnit.release(), _txnPreviousRecoveryUnitState);
+/**
+ * If ntoreturn is zero, we stop generating additional results as soon as we have either 101
+ * documents or at least 1MB of data. On subsequent getmores, there is no limit on the number
+ * of results; we will stop as soon as we have at least 4 MB of data. The idea is that on a
+ * find() where one doesn't use many results, we don't return much, but once getmore kicks in,
+ * we start pushing significant quantities.
+ *
+ * If ntoreturn is non-zero, then we stop building the first batch once we either have ntoreturn
+ * results, or when the result set exceeds 4 MB.
+ */
+bool enoughForFirstBatch(const LiteParsedQuery& pq, int numDocs, int bytesBuffered) {
+ if (!pq.getBatchSize()) {
+ return (bytesBuffered > 1024 * 1024) || numDocs >= LiteParsedQuery::kDefaultBatchSize;
}
-
- /**
- * If ntoreturn is zero, we stop generating additional results as soon as we have either 101
- * documents or at least 1MB of data. On subsequent getmores, there is no limit on the number
- * of results; we will stop as soon as we have at least 4 MB of data. The idea is that on a
- * find() where one doesn't use much results, we don't return much, but once getmore kicks in,
- * we start pushing significant quantities.
- *
- * If ntoreturn is non-zero, the we stop building the first batch once we either have ntoreturn
- * results, or when the result set exceeds 4 MB.
- */
- bool enoughForFirstBatch(const LiteParsedQuery& pq, int numDocs, int bytesBuffered) {
- if (!pq.getBatchSize()) {
- return (bytesBuffered > 1024 * 1024) || numDocs >= LiteParsedQuery::kDefaultBatchSize;
- }
- return numDocs >= *pq.getBatchSize() || bytesBuffered > MaxBytesToReturnToClientAtOnce;
+ return numDocs >= *pq.getBatchSize() || bytesBuffered > MaxBytesToReturnToClientAtOnce;
+}
+
+bool enoughForGetMore(int ntoreturn, int numDocs, int bytesBuffered) {
+ return (ntoreturn && numDocs >= ntoreturn) || (bytesBuffered > MaxBytesToReturnToClientAtOnce);
+}
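
A compact standalone model of the two cut-offs above, with the constants written out and std::optional standing in for the nullable batch size (the helper names are illustrative, not the functions in this file, and std::optional replaces the original's nullable getter only for the sake of a self-contained sketch):

    #include <cassert>
    #include <optional>

    // Constants matching the behavior described in the comment above.
    constexpr int kDefaultFirstBatchDocs = 101;
    constexpr int kFirstBatchSoftLimitBytes = 1024 * 1024;    // 1 MB
    constexpr int kMaxBytesToReturnAtOnce = 4 * 1024 * 1024;  // 4 MB

    bool firstBatchIsFull(std::optional<int> batchSize, int numDocs, int bytesBuffered) {
        if (!batchSize) {
            return bytesBuffered > kFirstBatchSoftLimitBytes || numDocs >= kDefaultFirstBatchDocs;
        }
        return numDocs >= *batchSize || bytesBuffered > kMaxBytesToReturnAtOnce;
    }

    bool getMoreBatchIsFull(int ntoreturn, int numDocs, int bytesBuffered) {
        return (ntoreturn && numDocs >= ntoreturn) || bytesBuffered > kMaxBytesToReturnAtOnce;
    }

    int main() {
        // No batch size: 101 documents end the first batch even if they are tiny.
        assert(firstBatchIsFull(std::nullopt, 101, 10 * 1024));
        // No batch size: over a megabyte of data ends the first batch even with few documents.
        assert(firstBatchIsFull(std::nullopt, 5, 2 * 1024 * 1024));
        // Explicit batch size: stop once that many documents are buffered.
        assert(firstBatchIsFull(std::optional<int>(2), 2, 500));
        // getMore with ntoreturn == 0: only the 4 MB cap applies.
        assert(!getMoreBatchIsFull(0, 10000, 1024));
        return 0;
    }
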
+
+bool isCursorTailable(const ClientCursor* cursor) {
+ return cursor->queryOptions() & QueryOption_CursorTailable;
+}
+
+bool isCursorAwaitData(const ClientCursor* cursor) {
+ return cursor->queryOptions() & QueryOption_AwaitData;
+}
+
+bool shouldSaveCursor(OperationContext* txn,
+ const Collection* collection,
+ PlanExecutor::ExecState finalState,
+ PlanExecutor* exec) {
+ if (PlanExecutor::FAILURE == finalState || PlanExecutor::DEAD == finalState) {
+ return false;
}
- bool enoughForGetMore(int ntoreturn, int numDocs, int bytesBuffered) {
- return (ntoreturn && numDocs >= ntoreturn)
- || (bytesBuffered > MaxBytesToReturnToClientAtOnce);
+ const LiteParsedQuery& pq = exec->getCanonicalQuery()->getParsed();
+ if (!pq.wantMore() && !pq.isTailable()) {
+ return false;
}
- bool isCursorTailable(const ClientCursor* cursor) {
- return cursor->queryOptions() & QueryOption_CursorTailable;
+ if (!pq.isFromFindCommand() && pq.getBatchSize() && *pq.getBatchSize() == 1) {
+ return false;
}
- bool isCursorAwaitData(const ClientCursor* cursor) {
- return cursor->queryOptions() & QueryOption_AwaitData;
+ // We keep a tailable cursor around unless the collection we're tailing has no
+ // records.
+ //
+ // SERVER-13955: we should be able to create a tailable cursor that waits on
+ // an empty collection. Right now we do not keep a cursor if the collection
+ // has zero records.
+ if (pq.isTailable()) {
+ return collection && collection->numRecords(txn) != 0U;
}
- bool shouldSaveCursor(OperationContext* txn,
- const Collection* collection,
- PlanExecutor::ExecState finalState,
- PlanExecutor* exec) {
- if (PlanExecutor::FAILURE == finalState || PlanExecutor::DEAD == finalState) {
- return false;
- }
-
- const LiteParsedQuery& pq = exec->getCanonicalQuery()->getParsed();
- if (!pq.wantMore() && !pq.isTailable()) {
- return false;
- }
-
- if (!pq.isFromFindCommand() && pq.getBatchSize() && *pq.getBatchSize() == 1) {
- return false;
- }
+ return !exec->isEOF();
+}
- // We keep a tailable cursor around unless the collection we're tailing has no
- // records.
- //
- // SERVER-13955: we should be able to create a tailable cursor that waits on
- // an empty collection. Right now we do not keep a cursor if the collection
- // has zero records.
- if (pq.isTailable()) {
- return collection && collection->numRecords(txn) != 0U;
- }
+bool shouldSaveCursorGetMore(PlanExecutor::ExecState finalState,
+ PlanExecutor* exec,
+ bool isTailable) {
+ if (PlanExecutor::FAILURE == finalState || PlanExecutor::DEAD == finalState) {
+ return false;
+ }
- return !exec->isEOF();
+ if (isTailable) {
+ return true;
}
- bool shouldSaveCursorGetMore(PlanExecutor::ExecState finalState,
- PlanExecutor* exec,
- bool isTailable) {
- if (PlanExecutor::FAILURE == finalState || PlanExecutor::DEAD == finalState) {
- return false;
- }
+ return !exec->isEOF();
+}
+
+void beginQueryOp(OperationContext* txn,
+ const NamespaceString& nss,
+ const BSONObj& queryObj,
+ int ntoreturn,
+ int ntoskip) {
+ auto curop = CurOp::get(txn);
+ curop->debug().ns = nss.ns();
+ curop->debug().query = queryObj;
+ curop->debug().ntoreturn = ntoreturn;
+ curop->debug().ntoskip = ntoskip;
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ curop->setQuery_inlock(queryObj);
+}
+
+void endQueryOp(OperationContext* txn,
+ PlanExecutor* exec,
+ int dbProfilingLevel,
+ int numResults,
+ CursorId cursorId) {
+ auto curop = CurOp::get(txn);
+ invariant(exec);
+
+ // Fill out basic curop query exec properties.
+ curop->debug().nreturned = numResults;
+ curop->debug().cursorid = (0 == cursorId ? -1 : cursorId);
+ curop->debug().cursorExhausted = (0 == cursorId);
+
+ // Fill out curop based on explain summary statistics.
+ PlanSummaryStats summaryStats;
+ Explain::getSummaryStats(exec, &summaryStats);
+ curop->debug().scanAndOrder = summaryStats.hasSortStage;
+ curop->debug().nscanned = summaryStats.totalKeysExamined;
+ curop->debug().nscannedObjects = summaryStats.totalDocsExamined;
+ curop->debug().idhack = summaryStats.isIdhack;
+
+ const logger::LogComponent queryLogComponent = logger::LogComponent::kQuery;
+ const logger::LogSeverity logLevelOne = logger::LogSeverity::Debug(1);
+
+ // Set debug information for consumption by the profiler and slow query log.
+ if (dbProfilingLevel > 0 || curop->elapsedMillis() > serverGlobalParams.slowMS ||
+ logger::globalLogDomain()->shouldLog(queryLogComponent, logLevelOne)) {
+ // Generate plan summary string.
+ curop->debug().planSummary = Explain::getPlanSummary(exec);
+ }
- if (isTailable) {
- return true;
+ // Set debug information for consumption by the profiler only.
+ if (dbProfilingLevel > 0) {
+ // Get BSON stats.
+ unique_ptr<PlanStageStats> execStats(exec->getStats());
+ BSONObjBuilder statsBob;
+ Explain::statsToBSON(*execStats, &statsBob);
+ curop->debug().execStats.set(statsBob.obj());
+
+ // Replace exec stats with plan summary if stats cannot fit into CachedBSONObj.
+ if (curop->debug().execStats.tooBig() && !curop->debug().planSummary.empty()) {
+ BSONObjBuilder bob;
+ bob.append("summary", curop->debug().planSummary.toString());
+ curop->debug().execStats.set(bob.done());
}
-
- return !exec->isEOF();
}
+}
- void beginQueryOp(OperationContext* txn,
- const NamespaceString& nss,
- const BSONObj& queryObj,
- int ntoreturn,
- int ntoskip) {
- auto curop = CurOp::get(txn);
- curop->debug().ns = nss.ns();
- curop->debug().query = queryObj;
- curop->debug().ntoreturn = ntoreturn;
- curop->debug().ntoskip = ntoskip;
- stdx::lock_guard<Client> lk(*txn->getClient());
- curop->setQuery_inlock(queryObj);
+/**
+ * Called by db/instance.cpp. This is the getMore entry point.
+ *
+ * pass - when QueryOption_AwaitData is in use, the caller will make repeated calls
+ * when this method returns an empty result, incrementing pass on each call.
+ * Thus, pass == 0 indicates this is the first "attempt" before any 'awaiting'.
+ */
+QueryResult::View getMore(OperationContext* txn,
+ const char* ns,
+ int ntoreturn,
+ long long cursorid,
+ int pass,
+ bool& exhaust,
+ bool* isCursorAuthorized) {
+ CurOp& curop = *CurOp::get(txn);
+
+ // For testing, we may want to fail if we receive a getmore.
+ if (MONGO_FAIL_POINT(failReceivedGetmore)) {
+ invariant(0);
}
- void endQueryOp(OperationContext* txn,
- PlanExecutor* exec,
- int dbProfilingLevel,
- int numResults,
- CursorId cursorId) {
- auto curop = CurOp::get(txn);
- invariant(exec);
-
- // Fill out basic curop query exec properties.
- curop->debug().nreturned = numResults;
- curop->debug().cursorid = (0 == cursorId ? -1 : cursorId);
- curop->debug().cursorExhausted = (0 == cursorId);
-
- // Fill out curop based on explain summary statistics.
- PlanSummaryStats summaryStats;
- Explain::getSummaryStats(exec, &summaryStats);
- curop->debug().scanAndOrder = summaryStats.hasSortStage;
- curop->debug().nscanned = summaryStats.totalKeysExamined;
- curop->debug().nscannedObjects = summaryStats.totalDocsExamined;
- curop->debug().idhack = summaryStats.isIdhack;
-
- const logger::LogComponent queryLogComponent = logger::LogComponent::kQuery;
- const logger::LogSeverity logLevelOne = logger::LogSeverity::Debug(1);
-
- // Set debug information for consumption by the profiler and slow query log.
- if (dbProfilingLevel > 0
- || curop->elapsedMillis() > serverGlobalParams.slowMS
- || logger::globalLogDomain()->shouldLog(queryLogComponent, logLevelOne)) {
- // Generate plan summary string.
- curop->debug().planSummary = Explain::getPlanSummary(exec);
- }
+ exhaust = false;
+
+ const NamespaceString nss(ns);
+
+ // Depending on the type of cursor being operated on, we hold locks for the whole getMore,
+ // or none of the getMore, or part of the getMore. The three cases in detail:
+ //
+ // 1) Normal cursor: we lock with "ctx" and hold it for the whole getMore.
+ // 2) Cursor owned by global cursor manager: we don't lock anything. These cursors don't
+ // own any collection state.
+ // 3) Agg cursor: we lock with "ctx", then release, then relock with "unpinDBLock" and
+ // "unpinCollLock". This is because agg cursors handle locking internally (hence the
+ // release), but the pin and unpin of the cursor must occur under the collection lock.
+ // We don't use our AutoGetCollectionForRead "ctx" to relock, because
+ // AutoGetCollectionForRead checks the sharding version (and we want the relock for the
+ // unpin to succeed even if the sharding version has changed).
+ //
+ // Note that we declare our locks before our ClientCursorPin, in order to ensure that the
+ // pin's destructor is called before the lock destructors (so that the unpin occurs under
+ // the lock).
+ std::unique_ptr<AutoGetCollectionForRead> ctx;
+ std::unique_ptr<Lock::DBLock> unpinDBLock;
+ std::unique_ptr<Lock::CollectionLock> unpinCollLock;
+
+ CursorManager* cursorManager;
+ CursorManager* globalCursorManager = CursorManager::getGlobalCursorManager();
+ if (globalCursorManager->ownsCursorId(cursorid)) {
+ cursorManager = globalCursorManager;
+ } else {
+ ctx.reset(new AutoGetCollectionForRead(txn, nss));
+ Collection* collection = ctx->getCollection();
+ uassert(17356, "collection dropped between getMore calls", collection);
+ cursorManager = collection->getCursorManager();
+ }
- // Set debug information for consumption by the profiler only.
- if (dbProfilingLevel > 0) {
- // Get BSON stats.
- unique_ptr<PlanStageStats> execStats(exec->getStats());
- BSONObjBuilder statsBob;
- Explain::statsToBSON(*execStats, &statsBob);
- curop->debug().execStats.set(statsBob.obj());
-
- // Replace exec stats with plan summary if stats cannot fit into CachedBSONObj.
- if (curop->debug().execStats.tooBig() && !curop->debug().planSummary.empty()) {
- BSONObjBuilder bob;
- bob.append("summary", curop->debug().planSummary.toString());
- curop->debug().execStats.set(bob.done());
+ LOG(5) << "Running getMore, cursorid: " << cursorid << endl;
+
+ // This checks to make sure the operation is allowed on a replicated node. Since we are not
+ // passing in a query object (necessary to check SlaveOK query option), the only state where
+ // reads are allowed is PRIMARY (or master in master/slave). This function uasserts if
+ // reads are not okay.
+ Status status = repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor(txn, nss, true);
+ uassertStatusOK(status);
+
+ // A pin performs a CC lookup and if there is a CC, increments the CC's pin value so it
+ // doesn't time out. Also informs ClientCursor that there is somebody actively holding the
+ // CC, so don't delete it.
+ ClientCursorPin ccPin(cursorManager, cursorid);
+ ClientCursor* cc = ccPin.c();
+
+ // If we're not being called from DBDirectClient we want to associate the RecoveryUnit
+ // used to create the execution machinery inside the cursor with our OperationContext.
+ // If we throw or otherwise exit this method in a disorderly fashion, we must ensure
+ // that further calls to getMore won't fail, and that the provided OperationContext
+ // has a valid RecoveryUnit. As such, we use RAII to accomplish this.
+ //
+ // This must be destroyed before the ClientCursor is destroyed.
+ std::unique_ptr<ScopedRecoveryUnitSwapper> ruSwapper;
+
+ // These are set in the QueryResult msg we return.
+ int resultFlags = ResultFlag_AwaitCapable;
+
+ int numResults = 0;
+ int startingResult = 0;
+
+ const int InitialBufSize = 512 + sizeof(QueryResult::Value) + MaxBytesToReturnToClientAtOnce;
+
+ BufBuilder bb(InitialBufSize);
+ bb.skip(sizeof(QueryResult::Value));
+
+ if (NULL == cc) {
+ cursorid = 0;
+ resultFlags = ResultFlag_CursorNotFound;
+ } else {
+ // Check for spoofing of the ns such that it does not match the one originally
+ // there for the cursor.
+ uassert(ErrorCodes::Unauthorized,
+ str::stream() << "Requested getMore on namespace " << ns << ", but cursor "
+ << cursorid << " belongs to namespace " << cc->ns(),
+ ns == cc->ns());
+ *isCursorAuthorized = true;
+
+ // Restore the RecoveryUnit if we need to.
+ if (txn->getClient()->isInDirectClient()) {
+ if (cc->hasRecoveryUnit())
+ invariant(txn->recoveryUnit() == cc->getUnownedRecoveryUnit());
+ } else {
+ if (!cc->hasRecoveryUnit()) {
+ // Start using a new RecoveryUnit
+ cc->setOwnedRecoveryUnit(
+ getGlobalServiceContext()->getGlobalStorageEngine()->newRecoveryUnit());
}
+ // Swap RecoveryUnit(s) between the ClientCursor and OperationContext.
+ ruSwapper.reset(new ScopedRecoveryUnitSwapper(cc, txn));
}
- }
- /**
- * Called by db/instance.cpp. This is the getMore entry point.
- *
- * pass - when QueryOption_AwaitData is in use, the caller will make repeated calls
- * when this method returns an empty result, incrementing pass on each call.
- * Thus, pass == 0 indicates this is the first "attempt" before any 'awaiting'.
- */
- QueryResult::View getMore(OperationContext* txn,
- const char* ns,
- int ntoreturn,
- long long cursorid,
- int pass,
- bool& exhaust,
- bool* isCursorAuthorized) {
-
- CurOp& curop = *CurOp::get(txn);
-
- // For testing, we may want to fail if we receive a getmore.
- if (MONGO_FAIL_POINT(failReceivedGetmore)) {
- invariant(0);
+ // Reset timeout timer on the cursor since the cursor is still in use.
+ cc->setIdleTime(0);
+
+ // If the operation that spawned this cursor had a time limit set, apply leftover
+ // time to this getmore.
+ curop.setMaxTimeMicros(cc->getLeftoverMaxTimeMicros());
+ txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
+
+ // Ensure that the original query or command object is available in the slow query log,
+ // profiler, and currentOp.
+ curop.debug().query = cc->getQuery();
+ {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ curop.setQuery_inlock(cc->getQuery());
}
- exhaust = false;
-
- const NamespaceString nss(ns);
-
- // Depending on the type of cursor being operated on, we hold locks for the whole getMore,
- // or none of the getMore, or part of the getMore. The three cases in detail:
- //
- // 1) Normal cursor: we lock with "ctx" and hold it for the whole getMore.
- // 2) Cursor owned by global cursor manager: we don't lock anything. These cursors don't
- // own any collection state.
- // 3) Agg cursor: we lock with "ctx", then release, then relock with "unpinDBLock" and
- // "unpinCollLock". This is because agg cursors handle locking internally (hence the
- // release), but the pin and unpin of the cursor must occur under the collection lock.
- // We don't use our AutoGetCollectionForRead "ctx" to relock, because
- // AutoGetCollectionForRead checks the sharding version (and we want the relock for the
- // unpin to succeed even if the sharding version has changed).
- //
- // Note that we declare our locks before our ClientCursorPin, in order to ensure that the
- // pin's destructor is called before the lock destructors (so that the unpin occurs under
- // the lock).
- std::unique_ptr<AutoGetCollectionForRead> ctx;
- std::unique_ptr<Lock::DBLock> unpinDBLock;
- std::unique_ptr<Lock::CollectionLock> unpinCollLock;
-
- CursorManager* cursorManager;
- CursorManager* globalCursorManager = CursorManager::getGlobalCursorManager();
- if (globalCursorManager->ownsCursorId(cursorid)) {
- cursorManager = globalCursorManager;
+ if (0 == pass) {
+ cc->updateSlaveLocation(txn);
}
- else {
- ctx.reset(new AutoGetCollectionForRead(txn, nss));
- Collection* collection = ctx->getCollection();
- uassert( 17356, "collection dropped between getMore calls", collection );
- cursorManager = collection->getCursorManager();
+
+ if (cc->isAggCursor()) {
+ // Agg cursors handle their own locking internally.
+ ctx.reset(); // unlocks
}
- LOG(5) << "Running getMore, cursorid: " << cursorid << endl;
-
- // This checks to make sure the operation is allowed on a replicated node. Since we are not
- // passing in a query object (necessary to check SlaveOK query option), the only state where
- // reads are allowed is PRIMARY (or master in master/slave). This function uasserts if
- // reads are not okay.
- Status status = repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor(
- txn,
- nss,
- true);
- uassertStatusOK(status);
-
- // A pin performs a CC lookup and if there is a CC, increments the CC's pin value so it
- // doesn't time out. Also informs ClientCursor that there is somebody actively holding the
- // CC, so don't delete it.
- ClientCursorPin ccPin(cursorManager, cursorid);
- ClientCursor* cc = ccPin.c();
-
- // If we're not being called from DBDirectClient we want to associate the RecoveryUnit
- // used to create the execution machinery inside the cursor with our OperationContext.
- // If we throw or otherwise exit this method in a disorderly fashion, we must ensure
- // that further calls to getMore won't fail, and that the provided OperationContext
- // has a valid RecoveryUnit. As such, we use RAII to accomplish this.
- //
- // This must be destroyed before the ClientCursor is destroyed.
- std::unique_ptr<ScopedRecoveryUnitSwapper> ruSwapper;
-
- // These are set in the QueryResult msg we return.
- int resultFlags = ResultFlag_AwaitCapable;
-
- int numResults = 0;
- int startingResult = 0;
-
- const int InitialBufSize =
- 512 + sizeof(QueryResult::Value) + MaxBytesToReturnToClientAtOnce;
-
- BufBuilder bb(InitialBufSize);
- bb.skip(sizeof(QueryResult::Value));
+ // If we're replaying the oplog, we save the last time that we read.
+ Timestamp slaveReadTill;
- if (NULL == cc) {
- cursorid = 0;
- resultFlags = ResultFlag_CursorNotFound;
- }
- else {
- // Check for spoofing of the ns such that it does not match the one originally
- // there for the cursor.
- uassert(ErrorCodes::Unauthorized,
- str::stream() << "Requested getMore on namespace " << ns << ", but cursor "
- << cursorid << " belongs to namespace " << cc->ns(),
- ns == cc->ns());
- *isCursorAuthorized = true;
-
- // Restore the RecoveryUnit if we need to.
- if (txn->getClient()->isInDirectClient()) {
- if (cc->hasRecoveryUnit())
- invariant(txn->recoveryUnit() == cc->getUnownedRecoveryUnit());
- }
- else {
- if (!cc->hasRecoveryUnit()) {
- // Start using a new RecoveryUnit
- cc->setOwnedRecoveryUnit(
- getGlobalServiceContext()->getGlobalStorageEngine()->newRecoveryUnit());
+ // What number result are we starting at? Used to fill out the reply.
+ startingResult = cc->pos();
- }
- // Swap RecoveryUnit(s) between the ClientCursor and OperationContext.
- ruSwapper.reset(new ScopedRecoveryUnitSwapper(cc, txn));
- }
+ // What gives us results.
+ PlanExecutor* exec = cc->getExecutor();
+ const int queryOptions = cc->queryOptions();
- // Reset timeout timer on the cursor since the cursor is still in use.
- cc->setIdleTime(0);
+ // Get results out of the executor.
+ exec->restoreState(txn);
- // If the operation that spawned this cursor had a time limit set, apply leftover
- // time to this getmore.
- curop.setMaxTimeMicros(cc->getLeftoverMaxTimeMicros());
- txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
+ BSONObj obj;
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ // Add result to output buffer.
+ bb.appendBuf((void*)obj.objdata(), obj.objsize());
- // Ensure that the original query or command object is available in the slow query log,
- // profiler, and currentOp.
- curop.debug().query = cc->getQuery();
- {
- stdx::lock_guard<Client> lk(*txn->getClient());
- curop.setQuery_inlock(cc->getQuery());
- }
+ // Count the result.
+ ++numResults;
- if (0 == pass) {
- cc->updateSlaveLocation(txn);
+ // Possibly note slave's position in the oplog.
+ if (queryOptions & QueryOption_OplogReplay) {
+ BSONElement e = obj["ts"];
+ if (Date == e.type() || bsonTimestamp == e.type()) {
+ slaveReadTill = e.timestamp();
+ }
}
- if (cc->isAggCursor()) {
- // Agg cursors handle their own locking internally.
- ctx.reset(); // unlocks
+ if (enoughForGetMore(ntoreturn, numResults, bb.len())) {
+ break;
}
+ }
- // If we're replaying the oplog, we save the last time that we read.
- Timestamp slaveReadTill;
-
- // What number result are we starting at? Used to fill out the reply.
- startingResult = cc->pos();
-
- // What gives us results.
- PlanExecutor* exec = cc->getExecutor();
- const int queryOptions = cc->queryOptions();
+ if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
+ // Propagate this error to caller.
+ const std::unique_ptr<PlanStageStats> stats(exec->getStats());
+ error() << "getMore executor error, stats: " << Explain::statsToBSON(*stats);
+ uasserted(17406, "getMore executor error: " + WorkingSetCommon::toStatusString(obj));
+ }
- // Get results out of the executor.
- exec->restoreState(txn);
+ const bool shouldSaveCursor = shouldSaveCursorGetMore(state, exec, isCursorTailable(cc));
- BSONObj obj;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
- // Add result to output buffer.
- bb.appendBuf((void*)obj.objdata(), obj.objsize());
+ // In order to deregister a cursor, we need to be holding the DB + collection lock and
+ // if the cursor is aggregation, we release these locks.
+ if (cc->isAggCursor()) {
+ invariant(NULL == ctx.get());
+ unpinDBLock.reset(new Lock::DBLock(txn->lockState(), nss.db(), MODE_IS));
+ unpinCollLock.reset(new Lock::CollectionLock(txn->lockState(), nss.ns(), MODE_IS));
+ }
- // Count the result.
- ++numResults;
+ // Our two possible ClientCursorPin cleanup paths are:
+ // 1) If the cursor is not going to be saved, we call deleteUnderlying() on the pin.
+ // 2) If the cursor is going to be saved, we simply let the pin go out of scope. In
+ // this case, the pin's destructor will be invoked, which will call release() on the
+ // pin. Because our ClientCursorPin is declared after our lock is declared, this
+ // will happen under the lock.
+ if (!shouldSaveCursor) {
+ ruSwapper.reset();
+ ccPin.deleteUnderlying();
+
+ // cc is now invalid, as is the executor
+ cursorid = 0;
+ cc = NULL;
+ curop.debug().cursorExhausted = true;
+
+ LOG(5) << "getMore NOT saving client cursor, ended with state "
+ << PlanExecutor::statestr(state) << endl;
+ } else {
+ // Continue caching the ClientCursor.
+ cc->incPos(numResults);
+ exec->saveState();
+ LOG(5) << "getMore saving client cursor ended with state "
+ << PlanExecutor::statestr(state) << endl;
- // Possibly note slave's position in the oplog.
- if (queryOptions & QueryOption_OplogReplay) {
- BSONElement e = obj["ts"];
- if (Date == e.type() || bsonTimestamp == e.type()) {
- slaveReadTill = e.timestamp();
- }
+ if (PlanExecutor::IS_EOF == state && (queryOptions & QueryOption_CursorTailable)) {
+ if (!txn->getClient()->isInDirectClient()) {
+ // Don't stash the RU. Get a new one on the next getMore.
+ ruSwapper->dismiss();
}
- if (enoughForGetMore(ntoreturn, numResults, bb.len())) {
- break;
+ if ((queryOptions & QueryOption_AwaitData) && (numResults == 0) && (pass < 1000)) {
+ // Bubble up to the AwaitData handling code in receivedGetMore which will
+ // try again.
+ return NULL;
}
}
- if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
- // Propagate this error to caller.
- const std::unique_ptr<PlanStageStats> stats(exec->getStats());
- error() << "getMore executor error, stats: "
- << Explain::statsToBSON(*stats);
- uasserted(17406, "getMore executor error: " +
- WorkingSetCommon::toStatusString(obj));
+ // Possibly note slave's position in the oplog.
+ if ((queryOptions & QueryOption_OplogReplay) && !slaveReadTill.isNull()) {
+ cc->slaveReadTill(slaveReadTill);
}
- const bool shouldSaveCursor =
- shouldSaveCursorGetMore(state, exec, isCursorTailable(cc));
+ exhaust = (queryOptions & QueryOption_Exhaust);
- // In order to deregister a cursor, we need to be holding the DB + collection lock and
- // if the cursor is aggregation, we release these locks.
- if (cc->isAggCursor()) {
- invariant(NULL == ctx.get());
- unpinDBLock.reset(new Lock::DBLock(txn->lockState(), nss.db(), MODE_IS));
- unpinCollLock.reset(new Lock::CollectionLock(txn->lockState(), nss.ns(), MODE_IS));
- }
+ // If the getmore had a time limit, remaining time is "rolled over" back to the
+ // cursor (for use by future getmore ops).
+ cc->setLeftoverMaxTimeMicros(curop.getRemainingMaxTimeMicros());
+ }
+ }
- // Our two possible ClientCursorPin cleanup paths are:
- // 1) If the cursor is not going to be saved, we call deleteUnderlying() on the pin.
- // 2) If the cursor is going to be saved, we simply let the pin go out of scope. In
- // this case, the pin's destructor will be invoked, which will call release() on the
- // pin. Because our ClientCursorPin is declared after our lock is declared, this
- // will happen under the lock.
- if (!shouldSaveCursor) {
- ruSwapper.reset();
- ccPin.deleteUnderlying();
-
- // cc is now invalid, as is the executor
- cursorid = 0;
- cc = NULL;
- curop.debug().cursorExhausted = true;
-
- LOG(5) << "getMore NOT saving client cursor, ended with state "
- << PlanExecutor::statestr(state)
- << endl;
- }
- else {
- // Continue caching the ClientCursor.
- cc->incPos(numResults);
- exec->saveState();
- LOG(5) << "getMore saving client cursor ended with state "
- << PlanExecutor::statestr(state)
- << endl;
-
- if (PlanExecutor::IS_EOF == state && (queryOptions & QueryOption_CursorTailable)) {
- if (!txn->getClient()->isInDirectClient()) {
- // Don't stash the RU. Get a new one on the next getMore.
- ruSwapper->dismiss();
- }
-
- if ((queryOptions & QueryOption_AwaitData)
- && (numResults == 0)
- && (pass < 1000)) {
- // Bubble up to the AwaitData handling code in receivedGetMore which will
- // try again.
- return NULL;
- }
- }
+ QueryResult::View qr = bb.buf();
+ qr.msgdata().setLen(bb.len());
+ qr.msgdata().setOperation(opReply);
+ qr.setResultFlags(resultFlags);
+ qr.setCursorId(cursorid);
+ qr.setStartingFrom(startingResult);
+ qr.setNReturned(numResults);
+ bb.decouple();
+ LOG(5) << "getMore returned " << numResults << " results\n";
+ return qr;
+}
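
The pass/AwaitData contract described in the header comment amounts to a caller-side retry loop: an empty result bubbles back to the caller, which waits briefly and calls again with an incremented pass, and the code above stops deferring once pass reaches 1000. A toy sketch of that loop shape (getMoreAttempt is an illustrative stand-in, not the real entry point):

    #include <iostream>

    // Stand-in for the AwaitData behavior: return false ("no data yet") until
    // data becomes available; the caller retries with an incremented pass.
    bool getMoreAttempt(int pass, int* numResults) {
        if (pass < 2) {  // pretend data only arrives on the third attempt
            *numResults = 0;
            return false;
        }
        *numResults = 1;
        return true;
    }

    int main() {
        int numResults = 0;
        int pass = 0;  // pass == 0 is the first attempt before any awaiting
        while (!getMoreAttempt(pass, &numResults) && pass < 1000) {
            ++pass;
        }
        std::cout << "results after " << pass << " passes: " << numResults << "\n";
        return 0;
    }
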
+
+std::string runQuery(OperationContext* txn,
+ QueryMessage& q,
+ const NamespaceString& nss,
+ Message& result) {
+ CurOp& curop = *CurOp::get(txn);
+ // Validate the namespace.
+ uassert(16256, str::stream() << "Invalid ns [" << nss.ns() << "]", nss.isValid());
+ invariant(!nss.isCommand());
+
+ // Set curop information.
+ beginQueryOp(txn, nss, q.query, q.ntoreturn, q.ntoskip);
+
+ // Parse the qm into a CanonicalQuery.
+ std::unique_ptr<CanonicalQuery> cq;
+ {
+ CanonicalQuery* cqRaw;
+ Status canonStatus =
+ CanonicalQuery::canonicalize(q, &cqRaw, WhereCallbackReal(txn, nss.db()));
+ if (!canonStatus.isOK()) {
+ uasserted(17287,
+ str::stream() << "Can't canonicalize query: " << canonStatus.toString());
+ }
+ cq.reset(cqRaw);
+ }
+ invariant(cq.get());
+
+ LOG(5) << "Running query:\n" << cq->toString();
+ LOG(2) << "Running query: " << cq->toStringShort();
+
+ // Parse, canonicalize, plan, transcribe, and get a plan executor.
+ AutoGetCollectionForRead ctx(txn, nss);
+ Collection* collection = ctx.getCollection();
+
+ const int dbProfilingLevel =
+ ctx.getDb() ? ctx.getDb()->getProfilingLevel() : serverGlobalParams.defaultProfile;
+
+ // We have a parsed query. Time to get the execution plan for it.
+ std::unique_ptr<PlanExecutor> exec;
+ {
+ PlanExecutor* rawExec;
+ Status execStatus =
+ getExecutorFind(txn, collection, nss, cq.release(), PlanExecutor::YIELD_AUTO, &rawExec);
+ uassertStatusOK(execStatus);
+ exec.reset(rawExec);
+ }
+ const LiteParsedQuery& pq = exec->getCanonicalQuery()->getParsed();
- // Possibly note slave's position in the oplog.
- if ((queryOptions & QueryOption_OplogReplay) && !slaveReadTill.isNull()) {
- cc->slaveReadTill(slaveReadTill);
- }
+ // If it's actually an explain, do the explain and return rather than falling through
+ // to the normal query execution loop.
+ if (pq.isExplain()) {
+ BufBuilder bb;
+ bb.skip(sizeof(QueryResult::Value));
- exhaust = (queryOptions & QueryOption_Exhaust);
+ BSONObjBuilder explainBob;
+ Explain::explainStages(exec.get(), ExplainCommon::EXEC_ALL_PLANS, &explainBob);
- // If the getmore had a time limit, remaining time is "rolled over" back to the
- // cursor (for use by future getmore ops).
- cc->setLeftoverMaxTimeMicros( curop.getRemainingMaxTimeMicros() );
- }
- }
+ // Add the resulting object to the return buffer.
+ BSONObj explainObj = explainBob.obj();
+ bb.appendBuf((void*)explainObj.objdata(), explainObj.objsize());
+
+ // TODO: Does this get overwritten/do we really need to set this twice?
+ curop.debug().query = q.query;
+ // Set query result fields.
QueryResult::View qr = bb.buf();
+ bb.decouple();
+ qr.setResultFlagsToOk();
qr.msgdata().setLen(bb.len());
+ curop.debug().responseLength = bb.len();
qr.msgdata().setOperation(opReply);
- qr.setResultFlags(resultFlags);
- qr.setCursorId(cursorid);
- qr.setStartingFrom(startingResult);
- qr.setNReturned(numResults);
- bb.decouple();
- LOG(5) << "getMore returned " << numResults << " results\n";
- return qr;
+ qr.setCursorId(0);
+ qr.setStartingFrom(0);
+ qr.setNReturned(1);
+ result.setData(qr.view2ptr(), true);
+ return "";
}
- std::string runQuery(OperationContext* txn,
- QueryMessage& q,
- const NamespaceString& nss,
- Message &result) {
- CurOp& curop = *CurOp::get(txn);
- // Validate the namespace.
- uassert(16256, str::stream() << "Invalid ns [" << nss.ns() << "]", nss.isValid());
- invariant(!nss.isCommand());
-
- // Set curop information.
- beginQueryOp(txn, nss, q.query, q.ntoreturn, q.ntoskip);
-
- // Parse the qm into a CanonicalQuery.
- std::unique_ptr<CanonicalQuery> cq;
- {
- CanonicalQuery* cqRaw;
- Status canonStatus = CanonicalQuery::canonicalize(q,
- &cqRaw,
- WhereCallbackReal(txn, nss.db()));
- if (!canonStatus.isOK()) {
- uasserted(17287, str::stream() << "Can't canonicalize query: "
- << canonStatus.toString());
- }
- cq.reset(cqRaw);
- }
- invariant(cq.get());
-
- LOG(5) << "Running query:\n" << cq->toString();
- LOG(2) << "Running query: " << cq->toStringShort();
+ // We freak out later if this changes before we're done with the query.
+ const ChunkVersion shardingVersionAtStart = shardingState.getVersion(nss.ns());
- // Parse, canonicalize, plan, transcribe, and get a plan executor.
- AutoGetCollectionForRead ctx(txn, nss);
- Collection* collection = ctx.getCollection();
+ // Handle query option $maxTimeMS (not used with commands).
+ curop.setMaxTimeMicros(static_cast<unsigned long long>(pq.getMaxTimeMS()) * 1000);
+ txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
- const int dbProfilingLevel = ctx.getDb() ? ctx.getDb()->getProfilingLevel() :
- serverGlobalParams.defaultProfile;
+ // uassert if we are not on a primary, and not a secondary with SlaveOk query parameter set.
+ bool slaveOK = pq.isSlaveOk() || pq.hasReadPref();
+ Status serveReadsStatus =
+ repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor(txn, nss, slaveOK);
+ uassertStatusOK(serveReadsStatus);
- // We have a parsed query. Time to get the execution plan for it.
- std::unique_ptr<PlanExecutor> exec;
- {
- PlanExecutor* rawExec;
- Status execStatus = getExecutorFind(txn,
- collection,
- nss,
- cq.release(),
- PlanExecutor::YIELD_AUTO,
- &rawExec);
- uassertStatusOK(execStatus);
- exec.reset(rawExec);
- }
- const LiteParsedQuery& pq = exec->getCanonicalQuery()->getParsed();
-
- // If it's actually an explain, do the explain and return rather than falling through
- // to the normal query execution loop.
- if (pq.isExplain()) {
- BufBuilder bb;
- bb.skip(sizeof(QueryResult::Value));
-
- BSONObjBuilder explainBob;
- Explain::explainStages(exec.get(), ExplainCommon::EXEC_ALL_PLANS, &explainBob);
-
- // Add the resulting object to the return buffer.
- BSONObj explainObj = explainBob.obj();
- bb.appendBuf((void*)explainObj.objdata(), explainObj.objsize());
-
- // TODO: Does this get overwritten/do we really need to set this twice?
- curop.debug().query = q.query;
-
- // Set query result fields.
- QueryResult::View qr = bb.buf();
- bb.decouple();
- qr.setResultFlagsToOk();
- qr.msgdata().setLen(bb.len());
- curop.debug().responseLength = bb.len();
- qr.msgdata().setOperation(opReply);
- qr.setCursorId(0);
- qr.setStartingFrom(0);
- qr.setNReturned(1);
- result.setData(qr.view2ptr(), true);
- return "";
- }
+ // Run the query.
+    // bb is used to hold query results. This buffer should contain either the requested
+    // documents or explain information, but not both.
+ BufBuilder bb(32768);
+ bb.skip(sizeof(QueryResult::Value));
- // We freak out later if this changes before we're done with the query.
- const ChunkVersion shardingVersionAtStart = shardingState.getVersion(nss.ns());
-
- // Handle query option $maxTimeMS (not used with commands).
- curop.setMaxTimeMicros(static_cast<unsigned long long>(pq.getMaxTimeMS()) * 1000);
- txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
-
- // uassert if we are not on a primary, and not a secondary with SlaveOk query parameter set.
- bool slaveOK = pq.isSlaveOk() || pq.hasReadPref();
- Status serveReadsStatus = repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor(
- txn,
- nss,
- slaveOK);
- uassertStatusOK(serveReadsStatus);
-
- // Run the query.
- // bb is used to hold query results
- // this buffer should contain either requested documents per query or
- // explain information, but not both
- BufBuilder bb(32768);
- bb.skip(sizeof(QueryResult::Value));
+ // How many results have we obtained from the executor?
+ int numResults = 0;
- // How many results have we obtained from the executor?
- int numResults = 0;
+ // If we're replaying the oplog, we save the last time that we read.
+ Timestamp slaveReadTill;
- // If we're replaying the oplog, we save the last time that we read.
- Timestamp slaveReadTill;
+ BSONObj obj;
+ PlanExecutor::ExecState state;
+ // uint64_t numMisplacedDocs = 0;
- BSONObj obj;
- PlanExecutor::ExecState state;
- // uint64_t numMisplacedDocs = 0;
-
- // Get summary info about which plan the executor is using.
- curop.debug().planSummary = Explain::getPlanSummary(exec.get());
+ // Get summary info about which plan the executor is using.
+ curop.debug().planSummary = Explain::getPlanSummary(exec.get());
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
- // Add result to output buffer.
- bb.appendBuf((void*)obj.objdata(), obj.objsize());
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ // Add result to output buffer.
+ bb.appendBuf((void*)obj.objdata(), obj.objsize());
- // Count the result.
- ++numResults;
+ // Count the result.
+ ++numResults;
- // Possibly note slave's position in the oplog.
- if (pq.isOplogReplay()) {
- BSONElement e = obj["ts"];
- if (Date == e.type() || bsonTimestamp == e.type()) {
- slaveReadTill = e.timestamp();
- }
- }
-
- if (enoughForFirstBatch(pq, numResults, bb.len())) {
- LOG(5) << "Enough for first batch, wantMore=" << pq.wantMore()
- << " batchSize=" << pq.getBatchSize().value_or(0)
- << " numResults=" << numResults
- << endl;
- break;
+ // Possibly note slave's position in the oplog.
+ if (pq.isOplogReplay()) {
+ BSONElement e = obj["ts"];
+ if (Date == e.type() || bsonTimestamp == e.type()) {
+ slaveReadTill = e.timestamp();
}
}
- // If we cache the executor later, we want to deregister it as it receives notifications
- // anyway by virtue of being cached.
- //
- // If we don't cache the executor later, we are deleting it, so it must be deregistered.
- //
- // So, no matter what, deregister the executor.
- exec->deregisterExec();
-
- // Caller expects exceptions thrown in certain cases.
- if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
- const std::unique_ptr<PlanStageStats> stats(exec->getStats());
- error() << "Plan executor error during find: " << PlanExecutor::statestr(state)
- << ", stats: " << Explain::statsToBSON(*stats);
- uasserted(17144, "Executor error: " + WorkingSetCommon::toStatusString(obj));
+ if (enoughForFirstBatch(pq, numResults, bb.len())) {
+ LOG(5) << "Enough for first batch, wantMore=" << pq.wantMore()
+ << " batchSize=" << pq.getBatchSize().value_or(0) << " numResults=" << numResults
+ << endl;
+ break;
}
+ }
- // TODO: Currently, chunk ranges are kept around until all ClientCursors created while the
- // chunk belonged on this node are gone. Separating chunk lifetime management from
- // ClientCursor should allow this check to go away.
- if (!shardingState.getVersion(nss.ns()).isWriteCompatibleWith(shardingVersionAtStart)) {
- // if the version changed during the query we might be missing some data and its safe to
- // send this as mongos can resend at this point
- throw SendStaleConfigException(nss.ns(), "version changed during initial query",
- shardingVersionAtStart,
- shardingState.getVersion(nss.ns()));
- }
-
- // Fill out curop based on query results. If we have a cursorid, we will fill out curop with
- // this cursorid later.
- long long ccId = 0;
-
- if (shouldSaveCursor(txn, collection, state, exec.get())) {
- // We won't use the executor until it's getMore'd.
- exec->saveState();
-
- // Allocate a new ClientCursor. We don't have to worry about leaking it as it's
- // inserted into a global map by its ctor.
- ClientCursor* cc = new ClientCursor(collection->getCursorManager(),
- exec.release(),
- nss.ns(),
- pq.getOptions(),
- pq.getFilter());
- ccId = cc->cursorid();
-
- if (txn->getClient()->isInDirectClient()) {
- cc->setUnownedRecoveryUnit(txn->recoveryUnit());
- }
- else if (state == PlanExecutor::IS_EOF && pq.isTailable()) {
- // Don't stash the RU for tailable cursors at EOF, let them get a new RU on their
- // next getMore.
- }
- else {
- // We stash away the RecoveryUnit in the ClientCursor. It's used for subsequent
- // getMore requests. The calling OpCtx gets a fresh RecoveryUnit.
- txn->recoveryUnit()->abandonSnapshot();
- cc->setOwnedRecoveryUnit(txn->releaseRecoveryUnit());
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- invariant(txn->setRecoveryUnit(storageEngine->newRecoveryUnit(),
- OperationContext::kNotInUnitOfWork)
- == OperationContext::kNotInUnitOfWork);
- }
-
- LOG(5) << "caching executor with cursorid " << ccId
- << " after returning " << numResults << " results" << endl;
-
- // TODO document
- if (pq.isOplogReplay() && !slaveReadTill.isNull()) {
- cc->slaveReadTill(slaveReadTill);
- }
+ // If we cache the executor later, we want to deregister it as it receives notifications
+ // anyway by virtue of being cached.
+ //
+ // If we don't cache the executor later, we are deleting it, so it must be deregistered.
+ //
+ // So, no matter what, deregister the executor.
+ exec->deregisterExec();
+
+ // Caller expects exceptions thrown in certain cases.
+ if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
+ const std::unique_ptr<PlanStageStats> stats(exec->getStats());
+ error() << "Plan executor error during find: " << PlanExecutor::statestr(state)
+ << ", stats: " << Explain::statsToBSON(*stats);
+ uasserted(17144, "Executor error: " + WorkingSetCommon::toStatusString(obj));
+ }
- // TODO document
- if (pq.isExhaust()) {
- curop.debug().exhaust = true;
- }
+ // TODO: Currently, chunk ranges are kept around until all ClientCursors created while the
+ // chunk belonged on this node are gone. Separating chunk lifetime management from
+ // ClientCursor should allow this check to go away.
+ if (!shardingState.getVersion(nss.ns()).isWriteCompatibleWith(shardingVersionAtStart)) {
+        // If the version changed during the query we might be missing some data, and it's safe to
+        // send this, as mongos can resend at this point.
+ throw SendStaleConfigException(nss.ns(),
+ "version changed during initial query",
+ shardingVersionAtStart,
+ shardingState.getVersion(nss.ns()));
+ }
- cc->setPos(numResults);
+ // Fill out curop based on query results. If we have a cursorid, we will fill out curop with
+ // this cursorid later.
+ long long ccId = 0;
+
+ if (shouldSaveCursor(txn, collection, state, exec.get())) {
+ // We won't use the executor until it's getMore'd.
+ exec->saveState();
+
+ // Allocate a new ClientCursor. We don't have to worry about leaking it as it's
+ // inserted into a global map by its ctor.
+ ClientCursor* cc = new ClientCursor(collection->getCursorManager(),
+ exec.release(),
+ nss.ns(),
+ pq.getOptions(),
+ pq.getFilter());
+ ccId = cc->cursorid();
+
+ if (txn->getClient()->isInDirectClient()) {
+ cc->setUnownedRecoveryUnit(txn->recoveryUnit());
+ } else if (state == PlanExecutor::IS_EOF && pq.isTailable()) {
+ // Don't stash the RU for tailable cursors at EOF, let them get a new RU on their
+ // next getMore.
+ } else {
+ // We stash away the RecoveryUnit in the ClientCursor. It's used for subsequent
+ // getMore requests. The calling OpCtx gets a fresh RecoveryUnit.
+ txn->recoveryUnit()->abandonSnapshot();
+ cc->setOwnedRecoveryUnit(txn->releaseRecoveryUnit());
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ invariant(txn->setRecoveryUnit(storageEngine->newRecoveryUnit(),
+ OperationContext::kNotInUnitOfWork) ==
+ OperationContext::kNotInUnitOfWork);
+ }
- // If the query had a time limit, remaining time is "rolled over" to the cursor (for
- // use by future getmore ops).
- cc->setLeftoverMaxTimeMicros(curop.getRemainingMaxTimeMicros());
+ LOG(5) << "caching executor with cursorid " << ccId << " after returning " << numResults
+ << " results" << endl;
- endQueryOp(txn, cc->getExecutor(), dbProfilingLevel, numResults, ccId);
+ // TODO document
+ if (pq.isOplogReplay() && !slaveReadTill.isNull()) {
+ cc->slaveReadTill(slaveReadTill);
}
- else {
- LOG(5) << "Not caching executor but returning " << numResults << " results.\n";
- endQueryOp(txn, exec.get(), dbProfilingLevel, numResults, ccId);
+
+ // TODO document
+ if (pq.isExhaust()) {
+ curop.debug().exhaust = true;
}
- // Add the results from the query into the output buffer.
- result.appendData(bb.buf(), bb.len());
- bb.decouple();
+ cc->setPos(numResults);
- // Fill out the output buffer's header.
- QueryResult::View qr = result.header().view2ptr();
- qr.setCursorId(ccId);
- qr.setResultFlagsToOk();
- qr.msgdata().setOperation(opReply);
- qr.setStartingFrom(0);
- qr.setNReturned(numResults);
+ // If the query had a time limit, remaining time is "rolled over" to the cursor (for
+ // use by future getmore ops).
+ cc->setLeftoverMaxTimeMicros(curop.getRemainingMaxTimeMicros());
- // curop.debug().exhaust is set above.
- return curop.debug().exhaust ? nss.ns() : "";
+ endQueryOp(txn, cc->getExecutor(), dbProfilingLevel, numResults, ccId);
+ } else {
+ LOG(5) << "Not caching executor but returning " << numResults << " results.\n";
+ endQueryOp(txn, exec.get(), dbProfilingLevel, numResults, ccId);
}
+ // Add the results from the query into the output buffer.
+ result.appendData(bb.buf(), bb.len());
+ bb.decouple();
+
+ // Fill out the output buffer's header.
+ QueryResult::View qr = result.header().view2ptr();
+ qr.setCursorId(ccId);
+ qr.setResultFlagsToOk();
+ qr.msgdata().setOperation(opReply);
+ qr.setStartingFrom(0);
+ qr.setNReturned(numResults);
+
+ // curop.debug().exhaust is set above.
+ return curop.debug().exhaust ? nss.ns() : "";
+}
+
} // namespace mongo
diff --git a/src/mongo/db/query/find.h b/src/mongo/db/query/find.h
index 81803b66364..330afc76b90 100644
--- a/src/mongo/db/query/find.h
+++ b/src/mongo/db/query/find.h
@@ -38,132 +38,132 @@
namespace mongo {
- class NamespaceString;
- class OperationContext;
+class NamespaceString;
+class OperationContext;
- class ScopedRecoveryUnitSwapper {
- public:
- ScopedRecoveryUnitSwapper(ClientCursor* cc, OperationContext* txn);
+class ScopedRecoveryUnitSwapper {
+public:
+ ScopedRecoveryUnitSwapper(ClientCursor* cc, OperationContext* txn);
- ~ScopedRecoveryUnitSwapper();
+ ~ScopedRecoveryUnitSwapper();
- /**
- * Dismissing the RU swapper causes it to simply free the recovery unit rather than swapping
- * it back into the ClientCursor.
- */
- void dismiss();
+ /**
+ * Dismissing the RU swapper causes it to simply free the recovery unit rather than swapping
+ * it back into the ClientCursor.
+ */
+ void dismiss();
- private:
- ClientCursor* _cc;
- OperationContext* _txn;
- bool _dismissed;
+private:
+ ClientCursor* _cc;
+ OperationContext* _txn;
+ bool _dismissed;
- std::unique_ptr<RecoveryUnit> _txnPreviousRecoveryUnit;
- OperationContext::RecoveryUnitState _txnPreviousRecoveryUnitState;
- };
+ std::unique_ptr<RecoveryUnit> _txnPreviousRecoveryUnit;
+ OperationContext::RecoveryUnitState _txnPreviousRecoveryUnitState;
+};
- /**
- * Returns true if enough results have been prepared to stop adding more to the first batch.
- *
- * Should be called *after* adding to the result set rather than before.
- */
- bool enoughForFirstBatch(const LiteParsedQuery& pq, int numDocs, int bytesBuffered);
+/**
+ * Returns true if enough results have been prepared to stop adding more to the first batch.
+ *
+ * Should be called *after* adding to the result set rather than before.
+ */
+bool enoughForFirstBatch(const LiteParsedQuery& pq, int numDocs, int bytesBuffered);
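A minimal sketch of the calling convention described above, with 'pq', 'exec', and 'bb' standing in for the parsed query, plan executor, and reply buffer used by the find path:

    BSONObj obj;
    int numResults = 0;
    while (PlanExecutor::ADVANCED == exec->getNext(&obj, NULL)) {
        bb.appendBuf((void*)obj.objdata(), obj.objsize());
        ++numResults;
        // The check happens *after* appending, as the comment requires.
        if (enoughForFirstBatch(pq, numResults, bb.len())) {
            break;
        }
    }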
- /**
- * Returns true if enough results have been prepared to stop adding more to a getMore batch.
- *
- * Should be called *after* adding to the result set rather than before.
- */
- bool enoughForGetMore(int ntoreturn, int numDocs, int bytesBuffered);
+/**
+ * Returns true if enough results have been prepared to stop adding more to a getMore batch.
+ *
+ * Should be called *after* adding to the result set rather than before.
+ */
+bool enoughForGetMore(int ntoreturn, int numDocs, int bytesBuffered);
- /**
- * Whether or not the ClientCursor* is tailable.
- */
- bool isCursorTailable(const ClientCursor* cursor);
+/**
+ * Whether or not the ClientCursor* is tailable.
+ */
+bool isCursorTailable(const ClientCursor* cursor);
- /**
- * Whether or not the ClientCursor* has the awaitData flag set.
- */
- bool isCursorAwaitData(const ClientCursor* cursor);
+/**
+ * Whether or not the ClientCursor* has the awaitData flag set.
+ */
+bool isCursorAwaitData(const ClientCursor* cursor);
- /**
- * Returns true if we should keep a cursor around because we're expecting to return more query
- * results.
- *
- * If false, the caller should close the cursor and indicate this to the client by sending back
- * a cursor ID of 0.
- */
- bool shouldSaveCursor(OperationContext* txn,
- const Collection* collection,
- PlanExecutor::ExecState finalState,
- PlanExecutor* exec);
+/**
+ * Returns true if we should keep a cursor around because we're expecting to return more query
+ * results.
+ *
+ * If false, the caller should close the cursor and indicate this to the client by sending back
+ * a cursor ID of 0.
+ */
+bool shouldSaveCursor(OperationContext* txn,
+ const Collection* collection,
+ PlanExecutor::ExecState finalState,
+ PlanExecutor* exec);
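A sketch of the caller-side contract, mirroring the runQuery changes earlier in this diff (same variable names as used there):

    long long ccId = 0;
    if (shouldSaveCursor(txn, collection, state, exec.get())) {
        // Keep the executor alive inside a ClientCursor for later getMore requests.
        exec->saveState();
        ClientCursor* cc = new ClientCursor(
            collection->getCursorManager(), exec.release(), nss.ns(), pq.getOptions(), pq.getFilter());
        ccId = cc->cursorid();
    }
    // Otherwise the executor is destroyed and the reply carries a cursor id of 0.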
- /**
- * Similar to shouldSaveCursor(), but used in getMore to determine whether we should keep
- * the cursor around for additional getMores().
- *
- * If false, the caller should close the cursor and indicate this to the client by sending back
- * a cursor ID of 0.
- */
- bool shouldSaveCursorGetMore(PlanExecutor::ExecState finalState,
- PlanExecutor* exec,
- bool isTailable);
+/**
+ * Similar to shouldSaveCursor(), but used in getMore to determine whether we should keep
+ * the cursor around for additional getMores().
+ *
+ * If false, the caller should close the cursor and indicate this to the client by sending back
+ * a cursor ID of 0.
+ */
+bool shouldSaveCursorGetMore(PlanExecutor::ExecState finalState,
+ PlanExecutor* exec,
+ bool isTailable);
- /**
- * Fills out the CurOp for "txn" with information about this query.
- */
- void beginQueryOp(OperationContext* txn,
- const NamespaceString& nss,
- const BSONObj& queryObj,
- int ntoreturn,
- int ntoskip);
+/**
+ * Fills out the CurOp for "txn" with information about this query.
+ */
+void beginQueryOp(OperationContext* txn,
+ const NamespaceString& nss,
+ const BSONObj& queryObj,
+ int ntoreturn,
+ int ntoskip);
- /**
- * Fills out CurOp for "txn" with information regarding this query's execution.
- *
- * Uses explain functionality to extract stats from 'exec'.
- *
- * The database profiling level, 'dbProfilingLevel', is used to conditionalize whether or not we
- * do expensive stats gathering.
- */
- void endQueryOp(OperationContext* txn,
- PlanExecutor* exec,
- int dbProfilingLevel,
- int numResults,
- CursorId cursorId);
+/**
+ * Fills out CurOp for "txn" with information regarding this query's execution.
+ *
+ * Uses explain functionality to extract stats from 'exec'.
+ *
+ * The database profiling level, 'dbProfilingLevel', is used to conditionalize whether or not we
+ * do expensive stats gathering.
+ */
+void endQueryOp(OperationContext* txn,
+ PlanExecutor* exec,
+ int dbProfilingLevel,
+ int numResults,
+ CursorId cursorId);
- /**
- * Constructs a PlanExecutor for a query with the oplogReplay option set to true,
- * for the query 'cq' over the collection 'collection'. The PlanExecutor will
- * wrap a singleton OplogStart stage.
- *
- * The oplog start finding hack requires that 'cq' has a $gt or $gte predicate over
- * a field named 'ts'.
- *
- * On success, caller takes ownership of *execOut.
- */
- Status getOplogStartHack(OperationContext* txn,
- Collection* collection,
- CanonicalQuery* cq,
- PlanExecutor** execOut);
+/**
+ * Constructs a PlanExecutor for a query with the oplogReplay option set to true,
+ * for the query 'cq' over the collection 'collection'. The PlanExecutor will
+ * wrap a singleton OplogStart stage.
+ *
+ * The oplog start finding hack requires that 'cq' has a $gt or $gte predicate over
+ * a field named 'ts'.
+ *
+ * On success, caller takes ownership of *execOut.
+ */
+Status getOplogStartHack(OperationContext* txn,
+ Collection* collection,
+ CanonicalQuery* cq,
+ PlanExecutor** execOut);
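For illustration, a predicate shape that satisfies the 'ts' requirement noted above (timestamp values are hypothetical):

    // An oplog-replay query must bound 'ts' with $gt or $gte; extended JSON shown.
    BSONObj oplogFilter = fromjson(
        "{ ts: { $gte: { $timestamp: { t: 1436800000, i: 1 } } } }");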
- /**
- * Called from the getMore entry point in ops/query.cpp.
- */
- QueryResult::View getMore(OperationContext* txn,
- const char* ns,
- int ntoreturn,
- long long cursorid,
- int pass,
- bool& exhaust,
- bool* isCursorAuthorized);
+/**
+ * Called from the getMore entry point in ops/query.cpp.
+ */
+QueryResult::View getMore(OperationContext* txn,
+ const char* ns,
+ int ntoreturn,
+ long long cursorid,
+ int pass,
+ bool& exhaust,
+ bool* isCursorAuthorized);
- /**
- * Run the query 'q' and place the result in 'result'.
- */
- std::string runQuery(OperationContext* txn,
- QueryMessage& q,
- const NamespaceString& ns,
- Message &result);
+/**
+ * Run the query 'q' and place the result in 'result'.
+ */
+std::string runQuery(OperationContext* txn,
+ QueryMessage& q,
+ const NamespaceString& ns,
+ Message& result);
} // namespace mongo
diff --git a/src/mongo/db/query/find_and_modify_request.cpp b/src/mongo/db/query/find_and_modify_request.cpp
index 1b960198d1a..2db1c61af80 100644
--- a/src/mongo/db/query/find_and_modify_request.cpp
+++ b/src/mongo/db/query/find_and_modify_request.cpp
@@ -37,175 +37,166 @@
namespace mongo {
namespace {
- const char kCmdName[] = "findAndModify";
- const char kQueryField[] = "query";
- const char kSortField[] = "sort";
- const char kRemoveField[] = "remove";
- const char kUpdateField[] = "update";
- const char kNewField[] = "new";
- const char kFieldProjectionField[] = "fields";
- const char kUpsertField[] = "upsert";
- const char kWriteConcernField[] = "writeConcern";
-
-} // unnamed namespace
-
- FindAndModifyRequest::FindAndModifyRequest(NamespaceString fullNs,
- BSONObj query,
- BSONObj updateObj):
- _ns(std::move(fullNs)),
- _query(query.getOwned()),
- _updateObj(updateObj.getOwned()),
- _isRemove(false) {
- }
+const char kCmdName[] = "findAndModify";
+const char kQueryField[] = "query";
+const char kSortField[] = "sort";
+const char kRemoveField[] = "remove";
+const char kUpdateField[] = "update";
+const char kNewField[] = "new";
+const char kFieldProjectionField[] = "fields";
+const char kUpsertField[] = "upsert";
+const char kWriteConcernField[] = "writeConcern";
+
+} // unnamed namespace
+
+FindAndModifyRequest::FindAndModifyRequest(NamespaceString fullNs, BSONObj query, BSONObj updateObj)
+ : _ns(std::move(fullNs)),
+ _query(query.getOwned()),
+ _updateObj(updateObj.getOwned()),
+ _isRemove(false) {}
+
+FindAndModifyRequest FindAndModifyRequest::makeUpdate(NamespaceString fullNs,
+ BSONObj query,
+ BSONObj updateObj) {
+ return FindAndModifyRequest(fullNs, query, updateObj);
+}
- FindAndModifyRequest FindAndModifyRequest::makeUpdate(NamespaceString fullNs,
- BSONObj query,
- BSONObj updateObj) {
- return FindAndModifyRequest(fullNs, query, updateObj);
- }
+FindAndModifyRequest FindAndModifyRequest::makeRemove(NamespaceString fullNs, BSONObj query) {
+ FindAndModifyRequest request(fullNs, query, BSONObj());
+ request._isRemove = true;
+ return request;
+}
- FindAndModifyRequest FindAndModifyRequest::makeRemove(NamespaceString fullNs,
- BSONObj query) {
- FindAndModifyRequest request(fullNs, query, BSONObj());
- request._isRemove = true;
- return request;
- }
+BSONObj FindAndModifyRequest::toBSON() const {
+ BSONObjBuilder builder;
- BSONObj FindAndModifyRequest::toBSON() const {
- BSONObjBuilder builder;
+ builder.append(kCmdName, _ns.coll());
+ builder.append(kQueryField, _query);
- builder.append(kCmdName, _ns.coll());
- builder.append(kQueryField, _query);
+ if (_isRemove) {
+ builder.append(kRemoveField, true);
+ } else {
+ builder.append(kUpdateField, _updateObj);
- if (_isRemove) {
- builder.append(kRemoveField, true);
+ if (_isUpsert) {
+ builder.append(kUpsertField, _isUpsert.get());
}
- else {
- builder.append(kUpdateField, _updateObj);
+ }
- if (_isUpsert) {
- builder.append(kUpsertField, _isUpsert.get());
- }
- }
+ if (_fieldProjection) {
+ builder.append(kFieldProjectionField, _fieldProjection.get());
+ }
- if (_fieldProjection) {
- builder.append(kFieldProjectionField, _fieldProjection.get());
- }
+ if (_sort) {
+ builder.append(kSortField, _sort.get());
+ }
- if (_sort) {
- builder.append(kSortField, _sort.get());
- }
+ if (_shouldReturnNew) {
+ builder.append(kNewField, _shouldReturnNew.get());
+ }
- if (_shouldReturnNew) {
- builder.append(kNewField, _shouldReturnNew.get());
- }
+ if (_writeConcern) {
+ builder.append(kWriteConcernField, _writeConcern->toBSON());
+ }
- if (_writeConcern) {
- builder.append(kWriteConcernField, _writeConcern->toBSON());
- }
+ return builder.obj();
+}
+
+StatusWith<FindAndModifyRequest> FindAndModifyRequest::parseFromBSON(NamespaceString fullNs,
+ const BSONObj& cmdObj) {
+ BSONObj query = cmdObj.getObjectField(kQueryField);
+ BSONObj fields = cmdObj.getObjectField(kFieldProjectionField);
+ BSONObj updateObj = cmdObj.getObjectField(kUpdateField);
+ BSONObj sort = cmdObj.getObjectField(kSortField);
+ bool shouldReturnNew = cmdObj[kNewField].trueValue();
+ bool isUpsert = cmdObj[kUpsertField].trueValue();
+ bool isRemove = cmdObj[kRemoveField].trueValue();
+ bool isUpdate = cmdObj.hasField(kUpdateField);
- return builder.obj();
+ if (!isRemove && !isUpdate) {
+ return {ErrorCodes::FailedToParse, "Either an update or remove=true must be specified"};
}
- StatusWith<FindAndModifyRequest> FindAndModifyRequest::parseFromBSON(NamespaceString fullNs,
- const BSONObj& cmdObj) {
- BSONObj query = cmdObj.getObjectField(kQueryField);
- BSONObj fields = cmdObj.getObjectField(kFieldProjectionField);
- BSONObj updateObj = cmdObj.getObjectField(kUpdateField);
- BSONObj sort = cmdObj.getObjectField(kSortField);
- bool shouldReturnNew = cmdObj[kNewField].trueValue();
- bool isUpsert = cmdObj[kUpsertField].trueValue();
- bool isRemove = cmdObj[kRemoveField].trueValue();
- bool isUpdate = cmdObj.hasField(kUpdateField);
-
- if (!isRemove && !isUpdate) {
- return {ErrorCodes::FailedToParse,
- "Either an update or remove=true must be specified"};
+ if (isRemove) {
+ if (isUpdate) {
+ return {ErrorCodes::FailedToParse, "Cannot specify both an update and remove=true"};
}
- if (isRemove) {
- if (isUpdate) {
- return {ErrorCodes::FailedToParse,
- "Cannot specify both an update and remove=true"};
- }
-
- if (isUpsert) {
- return {ErrorCodes::FailedToParse,
- "Cannot specify both upsert=true and remove=true"};
- }
+ if (isUpsert) {
+ return {ErrorCodes::FailedToParse, "Cannot specify both upsert=true and remove=true"};
+ }
- if (shouldReturnNew) {
- return {ErrorCodes::FailedToParse,
+ if (shouldReturnNew) {
+ return {ErrorCodes::FailedToParse,
"Cannot specify both new=true and remove=true;"
" 'remove' always returns the deleted document"};
- }
}
+ }
- FindAndModifyRequest request(std::move(fullNs), query, updateObj);
- request._isRemove = isRemove;
- request.setFieldProjection(fields);
- request.setSort(sort);
-
- if (!isRemove) {
- request.setShouldReturnNew(shouldReturnNew);
- request.setUpsert(isUpsert);
- }
+ FindAndModifyRequest request(std::move(fullNs), query, updateObj);
+ request._isRemove = isRemove;
+ request.setFieldProjection(fields);
+ request.setSort(sort);
- return request;
+ if (!isRemove) {
+ request.setShouldReturnNew(shouldReturnNew);
+ request.setUpsert(isUpsert);
}
- void FindAndModifyRequest::setFieldProjection(BSONObj fields) {
- _fieldProjection = fields.getOwned();
- }
+ return request;
+}
- void FindAndModifyRequest::setSort(BSONObj sort) {
- _sort = sort.getOwned();
- }
+void FindAndModifyRequest::setFieldProjection(BSONObj fields) {
+ _fieldProjection = fields.getOwned();
+}
- void FindAndModifyRequest::setShouldReturnNew(bool shouldReturnNew) {
- dassert(!_isRemove);
- _shouldReturnNew = shouldReturnNew;
- }
+void FindAndModifyRequest::setSort(BSONObj sort) {
+ _sort = sort.getOwned();
+}
- void FindAndModifyRequest::setUpsert(bool upsert) {
- dassert(!_isRemove);
- _isUpsert = upsert;
- }
+void FindAndModifyRequest::setShouldReturnNew(bool shouldReturnNew) {
+ dassert(!_isRemove);
+ _shouldReturnNew = shouldReturnNew;
+}
- void FindAndModifyRequest::setWriteConcern(WriteConcernOptions writeConcern) {
- _writeConcern = std::move(writeConcern);
- }
+void FindAndModifyRequest::setUpsert(bool upsert) {
+ dassert(!_isRemove);
+ _isUpsert = upsert;
+}
- const NamespaceString& FindAndModifyRequest::getNamespaceString() const {
- return _ns;
- }
+void FindAndModifyRequest::setWriteConcern(WriteConcernOptions writeConcern) {
+ _writeConcern = std::move(writeConcern);
+}
- BSONObj FindAndModifyRequest::getQuery() const {
- return _query;
- }
+const NamespaceString& FindAndModifyRequest::getNamespaceString() const {
+ return _ns;
+}
- BSONObj FindAndModifyRequest::getFields() const {
- return _fieldProjection.value_or(BSONObj());
- }
+BSONObj FindAndModifyRequest::getQuery() const {
+ return _query;
+}
- BSONObj FindAndModifyRequest::getUpdateObj() const {
- return _updateObj;
- }
+BSONObj FindAndModifyRequest::getFields() const {
+ return _fieldProjection.value_or(BSONObj());
+}
- BSONObj FindAndModifyRequest::getSort() const {
- return _sort.value_or(BSONObj());
- }
+BSONObj FindAndModifyRequest::getUpdateObj() const {
+ return _updateObj;
+}
- bool FindAndModifyRequest::shouldReturnNew() const {
- return _shouldReturnNew.value_or(false);
- }
+BSONObj FindAndModifyRequest::getSort() const {
+ return _sort.value_or(BSONObj());
+}
- bool FindAndModifyRequest::isUpsert() const {
- return _isUpsert.value_or(false);
- }
+bool FindAndModifyRequest::shouldReturnNew() const {
+ return _shouldReturnNew.value_or(false);
+}
- bool FindAndModifyRequest::isRemove() const {
- return _isRemove;
- }
+bool FindAndModifyRequest::isUpsert() const {
+ return _isUpsert.value_or(false);
+}
+bool FindAndModifyRequest::isRemove() const {
+ return _isRemove;
+}
}
diff --git a/src/mongo/db/query/find_and_modify_request.h b/src/mongo/db/query/find_and_modify_request.h
index 353869abee6..b4252c30805 100644
--- a/src/mongo/db/query/find_and_modify_request.h
+++ b/src/mongo/db/query/find_and_modify_request.h
@@ -36,126 +36,123 @@
namespace mongo {
- template <typename T> class StatusWith;
+template <typename T>
+class StatusWith;
+
+/**
+ * Represents the user-supplied options to the findAndModify command. Note that this
+ * does not offer round trip preservation. For example, for the case where
+ * output = parseBSON(input).toBSON(), 'output' is not guaranteed to be equal to 'input'.
+ * However, the semantic meaning of 'output' will be the same as 'input'.
+ *
+ * The BSONObj members contained within this struct are owned objects.
+ */
+class FindAndModifyRequest {
+public:
+ /**
+ * Creates a new instance of an 'update' type findAndModify request.
+ */
+ static FindAndModifyRequest makeUpdate(NamespaceString fullNs,
+ BSONObj query,
+ BSONObj updateObj);
/**
- * Represents the user-supplied options to the findAndModify command. Note that this
- * does not offer round trip preservation. For example, for the case where
- * output = parseBSON(input).toBSON(), 'output' is not guaranteed to be equal to 'input'.
- * However, the semantic meaning of 'output' will be the same with 'input'.
+ * Creates a new instance of a 'remove' type findAndModify request.
+ */
+ static FindAndModifyRequest makeRemove(NamespaceString fullNs, BSONObj query);
+
+ /**
+ * Create a new instance of FindAndModifyRequest from a valid BSONObj.
+ * Returns an error if the BSONObj is malformed.
+ * Format:
+ *
+ * {
+ * findAndModify: <collection-name>,
+ * query: <document>,
+ * sort: <document>,
+ * remove: <boolean>,
+ * update: <document>,
+ * new: <boolean>,
+ * fields: <document>,
+ * upsert: <boolean>
+ * }
*
- * The BSONObj members contained within this struct are owned objects.
+ * Note: does not parse the writeConcern field or the findAndModify field.
+ */
+ static StatusWith<FindAndModifyRequest> parseFromBSON(NamespaceString fullNs,
+ const BSONObj& cmdObj);
+
+ /**
+ * Serializes this object into a BSON representation. Fields that are not
+ * set will not be part of the serialized object.
+ */
+ BSONObj toBSON() const;
+
+ const NamespaceString& getNamespaceString() const;
+ BSONObj getQuery() const;
+ BSONObj getFields() const;
+ BSONObj getUpdateObj() const;
+ BSONObj getSort() const;
+ bool shouldReturnNew() const;
+ bool isUpsert() const;
+ bool isRemove() const;
+
+ // Not implemented. Use extractWriteConcern() to get the setting instead.
+ WriteConcernOptions getWriteConcern() const;
+
+ //
+ // Setters for update type request only.
+ //
+
+ /**
+ * If shouldReturnNew is true, the findAndModify response should return the document
+ * after the modification was applied if the query matched a document. Otherwise,
+ * it will return the matched document before the modification.
*/
- class FindAndModifyRequest {
- public:
-
- /**
- * Creates a new instance of an 'update' type findAndModify request.
- */
- static FindAndModifyRequest makeUpdate(NamespaceString fullNs,
- BSONObj query,
- BSONObj updateObj);
-
- /**
- * Creates a new instance of an 'remove' type findAndModify request.
- */
- static FindAndModifyRequest makeRemove(NamespaceString fullNs,
- BSONObj query);
-
- /**
- * Create a new instance of FindAndModifyRequest from a valid BSONObj.
- * Returns an error if the BSONObj is malformed.
- * Format:
- *
- * {
- * findAndModify: <collection-name>,
- * query: <document>,
- * sort: <document>,
- * remove: <boolean>,
- * update: <document>,
- * new: <boolean>,
- * fields: <document>,
- * upsert: <boolean>
- * }
- *
- * Note: does not parse the writeConcern field or the findAndModify field.
- */
- static StatusWith<FindAndModifyRequest> parseFromBSON(NamespaceString fullNs,
- const BSONObj& cmdObj);
-
- /**
- * Serializes this object into a BSON representation. Fields that are not
- * set will not be part of the the serialized object.
- */
- BSONObj toBSON() const;
-
- const NamespaceString& getNamespaceString() const;
- BSONObj getQuery() const;
- BSONObj getFields() const;
- BSONObj getUpdateObj() const;
- BSONObj getSort() const;
- bool shouldReturnNew() const;
- bool isUpsert() const;
- bool isRemove() const;
-
- // Not implemented. Use extractWriteConcern() to get the setting instead.
- WriteConcernOptions getWriteConcern() const;
-
- //
- // Setters for update type request only.
- //
-
- /**
- * If shouldReturnNew is new, the findAndModify response should return the document
- * after the modification was applied if the query matched a document. Otherwise,
- * it will return the matched document before the modification.
- */
- void setShouldReturnNew(bool shouldReturnNew);
-
- void setUpsert(bool upsert);
-
- //
- // Setters for optional parameters
- //
-
- /**
- * Specifies the field to project on the matched document.
- */
- void setFieldProjection(BSONObj fields);
-
- /**
- * Sets the sort order for the query. In cases where the query yields multiple matches,
- * only the first document based on the sort order will be modified/removed.
- */
- void setSort(BSONObj sort);
-
- /**
- * Sets the write concern for this request.
- */
- void setWriteConcern(WriteConcernOptions writeConcern);
-
- private:
- /**
- * Creates a new FindAndModifyRequest with the required fields.
- */
- FindAndModifyRequest(NamespaceString fullNs,
- BSONObj query,
- BSONObj updateObj);
-
- // Required fields
- const NamespaceString _ns;
- const BSONObj _query;
-
- // Required for updates
- const BSONObj _updateObj;
-
- boost::optional<bool> _isUpsert;
- boost::optional<BSONObj> _fieldProjection;
- boost::optional<BSONObj> _sort;
- boost::optional<bool> _shouldReturnNew;
- boost::optional<WriteConcernOptions> _writeConcern;
-
- // Flag used internally to differentiate whether this is an update or remove type request.
- bool _isRemove;
- };
+ void setShouldReturnNew(bool shouldReturnNew);
+
+ void setUpsert(bool upsert);
+
+ //
+ // Setters for optional parameters
+ //
+
+ /**
+ * Specifies the field to project on the matched document.
+ */
+ void setFieldProjection(BSONObj fields);
+
+ /**
+ * Sets the sort order for the query. In cases where the query yields multiple matches,
+ * only the first document based on the sort order will be modified/removed.
+ */
+ void setSort(BSONObj sort);
+
+ /**
+ * Sets the write concern for this request.
+ */
+ void setWriteConcern(WriteConcernOptions writeConcern);
+
+private:
+ /**
+ * Creates a new FindAndModifyRequest with the required fields.
+ */
+ FindAndModifyRequest(NamespaceString fullNs, BSONObj query, BSONObj updateObj);
+
+ // Required fields
+ const NamespaceString _ns;
+ const BSONObj _query;
+
+ // Required for updates
+ const BSONObj _updateObj;
+
+ boost::optional<bool> _isUpsert;
+ boost::optional<BSONObj> _fieldProjection;
+ boost::optional<BSONObj> _sort;
+ boost::optional<bool> _shouldReturnNew;
+ boost::optional<WriteConcernOptions> _writeConcern;
+
+ // Flag used internally to differentiate whether this is an update or remove type request.
+ bool _isRemove;
+};
}
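To make the round-trip caveat in the class comment concrete, a small usage sketch built from the parse and serialize calls declared above (namespace and values are arbitrary):

    BSONObj input = fromjson("{ findAndModify: 'user', query: { x: 1 }, remove: true }");
    auto swRequest = FindAndModifyRequest::parseFromBSON(NamespaceString("test.user"), input);
    invariant(swRequest.isOK());
    // Semantically equivalent to 'input', though not guaranteed to be byte-identical.
    BSONObj output = swRequest.getValue().toBSON();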
diff --git a/src/mongo/db/query/find_and_modify_request_test.cpp b/src/mongo/db/query/find_and_modify_request_test.cpp
index bde2c48ac2e..27490715e02 100644
--- a/src/mongo/db/query/find_and_modify_request_test.cpp
+++ b/src/mongo/db/query/find_and_modify_request_test.cpp
@@ -35,173 +35,155 @@
namespace mongo {
namespace {
- TEST(FindAndModifyRequest, BasicUpdate) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
-
- BSONObj expectedObj(fromjson(R"json({
+TEST(FindAndModifyRequest, BasicUpdate) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, UpdateWithUpsert) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
- request.setUpsert(true);
+TEST(FindAndModifyRequest, UpdateWithUpsert) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+ request.setUpsert(true);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
upsert: true
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, UpdateWithUpsertFalse) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
- request.setUpsert(false);
+TEST(FindAndModifyRequest, UpdateWithUpsertFalse) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+ request.setUpsert(false);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
upsert: false
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, UpdateWithProjection) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
- const BSONObj field(BSON("z" << 1));
+TEST(FindAndModifyRequest, UpdateWithProjection) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
+ const BSONObj field(BSON("z" << 1));
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
- request.setFieldProjection(field);
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+ request.setFieldProjection(field);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
fields: { z: 1 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, UpdateWithNewTrue) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
+TEST(FindAndModifyRequest, UpdateWithNewTrue) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
- request.setShouldReturnNew(true);
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+ request.setShouldReturnNew(true);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
new: true
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, UpdateWithNewFalse) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
+TEST(FindAndModifyRequest, UpdateWithNewFalse) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
- request.setShouldReturnNew(false);
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+ request.setShouldReturnNew(false);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
new: false
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, UpdateWithSort) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
- const BSONObj sort(BSON("z" << -1));
+TEST(FindAndModifyRequest, UpdateWithSort) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
+ const BSONObj sort(BSON("z" << -1));
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
- request.setSort(sort);
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+ request.setSort(sort);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
sort: { z: -1 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, UpdateWithWriteConcern) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
- const WriteConcernOptions writeConcern(2, WriteConcernOptions::FSYNC, 150);
+TEST(FindAndModifyRequest, UpdateWithWriteConcern) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
+ const WriteConcernOptions writeConcern(2, WriteConcernOptions::FSYNC, 150);
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
- request.setWriteConcern(writeConcern);
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+ request.setWriteConcern(writeConcern);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
writeConcern: { w: 2, fsync: true, wtimeout: 150 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
-
- TEST(FindAndModifyRequest, UpdateWithFullSpec) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
- const BSONObj sort(BSON("z" << -1));
- const BSONObj field(BSON("x" << 1 << "y" << 1));
- const WriteConcernOptions writeConcern(2, WriteConcernOptions::FSYNC, 150);
-
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
- request.setFieldProjection(field);
- request.setShouldReturnNew(true);
- request.setSort(sort);
- request.setWriteConcern(writeConcern);
- request.setUpsert(true);
-
- BSONObj expectedObj(fromjson(R"json({
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
+
+TEST(FindAndModifyRequest, UpdateWithFullSpec) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
+ const BSONObj sort(BSON("z" << -1));
+ const BSONObj field(BSON("x" << 1 << "y" << 1));
+ const WriteConcernOptions writeConcern(2, WriteConcernOptions::FSYNC, 150);
+
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+ request.setFieldProjection(field);
+ request.setShouldReturnNew(true);
+ request.setSort(sort);
+ request.setWriteConcern(writeConcern);
+ request.setUpsert(true);
+
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
@@ -212,85 +194,85 @@ namespace {
writeConcern: { w: 2, fsync: true, wtimeout: 150 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, BasicRemove) {
- const BSONObj query(BSON("x" << 1));
- auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
+TEST(FindAndModifyRequest, BasicRemove) {
+ const BSONObj query(BSON("x" << 1));
+ auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
remove: true
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, RemoveWithProjection) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj field(BSON("z" << 1));
+TEST(FindAndModifyRequest, RemoveWithProjection) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj field(BSON("z" << 1));
- auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
- request.setFieldProjection(field);
+ auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
+ request.setFieldProjection(field);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
remove: true,
fields: { z: 1 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, RemoveWithSort) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj sort(BSON("z" << -1));
+TEST(FindAndModifyRequest, RemoveWithSort) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj sort(BSON("z" << -1));
- auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
- request.setSort(sort);
+ auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
+ request.setSort(sort);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
remove: true,
sort: { z: -1 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, RemoveWithWriteConcern) {
- const BSONObj query(BSON("x" << 1));
- const WriteConcernOptions writeConcern(2, WriteConcernOptions::FSYNC, 150);
+TEST(FindAndModifyRequest, RemoveWithWriteConcern) {
+ const BSONObj query(BSON("x" << 1));
+ const WriteConcernOptions writeConcern(2, WriteConcernOptions::FSYNC, 150);
- auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
- request.setWriteConcern(writeConcern);
+ auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
+ request.setWriteConcern(writeConcern);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
remove: true,
writeConcern: { w: 2, fsync: true, wtimeout: 150 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, RemoveWithFullSpec) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj sort(BSON("z" << -1));
- const BSONObj field(BSON("x" << 1 << "y" << 1));
- const WriteConcernOptions writeConcern(2, WriteConcernOptions::FSYNC, 150);
+TEST(FindAndModifyRequest, RemoveWithFullSpec) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj sort(BSON("z" << -1));
+ const BSONObj field(BSON("x" << 1 << "y" << 1));
+ const WriteConcernOptions writeConcern(2, WriteConcernOptions::FSYNC, 150);
- auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
- request.setFieldProjection(field);
- request.setSort(sort);
- request.setWriteConcern(writeConcern);
+ auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
+ request.setFieldProjection(field);
+ request.setSort(sort);
+ request.setWriteConcern(writeConcern);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
remove: true,
@@ -299,31 +281,31 @@ namespace {
writeConcern: { w: 2, fsync: true, wtimeout: 150 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, ParseWithUpdateOnlyRequiredFields) {
- BSONObj cmdObj(fromjson(R"json({
+TEST(FindAndModifyRequest, ParseWithUpdateOnlyRequiredFields) {
+ BSONObj cmdObj(fromjson(R"json({
query: { x: 1 },
update: { y: 1 }
})json"));
- auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
- ASSERT_OK(parseStatus.getStatus());
-
- auto request = parseStatus.getValue();
- ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
- ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
- ASSERT_EQUALS(BSON("y" << 1), request.getUpdateObj());
- ASSERT_EQUALS(false, request.isUpsert());
- ASSERT_EQUALS(false, request.isRemove());
- ASSERT_EQUALS(BSONObj(), request.getFields());
- ASSERT_EQUALS(BSONObj(), request.getSort());
- ASSERT_EQUALS(false, request.shouldReturnNew());
- }
-
- TEST(FindAndModifyRequest, ParseWithUpdateFullSpec) {
- BSONObj cmdObj(fromjson(R"json({
+ auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
+ ASSERT_OK(parseStatus.getStatus());
+
+ auto request = parseStatus.getValue();
+ ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
+ ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
+ ASSERT_EQUALS(BSON("y" << 1), request.getUpdateObj());
+ ASSERT_EQUALS(false, request.isUpsert());
+ ASSERT_EQUALS(false, request.isRemove());
+ ASSERT_EQUALS(BSONObj(), request.getFields());
+ ASSERT_EQUALS(BSONObj(), request.getSort());
+ ASSERT_EQUALS(false, request.shouldReturnNew());
+}
+
+TEST(FindAndModifyRequest, ParseWithUpdateFullSpec) {
+ BSONObj cmdObj(fromjson(R"json({
query: { x: 1 },
update: { y: 1 },
upsert: true,
@@ -332,42 +314,42 @@ namespace {
new: true
})json"));
- auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
- ASSERT_OK(parseStatus.getStatus());
-
- auto request = parseStatus.getValue();
- ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
- ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
- ASSERT_EQUALS(BSON("y" << 1), request.getUpdateObj());
- ASSERT_EQUALS(true, request.isUpsert());
- ASSERT_EQUALS(false, request.isRemove());
- ASSERT_EQUALS(BSON("x" << 1 << "y" << 1), request.getFields());
- ASSERT_EQUALS(BSON("z" << -1), request.getSort());
- ASSERT_EQUALS(true, request.shouldReturnNew());
- }
-
- TEST(FindAndModifyRequest, ParseWithRemoveOnlyRequiredFields) {
- BSONObj cmdObj(fromjson(R"json({
+ auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
+ ASSERT_OK(parseStatus.getStatus());
+
+ auto request = parseStatus.getValue();
+ ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
+ ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
+ ASSERT_EQUALS(BSON("y" << 1), request.getUpdateObj());
+ ASSERT_EQUALS(true, request.isUpsert());
+ ASSERT_EQUALS(false, request.isRemove());
+ ASSERT_EQUALS(BSON("x" << 1 << "y" << 1), request.getFields());
+ ASSERT_EQUALS(BSON("z" << -1), request.getSort());
+ ASSERT_EQUALS(true, request.shouldReturnNew());
+}
+
+TEST(FindAndModifyRequest, ParseWithRemoveOnlyRequiredFields) {
+ BSONObj cmdObj(fromjson(R"json({
query: { x: 1 },
remove: true
})json"));
- auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
- ASSERT_OK(parseStatus.getStatus());
-
- auto request = parseStatus.getValue();
- ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
- ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
- ASSERT_EQUALS(BSONObj(), request.getUpdateObj());
- ASSERT_EQUALS(false, request.isUpsert());
- ASSERT_EQUALS(true, request.isRemove());
- ASSERT_EQUALS(BSONObj(), request.getFields());
- ASSERT_EQUALS(BSONObj(), request.getSort());
- ASSERT_EQUALS(false, request.shouldReturnNew());
- }
-
- TEST(FindAndModifyRequest, ParseWithRemoveFullSpec) {
- BSONObj cmdObj(fromjson(R"json({
+ auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
+ ASSERT_OK(parseStatus.getStatus());
+
+ auto request = parseStatus.getValue();
+ ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
+ ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
+ ASSERT_EQUALS(BSONObj(), request.getUpdateObj());
+ ASSERT_EQUALS(false, request.isUpsert());
+ ASSERT_EQUALS(true, request.isRemove());
+ ASSERT_EQUALS(BSONObj(), request.getFields());
+ ASSERT_EQUALS(BSONObj(), request.getSort());
+ ASSERT_EQUALS(false, request.shouldReturnNew());
+}
+
+TEST(FindAndModifyRequest, ParseWithRemoveFullSpec) {
+ BSONObj cmdObj(fromjson(R"json({
query: { x: 1 },
remove: true,
fields: { x: 1, y: 1 },
@@ -375,65 +357,65 @@ namespace {
new: false
})json"));
- auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
- ASSERT_OK(parseStatus.getStatus());
-
- auto request = parseStatus.getValue();
- ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
- ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
- ASSERT_EQUALS(BSONObj(), request.getUpdateObj());
- ASSERT_EQUALS(false, request.isUpsert());
- ASSERT_EQUALS(true, request.isRemove());
- ASSERT_EQUALS(BSON("x" << 1 << "y" << 1), request.getFields());
- ASSERT_EQUALS(BSON("z" << -1), request.getSort());
- ASSERT_EQUALS(false, request.shouldReturnNew());
- }
-
- TEST(FindAndModifyRequest, ParseWithIncompleteSpec) {
- BSONObj cmdObj(fromjson(R"json({
+ auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
+ ASSERT_OK(parseStatus.getStatus());
+
+ auto request = parseStatus.getValue();
+ ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
+ ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
+ ASSERT_EQUALS(BSONObj(), request.getUpdateObj());
+ ASSERT_EQUALS(false, request.isUpsert());
+ ASSERT_EQUALS(true, request.isRemove());
+ ASSERT_EQUALS(BSON("x" << 1 << "y" << 1), request.getFields());
+ ASSERT_EQUALS(BSON("z" << -1), request.getSort());
+ ASSERT_EQUALS(false, request.shouldReturnNew());
+}
+
+TEST(FindAndModifyRequest, ParseWithIncompleteSpec) {
+ BSONObj cmdObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 }
})json"));
- auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
- ASSERT_NOT_OK(parseStatus.getStatus());
- }
+ auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
+ ASSERT_NOT_OK(parseStatus.getStatus());
+}
- TEST(FindAndModifyRequest, ParseWithAmbiguousUpdateRemove) {
- BSONObj cmdObj(fromjson(R"json({
+TEST(FindAndModifyRequest, ParseWithAmbiguousUpdateRemove) {
+ BSONObj cmdObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
remove: true
})json"));
- auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
- ASSERT_NOT_OK(parseStatus.getStatus());
- }
+ auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
+ ASSERT_NOT_OK(parseStatus.getStatus());
+}
- TEST(FindAndModifyRequest, ParseWithRemovePlusUpsert) {
- BSONObj cmdObj(fromjson(R"json({
+TEST(FindAndModifyRequest, ParseWithRemovePlusUpsert) {
+ BSONObj cmdObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
remove: true,
upsert: true
})json"));
- auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
- ASSERT_NOT_OK(parseStatus.getStatus());
- }
+ auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
+ ASSERT_NOT_OK(parseStatus.getStatus());
+}
- TEST(FindAndModifyRequest, ParseWithRemoveAndReturnNew) {
- BSONObj cmdObj(fromjson(R"json({
+TEST(FindAndModifyRequest, ParseWithRemoveAndReturnNew) {
+ BSONObj cmdObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
remove: true,
new: true
})json"));
- auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
- ASSERT_NOT_OK(parseStatus.getStatus());
- }
+ auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
+ ASSERT_NOT_OK(parseStatus.getStatus());
+}
-} // unnamed namespace
-} // namespace mongo
+} // unnamed namespace
+} // namespace mongo
diff --git a/src/mongo/db/query/find_constants.h b/src/mongo/db/query/find_constants.h
index e6333798682..68b515a0499 100644
--- a/src/mongo/db/query/find_constants.h
+++ b/src/mongo/db/query/find_constants.h
@@ -28,8 +28,8 @@
namespace mongo {
- // We cut off further objects once we cross this threshold; thus, you might get
- // a little bit more than this, it is a threshold rather than a limit.
- extern const int32_t MaxBytesToReturnToClientAtOnce;
+// We cut off further objects once we cross this threshold; thus, you might get
+// a little bit more than this; it is a threshold rather than a limit.
+extern const int32_t MaxBytesToReturnToClientAtOnce;
} // namespace mongo
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 0f5044273ba..189910bbae1 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -80,1439 +80,1381 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::string;
- using std::vector;
-
- // static
- void filterAllowedIndexEntries(const AllowedIndices& allowedIndices,
- std::vector<IndexEntry>* indexEntries) {
- invariant(indexEntries);
-
- // Filter index entries
- // Check BSON objects in AllowedIndices::_indexKeyPatterns against IndexEntry::keyPattern.
- // Removes IndexEntrys that do not match _indexKeyPatterns.
- std::vector<IndexEntry> temp;
- for (std::vector<IndexEntry>::const_iterator i = indexEntries->begin();
- i != indexEntries->end(); ++i) {
- const IndexEntry& indexEntry = *i;
- for (std::vector<BSONObj>::const_iterator j = allowedIndices.indexKeyPatterns.begin();
- j != allowedIndices.indexKeyPatterns.end(); ++j) {
- const BSONObj& index = *j;
- // Copy index entry to temp vector if found in query settings.
- if (0 == indexEntry.keyPattern.woCompare(index)) {
- temp.push_back(indexEntry);
- break;
- }
+using std::unique_ptr;
+using std::endl;
+using std::string;
+using std::vector;
+
+// static
+void filterAllowedIndexEntries(const AllowedIndices& allowedIndices,
+ std::vector<IndexEntry>* indexEntries) {
+ invariant(indexEntries);
+
+ // Filter index entries
+ // Check BSON objects in AllowedIndices::_indexKeyPatterns against IndexEntry::keyPattern.
+ // Removes IndexEntrys that do not match _indexKeyPatterns.
+ std::vector<IndexEntry> temp;
+ for (std::vector<IndexEntry>::const_iterator i = indexEntries->begin();
+ i != indexEntries->end();
+ ++i) {
+ const IndexEntry& indexEntry = *i;
+ for (std::vector<BSONObj>::const_iterator j = allowedIndices.indexKeyPatterns.begin();
+ j != allowedIndices.indexKeyPatterns.end();
+ ++j) {
+ const BSONObj& index = *j;
+ // Copy index entry to temp vector if found in query settings.
+ if (0 == indexEntry.keyPattern.woCompare(index)) {
+ temp.push_back(indexEntry);
+ break;
}
}
-
- // Update results.
- temp.swap(*indexEntries);
}
- namespace {
- // The body is below in the "count hack" section but getExecutor calls it.
- bool turnIxscanIntoCount(QuerySolution* soln);
-
- bool filteredIndexBad(const MatchExpression* filter, CanonicalQuery* query) {
- if (!filter)
- return false;
+ // Update results.
+ temp.swap(*indexEntries);
+}
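A schematic example of the filtering behaviour (the single-argument IndexEntry constructor and the AllowedIndices constructor taking a vector of key patterns are assumed here, as in the planner test and query-settings code):

    std::vector<IndexEntry> entries;
    entries.push_back(IndexEntry(BSON("a" << 1)));
    entries.push_back(IndexEntry(BSON("b" << 1)));

    std::vector<BSONObj> allowedPatterns;
    allowedPatterns.push_back(BSON("a" << 1));
    AllowedIndices allowed(allowedPatterns);

    filterAllowedIndexEntries(allowed, &entries);
    // 'entries' now holds only the { a: 1 } entry.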
- MatchExpression* queryPredicates = query->root();
- if (!queryPredicates) {
- // Index is filtered, but query has none.
- // Impossible to use index.
- return true;
- }
+namespace {
+// The body is below in the "count hack" section but getExecutor calls it.
+bool turnIxscanIntoCount(QuerySolution* soln);
- return !expression::isSubsetOf(queryPredicates, filter);
- }
- } // namespace
-
-
- void fillOutPlannerParams(OperationContext* txn,
- Collection* collection,
- CanonicalQuery* canonicalQuery,
- QueryPlannerParams* plannerParams) {
- // If it's not NULL, we may have indices. Access the catalog and fill out IndexEntry(s)
- IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(txn,
- false);
- while (ii.more()) {
- const IndexDescriptor* desc = ii.next();
-
- IndexCatalogEntry* ice = ii.catalogEntry(desc);
- if (filteredIndexBad(ice->getFilterExpression(), canonicalQuery)) {
- continue;
- }
+bool filteredIndexBad(const MatchExpression* filter, CanonicalQuery* query) {
+ if (!filter)
+ return false;
- plannerParams->indices.push_back(IndexEntry(desc->keyPattern(),
- desc->getAccessMethodName(),
- desc->isMultikey(txn),
- desc->isSparse(),
- desc->unique(),
- desc->indexName(),
- ice->getFilterExpression(),
- desc->infoObj()));
- }
+ MatchExpression* queryPredicates = query->root();
+ if (!queryPredicates) {
+ // Index is filtered, but query has none.
+ // Impossible to use index.
+ return true;
+ }
- // If query supports index filters, filter params.indices by indices in query settings.
- QuerySettings* querySettings = collection->infoCache()->getQuerySettings();
- AllowedIndices* allowedIndicesRaw;
- PlanCacheKey planCacheKey =
- collection->infoCache()->getPlanCache()->computeKey(*canonicalQuery);
-
- // Filter index catalog if index filters are specified for query.
- // Also, signal to planner that application hint should be ignored.
- if (querySettings->getAllowedIndices(planCacheKey, &allowedIndicesRaw)) {
- std::unique_ptr<AllowedIndices> allowedIndices(allowedIndicesRaw);
- filterAllowedIndexEntries(*allowedIndices, &plannerParams->indices);
- plannerParams->indexFiltersApplied = true;
- }
+ return !expression::isSubsetOf(queryPredicates, filter);
+}
+} // namespace
- // We will not output collection scans unless there are no indexed solutions. NO_TABLE_SCAN
- // overrides this behavior by not outputting a collscan even if there are no indexed
- // solutions.
- if (storageGlobalParams.noTableScan) {
- const string& ns = canonicalQuery->ns();
- // There are certain cases where we ignore this restriction:
- bool ignore = canonicalQuery->getQueryObj().isEmpty()
- || (string::npos != ns.find(".system."))
- || (0 == ns.find("local."));
- if (!ignore) {
- plannerParams->options |= QueryPlannerParams::NO_TABLE_SCAN;
- }
- }
- // If the caller wants a shard filter, make sure we're actually sharded.
- if (plannerParams->options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
- CollectionMetadataPtr collMetadata =
- shardingState.getCollectionMetadata(canonicalQuery->ns());
+void fillOutPlannerParams(OperationContext* txn,
+ Collection* collection,
+ CanonicalQuery* canonicalQuery,
+ QueryPlannerParams* plannerParams) {
+ // If it's not NULL, we may have indices. Access the catalog and fill out IndexEntry(s)
+ IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(txn, false);
+ while (ii.more()) {
+ const IndexDescriptor* desc = ii.next();
- if (collMetadata) {
- plannerParams->shardKey = collMetadata->getKeyPattern();
- }
- else {
- // If there's no metadata don't bother w/the shard filter since we won't know what
- // the key pattern is anyway...
- plannerParams->options &= ~QueryPlannerParams::INCLUDE_SHARD_FILTER;
- }
+ IndexCatalogEntry* ice = ii.catalogEntry(desc);
+ if (filteredIndexBad(ice->getFilterExpression(), canonicalQuery)) {
+ continue;
}
- if (internalQueryPlannerEnableIndexIntersection) {
- plannerParams->options |= QueryPlannerParams::INDEX_INTERSECTION;
- }
+ plannerParams->indices.push_back(IndexEntry(desc->keyPattern(),
+ desc->getAccessMethodName(),
+ desc->isMultikey(txn),
+ desc->isSparse(),
+ desc->unique(),
+ desc->indexName(),
+ ice->getFilterExpression(),
+ desc->infoObj()));
+ }
- plannerParams->options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
+ // If query supports index filters, filter params.indices by indices in query settings.
+ QuerySettings* querySettings = collection->infoCache()->getQuerySettings();
+ AllowedIndices* allowedIndicesRaw;
+ PlanCacheKey planCacheKey =
+ collection->infoCache()->getPlanCache()->computeKey(*canonicalQuery);
+
+ // Filter index catalog if index filters are specified for query.
+ // Also, signal to planner that application hint should be ignored.
+ if (querySettings->getAllowedIndices(planCacheKey, &allowedIndicesRaw)) {
+ std::unique_ptr<AllowedIndices> allowedIndices(allowedIndicesRaw);
+ filterAllowedIndexEntries(*allowedIndices, &plannerParams->indices);
+ plannerParams->indexFiltersApplied = true;
+ }
- // Doc-level locking storage engines cannot answer predicates implicitly via exact index
- // bounds for index intersection plans, as this can lead to spurious matches.
- //
- // Such storage engines do not use the invalidation framework, and therefore
- // have no need for KEEP_MUTATIONS.
- if (supportsDocLocking()) {
- plannerParams->options |= QueryPlannerParams::CANNOT_TRIM_IXISECT;
+ // We will not output collection scans unless there are no indexed solutions. NO_TABLE_SCAN
+ // overrides this behavior by not outputting a collscan even if there are no indexed
+ // solutions.
+ if (storageGlobalParams.noTableScan) {
+ const string& ns = canonicalQuery->ns();
+ // There are certain cases where we ignore this restriction:
+ bool ignore = canonicalQuery->getQueryObj().isEmpty() ||
+ (string::npos != ns.find(".system.")) || (0 == ns.find("local."));
+ if (!ignore) {
+ plannerParams->options |= QueryPlannerParams::NO_TABLE_SCAN;
}
- else {
- plannerParams->options |= QueryPlannerParams::KEEP_MUTATIONS;
+ }
+
+ // If the caller wants a shard filter, make sure we're actually sharded.
+ if (plannerParams->options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
+ CollectionMetadataPtr collMetadata =
+ shardingState.getCollectionMetadata(canonicalQuery->ns());
+
+ if (collMetadata) {
+ plannerParams->shardKey = collMetadata->getKeyPattern();
+ } else {
+ // If there's no metadata don't bother w/the shard filter since we won't know what
+ // the key pattern is anyway...
+ plannerParams->options &= ~QueryPlannerParams::INCLUDE_SHARD_FILTER;
}
}
- namespace {
-
- /**
- * Build an execution tree for the query described in 'canonicalQuery'. Does not take
- * ownership of arguments.
- *
- * If an execution tree could be created, then returns Status::OK() and sets 'rootOut' to
- * the root of the constructed execution tree, and sets 'querySolutionOut' to the associated
- * query solution (if applicable) or NULL.
- *
- * If an execution tree could not be created, returns a Status indicating why and sets both
- * 'rootOut' and 'querySolutionOut' to NULL.
- */
- Status prepareExecution(OperationContext* opCtx,
- Collection* collection,
- WorkingSet* ws,
- CanonicalQuery* canonicalQuery,
- size_t plannerOptions,
- PlanStage** rootOut,
- QuerySolution** querySolutionOut) {
- invariant(canonicalQuery);
- *rootOut = NULL;
- *querySolutionOut = NULL;
-
- // This can happen as we're called by internal clients as well.
- if (NULL == collection) {
- const string& ns = canonicalQuery->ns();
- LOG(2) << "Collection " << ns << " does not exist."
- << " Using EOF plan: " << canonicalQuery->toStringShort();
- *rootOut = new EOFStage();
- return Status::OK();
- }
+ if (internalQueryPlannerEnableIndexIntersection) {
+ plannerParams->options |= QueryPlannerParams::INDEX_INTERSECTION;
+ }
- // Fill out the planning params. We use these for both cached solutions and non-cached.
- QueryPlannerParams plannerParams;
- plannerParams.options = plannerOptions;
- fillOutPlannerParams(opCtx, collection, canonicalQuery, &plannerParams);
+ plannerParams->options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
+
+ // Doc-level locking storage engines cannot answer predicates implicitly via exact index
+ // bounds for index intersection plans, as this can lead to spurious matches.
+ //
+ // Such storage engines do not use the invalidation framework, and therefore
+ // have no need for KEEP_MUTATIONS.
+ if (supportsDocLocking()) {
+ plannerParams->options |= QueryPlannerParams::CANNOT_TRIM_IXISECT;
+ } else {
+ plannerParams->options |= QueryPlannerParams::KEEP_MUTATIONS;
+ }
+}
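The option handling in fillOutPlannerParams above is a plain bitmask: each planner behavior (index intersection, table-scan suppression, shard filtering, KEEP_MUTATIONS) is one bit that gets OR-ed in, cleared with AND-NOT, and tested later with a bitwise AND. A minimal self-contained sketch of that pattern follows; the enum and flag names are illustrative stand-ins, not the real QueryPlannerParams constants.

#include <cstddef>
#include <iostream>

// Hypothetical stand-ins for the QueryPlannerParams option bits.
enum PlannerOption : std::size_t {
    kIndexIntersection = 1 << 0,
    kNoTableScan = 1 << 1,
    kIncludeShardFilter = 1 << 2,
};

int main() {
    std::size_t options = 0;
    options |= kIndexIntersection;   // enable a behavior
    options |= kIncludeShardFilter;  // requested by the caller...
    options &= ~kIncludeShardFilter; // ...but cleared again when no shard metadata exists

    // Later stages only need a bitwise AND to test a flag.
    std::cout << std::boolalpha << static_cast<bool>(options & kIndexIntersection) << ' '
              << static_cast<bool>(options & kIncludeShardFilter) << '\n';  // true false
    return 0;
}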
- // If we have an _id index we can use an idhack plan.
- if (IDHackStage::supportsQuery(*canonicalQuery) &&
- collection->getIndexCatalog()->findIdIndex(opCtx)) {
+namespace {
- LOG(2) << "Using idhack: " << canonicalQuery->toStringShort();
+/**
+ * Build an execution tree for the query described in 'canonicalQuery'. Does not take
+ * ownership of arguments.
+ *
+ * If an execution tree could be created, then returns Status::OK() and sets 'rootOut' to
+ * the root of the constructed execution tree, and sets 'querySolutionOut' to the associated
+ * query solution (if applicable) or NULL.
+ *
+ * If an execution tree could not be created, returns a Status indicating why and sets both
+ * 'rootOut' and 'querySolutionOut' to NULL.
+ */
+Status prepareExecution(OperationContext* opCtx,
+ Collection* collection,
+ WorkingSet* ws,
+ CanonicalQuery* canonicalQuery,
+ size_t plannerOptions,
+ PlanStage** rootOut,
+ QuerySolution** querySolutionOut) {
+ invariant(canonicalQuery);
+ *rootOut = NULL;
+ *querySolutionOut = NULL;
+
+ // This can happen as we're called by internal clients as well.
+ if (NULL == collection) {
+ const string& ns = canonicalQuery->ns();
+ LOG(2) << "Collection " << ns << " does not exist."
+ << " Using EOF plan: " << canonicalQuery->toStringShort();
+ *rootOut = new EOFStage();
+ return Status::OK();
+ }
- *rootOut = new IDHackStage(opCtx, collection, canonicalQuery, ws);
+ // Fill out the planning params. We use these for both cached solutions and non-cached.
+ QueryPlannerParams plannerParams;
+ plannerParams.options = plannerOptions;
+ fillOutPlannerParams(opCtx, collection, canonicalQuery, &plannerParams);
- // Might have to filter out orphaned docs.
- if (plannerParams.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
- *rootOut =
- new ShardFilterStage(shardingState.getCollectionMetadata(collection->ns()),
- ws, *rootOut);
- }
+ // If we have an _id index we can use an idhack plan.
+ if (IDHackStage::supportsQuery(*canonicalQuery) &&
+ collection->getIndexCatalog()->findIdIndex(opCtx)) {
+ LOG(2) << "Using idhack: " << canonicalQuery->toStringShort();
- // There might be a projection. The idhack stage will always fetch the full
- // document, so we don't support covered projections. However, we might use the
- // simple inclusion fast path.
- if (NULL != canonicalQuery->getProj()) {
- ProjectionStageParams params(WhereCallbackReal(opCtx, collection->ns().db()));
- params.projObj = canonicalQuery->getProj()->getProjObj();
-
- // Stuff the right data into the params depending on what proj impl we use.
- if (canonicalQuery->getProj()->requiresDocument()
- || canonicalQuery->getProj()->wantIndexKey()) {
- params.fullExpression = canonicalQuery->root();
- params.projImpl = ProjectionStageParams::NO_FAST_PATH;
- }
- else {
- params.projImpl = ProjectionStageParams::SIMPLE_DOC;
- }
+ *rootOut = new IDHackStage(opCtx, collection, canonicalQuery, ws);
- *rootOut = new ProjectionStage(params, ws, *rootOut);
- }
+ // Might have to filter out orphaned docs.
+ if (plannerParams.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
+ *rootOut = new ShardFilterStage(
+ shardingState.getCollectionMetadata(collection->ns()), ws, *rootOut);
+ }
- return Status::OK();
+ // There might be a projection. The idhack stage will always fetch the full
+ // document, so we don't support covered projections. However, we might use the
+ // simple inclusion fast path.
+ if (NULL != canonicalQuery->getProj()) {
+ ProjectionStageParams params(WhereCallbackReal(opCtx, collection->ns().db()));
+ params.projObj = canonicalQuery->getProj()->getProjObj();
+
+ // Stuff the right data into the params depending on what proj impl we use.
+ if (canonicalQuery->getProj()->requiresDocument() ||
+ canonicalQuery->getProj()->wantIndexKey()) {
+ params.fullExpression = canonicalQuery->root();
+ params.projImpl = ProjectionStageParams::NO_FAST_PATH;
+ } else {
+ params.projImpl = ProjectionStageParams::SIMPLE_DOC;
}
- // Tailable: If the query requests tailable the collection must be capped.
- if (canonicalQuery->getParsed().isTailable()) {
- if (!collection->isCapped()) {
- return Status(ErrorCodes::BadValue,
- "error processing query: " + canonicalQuery->toString() +
- " tailable cursor requested on non capped collection");
- }
+ *rootOut = new ProjectionStage(params, ws, *rootOut);
+ }
- // If a sort is specified it must be equal to expectedSort.
- const BSONObj expectedSort = BSON("$natural" << 1);
- const BSONObj& actualSort = canonicalQuery->getParsed().getSort();
- if (!actualSort.isEmpty() && !(actualSort == expectedSort)) {
- return Status(ErrorCodes::BadValue,
- "error processing query: " + canonicalQuery->toString() +
- " invalid sort specified for tailable cursor: "
- + actualSort.toString());
- }
- }
+ return Status::OK();
+ }
- // Try to look up a cached solution for the query.
- CachedSolution* rawCS;
- if (PlanCache::shouldCacheQuery(*canonicalQuery) &&
- collection->infoCache()->getPlanCache()->get(*canonicalQuery, &rawCS).isOK()) {
- // We have a CachedSolution. Have the planner turn it into a QuerySolution.
- std::unique_ptr<CachedSolution> cs(rawCS);
- QuerySolution *qs;
- Status status = QueryPlanner::planFromCache(*canonicalQuery, plannerParams, *cs,
- &qs);
-
- if (status.isOK()) {
- verify(StageBuilder::build(opCtx, collection, *qs, ws, rootOut));
- if ((plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT)
- && turnIxscanIntoCount(qs)) {
-
- LOG(2) << "Using fast count: " << canonicalQuery->toStringShort()
- << ", planSummary: " << Explain::getPlanSummary(*rootOut);
- }
+ // Tailable: If the query requests tailable the collection must be capped.
+ if (canonicalQuery->getParsed().isTailable()) {
+ if (!collection->isCapped()) {
+ return Status(ErrorCodes::BadValue,
+ "error processing query: " + canonicalQuery->toString() +
+ " tailable cursor requested on non capped collection");
+ }
- // Add a CachedPlanStage on top of the previous root.
- //
- // 'decisionWorks' is used to determine whether the existing cache entry should
- // be evicted, and the query replanned.
- //
- // Takes ownership of '*rootOut'.
- *rootOut = new CachedPlanStage(opCtx,
- collection,
- ws,
- canonicalQuery,
- plannerParams,
- cs->decisionWorks,
- *rootOut);
- *querySolutionOut = qs;
- return Status::OK();
- }
+ // If a sort is specified it must be equal to expectedSort.
+ const BSONObj expectedSort = BSON("$natural" << 1);
+ const BSONObj& actualSort = canonicalQuery->getParsed().getSort();
+ if (!actualSort.isEmpty() && !(actualSort == expectedSort)) {
+ return Status(ErrorCodes::BadValue,
+ "error processing query: " + canonicalQuery->toString() +
+ " invalid sort specified for tailable cursor: " +
+ actualSort.toString());
+ }
+ }
+
+ // Try to look up a cached solution for the query.
+ CachedSolution* rawCS;
+ if (PlanCache::shouldCacheQuery(*canonicalQuery) &&
+ collection->infoCache()->getPlanCache()->get(*canonicalQuery, &rawCS).isOK()) {
+ // We have a CachedSolution. Have the planner turn it into a QuerySolution.
+ std::unique_ptr<CachedSolution> cs(rawCS);
+ QuerySolution* qs;
+ Status status = QueryPlanner::planFromCache(*canonicalQuery, plannerParams, *cs, &qs);
+
+ if (status.isOK()) {
+ verify(StageBuilder::build(opCtx, collection, *qs, ws, rootOut));
+ if ((plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT) &&
+ turnIxscanIntoCount(qs)) {
+ LOG(2) << "Using fast count: " << canonicalQuery->toStringShort()
+ << ", planSummary: " << Explain::getPlanSummary(*rootOut);
}
- if (internalQueryPlanOrChildrenIndependently
- && SubplanStage::canUseSubplanning(*canonicalQuery)) {
+ // Add a CachedPlanStage on top of the previous root.
+ //
+ // 'decisionWorks' is used to determine whether the existing cache entry should
+ // be evicted, and the query replanned.
+ //
+ // Takes ownership of '*rootOut'.
+ *rootOut = new CachedPlanStage(
+ opCtx, collection, ws, canonicalQuery, plannerParams, cs->decisionWorks, *rootOut);
+ *querySolutionOut = qs;
+ return Status::OK();
+ }
+ }
- LOG(2) << "Running query as sub-queries: " << canonicalQuery->toStringShort();
+ if (internalQueryPlanOrChildrenIndependently &&
+ SubplanStage::canUseSubplanning(*canonicalQuery)) {
+ LOG(2) << "Running query as sub-queries: " << canonicalQuery->toStringShort();
- *rootOut = new SubplanStage(opCtx, collection, ws, plannerParams, canonicalQuery);
- return Status::OK();
- }
+ *rootOut = new SubplanStage(opCtx, collection, ws, plannerParams, canonicalQuery);
+ return Status::OK();
+ }
- vector<QuerySolution*> solutions;
- Status status = QueryPlanner::plan(*canonicalQuery, plannerParams, &solutions);
- if (!status.isOK()) {
- return Status(ErrorCodes::BadValue,
- "error processing query: " + canonicalQuery->toString() +
- " planner returned error: " + status.reason());
- }
+ vector<QuerySolution*> solutions;
+ Status status = QueryPlanner::plan(*canonicalQuery, plannerParams, &solutions);
+ if (!status.isOK()) {
+ return Status(ErrorCodes::BadValue,
+ "error processing query: " + canonicalQuery->toString() +
+ " planner returned error: " + status.reason());
+ }
- // We cannot figure out how to answer the query. Perhaps it requires an index
- // we do not have?
- if (0 == solutions.size()) {
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "error processing query: "
- << canonicalQuery->toString()
- << " No query solutions");
- }
+ // We cannot figure out how to answer the query. Perhaps it requires an index
+ // we do not have?
+ if (0 == solutions.size()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "error processing query: " << canonicalQuery->toString()
+ << " No query solutions");
+ }
- // See if one of our solutions is a fast count hack in disguise.
- if (plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT) {
- for (size_t i = 0; i < solutions.size(); ++i) {
- if (turnIxscanIntoCount(solutions[i])) {
- // Great, we can use solutions[i]. Clean up the other QuerySolution(s).
- for (size_t j = 0; j < solutions.size(); ++j) {
- if (j != i) {
- delete solutions[j];
- }
- }
-
- // We're not going to cache anything that's fast count.
- verify(StageBuilder::build(opCtx, collection, *solutions[i], ws, rootOut));
-
- LOG(2) << "Using fast count: " << canonicalQuery->toStringShort()
- << ", planSummary: " << Explain::getPlanSummary(*rootOut);
-
- *querySolutionOut = solutions[i];
- return Status::OK();
+ // See if one of our solutions is a fast count hack in disguise.
+ if (plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT) {
+ for (size_t i = 0; i < solutions.size(); ++i) {
+ if (turnIxscanIntoCount(solutions[i])) {
+ // Great, we can use solutions[i]. Clean up the other QuerySolution(s).
+ for (size_t j = 0; j < solutions.size(); ++j) {
+ if (j != i) {
+ delete solutions[j];
}
}
- }
- if (1 == solutions.size()) {
- // Only one possible plan. Run it. Build the stages from the solution.
- verify(StageBuilder::build(opCtx, collection, *solutions[0], ws, rootOut));
+ // We're not going to cache anything that's fast count.
+ verify(StageBuilder::build(opCtx, collection, *solutions[i], ws, rootOut));
- LOG(2) << "Only one plan is available; it will be run but will not be cached. "
- << canonicalQuery->toStringShort()
+ LOG(2) << "Using fast count: " << canonicalQuery->toStringShort()
<< ", planSummary: " << Explain::getPlanSummary(*rootOut);
- *querySolutionOut = solutions[0];
+ *querySolutionOut = solutions[i];
return Status::OK();
}
- else {
- // Many solutions. Create a MultiPlanStage to pick the best, update the cache,
- // and so on. The working set will be shared by all candidate plans.
- MultiPlanStage* multiPlanStage = new MultiPlanStage(opCtx, collection, canonicalQuery);
-
- for (size_t ix = 0; ix < solutions.size(); ++ix) {
- if (solutions[ix]->cacheData.get()) {
- solutions[ix]->cacheData->indexFilterApplied =
- plannerParams.indexFiltersApplied;
- }
+ }
+ }
- // version of StageBuild::build when WorkingSet is shared
- PlanStage* nextPlanRoot;
- verify(StageBuilder::build(opCtx, collection, *solutions[ix], ws,
- &nextPlanRoot));
+ if (1 == solutions.size()) {
+ // Only one possible plan. Run it. Build the stages from the solution.
+ verify(StageBuilder::build(opCtx, collection, *solutions[0], ws, rootOut));
- // Owns none of the arguments
- multiPlanStage->addPlan(solutions[ix], nextPlanRoot, ws);
- }
+ LOG(2) << "Only one plan is available; it will be run but will not be cached. "
+ << canonicalQuery->toStringShort()
+ << ", planSummary: " << Explain::getPlanSummary(*rootOut);
- *rootOut = multiPlanStage;
- return Status::OK();
+ *querySolutionOut = solutions[0];
+ return Status::OK();
+ } else {
+ // Many solutions. Create a MultiPlanStage to pick the best, update the cache,
+ // and so on. The working set will be shared by all candidate plans.
+ MultiPlanStage* multiPlanStage = new MultiPlanStage(opCtx, collection, canonicalQuery);
+
+ for (size_t ix = 0; ix < solutions.size(); ++ix) {
+ if (solutions[ix]->cacheData.get()) {
+ solutions[ix]->cacheData->indexFilterApplied = plannerParams.indexFiltersApplied;
}
- }
- } // namespace
+            // version of StageBuilder::build when WorkingSet is shared
+ PlanStage* nextPlanRoot;
+ verify(StageBuilder::build(opCtx, collection, *solutions[ix], ws, &nextPlanRoot));
- Status getExecutor(OperationContext* txn,
- Collection* collection,
- CanonicalQuery* rawCanonicalQuery,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out,
- size_t plannerOptions) {
- unique_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);
- unique_ptr<WorkingSet> ws(new WorkingSet());
- PlanStage* root;
- QuerySolution* querySolution;
- Status status = prepareExecution(txn, collection, ws.get(), canonicalQuery.get(),
- plannerOptions, &root, &querySolution);
- if (!status.isOK()) {
- return status;
+ // Owns none of the arguments
+ multiPlanStage->addPlan(solutions[ix], nextPlanRoot, ws);
}
- invariant(root);
- // We must have a tree of stages in order to have a valid plan executor, but the query
- // solution may be null.
- return PlanExecutor::make(txn, ws.release(), root, querySolution, canonicalQuery.release(),
- collection, yieldPolicy, out);
- }
- Status getExecutor(OperationContext* txn,
- Collection* collection,
- const std::string& ns,
- const BSONObj& unparsedQuery,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out,
- size_t plannerOptions) {
- if (!collection) {
- LOG(2) << "Collection " << ns << " does not exist."
- << " Using EOF stage: " << unparsedQuery.toString();
- EOFStage* eofStage = new EOFStage();
- WorkingSet* ws = new WorkingSet();
- return PlanExecutor::make(txn, ws, eofStage, ns, yieldPolicy, out);
- }
+ *rootOut = multiPlanStage;
+ return Status::OK();
+ }
+}
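prepareExecution ends with the usual single-versus-many split: a lone candidate solution is built and run directly (and deliberately not cached), while several candidates go to a MultiPlanStage that trials them against a shared working set and keeps the winner. A rough sketch of that selection shape, reduced to standard-library types; Candidate and its 'works' cost field are invented for illustration and stand in for the real trial-based ranking, not for the actual QuerySolution/PlanStage classes.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// Invented stand-in for a candidate plan; 'works' is a toy cost proxy (lower is better).
struct Candidate {
    const char* summary;
    int works;
};

// Single candidate: use it as-is. Several candidates: let them compete and keep the best.
std::size_t pickPlan(const std::vector<Candidate>& candidates) {
    if (candidates.size() == 1) {
        return 0;  // only one possible plan: run it, skip the plan cache
    }
    auto best = std::min_element(
        candidates.begin(), candidates.end(),
        [](const Candidate& a, const Candidate& b) { return a.works < b.works; });
    return static_cast<std::size_t>(best - candidates.begin());
}

int main() {
    std::vector<Candidate> candidates{{"IXSCAN {a: 1}", 40}, {"COLLSCAN", 900}};
    std::cout << candidates[pickPlan(candidates)].summary << '\n';  // IXSCAN {a: 1}
    return 0;
}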
- if (!CanonicalQuery::isSimpleIdQuery(unparsedQuery) ||
- !collection->getIndexCatalog()->findIdIndex(txn)) {
+} // namespace
- const WhereCallbackReal whereCallback(txn, collection->ns().db());
- CanonicalQuery* cq;
- Status status = CanonicalQuery::canonicalize(collection->ns(), unparsedQuery, &cq,
- whereCallback);
- if (!status.isOK())
- return status;
+Status getExecutor(OperationContext* txn,
+ Collection* collection,
+ CanonicalQuery* rawCanonicalQuery,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** out,
+ size_t plannerOptions) {
+ unique_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);
+ unique_ptr<WorkingSet> ws(new WorkingSet());
+ PlanStage* root;
+ QuerySolution* querySolution;
+ Status status = prepareExecution(
+ txn, collection, ws.get(), canonicalQuery.get(), plannerOptions, &root, &querySolution);
+ if (!status.isOK()) {
+ return status;
+ }
+ invariant(root);
+ // We must have a tree of stages in order to have a valid plan executor, but the query
+ // solution may be null.
+ return PlanExecutor::make(txn,
+ ws.release(),
+ root,
+ querySolution,
+ canonicalQuery.release(),
+ collection,
+ yieldPolicy,
+ out);
+}
+
+Status getExecutor(OperationContext* txn,
+ Collection* collection,
+ const std::string& ns,
+ const BSONObj& unparsedQuery,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** out,
+ size_t plannerOptions) {
+ if (!collection) {
+ LOG(2) << "Collection " << ns << " does not exist."
+ << " Using EOF stage: " << unparsedQuery.toString();
+ EOFStage* eofStage = new EOFStage();
+ WorkingSet* ws = new WorkingSet();
+ return PlanExecutor::make(txn, ws, eofStage, ns, yieldPolicy, out);
+ }
- // Takes ownership of 'cq'.
- return getExecutor(txn, collection, cq, yieldPolicy, out, plannerOptions);
- }
+ if (!CanonicalQuery::isSimpleIdQuery(unparsedQuery) ||
+ !collection->getIndexCatalog()->findIdIndex(txn)) {
+ const WhereCallbackReal whereCallback(txn, collection->ns().db());
+ CanonicalQuery* cq;
+ Status status =
+ CanonicalQuery::canonicalize(collection->ns(), unparsedQuery, &cq, whereCallback);
+ if (!status.isOK())
+ return status;
- LOG(2) << "Using idhack: " << unparsedQuery.toString();
+ // Takes ownership of 'cq'.
+ return getExecutor(txn, collection, cq, yieldPolicy, out, plannerOptions);
+ }
- WorkingSet* ws = new WorkingSet();
- PlanStage* root = new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws);
+ LOG(2) << "Using idhack: " << unparsedQuery.toString();
- // Might have to filter out orphaned docs.
- if (plannerOptions & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
- root = new ShardFilterStage(shardingState.getCollectionMetadata(collection->ns()), ws,
- root);
- }
+ WorkingSet* ws = new WorkingSet();
+ PlanStage* root = new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws);
- return PlanExecutor::make(txn, ws, root, collection, yieldPolicy, out);
+ // Might have to filter out orphaned docs.
+ if (plannerOptions & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
+ root =
+ new ShardFilterStage(shardingState.getCollectionMetadata(collection->ns()), ws, root);
}
- //
- // Find
- //
+ return PlanExecutor::make(txn, ws, root, collection, yieldPolicy, out);
+}
+
+//
+// Find
+//
namespace {
- /**
- * Returns true if 'me' is a GTE or GE predicate over the "ts" field.
- * Such predicates can be used for the oplog start hack.
- */
- bool isOplogTsPred(const mongo::MatchExpression* me) {
- if (mongo::MatchExpression::GT != me->matchType()
- && mongo::MatchExpression::GTE != me->matchType()) {
- return false;
- }
+/**
+ * Returns true if 'me' is a GT or GTE predicate over the "ts" field.
+ * Such predicates can be used for the oplog start hack.
+ */
+bool isOplogTsPred(const mongo::MatchExpression* me) {
+ if (mongo::MatchExpression::GT != me->matchType() &&
+ mongo::MatchExpression::GTE != me->matchType()) {
+ return false;
+ }
- return mongoutils::str::equals(me->path().rawData(), "ts");
- }
-
- mongo::BSONElement extractOplogTsOptime(const mongo::MatchExpression* me) {
- invariant(isOplogTsPred(me));
- return static_cast<const mongo::ComparisonMatchExpression*>(me)->getData();
- }
-
- Status getOplogStartHack(OperationContext* txn,
- Collection* collection,
- CanonicalQuery* cq,
- PlanExecutor** execOut) {
- invariant(collection);
- invariant(cq);
- unique_ptr<CanonicalQuery> autoCq(cq);
-
- // A query can only do oplog start finding if it has a top-level $gt or $gte predicate over
- // the "ts" field (the operation's timestamp). Find that predicate and pass it to
- // the OplogStart stage.
- MatchExpression* tsExpr = NULL;
- if (MatchExpression::AND == cq->root()->matchType()) {
- // The query has an AND at the top-level. See if any of the children
- // of the AND are $gt or $gte predicates over 'ts'.
- for (size_t i = 0; i < cq->root()->numChildren(); ++i) {
- MatchExpression* me = cq->root()->getChild(i);
- if (isOplogTsPred(me)) {
- tsExpr = me;
- break;
- }
+ return mongoutils::str::equals(me->path().rawData(), "ts");
+}
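isOplogTsPred only accepts a top-level $gt or $gte comparison whose path is exactly "ts"; an $lt, an equality, or a comparison over any other field disqualifies the query from the oplog start hack. A small stand-alone sketch of that test; MatchType and SimplePred are toy stand-ins for the relevant bits of MatchExpression.

#include <iostream>
#include <string>

// Toy stand-ins for the match-expression details the real check looks at.
enum class MatchType { GT, GTE, LT, EQ };
struct SimplePred {
    MatchType type;
    std::string path;
};

// Mirrors the check above: only $gt/$gte over the literal path "ts" qualifies.
bool isOplogTsPredSketch(const SimplePred& pred) {
    if (pred.type != MatchType::GT && pred.type != MatchType::GTE) {
        return false;
    }
    return pred.path == "ts";
}

int main() {
    std::cout << std::boolalpha << isOplogTsPredSketch({MatchType::GTE, "ts"}) << ' '  // true
              << isOplogTsPredSketch({MatchType::LT, "ts"}) << ' '                     // false
              << isOplogTsPredSketch({MatchType::GTE, "t"}) << '\n';                   // false
    return 0;
}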
+
+mongo::BSONElement extractOplogTsOptime(const mongo::MatchExpression* me) {
+ invariant(isOplogTsPred(me));
+ return static_cast<const mongo::ComparisonMatchExpression*>(me)->getData();
+}
+
+Status getOplogStartHack(OperationContext* txn,
+ Collection* collection,
+ CanonicalQuery* cq,
+ PlanExecutor** execOut) {
+ invariant(collection);
+ invariant(cq);
+ unique_ptr<CanonicalQuery> autoCq(cq);
+
+ // A query can only do oplog start finding if it has a top-level $gt or $gte predicate over
+ // the "ts" field (the operation's timestamp). Find that predicate and pass it to
+ // the OplogStart stage.
+ MatchExpression* tsExpr = NULL;
+ if (MatchExpression::AND == cq->root()->matchType()) {
+ // The query has an AND at the top-level. See if any of the children
+ // of the AND are $gt or $gte predicates over 'ts'.
+ for (size_t i = 0; i < cq->root()->numChildren(); ++i) {
+ MatchExpression* me = cq->root()->getChild(i);
+ if (isOplogTsPred(me)) {
+ tsExpr = me;
+ break;
}
}
- else if (isOplogTsPred(cq->root())) {
- // The root of the tree is a $gt or $gte predicate over 'ts'.
- tsExpr = cq->root();
- }
+ } else if (isOplogTsPred(cq->root())) {
+ // The root of the tree is a $gt or $gte predicate over 'ts'.
+ tsExpr = cq->root();
+ }
- if (NULL == tsExpr) {
- return Status(ErrorCodes::OplogOperationUnsupported,
- "OplogReplay query does not contain top-level "
- "$gt or $gte over the 'ts' field.");
- }
+ if (NULL == tsExpr) {
+ return Status(ErrorCodes::OplogOperationUnsupported,
+ "OplogReplay query does not contain top-level "
+ "$gt or $gte over the 'ts' field.");
+ }
- boost::optional<RecordId> startLoc = boost::none;
+ boost::optional<RecordId> startLoc = boost::none;
- // See if the RecordStore supports the oplogStartHack
- const BSONElement tsElem = extractOplogTsOptime(tsExpr);
- if (tsElem.type() == bsonTimestamp) {
- StatusWith<RecordId> goal = oploghack::keyForOptime(tsElem.timestamp());
- if (goal.isOK()) {
- startLoc = collection->getRecordStore()->oplogStartHack(txn, goal.getValue());
- }
+ // See if the RecordStore supports the oplogStartHack
+ const BSONElement tsElem = extractOplogTsOptime(tsExpr);
+ if (tsElem.type() == bsonTimestamp) {
+ StatusWith<RecordId> goal = oploghack::keyForOptime(tsElem.timestamp());
+ if (goal.isOK()) {
+ startLoc = collection->getRecordStore()->oplogStartHack(txn, goal.getValue());
}
+ }
- if (startLoc) {
- LOG(3) << "Using direct oplog seek";
+ if (startLoc) {
+ LOG(3) << "Using direct oplog seek";
+ } else {
+ LOG(3) << "Using OplogStart stage";
+
+ // Fallback to trying the OplogStart stage.
+ WorkingSet* oplogws = new WorkingSet();
+ OplogStart* stage = new OplogStart(txn, collection, tsExpr, oplogws);
+ PlanExecutor* rawExec;
+
+ // Takes ownership of oplogws and stage.
+ Status execStatus =
+ PlanExecutor::make(txn, oplogws, stage, collection, PlanExecutor::YIELD_AUTO, &rawExec);
+ invariant(execStatus.isOK());
+ std::unique_ptr<PlanExecutor> exec(rawExec);
+
+ // The stage returns a RecordId of where to start.
+ startLoc = RecordId();
+ PlanExecutor::ExecState state = exec->getNext(NULL, startLoc.get_ptr());
+
+ // This is normal. The start of the oplog is the beginning of the collection.
+ if (PlanExecutor::IS_EOF == state) {
+ return getExecutor(
+ txn, collection, autoCq.release(), PlanExecutor::YIELD_AUTO, execOut);
}
- else {
- LOG(3) << "Using OplogStart stage";
-
- // Fallback to trying the OplogStart stage.
- WorkingSet* oplogws = new WorkingSet();
- OplogStart* stage = new OplogStart(txn, collection, tsExpr, oplogws);
- PlanExecutor* rawExec;
-
- // Takes ownership of oplogws and stage.
- Status execStatus = PlanExecutor::make(txn, oplogws, stage, collection,
- PlanExecutor::YIELD_AUTO, &rawExec);
- invariant(execStatus.isOK());
- std::unique_ptr<PlanExecutor> exec(rawExec);
-
- // The stage returns a RecordId of where to start.
- startLoc = RecordId();
- PlanExecutor::ExecState state = exec->getNext(NULL, startLoc.get_ptr());
-
- // This is normal. The start of the oplog is the beginning of the collection.
- if (PlanExecutor::IS_EOF == state) {
- return getExecutor(txn, collection, autoCq.release(), PlanExecutor::YIELD_AUTO,
- execOut);
- }
- // This is not normal. An error was encountered.
- if (PlanExecutor::ADVANCED != state) {
- return Status(ErrorCodes::InternalError,
- "quick oplog start location had error...?");
- }
+ // This is not normal. An error was encountered.
+ if (PlanExecutor::ADVANCED != state) {
+ return Status(ErrorCodes::InternalError, "quick oplog start location had error...?");
}
+ }
- // Build our collection scan...
- CollectionScanParams params;
- params.collection = collection;
- params.start = *startLoc;
- params.direction = CollectionScanParams::FORWARD;
- params.tailable = cq->getParsed().isTailable();
+ // Build our collection scan...
+ CollectionScanParams params;
+ params.collection = collection;
+ params.start = *startLoc;
+ params.direction = CollectionScanParams::FORWARD;
+ params.tailable = cq->getParsed().isTailable();
- WorkingSet* ws = new WorkingSet();
- CollectionScan* cs = new CollectionScan(txn, params, ws, cq->root());
- // Takes ownership of 'ws', 'cs', and 'cq'.
- return PlanExecutor::make(txn, ws, cs, autoCq.release(), collection,
- PlanExecutor::YIELD_AUTO, execOut);
- }
+ WorkingSet* ws = new WorkingSet();
+ CollectionScan* cs = new CollectionScan(txn, params, ws, cq->root());
+ // Takes ownership of 'ws', 'cs', and 'cq'.
+ return PlanExecutor::make(
+ txn, ws, cs, autoCq.release(), collection, PlanExecutor::YIELD_AUTO, execOut);
+}
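getOplogStartHack works through a fallback chain: try a direct record-store seek to the requested optime, otherwise run an OplogStart stage, and if that stage reaches EOF hand the query back to the regular planner; whatever start location survives seeds a forward (possibly tailable) collection scan. A simplified sketch of that chain using std::optional; directSeek and oplogStartStage are hypothetical helpers, not the real RecordStore or stage APIs.

#include <iostream>
#include <optional>

using RecordIdSketch = long long;  // illustrative stand-in for RecordId

// Hypothetical helper: a direct seek that may fail to produce a start location.
std::optional<RecordIdSketch> directSeek(long long ts) {
    if (ts >= 0) {
        return ts;  // found a key at or before the requested optime
    }
    return std::nullopt;
}

// Hypothetical fallback stage; here we pretend it scanned to EOF without a start point.
std::optional<RecordIdSketch> oplogStartStage(long long /*ts*/) {
    return std::nullopt;
}

int main() {
    const long long ts = 42;
    std::optional<RecordIdSketch> startLoc = directSeek(ts);
    if (!startLoc) {
        startLoc = oplogStartStage(ts);  // fall back to the slower stage
    }
    if (!startLoc) {
        std::cout << "no start location: plan the query normally\n";
    } else {
        std::cout << "forward collection scan starting at " << *startLoc << '\n';
    }
    return 0;
}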
-} // namespace
+} // namespace
- Status getExecutorFind(OperationContext* txn,
- Collection* collection,
- const NamespaceString& nss,
- CanonicalQuery* rawCanonicalQuery,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- std::unique_ptr<CanonicalQuery> cq(rawCanonicalQuery);
- if (NULL != collection && cq->getParsed().isOplogReplay()) {
- return getOplogStartHack(txn, collection, cq.release(), out);
- }
+Status getExecutorFind(OperationContext* txn,
+ Collection* collection,
+ const NamespaceString& nss,
+ CanonicalQuery* rawCanonicalQuery,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** out) {
+ std::unique_ptr<CanonicalQuery> cq(rawCanonicalQuery);
+ if (NULL != collection && cq->getParsed().isOplogReplay()) {
+ return getOplogStartHack(txn, collection, cq.release(), out);
+ }
- size_t options = QueryPlannerParams::DEFAULT;
- if (shardingState.needCollectionMetadata(txn->getClient(), nss.ns())) {
- options |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
- }
- return getExecutor(txn, collection, cq.release(), PlanExecutor::YIELD_AUTO, out, options);
+ size_t options = QueryPlannerParams::DEFAULT;
+ if (shardingState.needCollectionMetadata(txn->getClient(), nss.ns())) {
+ options |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
}
+ return getExecutor(txn, collection, cq.release(), PlanExecutor::YIELD_AUTO, out, options);
+}
namespace {
- /**
- * Wrap the specified 'root' plan stage in a ProjectionStage. Does not take ownership of any
- * arguments other than root.
- *
- * If the projection was valid, then return Status::OK() with a pointer to the newly created
- * ProjectionStage. Otherwise, return a status indicating the error reason.
- */
- StatusWith<std::unique_ptr<PlanStage>> applyProjection(OperationContext* txn,
- const NamespaceString& nsString,
- CanonicalQuery* cq,
- const BSONObj& proj,
- bool allowPositional,
- WorkingSet* ws,
- std::unique_ptr<PlanStage> root) {
- invariant(!proj.isEmpty());
-
- ParsedProjection* rawParsedProj;
- Status ppStatus = ParsedProjection::make(proj.getOwned(), cq->root(), &rawParsedProj);
- if (!ppStatus.isOK()) {
- return ppStatus;
- }
- std::unique_ptr<ParsedProjection> pp(rawParsedProj);
-
- // ProjectionExec requires the MatchDetails from the query expression when the projection
- // uses the positional operator. Since the query may no longer match the newly-updated
- // document, we forbid this case.
- if (!allowPositional && pp->requiresMatchDetails()) {
- return {ErrorCodes::BadValue,
- "cannot use a positional projection and return the new document"};
- }
-
- ProjectionStageParams params(WhereCallbackReal(txn, nsString.db()));
- params.projObj = proj;
- params.fullExpression = cq->root();
- return {stdx::make_unique<ProjectionStage>(params, ws, root.release())};
+/**
+ * Wrap the specified 'root' plan stage in a ProjectionStage. Does not take ownership of any
+ * arguments other than root.
+ *
+ * If the projection was valid, then return Status::OK() with a pointer to the newly created
+ * ProjectionStage. Otherwise, return a status indicating the error reason.
+ */
+StatusWith<std::unique_ptr<PlanStage>> applyProjection(OperationContext* txn,
+ const NamespaceString& nsString,
+ CanonicalQuery* cq,
+ const BSONObj& proj,
+ bool allowPositional,
+ WorkingSet* ws,
+ std::unique_ptr<PlanStage> root) {
+ invariant(!proj.isEmpty());
+
+ ParsedProjection* rawParsedProj;
+ Status ppStatus = ParsedProjection::make(proj.getOwned(), cq->root(), &rawParsedProj);
+ if (!ppStatus.isOK()) {
+ return ppStatus;
+ }
+ std::unique_ptr<ParsedProjection> pp(rawParsedProj);
+
+ // ProjectionExec requires the MatchDetails from the query expression when the projection
+ // uses the positional operator. Since the query may no longer match the newly-updated
+ // document, we forbid this case.
+ if (!allowPositional && pp->requiresMatchDetails()) {
+ return {ErrorCodes::BadValue,
+ "cannot use a positional projection and return the new document"};
}
-} // namespace
+ ProjectionStageParams params(WhereCallbackReal(txn, nsString.db()));
+ params.projObj = proj;
+ params.fullExpression = cq->root();
+ return {stdx::make_unique<ProjectionStage>(params, ws, root.release())};
+}
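applyProjection rejects a positional ("field.$") projection whenever the caller cannot supply the original match details, since the post-update document may no longer match the query that selected it. A tiny illustrative check in that spirit; projectionAllowed and the suffix test are invented stand-ins for ParsedProjection::requiresMatchDetails(), not the real interface.

#include <iostream>
#include <string>

// Invented helper: a projection on "field.$" needs match details, so it is only
// allowed when the caller can still provide the pre-update match (allowPositional).
bool projectionAllowed(const std::string& projectedField, bool allowPositional) {
    const bool positional = projectedField.size() >= 2 &&
        projectedField.compare(projectedField.size() - 2, 2, ".$") == 0;
    return !positional || allowPositional;
}

int main() {
    std::cout << std::boolalpha
              << projectionAllowed("grades.$", /*allowPositional=*/false) << ' '   // false
              << projectionAllowed("grades.$", /*allowPositional=*/true) << '\n';  // true
    return 0;
}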
- //
- // Delete
- //
+} // namespace
- Status getExecutorDelete(OperationContext* txn,
- Collection* collection,
- ParsedDelete* parsedDelete,
- PlanExecutor** execOut) {
- const DeleteRequest* request = parsedDelete->getRequest();
-
- const NamespaceString& nss(request->getNamespaceString());
- if (!request->isGod()) {
- if (nss.isSystem()) {
- uassert(12050,
- "cannot delete from system namespace",
- legalClientSystemNS(nss.ns(), true));
- }
- if (nss.ns().find('$') != string::npos) {
- log() << "cannot delete from collection with reserved $ in name: " << nss << endl;
- uasserted(10100, "cannot delete from collection with reserved $ in name");
- }
+//
+// Delete
+//
+
+Status getExecutorDelete(OperationContext* txn,
+ Collection* collection,
+ ParsedDelete* parsedDelete,
+ PlanExecutor** execOut) {
+ const DeleteRequest* request = parsedDelete->getRequest();
+
+ const NamespaceString& nss(request->getNamespaceString());
+ if (!request->isGod()) {
+ if (nss.isSystem()) {
+ uassert(
+ 12050, "cannot delete from system namespace", legalClientSystemNS(nss.ns(), true));
}
-
- if (collection && collection->isCapped()) {
- return Status(ErrorCodes::IllegalOperation,
- str::stream() << "cannot remove from a capped collection: " << nss.ns());
+ if (nss.ns().find('$') != string::npos) {
+ log() << "cannot delete from collection with reserved $ in name: " << nss << endl;
+ uasserted(10100, "cannot delete from collection with reserved $ in name");
}
+ }
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss);
-
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while removing from " << nss.ns());
- }
+ if (collection && collection->isCapped()) {
+ return Status(ErrorCodes::IllegalOperation,
+ str::stream() << "cannot remove from a capped collection: " << nss.ns());
+ }
- DeleteStageParams deleteStageParams;
- deleteStageParams.isMulti = request->isMulti();
- deleteStageParams.fromMigrate = request->isFromMigrate();
- deleteStageParams.isExplain = request->isExplain();
- deleteStageParams.returnDeleted = request->shouldReturnDeleted();
-
- unique_ptr<WorkingSet> ws(new WorkingSet());
- PlanExecutor::YieldPolicy policy = parsedDelete->canYield() ? PlanExecutor::YIELD_AUTO :
- PlanExecutor::YIELD_MANUAL;
-
- if (!parsedDelete->hasParsedQuery()) {
- // This is the idhack fast-path for getting a PlanExecutor without doing the work
- // to create a CanonicalQuery.
- const BSONObj& unparsedQuery = request->getQuery();
-
- if (!collection) {
- // Treat collections that do not exist as empty collections. Note that the explain
- // reporting machinery always assumes that the root stage for a delete operation is
- // a DeleteStage, so in this case we put a DeleteStage on top of an EOFStage.
- LOG(2) << "Collection " << nss.ns() << " does not exist."
- << " Using EOF stage: " << unparsedQuery.toString();
- DeleteStage* deleteStage = new DeleteStage(txn, deleteStageParams, ws.get(), NULL,
- new EOFStage());
- return PlanExecutor::make(txn, ws.release(), deleteStage, nss.ns(), policy,
- execOut);
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss);
- }
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while removing from " << nss.ns());
+ }
- if (CanonicalQuery::isSimpleIdQuery(unparsedQuery)
- && collection->getIndexCatalog()->findIdIndex(txn)
- && request->getProj().isEmpty()) {
- LOG(2) << "Using idhack: " << unparsedQuery.toString();
-
- PlanStage* idHackStage = new IDHackStage(txn,
- collection,
- unparsedQuery["_id"].wrap(),
- ws.get());
- DeleteStage* root = new DeleteStage(txn, deleteStageParams, ws.get(), collection,
- idHackStage);
- return PlanExecutor::make(txn, ws.release(), root, collection, policy, execOut);
- }
+ DeleteStageParams deleteStageParams;
+ deleteStageParams.isMulti = request->isMulti();
+ deleteStageParams.fromMigrate = request->isFromMigrate();
+ deleteStageParams.isExplain = request->isExplain();
+ deleteStageParams.returnDeleted = request->shouldReturnDeleted();
- // If we're here then we don't have a parsed query, but we're also not eligible for
- // the idhack fast path. We need to force canonicalization now.
- Status cqStatus = parsedDelete->parseQueryToCQ();
- if (!cqStatus.isOK()) {
- return cqStatus;
- }
- }
+ unique_ptr<WorkingSet> ws(new WorkingSet());
+ PlanExecutor::YieldPolicy policy =
+ parsedDelete->canYield() ? PlanExecutor::YIELD_AUTO : PlanExecutor::YIELD_MANUAL;
- // This is the regular path for when we have a CanonicalQuery.
- std::unique_ptr<CanonicalQuery> cq(parsedDelete->releaseParsedQuery());
+ if (!parsedDelete->hasParsedQuery()) {
+ // This is the idhack fast-path for getting a PlanExecutor without doing the work
+ // to create a CanonicalQuery.
+ const BSONObj& unparsedQuery = request->getQuery();
- PlanStage* rawRoot;
- QuerySolution* rawQuerySolution;
- const size_t defaultPlannerOptions = 0;
- Status status = prepareExecution(txn, collection, ws.get(), cq.get(),
- defaultPlannerOptions, &rawRoot, &rawQuerySolution);
- if (!status.isOK()) {
- return status;
- }
- invariant(rawRoot);
- std::unique_ptr<QuerySolution> querySolution(rawQuerySolution);
- deleteStageParams.canonicalQuery = cq.get();
-
- rawRoot = new DeleteStage(txn, deleteStageParams, ws.get(), collection, rawRoot);
- std::unique_ptr<PlanStage> root(rawRoot);
-
- if (!request->getProj().isEmpty()) {
- invariant(request->shouldReturnDeleted());
-
- const bool allowPositional = true;
- StatusWith<std::unique_ptr<PlanStage>> projStatus = applyProjection(txn,
- nss,
- cq.get(),
- request->getProj(),
- allowPositional,
- ws.get(),
- std::move(root));
- if (!projStatus.isOK()) {
- return projStatus.getStatus();
- }
- root = std::move(projStatus.getValue());
+ if (!collection) {
+ // Treat collections that do not exist as empty collections. Note that the explain
+ // reporting machinery always assumes that the root stage for a delete operation is
+ // a DeleteStage, so in this case we put a DeleteStage on top of an EOFStage.
+ LOG(2) << "Collection " << nss.ns() << " does not exist."
+ << " Using EOF stage: " << unparsedQuery.toString();
+ DeleteStage* deleteStage =
+ new DeleteStage(txn, deleteStageParams, ws.get(), NULL, new EOFStage());
+ return PlanExecutor::make(txn, ws.release(), deleteStage, nss.ns(), policy, execOut);
}
- // We must have a tree of stages in order to have a valid plan executor, but the query
- // solution may be null.
- return PlanExecutor::make(txn,
- ws.release(),
- root.release(),
- querySolution.release(),
- cq.release(),
- collection,
- policy,
- execOut);
- }
+ if (CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
+ collection->getIndexCatalog()->findIdIndex(txn) && request->getProj().isEmpty()) {
+ LOG(2) << "Using idhack: " << unparsedQuery.toString();
- //
- // Update
- //
+ PlanStage* idHackStage =
+ new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws.get());
+ DeleteStage* root =
+ new DeleteStage(txn, deleteStageParams, ws.get(), collection, idHackStage);
+ return PlanExecutor::make(txn, ws.release(), root, collection, policy, execOut);
+ }
- namespace {
-
- // TODO: Make this a function on NamespaceString, or make it cleaner.
- inline void validateUpdate(const char* ns ,
- const BSONObj& updateobj,
- const BSONObj& patternOrig) {
- uassert(10155 , "cannot update reserved $ collection", strchr(ns, '$') == 0);
- if (strstr(ns, ".system.")) {
- /* dm: it's very important that system.indexes is never updated as IndexDetails
- has pointers into it */
- uassert(10156,
- str::stream() << "cannot update system collection: "
- << ns << " q: " << patternOrig << " u: " << updateobj,
- legalClientSystemNS(ns , true));
- }
+ // If we're here then we don't have a parsed query, but we're also not eligible for
+ // the idhack fast path. We need to force canonicalization now.
+ Status cqStatus = parsedDelete->parseQueryToCQ();
+ if (!cqStatus.isOK()) {
+ return cqStatus;
}
+ }
- } // namespace
+ // This is the regular path for when we have a CanonicalQuery.
+ std::unique_ptr<CanonicalQuery> cq(parsedDelete->releaseParsedQuery());
- Status getExecutorUpdate(OperationContext* txn,
- Collection* collection,
- ParsedUpdate* parsedUpdate,
- OpDebug* opDebug,
- PlanExecutor** execOut) {
- const UpdateRequest* request = parsedUpdate->getRequest();
- UpdateDriver* driver = parsedUpdate->getDriver();
+ PlanStage* rawRoot;
+ QuerySolution* rawQuerySolution;
+ const size_t defaultPlannerOptions = 0;
+ Status status = prepareExecution(
+ txn, collection, ws.get(), cq.get(), defaultPlannerOptions, &rawRoot, &rawQuerySolution);
+ if (!status.isOK()) {
+ return status;
+ }
+ invariant(rawRoot);
+ std::unique_ptr<QuerySolution> querySolution(rawQuerySolution);
+ deleteStageParams.canonicalQuery = cq.get();
- const NamespaceString& nsString = request->getNamespaceString();
- UpdateLifecycle* lifecycle = request->getLifecycle();
+ rawRoot = new DeleteStage(txn, deleteStageParams, ws.get(), collection, rawRoot);
+ std::unique_ptr<PlanStage> root(rawRoot);
- validateUpdate(nsString.ns().c_str(), request->getUpdates(), request->getQuery());
+ if (!request->getProj().isEmpty()) {
+ invariant(request->shouldReturnDeleted());
- // If there is no collection and this is an upsert, callers are supposed to create
- // the collection prior to calling this method. Explain, however, will never do
- // collection or database creation.
- if (!collection && request->isUpsert()) {
- invariant(request->isExplain());
+ const bool allowPositional = true;
+ StatusWith<std::unique_ptr<PlanStage>> projStatus = applyProjection(
+ txn, nss, cq.get(), request->getProj(), allowPositional, ws.get(), std::move(root));
+ if (!projStatus.isOK()) {
+ return projStatus.getStatus();
}
+ root = std::move(projStatus.getValue());
+ }
- // TODO: This seems a bit circuitious.
- opDebug->updateobj = request->getUpdates();
+ // We must have a tree of stages in order to have a valid plan executor, but the query
+ // solution may be null.
+ return PlanExecutor::make(txn,
+ ws.release(),
+ root.release(),
+ querySolution.release(),
+ cq.release(),
+ collection,
+ policy,
+ execOut);
+}
+
+//
+// Update
+//
- // If this is a user-issued update, then we want to return an error: you cannot perform
- // writes on a secondary. If this is an update to a secondary from the replication system,
- // however, then we make an exception and let the write proceed. In this case,
- // shouldCallLogOp() will be false.
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString);
+namespace {
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while performing update on "
- << nsString.ns());
- }
+// TODO: Make this a function on NamespaceString, or make it cleaner.
+inline void validateUpdate(const char* ns, const BSONObj& updateobj, const BSONObj& patternOrig) {
+ uassert(10155, "cannot update reserved $ collection", strchr(ns, '$') == 0);
+ if (strstr(ns, ".system.")) {
+ /* dm: it's very important that system.indexes is never updated as IndexDetails
+ has pointers into it */
+ uassert(10156,
+ str::stream() << "cannot update system collection: " << ns << " q: " << patternOrig
+ << " u: " << updateobj,
+ legalClientSystemNS(ns, true));
+ }
+}
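validateUpdate enforces two namespace rules: any '$' in the namespace is rejected outright, and '.system.' namespaces are only writable when they appear on the legal client-system list. A compact stand-alone sketch of those rules; updateAllowed and isWhitelistedSystemNS are hypothetical stand-ins for the uassert-based checks and legalClientSystemNS(), and the whitelist here is deliberately tiny.

#include <iostream>
#include <string>

// Hypothetical whitelist standing in for legalClientSystemNS(); the real list is larger.
bool isWhitelistedSystemNS(const std::string& ns) {
    return ns.find(".system.users") != std::string::npos;
}

// Mirrors the shape of validateUpdate: no '$' namespaces, and ".system." only if whitelisted.
bool updateAllowed(const std::string& ns) {
    if (ns.find('$') != std::string::npos) {
        return false;
    }
    if (ns.find(".system.") != std::string::npos) {
        return isWhitelistedSystemNS(ns);
    }
    return true;
}

int main() {
    std::cout << std::boolalpha << updateAllowed("db.coll") << ' '      // true
              << updateAllowed("db.$cmd") << ' '                        // false
              << updateAllowed("db.system.indexes") << '\n';            // false
    return 0;
}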
- if (lifecycle) {
- lifecycle->setCollection(collection);
- driver->refreshIndexKeys(lifecycle->getIndexKeys(txn));
- }
+} // namespace
- PlanExecutor::YieldPolicy policy = parsedUpdate->canYield() ? PlanExecutor::YIELD_AUTO :
- PlanExecutor::YIELD_MANUAL;
-
- unique_ptr<WorkingSet> ws(new WorkingSet());
- UpdateStageParams updateStageParams(request, driver, opDebug);
-
- if (!parsedUpdate->hasParsedQuery()) {
- // This is the idhack fast-path for getting a PlanExecutor without doing the work
- // to create a CanonicalQuery.
- const BSONObj& unparsedQuery = request->getQuery();
-
- if (!collection) {
- // Treat collections that do not exist as empty collections. Note that the explain
- // reporting machinery always assumes that the root stage for an update operation is
- // an UpdateStage, so in this case we put an UpdateStage on top of an EOFStage.
- LOG(2) << "Collection " << nsString.ns() << " does not exist."
- << " Using EOF stage: " << unparsedQuery.toString();
- UpdateStage* updateStage = new UpdateStage(txn, updateStageParams, ws.get(),
- collection, new EOFStage());
- return PlanExecutor::make(txn, ws.release(), updateStage, nsString.ns(),
- policy, execOut);
- }
+Status getExecutorUpdate(OperationContext* txn,
+ Collection* collection,
+ ParsedUpdate* parsedUpdate,
+ OpDebug* opDebug,
+ PlanExecutor** execOut) {
+ const UpdateRequest* request = parsedUpdate->getRequest();
+ UpdateDriver* driver = parsedUpdate->getDriver();
- if (CanonicalQuery::isSimpleIdQuery(unparsedQuery)
- && collection->getIndexCatalog()->findIdIndex(txn)
- && request->getProj().isEmpty()) {
+ const NamespaceString& nsString = request->getNamespaceString();
+ UpdateLifecycle* lifecycle = request->getLifecycle();
- LOG(2) << "Using idhack: " << unparsedQuery.toString();
+ validateUpdate(nsString.ns().c_str(), request->getUpdates(), request->getQuery());
- PlanStage* idHackStage = new IDHackStage(txn,
- collection,
- unparsedQuery["_id"].wrap(),
- ws.get());
- UpdateStage* root = new UpdateStage(txn, updateStageParams, ws.get(), collection,
- idHackStage);
- return PlanExecutor::make(txn, ws.release(), root, collection, policy, execOut);
- }
+ // If there is no collection and this is an upsert, callers are supposed to create
+ // the collection prior to calling this method. Explain, however, will never do
+ // collection or database creation.
+ if (!collection && request->isUpsert()) {
+ invariant(request->isExplain());
+ }
- // If we're here then we don't have a parsed query, but we're also not eligible for
- // the idhack fast path. We need to force canonicalization now.
- Status cqStatus = parsedUpdate->parseQueryToCQ();
- if (!cqStatus.isOK()) {
- return cqStatus;
- }
- }
+    // TODO: This seems a bit circuitous.
+ opDebug->updateobj = request->getUpdates();
- // This is the regular path for when we have a CanonicalQuery.
- std::unique_ptr<CanonicalQuery> cq(parsedUpdate->releaseParsedQuery());
+ // If this is a user-issued update, then we want to return an error: you cannot perform
+ // writes on a secondary. If this is an update to a secondary from the replication system,
+ // however, then we make an exception and let the write proceed. In this case,
+ // shouldCallLogOp() will be false.
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString);
- PlanStage* rawRoot;
- QuerySolution* rawQuerySolution;
- const size_t defaultPlannerOptions = 0;
- Status status = prepareExecution(txn, collection, ws.get(), cq.get(),
- defaultPlannerOptions, &rawRoot, &rawQuerySolution);
- if (!status.isOK()) {
- return status;
- }
- invariant(rawRoot);
- std::unique_ptr<QuerySolution> querySolution(rawQuerySolution);
- updateStageParams.canonicalQuery = cq.get();
-
- rawRoot = new UpdateStage(txn, updateStageParams, ws.get(), collection, rawRoot);
- std::unique_ptr<PlanStage> root(rawRoot);
-
- if (!request->getProj().isEmpty()) {
- invariant(request->shouldReturnAnyDocs());
-
- // If the plan stage is to return the newly-updated version of the documents, then it
- // is invalid to use a positional projection because the query expression need not
- // match the array element after the update has been applied.
- const bool allowPositional = request->shouldReturnOldDocs();
- StatusWith<std::unique_ptr<PlanStage>> projStatus = applyProjection(txn,
- nsString,
- cq.get(),
- request->getProj(),
- allowPositional,
- ws.get(),
- std::move(root));
- if (!projStatus.isOK()) {
- return projStatus.getStatus();
- }
- root = std::move(projStatus.getValue());
- }
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while performing update on " << nsString.ns());
+ }
- // We must have a tree of stages in order to have a valid plan executor, but the query
- // solution may be null. Takes ownership of all args other than 'collection' and 'txn'
- return PlanExecutor::make(txn,
- ws.release(),
- root.release(),
- querySolution.release(),
- cq.release(),
- collection,
- policy,
- execOut);
+ if (lifecycle) {
+ lifecycle->setCollection(collection);
+ driver->refreshIndexKeys(lifecycle->getIndexKeys(txn));
}
- //
- // Group
- //
+ PlanExecutor::YieldPolicy policy =
+ parsedUpdate->canYield() ? PlanExecutor::YIELD_AUTO : PlanExecutor::YIELD_MANUAL;
- Status getExecutorGroup(OperationContext* txn,
- Collection* collection,
- const GroupRequest& request,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** execOut) {
- if (!globalScriptEngine) {
- return Status(ErrorCodes::BadValue, "server-side JavaScript execution is disabled");
- }
+ unique_ptr<WorkingSet> ws(new WorkingSet());
+ UpdateStageParams updateStageParams(request, driver, opDebug);
- unique_ptr<WorkingSet> ws(new WorkingSet());
- PlanStage* root;
- QuerySolution* querySolution;
+ if (!parsedUpdate->hasParsedQuery()) {
+ // This is the idhack fast-path for getting a PlanExecutor without doing the work
+ // to create a CanonicalQuery.
+ const BSONObj& unparsedQuery = request->getQuery();
if (!collection) {
- // Treat collections that do not exist as empty collections. Note that the explain
- // reporting machinery always assumes that the root stage for a group operation is a
- // GroupStage, so in this case we put a GroupStage on top of an EOFStage.
- root = new GroupStage(txn, request, ws.get(), new EOFStage());
- return PlanExecutor::make(txn, ws.release(), root, request.ns, yieldPolicy, execOut);
+ // Treat collections that do not exist as empty collections. Note that the explain
+ // reporting machinery always assumes that the root stage for an update operation is
+ // an UpdateStage, so in this case we put an UpdateStage on top of an EOFStage.
+ LOG(2) << "Collection " << nsString.ns() << " does not exist."
+ << " Using EOF stage: " << unparsedQuery.toString();
+ UpdateStage* updateStage =
+ new UpdateStage(txn, updateStageParams, ws.get(), collection, new EOFStage());
+ return PlanExecutor::make(
+ txn, ws.release(), updateStage, nsString.ns(), policy, execOut);
}
- const NamespaceString nss(request.ns);
- const WhereCallbackReal whereCallback(txn, nss.db());
- CanonicalQuery* rawCanonicalQuery;
- Status canonicalizeStatus = CanonicalQuery::canonicalize(request.ns,
- request.query,
- request.explain,
- &rawCanonicalQuery,
- whereCallback);
- if (!canonicalizeStatus.isOK()) {
- return canonicalizeStatus;
- }
- unique_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);
+ if (CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
+ collection->getIndexCatalog()->findIdIndex(txn) && request->getProj().isEmpty()) {
+ LOG(2) << "Using idhack: " << unparsedQuery.toString();
- const size_t defaultPlannerOptions = 0;
- Status status = prepareExecution(txn, collection, ws.get(), canonicalQuery.get(),
- defaultPlannerOptions, &root, &querySolution);
- if (!status.isOK()) {
- return status;
+ PlanStage* idHackStage =
+ new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws.get());
+ UpdateStage* root =
+ new UpdateStage(txn, updateStageParams, ws.get(), collection, idHackStage);
+ return PlanExecutor::make(txn, ws.release(), root, collection, policy, execOut);
}
- invariant(root);
- root = new GroupStage(txn, request, ws.get(), root);
- // We must have a tree of stages in order to have a valid plan executor, but the query
- // solution may be null. Takes ownership of all args other than 'collection'.
- return PlanExecutor::make(txn,
- ws.release(),
- root,
- querySolution,
- canonicalQuery.release(),
- collection,
- yieldPolicy,
- execOut);
+ // If we're here then we don't have a parsed query, but we're also not eligible for
+ // the idhack fast path. We need to force canonicalization now.
+ Status cqStatus = parsedUpdate->parseQueryToCQ();
+ if (!cqStatus.isOK()) {
+ return cqStatus;
+ }
}
- //
- // Count hack
- //
+ // This is the regular path for when we have a CanonicalQuery.
+ std::unique_ptr<CanonicalQuery> cq(parsedUpdate->releaseParsedQuery());
- namespace {
+ PlanStage* rawRoot;
+ QuerySolution* rawQuerySolution;
+ const size_t defaultPlannerOptions = 0;
+ Status status = prepareExecution(
+ txn, collection, ws.get(), cq.get(), defaultPlannerOptions, &rawRoot, &rawQuerySolution);
+ if (!status.isOK()) {
+ return status;
+ }
+ invariant(rawRoot);
+ std::unique_ptr<QuerySolution> querySolution(rawQuerySolution);
+ updateStageParams.canonicalQuery = cq.get();
+
+ rawRoot = new UpdateStage(txn, updateStageParams, ws.get(), collection, rawRoot);
+ std::unique_ptr<PlanStage> root(rawRoot);
+
+ if (!request->getProj().isEmpty()) {
+ invariant(request->shouldReturnAnyDocs());
+
+ // If the plan stage is to return the newly-updated version of the documents, then it
+ // is invalid to use a positional projection because the query expression need not
+ // match the array element after the update has been applied.
+ const bool allowPositional = request->shouldReturnOldDocs();
+ StatusWith<std::unique_ptr<PlanStage>> projStatus = applyProjection(txn,
+ nsString,
+ cq.get(),
+ request->getProj(),
+ allowPositional,
+ ws.get(),
+ std::move(root));
+ if (!projStatus.isOK()) {
+ return projStatus.getStatus();
+ }
+ root = std::move(projStatus.getValue());
+ }
- /**
- * Returns 'true' if the provided solution 'soln' can be rewritten to use
- * a fast counting stage. Mutates the tree in 'soln->root'.
- *
- * Otherwise, returns 'false'.
- */
- bool turnIxscanIntoCount(QuerySolution* soln) {
- QuerySolutionNode* root = soln->root.get();
+ // We must have a tree of stages in order to have a valid plan executor, but the query
+ // solution may be null. Takes ownership of all args other than 'collection' and 'txn'
+ return PlanExecutor::make(txn,
+ ws.release(),
+ root.release(),
+ querySolution.release(),
+ cq.release(),
+ collection,
+ policy,
+ execOut);
+}
+
+//
+// Group
+//
+
+Status getExecutorGroup(OperationContext* txn,
+ Collection* collection,
+ const GroupRequest& request,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** execOut) {
+ if (!globalScriptEngine) {
+ return Status(ErrorCodes::BadValue, "server-side JavaScript execution is disabled");
+ }
- // Root should be a fetch w/o any filters.
- if (STAGE_FETCH != root->getType()) {
- return false;
- }
+ unique_ptr<WorkingSet> ws(new WorkingSet());
+ PlanStage* root;
+ QuerySolution* querySolution;
- if (NULL != root->filter.get()) {
- return false;
- }
+ if (!collection) {
+ // Treat collections that do not exist as empty collections. Note that the explain
+ // reporting machinery always assumes that the root stage for a group operation is a
+ // GroupStage, so in this case we put a GroupStage on top of an EOFStage.
+ root = new GroupStage(txn, request, ws.get(), new EOFStage());
+ return PlanExecutor::make(txn, ws.release(), root, request.ns, yieldPolicy, execOut);
+ }
- // Child should be an ixscan.
- if (STAGE_IXSCAN != root->children[0]->getType()) {
- return false;
- }
+ const NamespaceString nss(request.ns);
+ const WhereCallbackReal whereCallback(txn, nss.db());
+ CanonicalQuery* rawCanonicalQuery;
+ Status canonicalizeStatus = CanonicalQuery::canonicalize(
+ request.ns, request.query, request.explain, &rawCanonicalQuery, whereCallback);
+ if (!canonicalizeStatus.isOK()) {
+ return canonicalizeStatus;
+ }
+ unique_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);
+
+ const size_t defaultPlannerOptions = 0;
+ Status status = prepareExecution(txn,
+ collection,
+ ws.get(),
+ canonicalQuery.get(),
+ defaultPlannerOptions,
+ &root,
+ &querySolution);
+ if (!status.isOK()) {
+ return status;
+ }
+ invariant(root);
+
+ root = new GroupStage(txn, request, ws.get(), root);
+ // We must have a tree of stages in order to have a valid plan executor, but the query
+ // solution may be null. Takes ownership of all args other than 'collection'.
+ return PlanExecutor::make(txn,
+ ws.release(),
+ root,
+ querySolution,
+ canonicalQuery.release(),
+ collection,
+ yieldPolicy,
+ execOut);
+}
+
+//
+// Count hack
+//
- IndexScanNode* isn = static_cast<IndexScanNode*>(root->children[0]);
+namespace {
- // No filters allowed and side-stepping isSimpleRange for now. TODO: do we ever see
- // isSimpleRange here? because we could well use it. I just don't think we ever do see
- // it.
+/**
+ * Returns 'true' if the provided solution 'soln' can be rewritten to use
+ * a fast counting stage. Mutates the tree in 'soln->root'.
+ *
+ * Otherwise, returns 'false'.
+ */
+bool turnIxscanIntoCount(QuerySolution* soln) {
+ QuerySolutionNode* root = soln->root.get();
- if (NULL != isn->filter.get() || isn->bounds.isSimpleRange) {
- return false;
- }
+ // Root should be a fetch w/o any filters.
+ if (STAGE_FETCH != root->getType()) {
+ return false;
+ }
- // Make sure the bounds are OK.
- BSONObj startKey;
- bool startKeyInclusive;
- BSONObj endKey;
- bool endKeyInclusive;
-
- if (!IndexBoundsBuilder::isSingleInterval( isn->bounds,
- &startKey,
- &startKeyInclusive,
- &endKey,
- &endKeyInclusive )) {
- return false;
- }
+ if (NULL != root->filter.get()) {
+ return false;
+ }
- // Make the count node that we replace the fetch + ixscan with.
- CountNode* cn = new CountNode();
- cn->indexKeyPattern = isn->indexKeyPattern;
- cn->startKey = startKey;
- cn->startKeyInclusive = startKeyInclusive;
- cn->endKey = endKey;
- cn->endKeyInclusive = endKeyInclusive;
- // Takes ownership of 'cn' and deletes the old root.
- soln->root.reset(cn);
- return true;
- }
+ // Child should be an ixscan.
+ if (STAGE_IXSCAN != root->children[0]->getType()) {
+ return false;
+ }
- /**
- * Returns true if indices contains an index that can be
- * used with DistinctNode. Sets indexOut to the array index
- * of PlannerParams::indices.
- * Look for the index for the fewest fields.
- * Criteria for suitable index is that the index cannot be special
- * (geo, hashed, text, ...).
- *
- * Multikey indices are not suitable for DistinctNode when the projection
- * is on an array element. Arrays are flattened in a multikey index which
- * makes it impossible for the distinct scan stage (plan stage generated from
- * DistinctNode) to select the requested element by array index.
- *
- * Multikey indices cannot be used for the fast distinct hack if the field is dotted.
- * Currently the solution generated for the distinct hack includes a projection stage and
- * the projection stage cannot be covered with a dotted field.
- */
- bool getDistinctNodeIndex(const std::vector<IndexEntry>& indices,
- const std::string& field, size_t* indexOut) {
- invariant(indexOut);
- bool isDottedField = str::contains(field, '.');
- int minFields = std::numeric_limits<int>::max();
- for (size_t i = 0; i < indices.size(); ++i) {
- // Skip special indices.
- if (!IndexNames::findPluginName(indices[i].keyPattern).empty()) {
- continue;
- }
- // Skip multikey indices if we are projecting on a dotted field.
- if (indices[i].multikey && isDottedField) {
- continue;
- }
- int nFields = indices[i].keyPattern.nFields();
- // Pick the index with the lowest number of fields.
- if (nFields < minFields) {
- minFields = nFields;
- *indexOut = i;
- }
- }
- return minFields != std::numeric_limits<int>::max();
- }
+ IndexScanNode* isn = static_cast<IndexScanNode*>(root->children[0]);
- /**
- * Checks dotted field for a projection and truncates the
- * field name if we could be projecting on an array element.
- * Sets 'isIDOut' to true if the projection is on a sub document of _id.
- * For example, _id.a.2, _id.b.c.
- */
- std::string getProjectedDottedField(const std::string& field, bool* isIDOut) {
- // Check if field contains an array index.
- std::vector<std::string> res;
- mongo::splitStringDelim(field, &res, '.');
-
- // Since we could exit early from the loop,
- // we should check _id here and set '*isIDOut' accordingly.
- *isIDOut = ("_id" == res[0]);
-
- // Skip the first dotted component. If the field starts
- // with a number, the number cannot be an array index.
- int arrayIndex = 0;
- for (size_t i = 1; i < res.size(); ++i) {
- if (mongo::parseNumberFromStringWithBase(res[i], 10, &arrayIndex).isOK()) {
- // Array indices cannot be negative numbers (this is not $slice).
- // Negative numbers are allowed as field names.
- if (arrayIndex >= 0) {
- // Generate prefix of field up to (but not including) array index.
- std::vector<std::string> prefixStrings(res);
- prefixStrings.resize(i);
- // Reset projectedField. Instead of overwriting, joinStringDelim() appends joined string
- // to the end of projectedField.
- std::string projectedField;
- mongo::joinStringDelim(prefixStrings, &projectedField, '.');
- return projectedField;
- }
- }
- }
+    // No filters allowed, and we side-step isSimpleRange for now. TODO: do we ever see
+    // isSimpleRange here? We could make use of it, but it doesn't seem to come up in practice.
- return field;
- }
+ if (NULL != isn->filter.get() || isn->bounds.isSimpleRange) {
+ return false;
+ }
- /**
- * Creates a projection spec for a distinct command from the requested field.
- * In most cases, the projection spec will be {_id: 0, key: 1}.
- * The exceptions are:
- * 1) When the requested field is '_id', the projection spec will {_id: 1}.
- * 2) When the requested field could be an array element (eg. a.0),
- * the projected field will be the prefix of the field up to the array element.
- * For example, a.b.2 => {_id: 0, 'a.b': 1}
- * Note that we can't use a $slice projection because the distinct command filters
- * the results from the executor using the dotted field name. Using $slice will
- * re-order the documents in the array in the results.
- */
- BSONObj getDistinctProjection(const std::string& field) {
- std::string projectedField(field);
-
- bool isID = false;
- if ("_id" == field) {
- isID = true;
- }
- else if (str::contains(field, '.')) {
- projectedField = getProjectedDottedField(field, &isID);
- }
- BSONObjBuilder bob;
- if (!isID) {
- bob.append("_id", 0);
- }
- bob.append(projectedField, 1);
- return bob.obj();
- }
+ // Make sure the bounds are OK.
+ BSONObj startKey;
+ bool startKeyInclusive;
+ BSONObj endKey;
+ bool endKeyInclusive;
- } // namespace
+ if (!IndexBoundsBuilder::isSingleInterval(
+ isn->bounds, &startKey, &startKeyInclusive, &endKey, &endKeyInclusive)) {
+ return false;
+ }
- Status getExecutorCount(OperationContext* txn,
- Collection* collection,
- const CountRequest& request,
- bool explain,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** execOut) {
+ // Make the count node that we replace the fetch + ixscan with.
+ CountNode* cn = new CountNode();
+ cn->indexKeyPattern = isn->indexKeyPattern;
+ cn->startKey = startKey;
+ cn->startKeyInclusive = startKeyInclusive;
+ cn->endKey = endKey;
+ cn->endKeyInclusive = endKeyInclusive;
+ // Takes ownership of 'cn' and deletes the old root.
+ soln->root.reset(cn);
+ return true;
+}
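
To make the rewrite concrete, here is a small self-contained model of the same check-and-replace logic, using simplified stand-in types (Node, Stage) rather than the real QuerySolutionNode hierarchy: an unfiltered FETCH over an unfiltered, single-interval IXSCAN collapses into one count node that records only the key range.

    // Minimal, self-contained sketch of the count rewrite (stand-in types, assumptions).
    #include <memory>
    #include <string>
    #include <vector>

    enum class Stage { Fetch, Ixscan, Count };

    struct Node {
        Stage type;
        bool hasFilter = false;
        bool isSingleInterval = false;     // stands in for IndexBoundsBuilder::isSingleInterval()
        std::string startKey, endKey;      // stands in for the BSONObj interval endpoints
        std::vector<std::unique_ptr<Node>> children;
    };

    // Mirrors turnIxscanIntoCount(): mutate 'root' into a count node when the plan is an
    // unfiltered FETCH over an unfiltered, single-interval IXSCAN.
    bool turnIxscanIntoCountSketch(std::unique_ptr<Node>& root) {
        if (root->type != Stage::Fetch || root->hasFilter || root->children.size() != 1)
            return false;
        Node* scan = root->children[0].get();
        if (scan->type != Stage::Ixscan || scan->hasFilter || !scan->isSingleInterval)
            return false;

        auto count = std::make_unique<Node>();
        count->type = Stage::Count;
        count->startKey = scan->startKey;  // keep only the key range; the fetch is dropped
        count->endKey = scan->endKey;
        root = std::move(count);           // replaces fetch + ixscan, as soln->root.reset(cn) does
        return true;
    }
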
- unique_ptr<WorkingSet> ws(new WorkingSet());
- PlanStage* root;
- QuerySolution* querySolution;
-
- // If collection exists and the query is empty, no additional canonicalization is needed.
- // If the query is empty, then we can determine the count by just asking the collection
- // for its number of records. This is implemented by the CountStage, and we don't need
- // to create a child for the count stage in this case.
- //
- // If there is a hint, then we can't use a trival count plan as described above.
- if (collection && request.getQuery().isEmpty() && request.getHint().isEmpty()) {
- root = new CountStage(txn, collection, request, ws.get(), NULL);
- return PlanExecutor::make(txn,
- ws.release(),
- root,
- request.getNs(),
- yieldPolicy,
- execOut);
+/**
+ * Returns true if indices contains an index that can be
+ * used with DistinctNode. Sets indexOut to the array index
+ * of PlannerParams::indices.
+ * Look for the index for the fewest fields.
+ * Criteria for suitable index is that the index cannot be special
+ * (geo, hashed, text, ...).
+ *
+ * Multikey indices are not suitable for DistinctNode when the projection
+ * is on an array element. Arrays are flattened in a multikey index which
+ * makes it impossible for the distinct scan stage (plan stage generated from
+ * DistinctNode) to select the requested element by array index.
+ *
+ * Multikey indices cannot be used for the fast distinct hack if the field is dotted.
+ * Currently the solution generated for the distinct hack includes a projection stage and
+ * the projection stage cannot be covered with a dotted field.
+ */
+bool getDistinctNodeIndex(const std::vector<IndexEntry>& indices,
+ const std::string& field,
+ size_t* indexOut) {
+ invariant(indexOut);
+ bool isDottedField = str::contains(field, '.');
+ int minFields = std::numeric_limits<int>::max();
+ for (size_t i = 0; i < indices.size(); ++i) {
+ // Skip special indices.
+ if (!IndexNames::findPluginName(indices[i].keyPattern).empty()) {
+ continue;
}
-
- unique_ptr<CanonicalQuery> cq;
- if (!request.getQuery().isEmpty() || !request.getHint().isEmpty()) {
- // If query or hint is not empty, canonicalize the query before working with collection.
- typedef MatchExpressionParser::WhereCallback WhereCallback;
- CanonicalQuery* rawCq = NULL;
- Status canonStatus = CanonicalQuery::canonicalize(
- request.getNs(),
- request.getQuery(),
- BSONObj(), // sort
- BSONObj(), // projection
- 0, // skip
- 0, // limit
- request.getHint(),
- BSONObj(), // min
- BSONObj(), // max
- false, // snapshot
- explain,
- &rawCq,
- collection ?
- static_cast<const WhereCallback&>(WhereCallbackReal(txn,
- collection->ns().db())) :
- static_cast<const WhereCallback&>(WhereCallbackNoop()));
- if (!canonStatus.isOK()) {
- return canonStatus;
- }
- cq.reset(rawCq);
+ // Skip multikey indices if we are projecting on a dotted field.
+ if (indices[i].multikey && isDottedField) {
+ continue;
+ }
+ int nFields = indices[i].keyPattern.nFields();
+ // Pick the index with the lowest number of fields.
+ if (nFields < minFields) {
+ minFields = nFields;
+ *indexOut = i;
}
+ }
+ return minFields != std::numeric_limits<int>::max();
+}
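
A standalone sketch of that selection rule is below; IndexInfo and its flags are assumptions standing in for IndexEntry and the IndexNames::findPluginName() check.

    // Pick the non-special index with the fewest fields, skipping multikey indexes when
    // the distinct field is dotted (simplified stand-alone model of getDistinctNodeIndex()).
    #include <cstddef>
    #include <limits>
    #include <vector>

    struct IndexInfo {
        int numFields;
        bool special;    // geo, hashed, text, ... (real code inspects the key pattern's plugin)
        bool multikey;
    };

    bool pickDistinctIndex(const std::vector<IndexInfo>& indices, bool fieldIsDotted, size_t* out) {
        int minFields = std::numeric_limits<int>::max();
        for (size_t i = 0; i < indices.size(); ++i) {
            if (indices[i].special)
                continue;                       // special indexes can't back a distinct scan
            if (indices[i].multikey && fieldIsDotted)
                continue;                       // array flattening breaks dotted projections
            if (indices[i].numFields < minFields) {
                minFields = indices[i].numFields;
                *out = i;                       // prefer the index with the fewest fields
            }
        }
        return minFields != std::numeric_limits<int>::max();
    }
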
- if (!collection) {
- // Treat collections that do not exist as empty collections. Note that the explain
- // reporting machinery always assumes that the root stage for a count operation is
- // a CountStage, so in this case we put a CountStage on top of an EOFStage.
- root = new CountStage(txn, collection, request, ws.get(), new EOFStage());
- return PlanExecutor::make(txn,
- ws.release(),
- root,
- request.getNs(),
- yieldPolicy,
- execOut);
+/**
+ * Checks dotted field for a projection and truncates the
+ * field name if we could be projecting on an array element.
+ * Sets 'isIDOut' to true if the projection is on a sub document of _id.
+ * For example, _id.a.2, _id.b.c.
+ */
+std::string getProjectedDottedField(const std::string& field, bool* isIDOut) {
+ // Check if field contains an array index.
+ std::vector<std::string> res;
+ mongo::splitStringDelim(field, &res, '.');
+
+ // Since we could exit early from the loop,
+ // we should check _id here and set '*isIDOut' accordingly.
+ *isIDOut = ("_id" == res[0]);
+
+ // Skip the first dotted component. If the field starts
+ // with a number, the number cannot be an array index.
+ int arrayIndex = 0;
+ for (size_t i = 1; i < res.size(); ++i) {
+ if (mongo::parseNumberFromStringWithBase(res[i], 10, &arrayIndex).isOK()) {
+ // Array indices cannot be negative numbers (this is not $slice).
+ // Negative numbers are allowed as field names.
+ if (arrayIndex >= 0) {
+ // Generate prefix of field up to (but not including) array index.
+ std::vector<std::string> prefixStrings(res);
+ prefixStrings.resize(i);
+ // Reset projectedField. Instead of overwriting, joinStringDelim() appends joined string
+ // to the end of projectedField.
+ std::string projectedField;
+ mongo::joinStringDelim(prefixStrings, &projectedField, '.');
+ return projectedField;
+ }
}
+ }
- invariant(cq.get());
+ return field;
+}
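
The truncation rule can be illustrated with a self-contained sketch; an all-digits check stands in for parseNumberFromStringWithBase, and the splitting and joining use plain std::string rather than the mongo string utilities.

    // Split on '.', and if any component after the first looks like a non-negative array
    // index, keep only the prefix before it (e.g. "a.b.2" -> "a.b"). Standalone sketch.
    #include <string>
    #include <vector>

    std::string projectedDottedField(const std::string& field, bool* isID) {
        std::vector<std::string> parts;
        std::string cur;
        for (char c : field) {
            if (c == '.') { parts.push_back(cur); cur.clear(); } else { cur += c; }
        }
        parts.push_back(cur);

        *isID = !parts.empty() && parts[0] == "_id";

        for (size_t i = 1; i < parts.size(); ++i) {
            // An all-digits component could be an array index; negative numbers stay as names.
            if (!parts[i].empty() &&
                parts[i].find_first_not_of("0123456789") == std::string::npos) {
                std::string prefix = parts[0];
                for (size_t j = 1; j < i; ++j) prefix += "." + parts[j];
                return prefix;
            }
        }
        return field;   // no array index found; project the field as given
    }
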
- const size_t plannerOptions = QueryPlannerParams::PRIVATE_IS_COUNT;
- Status prepStatus = prepareExecution(txn, collection, ws.get(), cq.get(), plannerOptions,
- &root, &querySolution);
- if (!prepStatus.isOK()) {
- return prepStatus;
- }
- invariant(root);
-
- // Make a CountStage to be the new root.
- root = new CountStage(txn, collection, request, ws.get(), root);
- // We must have a tree of stages in order to have a valid plan executor, but the query
- // solution may be NULL. Takes ownership of all args other than 'collection' and 'txn'
- return PlanExecutor::make(txn,
- ws.release(),
- root,
- querySolution,
- cq.release(),
- collection,
- yieldPolicy,
- execOut);
+/**
+ * Creates a projection spec for a distinct command from the requested field.
+ * In most cases, the projection spec will be {_id: 0, key: 1}.
+ * The exceptions are:
+ * 1) When the requested field is '_id', the projection spec will be {_id: 1}.
+ * 2) When the requested field could be an array element (eg. a.0),
+ * the projected field will be the prefix of the field up to the array element.
+ * For example, a.b.2 => {_id: 0, 'a.b': 1}
+ * Note that we can't use a $slice projection because the distinct command filters
+ * the results from the executor using the dotted field name. Using $slice will
+ * re-order the documents in the array in the results.
+ */
+BSONObj getDistinctProjection(const std::string& field) {
+ std::string projectedField(field);
+
+ bool isID = false;
+ if ("_id" == field) {
+ isID = true;
+ } else if (str::contains(field, '.')) {
+ projectedField = getProjectedDottedField(field, &isID);
+ }
+ BSONObjBuilder bob;
+ if (!isID) {
+ bob.append("_id", 0);
}
+ bob.append(projectedField, 1);
+ return bob.obj();
+}
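
For illustration, here is a standalone sketch of the resulting projection shapes, using a plain string as a stand-in for the BSONObj that the real code builds.

    // Illustrative only: shows the {_id: 0/1, field: 1} shapes described above.
    #include <iostream>
    #include <string>

    std::string distinctProjection(const std::string& projectedField, bool isID) {
        std::string spec = "{ ";
        if (!isID)
            spec += "_id: 0, ";                 // suppress _id unless we are projecting it
        spec += "'" + projectedField + "': 1 }";
        return spec;
    }

    int main() {
        std::cout << distinctProjection("b", false) << "\n";    // { _id: 0, 'b': 1 }
        std::cout << distinctProjection("_id", true) << "\n";   // { '_id': 1 }
        std::cout << distinctProjection("a.b", false) << "\n";  // { _id: 0, 'a.b': 1 } for "a.b.2"
        return 0;
    }
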
+} // namespace
+
+Status getExecutorCount(OperationContext* txn,
+ Collection* collection,
+ const CountRequest& request,
+ bool explain,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** execOut) {
+ unique_ptr<WorkingSet> ws(new WorkingSet());
+ PlanStage* root;
+ QuerySolution* querySolution;
+
+ // If collection exists and the query is empty, no additional canonicalization is needed.
+ // If the query is empty, then we can determine the count by just asking the collection
+ // for its number of records. This is implemented by the CountStage, and we don't need
+ // to create a child for the count stage in this case.
//
- // Distinct hack
- //
+    // If there is a hint, then we can't use a trivial count plan as described above.
+ if (collection && request.getQuery().isEmpty() && request.getHint().isEmpty()) {
+ root = new CountStage(txn, collection, request, ws.get(), NULL);
+ return PlanExecutor::make(txn, ws.release(), root, request.getNs(), yieldPolicy, execOut);
+ }
- bool turnIxscanIntoDistinctIxscan(QuerySolution* soln, const string& field) {
- QuerySolutionNode* root = soln->root.get();
+ unique_ptr<CanonicalQuery> cq;
+ if (!request.getQuery().isEmpty() || !request.getHint().isEmpty()) {
+ // If query or hint is not empty, canonicalize the query before working with collection.
+ typedef MatchExpressionParser::WhereCallback WhereCallback;
+ CanonicalQuery* rawCq = NULL;
+ Status canonStatus = CanonicalQuery::canonicalize(
+ request.getNs(),
+ request.getQuery(),
+ BSONObj(), // sort
+ BSONObj(), // projection
+ 0, // skip
+ 0, // limit
+ request.getHint(),
+ BSONObj(), // min
+ BSONObj(), // max
+ false, // snapshot
+ explain,
+ &rawCq,
+ collection
+ ? static_cast<const WhereCallback&>(WhereCallbackReal(txn, collection->ns().db()))
+ : static_cast<const WhereCallback&>(WhereCallbackNoop()));
+ if (!canonStatus.isOK()) {
+ return canonStatus;
+ }
+ cq.reset(rawCq);
+ }
- // We're looking for a project on top of an ixscan.
- if (STAGE_PROJECTION == root->getType() && (STAGE_IXSCAN == root->children[0]->getType())) {
- IndexScanNode* isn = static_cast<IndexScanNode*>(root->children[0]);
+ if (!collection) {
+ // Treat collections that do not exist as empty collections. Note that the explain
+ // reporting machinery always assumes that the root stage for a count operation is
+ // a CountStage, so in this case we put a CountStage on top of an EOFStage.
+ root = new CountStage(txn, collection, request, ws.get(), new EOFStage());
+ return PlanExecutor::make(txn, ws.release(), root, request.getNs(), yieldPolicy, execOut);
+ }
- // An additional filter must be applied to the data in the key, so we can't just skip
- // all the keys with a given value; we must examine every one to find the one that (may)
- // pass the filter.
- if (NULL != isn->filter.get()) {
- return false;
- }
+ invariant(cq.get());
- // We only set this when we have special query modifiers (.max() or .min()) or other
- // special cases. Don't want to handle the interactions between those and distinct.
- // Don't think this will ever really be true but if it somehow is, just ignore this
- // soln.
- if (isn->bounds.isSimpleRange) {
- return false;
- }
+ const size_t plannerOptions = QueryPlannerParams::PRIVATE_IS_COUNT;
+ Status prepStatus = prepareExecution(
+ txn, collection, ws.get(), cq.get(), plannerOptions, &root, &querySolution);
+ if (!prepStatus.isOK()) {
+ return prepStatus;
+ }
+ invariant(root);
+
+ // Make a CountStage to be the new root.
+ root = new CountStage(txn, collection, request, ws.get(), root);
+ // We must have a tree of stages in order to have a valid plan executor, but the query
+    // solution may be NULL. Takes ownership of all args other than 'collection' and 'txn'.
+ return PlanExecutor::make(
+ txn, ws.release(), root, querySolution, cq.release(), collection, yieldPolicy, execOut);
+}
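
The three count paths above can be summarized in a small decision sketch; CountReq, CountPlan, and chooseCountPlan are illustrative stand-ins rather than MongoDB types, and empty strings stand in for empty BSON objects.

    // Which count plan do we build? (simplified decision model of getExecutorCount)
    #include <string>

    enum class CountPlan { TrivialRecordCount, CountOverEof, CountOverQueryPlan };

    struct CountReq { std::string query; std::string hint; };   // empty string == empty BSONObj

    CountPlan chooseCountPlan(bool collectionExists, const CountReq& req) {
        if (collectionExists && req.query.empty() && req.hint.empty())
            return CountPlan::TrivialRecordCount;   // just ask the collection for numRecords()
        if (!collectionExists)
            return CountPlan::CountOverEof;         // CountStage over EOFStage; count is zero
        return CountPlan::CountOverQueryPlan;       // canonicalize + plan, CountStage on top
    }
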
+
+//
+// Distinct hack
+//
+
+bool turnIxscanIntoDistinctIxscan(QuerySolution* soln, const string& field) {
+ QuerySolutionNode* root = soln->root.get();
+
+ // We're looking for a project on top of an ixscan.
+ if (STAGE_PROJECTION == root->getType() && (STAGE_IXSCAN == root->children[0]->getType())) {
+ IndexScanNode* isn = static_cast<IndexScanNode*>(root->children[0]);
+
+ // An additional filter must be applied to the data in the key, so we can't just skip
+ // all the keys with a given value; we must examine every one to find the one that (may)
+ // pass the filter.
+ if (NULL != isn->filter.get()) {
+ return false;
+ }
- // Make a new DistinctNode. We swap this for the ixscan in the provided solution.
- DistinctNode* dn = new DistinctNode();
- dn->indexKeyPattern = isn->indexKeyPattern;
- dn->direction = isn->direction;
- dn->bounds = isn->bounds;
-
- // Figure out which field we're skipping to the next value of. TODO: We currently only
- // try to distinct-hack when there is an index prefixed by the field we're distinct-ing
- // over. Consider removing this code if we stick with that policy.
- dn->fieldNo = 0;
- BSONObjIterator it(isn->indexKeyPattern);
- while (it.more()) {
- if (field == it.next().fieldName()) {
- break;
- }
- dn->fieldNo++;
- }
+ // We only set this when we have special query modifiers (.max() or .min()) or other
+ // special cases. Don't want to handle the interactions between those and distinct.
+ // Don't think this will ever really be true but if it somehow is, just ignore this
+ // soln.
+ if (isn->bounds.isSimpleRange) {
+ return false;
+ }
- // Delete the old index scan, set the child of project to the fast distinct scan.
- delete root->children[0];
- root->children[0] = dn;
- return true;
+ // Make a new DistinctNode. We swap this for the ixscan in the provided solution.
+ DistinctNode* dn = new DistinctNode();
+ dn->indexKeyPattern = isn->indexKeyPattern;
+ dn->direction = isn->direction;
+ dn->bounds = isn->bounds;
+
+ // Figure out which field we're skipping to the next value of. TODO: We currently only
+ // try to distinct-hack when there is an index prefixed by the field we're distinct-ing
+ // over. Consider removing this code if we stick with that policy.
+ dn->fieldNo = 0;
+ BSONObjIterator it(isn->indexKeyPattern);
+ while (it.more()) {
+ if (field == it.next().fieldName()) {
+ break;
+ }
+ dn->fieldNo++;
}
- return false;
+ // Delete the old index scan, set the child of project to the fast distinct scan.
+ delete root->children[0];
+ root->children[0] = dn;
+ return true;
}
- Status getExecutorDistinct(OperationContext* txn,
- Collection* collection,
- const BSONObj& query,
- const std::string& field,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- // This should'a been checked by the distinct command.
- invariant(collection);
-
- // TODO: check for idhack here?
-
- // When can we do a fast distinct hack?
- // 1. There is a plan with just one leaf and that leaf is an ixscan.
- // 2. The ixscan indexes the field we're interested in.
- // 2a: We are correct if the index contains the field but for now we look for prefix.
- // 3. The query is covered/no fetch.
- //
- // We go through normal planning (with limited parameters) to see if we can produce
- // a soln with the above properties.
-
- QueryPlannerParams plannerParams;
- plannerParams.options = QueryPlannerParams::NO_TABLE_SCAN;
-
- // TODO Need to check if query is compatible with any partial indexes. SERVER-17854.
- IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(txn,false);
- while (ii.more()) {
- const IndexDescriptor* desc = ii.next();
- // The distinct hack can work if any field is in the index but it's not always clear
- // if it's a win unless it's the first field.
- if (desc->keyPattern().firstElement().fieldName() == field) {
- plannerParams.indices.push_back(IndexEntry(desc->keyPattern(),
- desc->getAccessMethodName(),
- desc->isMultikey(txn),
- desc->isSparse(),
- desc->unique(),
- desc->indexName(),
- NULL,
- desc->infoObj()));
- }
- }
+ return false;
+}
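
The fieldNo computation, in isolation, amounts to finding the distinct field's position in the index key pattern. A standalone sketch, with a vector of field names standing in for iterating a BSON key pattern:

    // e.g. key pattern {a: 1, b: 1} and field "b" -> position 1.
    #include <cstddef>
    #include <string>
    #include <vector>

    std::size_t distinctFieldNo(const std::vector<std::string>& keyPatternFields,
                                const std::string& field) {
        std::size_t fieldNo = 0;
        for (const std::string& name : keyPatternFields) {
            if (name == field)
                break;              // found it; this is the key part to skip ahead on
            ++fieldNo;
        }
        return fieldNo;
    }
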
- const WhereCallbackReal whereCallback(txn, collection->ns().db());
+Status getExecutorDistinct(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& query,
+ const std::string& field,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** out) {
+    // This should have been checked by the distinct command.
+ invariant(collection);
- // If there are no suitable indices for the distinct hack bail out now into regular planning
- // with no projection.
- if (plannerParams.indices.empty()) {
- CanonicalQuery* cq;
- Status status = CanonicalQuery::canonicalize(
- collection->ns().ns(), query, &cq, whereCallback);
- if (!status.isOK()) {
- return status;
- }
+ // TODO: check for idhack here?
- // Takes ownership of 'cq'.
- return getExecutor(txn, collection, cq, yieldPolicy, out);
+ // When can we do a fast distinct hack?
+ // 1. There is a plan with just one leaf and that leaf is an ixscan.
+ // 2. The ixscan indexes the field we're interested in.
+    // 2a: We are correct if the index contains the field, but for now we look for a prefix.
+ // 3. The query is covered/no fetch.
+ //
+ // We go through normal planning (with limited parameters) to see if we can produce
+ // a soln with the above properties.
+
+ QueryPlannerParams plannerParams;
+ plannerParams.options = QueryPlannerParams::NO_TABLE_SCAN;
+
+ // TODO Need to check if query is compatible with any partial indexes. SERVER-17854.
+ IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(txn, false);
+ while (ii.more()) {
+ const IndexDescriptor* desc = ii.next();
+ // The distinct hack can work if any field is in the index but it's not always clear
+ // if it's a win unless it's the first field.
+ if (desc->keyPattern().firstElement().fieldName() == field) {
+ plannerParams.indices.push_back(IndexEntry(desc->keyPattern(),
+ desc->getAccessMethodName(),
+ desc->isMultikey(txn),
+ desc->isSparse(),
+ desc->unique(),
+ desc->indexName(),
+ NULL,
+ desc->infoObj()));
}
+ }
- //
- // If we're here, we have an index prefixed by the field we're distinct-ing over.
- //
-
- // Applying a projection allows the planner to try to give us covered plans that we can turn
- // into the projection hack. getDistinctProjection deals with .find() projection semantics
- // (ie _id:1 being implied by default).
- BSONObj projection = getDistinctProjection(field);
+ const WhereCallbackReal whereCallback(txn, collection->ns().db());
- // Apply a projection of the key. Empty BSONObj() is for the sort.
+    // If there are no suitable indices for the distinct hack, bail out now into regular
+    // planning with no projection.
+ if (plannerParams.indices.empty()) {
CanonicalQuery* cq;
- Status status = CanonicalQuery::canonicalize(collection->ns().ns(),
- query,
- BSONObj(),
- projection,
- &cq,
- whereCallback);
+ Status status =
+ CanonicalQuery::canonicalize(collection->ns().ns(), query, &cq, whereCallback);
if (!status.isOK()) {
return status;
}
- unique_ptr<CanonicalQuery> autoCq(cq);
+ // Takes ownership of 'cq'.
+ return getExecutor(txn, collection, cq, yieldPolicy, out);
+ }
- // If there's no query, we can just distinct-scan one of the indices.
- // Not every index in plannerParams.indices may be suitable. Refer to
- // getDistinctNodeIndex().
- size_t distinctNodeIndex = 0;
- if (query.isEmpty() &&
- getDistinctNodeIndex(plannerParams.indices, field, &distinctNodeIndex)) {
- DistinctNode* dn = new DistinctNode();
- dn->indexKeyPattern = plannerParams.indices[distinctNodeIndex].keyPattern;
- dn->direction = 1;
- IndexBoundsBuilder::allValuesBounds(dn->indexKeyPattern, &dn->bounds);
- dn->fieldNo = 0;
+ //
+ // If we're here, we have an index prefixed by the field we're distinct-ing over.
+ //
- QueryPlannerParams params;
+ // Applying a projection allows the planner to try to give us covered plans that we can turn
+ // into the projection hack. getDistinctProjection deals with .find() projection semantics
+ // (ie _id:1 being implied by default).
+ BSONObj projection = getDistinctProjection(field);
+
+ // Apply a projection of the key. Empty BSONObj() is for the sort.
+ CanonicalQuery* cq;
+ Status status = CanonicalQuery::canonicalize(
+ collection->ns().ns(), query, BSONObj(), projection, &cq, whereCallback);
+ if (!status.isOK()) {
+ return status;
+ }
- // Takes ownership of 'dn'.
- QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(*cq, params, dn);
- invariant(soln);
+ unique_ptr<CanonicalQuery> autoCq(cq);
- WorkingSet* ws = new WorkingSet();
- PlanStage* root;
- verify(StageBuilder::build(txn, collection, *soln, ws, &root));
+ // If there's no query, we can just distinct-scan one of the indices.
+ // Not every index in plannerParams.indices may be suitable. Refer to
+ // getDistinctNodeIndex().
+ size_t distinctNodeIndex = 0;
+ if (query.isEmpty() && getDistinctNodeIndex(plannerParams.indices, field, &distinctNodeIndex)) {
+ DistinctNode* dn = new DistinctNode();
+ dn->indexKeyPattern = plannerParams.indices[distinctNodeIndex].keyPattern;
+ dn->direction = 1;
+ IndexBoundsBuilder::allValuesBounds(dn->indexKeyPattern, &dn->bounds);
+ dn->fieldNo = 0;
- LOG(2) << "Using fast distinct: " << cq->toStringShort()
- << ", planSummary: " << Explain::getPlanSummary(root);
+ QueryPlannerParams params;
- // Takes ownership of its arguments (except for 'collection').
- return PlanExecutor::make(txn, ws, root, soln, autoCq.release(), collection,
- yieldPolicy, out);
- }
+ // Takes ownership of 'dn'.
+ QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(*cq, params, dn);
+ invariant(soln);
- // See if we can answer the query in a fast-distinct compatible fashion.
- vector<QuerySolution*> solutions;
- status = QueryPlanner::plan(*cq, plannerParams, &solutions);
- if (!status.isOK()) {
- return getExecutor(txn, collection, autoCq.release(), yieldPolicy, out);
- }
+ WorkingSet* ws = new WorkingSet();
+ PlanStage* root;
+ verify(StageBuilder::build(txn, collection, *soln, ws, &root));
- // We look for a solution that has an ixscan we can turn into a distinctixscan
- for (size_t i = 0; i < solutions.size(); ++i) {
- if (turnIxscanIntoDistinctIxscan(solutions[i], field)) {
- // Great, we can use solutions[i]. Clean up the other QuerySolution(s).
- for (size_t j = 0; j < solutions.size(); ++j) {
- if (j != i) {
- delete solutions[j];
- }
- }
+ LOG(2) << "Using fast distinct: " << cq->toStringShort()
+ << ", planSummary: " << Explain::getPlanSummary(root);
- // Build and return the SSR over solutions[i].
- WorkingSet* ws = new WorkingSet();
- PlanStage* root;
- verify(StageBuilder::build(txn, collection, *solutions[i], ws, &root));
+ // Takes ownership of its arguments (except for 'collection').
+ return PlanExecutor::make(
+ txn, ws, root, soln, autoCq.release(), collection, yieldPolicy, out);
+ }
- LOG(2) << "Using fast distinct: " << cq->toStringShort()
- << ", planSummary: " << Explain::getPlanSummary(root);
+ // See if we can answer the query in a fast-distinct compatible fashion.
+ vector<QuerySolution*> solutions;
+ status = QueryPlanner::plan(*cq, plannerParams, &solutions);
+ if (!status.isOK()) {
+ return getExecutor(txn, collection, autoCq.release(), yieldPolicy, out);
+ }
- // Takes ownership of 'ws', 'root', 'solutions[i]', and 'autoCq'.
- return PlanExecutor::make(txn, ws, root, solutions[i], autoCq.release(),
- collection, yieldPolicy, out);
+    // We look for a solution that has an ixscan we can turn into a distinct ixscan.
+ for (size_t i = 0; i < solutions.size(); ++i) {
+ if (turnIxscanIntoDistinctIxscan(solutions[i], field)) {
+ // Great, we can use solutions[i]. Clean up the other QuerySolution(s).
+ for (size_t j = 0; j < solutions.size(); ++j) {
+ if (j != i) {
+ delete solutions[j];
+ }
}
- }
- // If we're here, the planner made a soln with the restricted index set but we couldn't
- // translate any of them into a distinct-compatible soln. So, delete the solutions and just
- // go through normal planning.
- for (size_t i = 0; i < solutions.size(); ++i) {
- delete solutions[i];
- }
+ // Build and return the SSR over solutions[i].
+ WorkingSet* ws = new WorkingSet();
+ PlanStage* root;
+ verify(StageBuilder::build(txn, collection, *solutions[i], ws, &root));
- // We drop the projection from the 'cq'. Unfortunately this is not trivial.
- status = CanonicalQuery::canonicalize(collection->ns().ns(), query, &cq, whereCallback);
- if (!status.isOK()) {
- return status;
+ LOG(2) << "Using fast distinct: " << cq->toStringShort()
+ << ", planSummary: " << Explain::getPlanSummary(root);
+
+ // Takes ownership of 'ws', 'root', 'solutions[i]', and 'autoCq'.
+ return PlanExecutor::make(
+ txn, ws, root, solutions[i], autoCq.release(), collection, yieldPolicy, out);
}
+ }
- autoCq.reset(cq);
+ // If we're here, the planner made a soln with the restricted index set but we couldn't
+ // translate any of them into a distinct-compatible soln. So, delete the solutions and just
+ // go through normal planning.
+ for (size_t i = 0; i < solutions.size(); ++i) {
+ delete solutions[i];
+ }
- // Takes ownership of 'autoCq'.
- return getExecutor(txn, collection, autoCq.release(), yieldPolicy, out);
+ // We drop the projection from the 'cq'. Unfortunately this is not trivial.
+ status = CanonicalQuery::canonicalize(collection->ns().ns(), query, &cq, whereCallback);
+ if (!status.isOK()) {
+ return status;
}
+ autoCq.reset(cq);
+
+ // Takes ownership of 'autoCq'.
+ return getExecutor(txn, collection, autoCq.release(), yieldPolicy, out);
+}
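
Stepping back, the function falls through a fixed ladder of outcomes. A hedged sketch of that ladder, with booleans standing in for the checks the real code performs:

    // Outcomes of the distinct planning ladder (illustrative names, not mongo types).
    enum class DistinctPlan {
        RegularQueryPlan,      // no index prefixed by the field: fall back to normal planning
        DistinctScanOnly,      // empty query: distinct-scan a suitable index directly
        RewrittenIxscanPlan,   // a covered solution's ixscan was swapped for a distinct scan
        RegularPlanNoProj      // nothing could be rewritten: drop the projection, plan normally
    };

    DistinctPlan chooseDistinctPlan(bool haveIndexPrefixedByField,
                                    bool queryIsEmpty,
                                    bool haveSuitableDistinctIndex,
                                    bool someSolutionRewrites) {
        if (!haveIndexPrefixedByField)
            return DistinctPlan::RegularQueryPlan;
        if (queryIsEmpty && haveSuitableDistinctIndex)
            return DistinctPlan::DistinctScanOnly;
        if (someSolutionRewrites)
            return DistinctPlan::RewrittenIxscanPlan;
        return DistinctPlan::RegularPlanNoProj;
    }
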
+
} // namespace mongo
diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h
index c92f67a00ca..24d99ecc791 100644
--- a/src/mongo/db/query/get_executor.h
+++ b/src/mongo/db/query/get_executor.h
@@ -39,171 +39,171 @@
namespace mongo {
- class Collection;
- class CountRequest;
-
- struct GroupRequest;
-
- /**
- * Filter indexes retrieved from index catalog by
- * allowed indices in query settings.
- * Used by getExecutor().
- * This function is public to facilitate testing.
- */
- void filterAllowedIndexEntries(const AllowedIndices& allowedIndices,
- std::vector<IndexEntry>* indexEntries);
-
- /**
- * Fill out the provided 'plannerParams' for the 'canonicalQuery' operating on the collection
- * 'collection'. Exposed for testing.
- */
- void fillOutPlannerParams(OperationContext* txn,
- Collection* collection,
- CanonicalQuery* canonicalQuery,
- QueryPlannerParams* plannerParams);
-
- /**
- * Get a plan executor for a query. Takes ownership of 'rawCanonicalQuery'.
- *
- * If the query is valid and an executor could be created, returns Status::OK()
- * and populates *out with the PlanExecutor.
- *
- * If the query cannot be executed, returns a Status indicating why.
- */
- Status getExecutor(OperationContext* txn,
+class Collection;
+class CountRequest;
+
+struct GroupRequest;
+
+/**
+ * Filter indexes retrieved from index catalog by
+ * allowed indices in query settings.
+ * Used by getExecutor().
+ * This function is public to facilitate testing.
+ */
+void filterAllowedIndexEntries(const AllowedIndices& allowedIndices,
+ std::vector<IndexEntry>* indexEntries);
+
+/**
+ * Fill out the provided 'plannerParams' for the 'canonicalQuery' operating on the collection
+ * 'collection'. Exposed for testing.
+ */
+void fillOutPlannerParams(OperationContext* txn,
+ Collection* collection,
+ CanonicalQuery* canonicalQuery,
+ QueryPlannerParams* plannerParams);
+
+/**
+ * Get a plan executor for a query. Takes ownership of 'rawCanonicalQuery'.
+ *
+ * If the query is valid and an executor could be created, returns Status::OK()
+ * and populates *out with the PlanExecutor.
+ *
+ * If the query cannot be executed, returns a Status indicating why.
+ */
+Status getExecutor(OperationContext* txn,
+ Collection* collection,
+ CanonicalQuery* rawCanonicalQuery,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** out,
+ size_t plannerOptions = 0);
+
+/**
+ * Get a plan executor for query. This differs from the getExecutor(...) function
+ * above in that the above requires a non-NULL canonical query, whereas this
+ * function can retrieve a plan executor from the raw query object.
+ *
+ * Used to support idhack updates that do not create a canonical query.
+ *
+ * If the query is valid and an executor could be created, returns Status::OK()
+ * and populates *out with the PlanExecutor.
+ *
+ * If the query cannot be executed, returns a Status indicating why.
+ */
+Status getExecutor(OperationContext* txn,
+ Collection* collection,
+ const std::string& ns,
+ const BSONObj& unparsedQuery,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** out,
+ size_t plannerOptions = 0);
+
+/**
+ * Get a plan executor for a .find() operation. Takes ownership of 'rawCanonicalQuery'.
+ *
+ * If the query is valid and an executor could be created, returns Status::OK()
+ * and populates *out with the PlanExecutor.
+ *
+ * If the query cannot be executed, returns a Status indicating why.
+ */
+Status getExecutorFind(OperationContext* txn,
Collection* collection,
+ const NamespaceString& nss,
CanonicalQuery* rawCanonicalQuery,
PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out,
- size_t plannerOptions = 0);
-
- /**
- * Get a plan executor for query. This differs from the getExecutor(...) function
- * above in that the above requires a non-NULL canonical query, whereas this
- * function can retrieve a plan executor from the raw query object.
- *
- * Used to support idhack updates that do not create a canonical query.
- *
- * If the query is valid and an executor could be created, returns Status::OK()
- * and populates *out with the PlanExecutor.
- *
- * If the query cannot be executed, returns a Status indicating why.
- */
- Status getExecutor(OperationContext* txn,
- Collection* collection,
- const std::string& ns,
- const BSONObj& unparsedQuery,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out,
- size_t plannerOptions = 0);
-
- /**
- * Get a plan executor for a .find() operation. Takes ownership of 'rawCanonicalQuery'.
- *
- * If the query is valid and an executor could be created, returns Status::OK()
- * and populates *out with the PlanExecutor.
- *
- * If the query cannot be executed, returns a Status indicating why.
- */
- Status getExecutorFind(OperationContext* txn,
+ PlanExecutor** out);
+
+/**
+ * If possible, turn the provided QuerySolution into a QuerySolution that uses a DistinctNode
+ * to provide results for the distinct command.
+ *
+ * If the provided solution could be mutated successfully, returns true, otherwise returns
+ * false.
+ */
+bool turnIxscanIntoDistinctIxscan(QuerySolution* soln, const std::string& field);
+
+/*
+ * Get an executor for a query executing as part of a distinct command.
+ *
+ * Distinct is unique in that it doesn't care about getting all the results; it just wants all
+ * possible values of a certain field. As such, we can skip lots of data in certain cases (see
+ * body of method for detail).
+ */
+Status getExecutorDistinct(OperationContext* txn,
Collection* collection,
- const NamespaceString& nss,
- CanonicalQuery* rawCanonicalQuery,
+ const BSONObj& query,
+ const std::string& field,
PlanExecutor::YieldPolicy yieldPolicy,
PlanExecutor** out);
- /**
- * If possible, turn the provided QuerySolution into a QuerySolution that uses a DistinctNode
- * to provide results for the distinct command.
- *
- * If the provided solution could be mutated successfully, returns true, otherwise returns
- * false.
- */
- bool turnIxscanIntoDistinctIxscan(QuerySolution* soln, const std::string& field);
-
- /*
- * Get an executor for a query executing as part of a distinct command.
- *
- * Distinct is unique in that it doesn't care about getting all the results; it just wants all
- * possible values of a certain field. As such, we can skip lots of data in certain cases (see
- * body of method for detail).
- */
- Status getExecutorDistinct(OperationContext* txn,
- Collection* collection,
- const BSONObj& query,
- const std::string& field,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out);
-
- /*
- * Get a PlanExecutor for a query executing as part of a count command.
- *
- * Count doesn't care about actually examining its results; it just wants to walk through them.
- * As such, with certain covered queries, we can skip the overhead of fetching etc. when
- * executing a count.
- */
- Status getExecutorCount(OperationContext* txn,
- Collection* collection,
- const CountRequest& request,
- bool explain,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** execOut);
-
- /**
- * Get a PlanExecutor for a delete operation. 'parsedDelete' describes the query predicate
- * and delete flags like 'isMulti'. The caller must hold the appropriate MODE_X or MODE_IX
- * locks, and must not release these locks until after the returned PlanExecutor is deleted.
- *
- * The returned PlanExecutor will yield if and only if parsedDelete->canYield().
- *
- * Does not take ownership of its arguments.
- *
- * If the query is valid and an executor could be created, returns Status::OK() and populates
- * *execOut with the PlanExecutor. The caller takes ownership of *execOut.
- *
- * If the query cannot be executed, returns a Status indicating why.
- */
- Status getExecutorDelete(OperationContext* txn,
- Collection* collection,
- ParsedDelete* parsedDelete,
- PlanExecutor** execOut);
-
- /**
- * Get a PlanExecutor for an update operation. 'parsedUpdate' describes the query predicate
- * and update modifiers. The caller must hold the appropriate MODE_X or MODE_IX locks prior
- * to calling this function, and must not release these locks until after the returned
- * PlanExecutor is deleted.
- *
- * The returned PlanExecutor will yield if and only if parsedUpdate->canYield().
- *
- * Does not take ownership of its arguments.
- *
- * If the query is valid and an executor could be created, returns Status::OK() and populates
- * *out with the PlanExecutor. The caller takes ownership of *execOut.
- *
- * If the query cannot be executed, returns a Status indicating why.
- */
- Status getExecutorUpdate(OperationContext* txn,
- Collection* collection,
- ParsedUpdate* parsedUpdate,
- OpDebug* opDebug,
- PlanExecutor** execOut);
-
- /**
- * Get a PlanExecutor for a group operation. 'rawCanonicalQuery' describes the predicate for
- * the documents to be grouped.
- *
- * Takes ownership of 'rawCanonicalQuery'. Does not take ownership of other args.
- *
- * If the query is valid and an executor could be created, returns Status::OK() and populates
- * *out with the PlanExecutor.
- *
- * If an executor could not be created, returns a Status indicating why.
- */
- Status getExecutorGroup(OperationContext* txn,
- Collection* collection,
- const GroupRequest& request,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** execOut);
+/*
+ * Get a PlanExecutor for a query executing as part of a count command.
+ *
+ * Count doesn't care about actually examining its results; it just wants to walk through them.
+ * As such, with certain covered queries, we can skip the overhead of fetching etc. when
+ * executing a count.
+ */
+Status getExecutorCount(OperationContext* txn,
+ Collection* collection,
+ const CountRequest& request,
+ bool explain,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** execOut);
+
+/**
+ * Get a PlanExecutor for a delete operation. 'parsedDelete' describes the query predicate
+ * and delete flags like 'isMulti'. The caller must hold the appropriate MODE_X or MODE_IX
+ * locks, and must not release these locks until after the returned PlanExecutor is deleted.
+ *
+ * The returned PlanExecutor will yield if and only if parsedDelete->canYield().
+ *
+ * Does not take ownership of its arguments.
+ *
+ * If the query is valid and an executor could be created, returns Status::OK() and populates
+ * *execOut with the PlanExecutor. The caller takes ownership of *execOut.
+ *
+ * If the query cannot be executed, returns a Status indicating why.
+ */
+Status getExecutorDelete(OperationContext* txn,
+ Collection* collection,
+ ParsedDelete* parsedDelete,
+ PlanExecutor** execOut);
+
+/**
+ * Get a PlanExecutor for an update operation. 'parsedUpdate' describes the query predicate
+ * and update modifiers. The caller must hold the appropriate MODE_X or MODE_IX locks prior
+ * to calling this function, and must not release these locks until after the returned
+ * PlanExecutor is deleted.
+ *
+ * The returned PlanExecutor will yield if and only if parsedUpdate->canYield().
+ *
+ * Does not take ownership of its arguments.
+ *
+ * If the query is valid and an executor could be created, returns Status::OK() and populates
+ * *out with the PlanExecutor. The caller takes ownership of *execOut.
+ *
+ * If the query cannot be executed, returns a Status indicating why.
+ */
+Status getExecutorUpdate(OperationContext* txn,
+ Collection* collection,
+ ParsedUpdate* parsedUpdate,
+ OpDebug* opDebug,
+ PlanExecutor** execOut);
+
+/**
+ * Get a PlanExecutor for a group operation. 'rawCanonicalQuery' describes the predicate for
+ * the documents to be grouped.
+ *
+ * Takes ownership of 'rawCanonicalQuery'. Does not take ownership of other args.
+ *
+ * If the query is valid and an executor could be created, returns Status::OK() and populates
+ * *out with the PlanExecutor.
+ *
+ * If an executor could not be created, returns a Status indicating why.
+ */
+Status getExecutorGroup(OperationContext* txn,
+ Collection* collection,
+ const GroupRequest& request,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** execOut);
} // namespace mongo
diff --git a/src/mongo/db/query/get_executor_test.cpp b/src/mongo/db/query/get_executor_test.cpp
index 7ec251bb6ee..fdc04609df8 100644
--- a/src/mongo/db/query/get_executor_test.cpp
+++ b/src/mongo/db/query/get_executor_test.cpp
@@ -41,105 +41,102 @@ using namespace mongo;
namespace {
- using std::unique_ptr;
-
- static const char* ns = "somebogusns";
-
- /**
- * Utility functions to create a CanonicalQuery
- */
- CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr,
- const char* projStr) {
- BSONObj queryObj = fromjson(queryStr);
- BSONObj sortObj = fromjson(sortStr);
- BSONObj projObj = fromjson(projStr);
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj,
- projObj,
- &cq);
- ASSERT_OK(result);
- return cq;
- }
+using std::unique_ptr;
- //
- // get_executor tests
- //
-
- //
- // filterAllowedIndexEntries
- //
-
- /**
- * Test function to check filterAllowedIndexEntries
- */
- void testAllowedIndices(const char* hintKeyPatterns[],
- const char* indexCatalogKeyPatterns[],
- const char* expectedFilteredKeyPatterns[]) {
- PlanCache planCache;
- QuerySettings querySettings;
- AllowedIndices *allowedIndicesRaw;
-
- // getAllowedIndices should return false when query shape is not yet in query settings.
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}"));
- PlanCacheKey key = planCache.computeKey(*cq);
- ASSERT_FALSE(querySettings.getAllowedIndices(key, &allowedIndicesRaw));
-
- // Add entry to query settings.
- std::vector<BSONObj> indexKeyPatterns;
- for (int i=0; hintKeyPatterns[i] != NULL; ++i) {
- indexKeyPatterns.push_back(fromjson(hintKeyPatterns[i]));
- }
- querySettings.setAllowedIndices(*cq, key, indexKeyPatterns);
-
- // Index entry vector should contain 1 entry after filtering.
- ASSERT_TRUE(querySettings.getAllowedIndices(key, &allowedIndicesRaw));
- ASSERT_FALSE(key.empty());
- ASSERT(NULL != allowedIndicesRaw);
- unique_ptr<AllowedIndices> allowedIndices(allowedIndicesRaw);
-
- // Indexes from index catalog.
- std::vector<IndexEntry> indexEntries;
- for (int i=0; indexCatalogKeyPatterns[i] != NULL; ++i) {
- indexEntries.push_back(IndexEntry(fromjson(indexCatalogKeyPatterns[i])));
- }
-
- // Apply filter in allowed indices.
- filterAllowedIndexEntries(*allowedIndices, &indexEntries);
- size_t numExpected = 0;
- while (expectedFilteredKeyPatterns[numExpected] != NULL) {
- ASSERT_LESS_THAN(numExpected, indexEntries.size());
- ASSERT_EQUALS(indexEntries[numExpected].keyPattern,
- fromjson(expectedFilteredKeyPatterns[numExpected]));
- numExpected++;
- }
- ASSERT_EQUALS(indexEntries.size(), numExpected);
- }
-
- // Use of index filters to select compound index over single key index.
- TEST(GetExecutorTest, GetAllowedIndices) {
- const char* hintKeyPatterns[] = {"{a: 1, b: 1}", NULL};
- const char* indexCatalogKeyPatterns[] = {"{a: 1}", "{a: 1, b: 1}", "{a: 1, c: 1}", NULL};
- const char* expectedFilteredKeyPatterns[] = {"{a: 1, b: 1}", NULL};
- testAllowedIndices(hintKeyPatterns, indexCatalogKeyPatterns, expectedFilteredKeyPatterns);
- }
+static const char* ns = "somebogusns";
+
+/**
+ * Utility function to create a CanonicalQuery.
+ */
+CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr, const char* projStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ BSONObj sortObj = fromjson(sortStr);
+ BSONObj projObj = fromjson(projStr);
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj, projObj, &cq);
+ ASSERT_OK(result);
+ return cq;
+}
+
+//
+// get_executor tests
+//
+
+//
+// filterAllowedIndexEntries
+//
- // Setting index filter referring to non-existent indexes
- // will effectively disregard the index catalog and
- // result in the planner generating a collection scan.
- TEST(GetExecutorTest, GetAllowedIndicesNonExistentIndexKeyPatterns) {
- const char* hintKeyPatterns[] = {"{nosuchfield: 1}", NULL};
- const char* indexCatalogKeyPatterns[] = {"{a: 1}", "{a: 1, b: 1}", "{a: 1, c: 1}", NULL};
- const char* expectedFilteredKeyPatterns[] = {NULL};
- testAllowedIndices(hintKeyPatterns, indexCatalogKeyPatterns, expectedFilteredKeyPatterns);
+/**
+ * Test function to check filterAllowedIndexEntries
+ */
+void testAllowedIndices(const char* hintKeyPatterns[],
+ const char* indexCatalogKeyPatterns[],
+ const char* expectedFilteredKeyPatterns[]) {
+ PlanCache planCache;
+ QuerySettings querySettings;
+ AllowedIndices* allowedIndicesRaw;
+
+ // getAllowedIndices should return false when query shape is not yet in query settings.
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}"));
+ PlanCacheKey key = planCache.computeKey(*cq);
+ ASSERT_FALSE(querySettings.getAllowedIndices(key, &allowedIndicesRaw));
+
+ // Add entry to query settings.
+ std::vector<BSONObj> indexKeyPatterns;
+ for (int i = 0; hintKeyPatterns[i] != NULL; ++i) {
+ indexKeyPatterns.push_back(fromjson(hintKeyPatterns[i]));
+ }
+ querySettings.setAllowedIndices(*cq, key, indexKeyPatterns);
+
+ // Index entry vector should contain 1 entry after filtering.
+ ASSERT_TRUE(querySettings.getAllowedIndices(key, &allowedIndicesRaw));
+ ASSERT_FALSE(key.empty());
+ ASSERT(NULL != allowedIndicesRaw);
+ unique_ptr<AllowedIndices> allowedIndices(allowedIndicesRaw);
+
+ // Indexes from index catalog.
+ std::vector<IndexEntry> indexEntries;
+ for (int i = 0; indexCatalogKeyPatterns[i] != NULL; ++i) {
+ indexEntries.push_back(IndexEntry(fromjson(indexCatalogKeyPatterns[i])));
}
- // This test case shows how to force query execution to use
- // an index that orders items in descending order.
- TEST(GetExecutorTest, GetAllowedIndicesDescendingOrder) {
- const char* hintKeyPatterns[] = {"{a: -1}", NULL};
- const char* indexCatalogKeyPatterns[] = {"{a: 1}", "{a: -1}", NULL};
- const char* expectedFilteredKeyPatterns[] = {"{a: -1}", NULL};
- testAllowedIndices(hintKeyPatterns, indexCatalogKeyPatterns, expectedFilteredKeyPatterns);
+ // Apply filter in allowed indices.
+ filterAllowedIndexEntries(*allowedIndices, &indexEntries);
+ size_t numExpected = 0;
+ while (expectedFilteredKeyPatterns[numExpected] != NULL) {
+ ASSERT_LESS_THAN(numExpected, indexEntries.size());
+ ASSERT_EQUALS(indexEntries[numExpected].keyPattern,
+ fromjson(expectedFilteredKeyPatterns[numExpected]));
+ numExpected++;
}
+ ASSERT_EQUALS(indexEntries.size(), numExpected);
+}
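
What the helper exercises is, at its core, a filter of catalog index entries down to the allowed key patterns. A standalone model is sketched below, using plain strings as stand-ins for the BSONObj key-pattern comparisons the real code performs.

    // Keep only catalog key patterns that appear in the allowed set (stand-in types).
    #include <algorithm>
    #include <string>
    #include <vector>

    void filterAllowed(const std::vector<std::string>& allowedKeyPatterns,
                       std::vector<std::string>* indexEntries) {
        indexEntries->erase(
            std::remove_if(indexEntries->begin(),
                           indexEntries->end(),
                           [&](const std::string& keyPattern) {
                               return std::find(allowedKeyPatterns.begin(),
                                                allowedKeyPatterns.end(),
                                                keyPattern) == allowedKeyPatterns.end();
                           }),
            indexEntries->end());
    }
    // With allowed = {"{a: 1, b: 1}"} and catalog = {"{a: 1}", "{a: 1, b: 1}", "{a: 1, c: 1}"},
    // only "{a: 1, b: 1}" survives -- the same outcome GetAllowedIndices asserts.
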
+
+// Use of index filters to select compound index over single key index.
+TEST(GetExecutorTest, GetAllowedIndices) {
+ const char* hintKeyPatterns[] = {"{a: 1, b: 1}", NULL};
+ const char* indexCatalogKeyPatterns[] = {"{a: 1}", "{a: 1, b: 1}", "{a: 1, c: 1}", NULL};
+ const char* expectedFilteredKeyPatterns[] = {"{a: 1, b: 1}", NULL};
+ testAllowedIndices(hintKeyPatterns, indexCatalogKeyPatterns, expectedFilteredKeyPatterns);
+}
+
+// Setting index filter referring to non-existent indexes
+// will effectively disregard the index catalog and
+// result in the planner generating a collection scan.
+TEST(GetExecutorTest, GetAllowedIndicesNonExistentIndexKeyPatterns) {
+ const char* hintKeyPatterns[] = {"{nosuchfield: 1}", NULL};
+ const char* indexCatalogKeyPatterns[] = {"{a: 1}", "{a: 1, b: 1}", "{a: 1, c: 1}", NULL};
+ const char* expectedFilteredKeyPatterns[] = {NULL};
+ testAllowedIndices(hintKeyPatterns, indexCatalogKeyPatterns, expectedFilteredKeyPatterns);
+}
+
+// This test case shows how to force query execution to use
+// an index that orders items in descending order.
+TEST(GetExecutorTest, GetAllowedIndicesDescendingOrder) {
+ const char* hintKeyPatterns[] = {"{a: -1}", NULL};
+ const char* indexCatalogKeyPatterns[] = {"{a: 1}", "{a: -1}", NULL};
+ const char* expectedFilteredKeyPatterns[] = {"{a: -1}", NULL};
+ testAllowedIndices(hintKeyPatterns, indexCatalogKeyPatterns, expectedFilteredKeyPatterns);
+}
} // namespace
diff --git a/src/mongo/db/query/getmore_request.cpp b/src/mongo/db/query/getmore_request.cpp
index 678276f8467..bb3054af79e 100644
--- a/src/mongo/db/query/getmore_request.cpp
+++ b/src/mongo/db/query/getmore_request.cpp
@@ -40,113 +40,104 @@
namespace mongo {
- GetMoreRequest::GetMoreRequest()
- : cursorid(0),
- batchSize(0) { }
-
- GetMoreRequest::GetMoreRequest(const std::string& fullns,
- CursorId id,
- boost::optional<int> sizeOfBatch)
- : nss(fullns),
- cursorid(id),
- batchSize(sizeOfBatch) { }
-
- Status GetMoreRequest::isValid() const {
- if (!nss.isValid()) {
- return Status(ErrorCodes::BadValue, str::stream()
- << "Invalid namespace for getMore: " << nss.ns());
- }
+GetMoreRequest::GetMoreRequest() : cursorid(0), batchSize(0) {}
- if (cursorid == 0) {
- return Status(ErrorCodes::BadValue, "Cursor id for getMore must be non-zero");
- }
+GetMoreRequest::GetMoreRequest(const std::string& fullns,
+ CursorId id,
+ boost::optional<int> sizeOfBatch)
+ : nss(fullns), cursorid(id), batchSize(sizeOfBatch) {}
- if (batchSize && *batchSize <= 0) {
- return Status(ErrorCodes::BadValue, str::stream()
- << "Batch size for getMore must be positive, "
- << "but received: " << *batchSize);
- }
-
- return Status::OK();
+Status GetMoreRequest::isValid() const {
+ if (!nss.isValid()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Invalid namespace for getMore: " << nss.ns());
}
- // static
- std::string GetMoreRequest::parseNs(const std::string& dbname, const BSONObj& cmdObj) {
- BSONElement collElt = cmdObj["collection"];
- const std::string coll = (collElt.type() == BSONType::String) ? collElt.String()
- : "";
+ if (cursorid == 0) {
+ return Status(ErrorCodes::BadValue, "Cursor id for getMore must be non-zero");
+ }
- return str::stream() << dbname << "." << coll;
+ if (batchSize && *batchSize <= 0) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Batch size for getMore must be positive, "
+ << "but received: " << *batchSize);
}
- // static
- StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbname,
- const BSONObj& cmdObj) {
- invariant(!dbname.empty());
+ return Status::OK();
+}
- // Required fields.
- boost::optional<CursorId> cursorid;
- boost::optional<std::string> fullns;
+// static
+std::string GetMoreRequest::parseNs(const std::string& dbname, const BSONObj& cmdObj) {
+ BSONElement collElt = cmdObj["collection"];
+ const std::string coll = (collElt.type() == BSONType::String) ? collElt.String() : "";
- // Optional field.
- boost::optional<int> batchSize;
+ return str::stream() << dbname << "." << coll;
+}
- for (BSONElement el : cmdObj) {
- const char* fieldName = el.fieldName();
- if (str::equals(fieldName, "getMore")) {
- if (el.type() != BSONType::NumberLong) {
- return {ErrorCodes::TypeMismatch,
- str::stream() << "Field 'getMore' must be of type long in: " << cmdObj};
- }
+// static
+StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbname,
+ const BSONObj& cmdObj) {
+ invariant(!dbname.empty());
- cursorid = el.Long();
- }
- else if (str::equals(fieldName, "collection")) {
- if (el.type() != BSONType::String) {
- return {ErrorCodes::TypeMismatch,
- str::stream() << "Field 'collection' must be of type string in: "
- << cmdObj};
- }
-
- fullns = parseNs(dbname, cmdObj);
- }
- else if (str::equals(fieldName, "batchSize")) {
- if (!el.isNumber()) {
- return {ErrorCodes::TypeMismatch,
- str::stream() << "Field 'batchSize' must be a number in: " << cmdObj};
- }
+ // Required fields.
+ boost::optional<CursorId> cursorid;
+ boost::optional<std::string> fullns;
- batchSize = el.numberInt();
+ // Optional field.
+ boost::optional<int> batchSize;
+
+ for (BSONElement el : cmdObj) {
+ const char* fieldName = el.fieldName();
+ if (str::equals(fieldName, "getMore")) {
+ if (el.type() != BSONType::NumberLong) {
+ return {ErrorCodes::TypeMismatch,
+ str::stream() << "Field 'getMore' must be of type long in: " << cmdObj};
}
- else if (str::equals(fieldName, "maxTimeMS")) {
- // maxTimeMS is parsed by the command handling code, so we don't repeat the parsing
- // here.
- continue;
+
+ cursorid = el.Long();
+ } else if (str::equals(fieldName, "collection")) {
+ if (el.type() != BSONType::String) {
+ return {ErrorCodes::TypeMismatch,
+ str::stream()
+ << "Field 'collection' must be of type string in: " << cmdObj};
}
- else if (!str::startsWith(fieldName, "$")) {
- return {ErrorCodes::FailedToParse,
- str::stream() << "Failed to parse: " << cmdObj << ". "
- << "Unrecognized field '" << fieldName << "'."};
+
+ fullns = parseNs(dbname, cmdObj);
+ } else if (str::equals(fieldName, "batchSize")) {
+ if (!el.isNumber()) {
+ return {ErrorCodes::TypeMismatch,
+ str::stream() << "Field 'batchSize' must be a number in: " << cmdObj};
}
- }
- if (!cursorid) {
+ batchSize = el.numberInt();
+ } else if (str::equals(fieldName, "maxTimeMS")) {
+ // maxTimeMS is parsed by the command handling code, so we don't repeat the parsing
+ // here.
+ continue;
+ } else if (!str::startsWith(fieldName, "$")) {
return {ErrorCodes::FailedToParse,
- str::stream() << "Field 'getMore' missing in: " << cmdObj};
+ str::stream() << "Failed to parse: " << cmdObj << ". "
+ << "Unrecognized field '" << fieldName << "'."};
}
+ }
- if (!fullns) {
- return {ErrorCodes::FailedToParse,
- str::stream() << "Field 'collection' missing in: " << cmdObj};
- }
+ if (!cursorid) {
+ return {ErrorCodes::FailedToParse,
+ str::stream() << "Field 'getMore' missing in: " << cmdObj};
+ }
- GetMoreRequest request(*fullns, *cursorid, batchSize);
- Status validStatus = request.isValid();
- if (!validStatus.isOK()) {
- return validStatus;
- }
+ if (!fullns) {
+ return {ErrorCodes::FailedToParse,
+ str::stream() << "Field 'collection' missing in: " << cmdObj};
+ }
- return request;
+ GetMoreRequest request(*fullns, *cursorid, batchSize);
+ Status validStatus = request.isValid();
+ if (!validStatus.isOK()) {
+ return validStatus;
}
-} // namespace mongo
+ return request;
+}
+
+} // namespace mongo
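
The validity rules enforced by isValid() are small enough to model in a self-contained sketch; GetMoreCheck below is a simplified stand-in for GetMoreRequest, not the real class.

    // Namespace must be non-empty/valid, cursor id must be non-zero (negative is allowed),
    // and batchSize, if supplied, must be positive.
    #include <cstdint>
    #include <optional>
    #include <string>

    struct GetMoreCheck {
        std::string ns;                  // e.g. "db.coll", as built by parseNs(dbname, cmdObj)
        std::int64_t cursorid = 0;
        std::optional<int> batchSize;

        bool valid() const {
            if (ns.empty())              // the real code uses NamespaceString::isValid()
                return false;
            if (cursorid == 0)
                return false;
            if (batchSize && *batchSize <= 0)
                return false;
            return true;
        }
    };
    // {getMore: NumberLong(123), collection: "coll", batchSize: 5} parses and validates;
    // {getMore: NumberLong(0), collection: "coll"} is rejected with BadValue.
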
diff --git a/src/mongo/db/query/getmore_request.h b/src/mongo/db/query/getmore_request.h
index 2d30f96ed75..c967408d008 100644
--- a/src/mongo/db/query/getmore_request.h
+++ b/src/mongo/db/query/getmore_request.h
@@ -38,38 +38,38 @@
namespace mongo {
- struct GetMoreRequest {
- /**
- * Construct an empty request.
- */
- GetMoreRequest();
+struct GetMoreRequest {
+ /**
+ * Construct an empty request.
+ */
+ GetMoreRequest();
- /**
- * Construct a GetMoreRequest from the command specification and db name.
- */
- static StatusWith<GetMoreRequest> parseFromBSON(const std::string& dbname,
- const BSONObj& cmdObj);
+ /**
+ * Construct a GetMoreRequest from the command specification and db name.
+ */
+ static StatusWith<GetMoreRequest> parseFromBSON(const std::string& dbname,
+ const BSONObj& cmdObj);
- static std::string parseNs(const std::string& dbname, const BSONObj& cmdObj);
+ static std::string parseNs(const std::string& dbname, const BSONObj& cmdObj);
- const NamespaceString nss;
- const CursorId cursorid;
+ const NamespaceString nss;
+ const CursorId cursorid;
- // The batch size is optional. If not provided, we will put as many documents into the batch
- // as fit within the byte limit.
- const boost::optional<int> batchSize;
+ // The batch size is optional. If not provided, we will put as many documents into the batch
+ // as fit within the byte limit.
+ const boost::optional<int> batchSize;
- private:
- /**
- * Construct from parsed BSON
- */
- GetMoreRequest(const std::string& fullns, CursorId id, boost::optional<int> batch);
+private:
+ /**
+ * Construct from parsed BSON
+ */
+ GetMoreRequest(const std::string& fullns, CursorId id, boost::optional<int> batch);
- /**
- * Returns a non-OK status if there are semantic errors in the parsed request
- * (e.g. a negative batchSize).
- */
- Status isValid() const;
- };
+ /**
+ * Returns a non-OK status if there are semantic errors in the parsed request
+ * (e.g. a negative batchSize).
+ */
+ Status isValid() const;
+};
-} // namespace mongo
+} // namespace mongo
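
The isValid() hook is private, so semantic errors surface to callers only through parseFromBSON(). A sketch, assuming the behaviour exercised by the tests below (a zero cursor id or a non-positive batch size is well-typed but still rejected):

    // Sketch only: a well-typed request with batchSize 0 still fails, with BadValue.
    StatusWith<GetMoreRequest> bad =
        GetMoreRequest::parseFromBSON("db",
                                      BSON("getMore" << CursorId(123) << "collection"
                                                     << "coll"
                                                     << "batchSize" << 0));
    invariant(ErrorCodes::BadValue == bad.getStatus().code());
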
diff --git a/src/mongo/db/query/getmore_request_test.cpp b/src/mongo/db/query/getmore_request_test.cpp
index bd2f8c8b242..abb50b693f6 100644
--- a/src/mongo/db/query/getmore_request_test.cpp
+++ b/src/mongo/db/query/getmore_request_test.cpp
@@ -37,138 +37,152 @@
namespace {
- using namespace mongo;
-
- TEST(GetMoreRequestTest, parseFromBSONEmptyCommandObject) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON("db", BSONObj());
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONCursorIdNotNumeric) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << "not a number" <<
- "collection" << "coll"));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONCursorIdNotLongLong) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << "not a number" <<
- "collection" << 123));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONMissingCollection) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123)));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONCollectionNotString) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) << "collection" << 456));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONBatchSizeNotInteger) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) <<
- "collection" << "coll" <<
- "batchSize" << "not a number"));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONInvalidCursorId) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(0) << "collection" << "coll"));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONNegativeCursorId) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(-123) << "collection" << "coll"));
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
- ASSERT_EQUALS(CursorId(-123), result.getValue().cursorid);
- ASSERT_FALSE(result.getValue().batchSize);
- }
-
- TEST(GetMoreRequestTest, parseFromBSONUnrecognizedFieldName) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) <<
- "collection" << "coll" <<
- "unknown_field" << 1));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSize) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) << "collection" << "coll" << "batchSize" << -1));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSizeOfZero) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) << "collection" << "coll" << "batchSize" << 0));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONNoBatchSize) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) << "collection" << "coll"));
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
- ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
- ASSERT_FALSE(result.getValue().batchSize);
- }
-
- TEST(GetMoreRequestTest, parseFromBSONBatchSizeProvided) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) << "collection" << "coll" << "batchSize" << 200));
- ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
- ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
- ASSERT(result.getValue().batchSize);
- ASSERT_EQUALS(200, *result.getValue().batchSize);
- }
-
- TEST(GetMoreRequestTest, parseFromBSONIgnoreDollarPrefixedFields) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) << "collection" << "coll" << "$foo" << "bar"));
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
- ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
- }
-
- TEST(GetMoreRequestTest, parseFromBSONIgnoreMaxTimeMS) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) << "collection" << "coll" << "maxTimeMS" << 100));
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
- ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
- }
-
-} // namespace
+using namespace mongo;
+
+TEST(GetMoreRequestTest, parseFromBSONEmptyCommandObject) {
+ StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON("db", BSONObj());
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONCursorIdNotNumeric) {
+ StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON("db",
+ BSON("getMore"
+ << "not a number"
+ << "collection"
+ << "coll"));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONCursorIdNotLongLong) {
+ StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON("db",
+ BSON("getMore"
+ << "not a number"
+ << "collection" << 123));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONMissingCollection) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db", BSON("getMore" << CursorId(123)));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONCollectionNotString) {
+ StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
+ "db", BSON("getMore" << CursorId(123) << "collection" << 456));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONBatchSizeNotInteger) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(123) << "collection"
+ << "coll"
+ << "batchSize"
+ << "not a number"));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONInvalidCursorId) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(0) << "collection"
+ << "coll"));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONNegativeCursorId) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(-123) << "collection"
+ << "coll"));
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
+ ASSERT_EQUALS(CursorId(-123), result.getValue().cursorid);
+ ASSERT_FALSE(result.getValue().batchSize);
+}
+
+TEST(GetMoreRequestTest, parseFromBSONUnrecognizedFieldName) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(123) << "collection"
+ << "coll"
+ << "unknown_field" << 1));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSize) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(123) << "collection"
+ << "coll"
+ << "batchSize" << -1));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSizeOfZero) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(123) << "collection"
+ << "coll"
+ << "batchSize" << 0));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONNoBatchSize) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(123) << "collection"
+ << "coll"));
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
+ ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
+ ASSERT_FALSE(result.getValue().batchSize);
+}
+
+TEST(GetMoreRequestTest, parseFromBSONBatchSizeProvided) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(123) << "collection"
+ << "coll"
+ << "batchSize" << 200));
+ ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
+ ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
+ ASSERT(result.getValue().batchSize);
+ ASSERT_EQUALS(200, *result.getValue().batchSize);
+}
+
+TEST(GetMoreRequestTest, parseFromBSONIgnoreDollarPrefixedFields) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(123) << "collection"
+ << "coll"
+ << "$foo"
+ << "bar"));
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
+ ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
+}
+
+TEST(GetMoreRequestTest, parseFromBSONIgnoreMaxTimeMS) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(123) << "collection"
+ << "coll"
+ << "maxTimeMS" << 100));
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
+ ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
+}
+
+} // namespace
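
A possible additional case in the same style (a sketch, not part of this change): the two ignored field classes, maxTimeMS and $-prefixed options, can appear together and the request should still parse.

    TEST(GetMoreRequestTest, parseFromBSONIgnoreMaxTimeMSAndDollarPrefixedFields) {
        // Hypothetical combined case built from the two "ignore" tests above.
        StatusWith<GetMoreRequest> result =
            GetMoreRequest::parseFromBSON("db",
                                          BSON("getMore" << CursorId(123) << "collection"
                                                         << "coll"
                                                         << "maxTimeMS" << 100 << "$queryOptions"
                                                         << "bar"));
        ASSERT_OK(result.getStatus());
        ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
        ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
        ASSERT_FALSE(result.getValue().batchSize);
    }
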
diff --git a/src/mongo/db/query/index_bounds.cpp b/src/mongo/db/query/index_bounds.cpp
index f7592994e29..dafd6e7fce8 100644
--- a/src/mongo/db/query/index_bounds.cpp
+++ b/src/mongo/db/query/index_bounds.cpp
@@ -33,520 +33,529 @@
namespace mongo {
- using std::string;
- using std::vector;
+using std::string;
+using std::vector;
- namespace {
+namespace {
- // Return a value in the set {-1, 0, 1} to represent the sign of parameter i.
- int sgn(int i) {
- if (i == 0)
- return 0;
- return i > 0 ? 1 : -1;
- }
-
- /**
- * Returns BEHIND if the key is behind the interval.
- * Returns WITHIN if the key is within the interval.
- * Returns AHEAD if the key is ahead the interval.
- *
- * All directions are oriented along 'direction'.
- */
- IndexBoundsChecker::Location intervalCmp(const Interval& interval, const BSONElement& key,
- const int expectedDirection) {
- int cmp = sgn(key.woCompare(interval.start, false));
- bool startOK = (cmp == expectedDirection) || (cmp == 0 && interval.startInclusive);
- if (!startOK) { return IndexBoundsChecker::BEHIND; }
-
- cmp = sgn(key.woCompare(interval.end, false));
- bool endOK = (cmp == -expectedDirection) || (cmp == 0 && interval.endInclusive);
- if (!endOK) { return IndexBoundsChecker::AHEAD; }
-
- return IndexBoundsChecker::WITHIN;
- }
+// Return a value in the set {-1, 0, 1} to represent the sign of parameter i.
+int sgn(int i) {
+ if (i == 0)
+ return 0;
+ return i > 0 ? 1 : -1;
+}
- } // namespace
-
- // For debugging.
- size_t IndexBounds::size() const {
- return fields.size();
+/**
+ * Returns BEHIND if the key is behind the interval.
+ * Returns WITHIN if the key is within the interval.
+ * Returns AHEAD if the key is ahead of the interval.
+ *
+ * All directions are oriented along 'direction'.
+ */
+IndexBoundsChecker::Location intervalCmp(const Interval& interval,
+ const BSONElement& key,
+ const int expectedDirection) {
+ int cmp = sgn(key.woCompare(interval.start, false));
+ bool startOK = (cmp == expectedDirection) || (cmp == 0 && interval.startInclusive);
+ if (!startOK) {
+ return IndexBoundsChecker::BEHIND;
}
- string IndexBounds::getFieldName(size_t i) const {
- return i < size() ? fields[i].name : "";
+ cmp = sgn(key.woCompare(interval.end, false));
+ bool endOK = (cmp == -expectedDirection) || (cmp == 0 && interval.endInclusive);
+ if (!endOK) {
+ return IndexBoundsChecker::AHEAD;
}
- size_t IndexBounds::getNumIntervals(size_t i) const {
- return i < size() ? fields[i].intervals.size() : 0;
- }
+ return IndexBoundsChecker::WITHIN;
+}
- Interval IndexBounds::getInterval(size_t i, size_t j) const {
- if (i < size() && j < fields[i].intervals.size()) {
- return fields[i].intervals[j];
- }
- else {
- return Interval();
- }
- }
+} // namespace
- string OrderedIntervalList::toString() const {
- mongoutils::str::stream ss;
- ss << "['" << name << "']: ";
- for (size_t j = 0; j < intervals.size(); ++j) {
- ss << intervals[j].toString();
- if (j < intervals.size() - 1) {
- ss << ", ";
- }
- }
- return ss;
- }
+// For debugging.
+size_t IndexBounds::size() const {
+ return fields.size();
+}
- // static
- void OrderedIntervalList::complement() {
- BSONObjBuilder minBob;
- minBob.appendMinKey("");
- BSONObj minObj = minBob.obj();
-
- // We complement by scanning the entire range of BSON values
- // from MinKey to MaxKey. The value from which we must begin
- // the next complemented interval is kept in 'curBoundary'.
- BSONElement curBoundary = minObj.firstElement();
-
- // If 'curInclusive' is true, then 'curBoundary' is
- // included in one of the original intervals, and hence
- // should not be included in the complement (and vice-versa
- // if 'curInclusive' is false).
- bool curInclusive = false;
-
- // We will build up a list of intervals that represents
- // the inversion of those in the OIL.
- vector<Interval> newIntervals;
- for (size_t j = 0; j < intervals.size(); ++j) {
- Interval curInt = intervals[j];
- if (0 != curInt.start.woCompare(curBoundary) ||
- (!curInclusive && !curInt.startInclusive)) {
- // Make a new interval from 'curBoundary' to
- // the start of 'curInterval'.
- BSONObjBuilder intBob;
- intBob.append(curBoundary);
- intBob.append(curInt.start);
- Interval newInt(intBob.obj(), !curInclusive, !curInt.startInclusive);
- newIntervals.push_back(newInt);
- }
+string IndexBounds::getFieldName(size_t i) const {
+ return i < size() ? fields[i].name : "";
+}
- // Reset the boundary for the next iteration.
- curBoundary = curInt.end;
- curInclusive = curInt.endInclusive;
- }
+size_t IndexBounds::getNumIntervals(size_t i) const {
+ return i < size() ? fields[i].intervals.size() : 0;
+}
- // We may have to add a final interval which ends in MaxKey.
- BSONObjBuilder maxBob;
- maxBob.appendMaxKey("");
- BSONObj maxObj = maxBob.obj();
- BSONElement maxKey = maxObj.firstElement();
- if (0 != maxKey.woCompare(curBoundary) || !curInclusive) {
+Interval IndexBounds::getInterval(size_t i, size_t j) const {
+ if (i < size() && j < fields[i].intervals.size()) {
+ return fields[i].intervals[j];
+ } else {
+ return Interval();
+ }
+}
+
+string OrderedIntervalList::toString() const {
+ mongoutils::str::stream ss;
+ ss << "['" << name << "']: ";
+ for (size_t j = 0; j < intervals.size(); ++j) {
+ ss << intervals[j].toString();
+ if (j < intervals.size() - 1) {
+ ss << ", ";
+ }
+ }
+ return ss;
+}
+
+// static
+void OrderedIntervalList::complement() {
+ BSONObjBuilder minBob;
+ minBob.appendMinKey("");
+ BSONObj minObj = minBob.obj();
+
+ // We complement by scanning the entire range of BSON values
+ // from MinKey to MaxKey. The value from which we must begin
+ // the next complemented interval is kept in 'curBoundary'.
+ BSONElement curBoundary = minObj.firstElement();
+
+ // If 'curInclusive' is true, then 'curBoundary' is
+ // included in one of the original intervals, and hence
+ // should not be included in the complement (and vice-versa
+ // if 'curInclusive' is false).
+ bool curInclusive = false;
+
+ // We will build up a list of intervals that represents
+ // the inversion of those in the OIL.
+ vector<Interval> newIntervals;
+ for (size_t j = 0; j < intervals.size(); ++j) {
+ Interval curInt = intervals[j];
+ if (0 != curInt.start.woCompare(curBoundary) || (!curInclusive && !curInt.startInclusive)) {
+ // Make a new interval from 'curBoundary' to
+ // the start of 'curInterval'.
BSONObjBuilder intBob;
intBob.append(curBoundary);
- intBob.append(maxKey);
- Interval newInt(intBob.obj(), !curInclusive, true);
+ intBob.append(curInt.start);
+ Interval newInt(intBob.obj(), !curInclusive, !curInt.startInclusive);
newIntervals.push_back(newInt);
}
- // Replace the old list of intervals with the new one.
- intervals.clear();
- intervals.insert(intervals.end(), newIntervals.begin(), newIntervals.end());
+ // Reset the boundary for the next iteration.
+ curBoundary = curInt.end;
+ curInclusive = curInt.endInclusive;
}
- string IndexBounds::toString() const {
- mongoutils::str::stream ss;
- if (isSimpleRange) {
- ss << "[" << startKey.toString() << ", ";
- if (endKey.isEmpty()) {
+ // We may have to add a final interval which ends in MaxKey.
+ BSONObjBuilder maxBob;
+ maxBob.appendMaxKey("");
+ BSONObj maxObj = maxBob.obj();
+ BSONElement maxKey = maxObj.firstElement();
+ if (0 != maxKey.woCompare(curBoundary) || !curInclusive) {
+ BSONObjBuilder intBob;
+ intBob.append(curBoundary);
+ intBob.append(maxKey);
+ Interval newInt(intBob.obj(), !curInclusive, true);
+ newIntervals.push_back(newInt);
+ }
+
+ // Replace the old list of intervals with the new one.
+ intervals.clear();
+ intervals.insert(intervals.end(), newIntervals.begin(), newIntervals.end());
+}
+
+string IndexBounds::toString() const {
+ mongoutils::str::stream ss;
+ if (isSimpleRange) {
+ ss << "[" << startKey.toString() << ", ";
+ if (endKey.isEmpty()) {
+ ss << "]";
+ } else {
+ ss << endKey.toString();
+ if (endKeyInclusive) {
ss << "]";
+ } else {
+ ss << ")";
}
- else {
- ss << endKey.toString();
- if (endKeyInclusive) {
- ss << "]";
- }
- else {
- ss << ")";
- }
- }
- return ss;
}
- for (size_t i = 0; i < fields.size(); ++i) {
- if (i > 0) {
- ss << ", ";
- }
- ss << "field #" << i << fields[i].toString();
- }
-
return ss;
}
+ for (size_t i = 0; i < fields.size(); ++i) {
+ if (i > 0) {
+ ss << ", ";
+ }
+ ss << "field #" << i << fields[i].toString();
+ }
- BSONObj IndexBounds::toBSON() const {
- BSONObjBuilder bob;
- vector<OrderedIntervalList>::const_iterator itField;
- for (itField = fields.begin(); itField != fields.end(); ++itField) {
- BSONArrayBuilder fieldBuilder(bob.subarrayStart(itField->name));
-
- vector<Interval>::const_iterator itInterval;
- for (itInterval = itField->intervals.begin()
- ; itInterval != itField->intervals.end()
- ; ++itInterval) {
- std::string intervalStr = itInterval->toString();
-
- // Insulate against hitting BSON size limit.
- if ((bob.len() + (int)intervalStr.size()) > BSONObjMaxUserSize) {
- fieldBuilder.append("warning: bounds truncated due to BSON size limit");
- fieldBuilder.doneFast();
- return bob.obj();
- }
-
- fieldBuilder.append(intervalStr);
+ return ss;
+}
+
+BSONObj IndexBounds::toBSON() const {
+ BSONObjBuilder bob;
+ vector<OrderedIntervalList>::const_iterator itField;
+ for (itField = fields.begin(); itField != fields.end(); ++itField) {
+ BSONArrayBuilder fieldBuilder(bob.subarrayStart(itField->name));
+
+ vector<Interval>::const_iterator itInterval;
+ for (itInterval = itField->intervals.begin(); itInterval != itField->intervals.end();
+ ++itInterval) {
+ std::string intervalStr = itInterval->toString();
+
+ // Insulate against hitting BSON size limit.
+ if ((bob.len() + (int)intervalStr.size()) > BSONObjMaxUserSize) {
+ fieldBuilder.append("warning: bounds truncated due to BSON size limit");
+ fieldBuilder.doneFast();
+ return bob.obj();
}
- fieldBuilder.doneFast();
+ fieldBuilder.append(intervalStr);
}
- return bob.obj();
+ fieldBuilder.doneFast();
}
- //
- // Validity checking for bounds
- //
+ return bob.obj();
+}
- bool OrderedIntervalList::isValidFor(int expectedOrientation) const {
- // Make sure each interval's start is oriented correctly with respect to its end.
- for (size_t j = 0; j < intervals.size(); ++j) {
- // false means don't consider field name.
- int cmp = sgn(intervals[j].end.woCompare(intervals[j].start, false));
+//
+// Validity checking for bounds
+//
- if (cmp == 0 && intervals[j].startInclusive
- && intervals[j].endInclusive) { continue; }
+bool OrderedIntervalList::isValidFor(int expectedOrientation) const {
+ // Make sure each interval's start is oriented correctly with respect to its end.
+ for (size_t j = 0; j < intervals.size(); ++j) {
+ // false means don't consider field name.
+ int cmp = sgn(intervals[j].end.woCompare(intervals[j].start, false));
- if (cmp != expectedOrientation) {
- return false;
- }
+ if (cmp == 0 && intervals[j].startInclusive && intervals[j].endInclusive) {
+ continue;
}
- // Make sure each interval is oriented correctly with respect to its neighbors.
- for (size_t j = 1; j < intervals.size(); ++j) {
- int cmp = sgn(intervals[j].start.woCompare(intervals[j - 1].end, false));
-
- // TODO: We could care if the end of one interval is the start of another. The bounds
- // are still valid but they're a bit sloppy; they could have been combined to form one
- // interval if either of them is inclusive.
- if (0 == cmp) { continue; }
-
- if (cmp != expectedOrientation) {
- return false;
- }
+ if (cmp != expectedOrientation) {
+ return false;
}
- return true;
}
- bool IndexBounds::isValidFor(const BSONObj& keyPattern, int direction) {
- if (isSimpleRange) {
- return direction == sgn(endKey.woCompare(startKey, keyPattern, false));
+ // Make sure each interval is oriented correctly with respect to its neighbors.
+ for (size_t j = 1; j < intervals.size(); ++j) {
+ int cmp = sgn(intervals[j].start.woCompare(intervals[j - 1].end, false));
+
+ // TODO: We could care if the end of one interval is the start of another. The bounds
+ // are still valid but they're a bit sloppy; they could have been combined to form one
+ // interval if either of them is inclusive.
+ if (0 == cmp) {
+ continue;
}
- BSONObjIterator it(keyPattern);
+ if (cmp != expectedOrientation) {
+ return false;
+ }
+ }
+ return true;
+}
- for (size_t i = 0; i < fields.size(); ++i) {
- // We expect a bound for each field in the index.
- if (!it.more()) { return false; }
- BSONElement elt = it.next();
+bool IndexBounds::isValidFor(const BSONObj& keyPattern, int direction) {
+ if (isSimpleRange) {
+ return direction == sgn(endKey.woCompare(startKey, keyPattern, false));
+ }
- const OrderedIntervalList& field = fields[i];
+ BSONObjIterator it(keyPattern);
- // Make sure the names match up.
- if (field.name != elt.fieldName()) { return false; }
+ for (size_t i = 0; i < fields.size(); ++i) {
+ // We expect a bound for each field in the index.
+ if (!it.more()) {
+ return false;
+ }
+ BSONElement elt = it.next();
- // Special indices are all inserted increasing. elt.number() will return 0 if it's
- // not a number. Special indices are strings, not numbers.
- int expectedOrientation = direction * ((elt.number() >= 0) ? 1 : -1);
+ const OrderedIntervalList& field = fields[i];
- if (!field.isValidFor(expectedOrientation)) {
- return false;
- }
+ // Make sure the names match up.
+ if (field.name != elt.fieldName()) {
+ return false;
}
- return !it.more();
+ // Special indices are all inserted increasing. elt.number() will return 0 if it's
+ // not a number. Special indices are strings, not numbers.
+ int expectedOrientation = direction * ((elt.number() >= 0) ? 1 : -1);
+
+ if (!field.isValidFor(expectedOrientation)) {
+ return false;
+ }
}
- //
- // Iteration over index bounds
- //
+ return !it.more();
+}
+
+//
+// Iteration over index bounds
+//
+
+IndexBoundsChecker::IndexBoundsChecker(const IndexBounds* bounds,
+ const BSONObj& keyPattern,
+ int scanDirection)
+ : _bounds(bounds), _curInterval(bounds->fields.size(), 0) {
+ BSONObjIterator it(keyPattern);
+ while (it.more()) {
+ int indexDirection = it.next().number() >= 0 ? 1 : -1;
+ _expectedDirection.push_back(indexDirection * scanDirection);
+ }
+}
- IndexBoundsChecker::IndexBoundsChecker(const IndexBounds* bounds, const BSONObj& keyPattern,
- int scanDirection)
- : _bounds(bounds), _curInterval(bounds->fields.size(), 0) {
+bool IndexBoundsChecker::getStartSeekPoint(IndexSeekPoint* out) {
+ out->prefixLen = 0;
+ out->prefixExclusive = false;
+ out->keySuffix.resize(_bounds->fields.size());
+ out->suffixInclusive.resize(_bounds->fields.size());
- BSONObjIterator it(keyPattern);
- while (it.more()) {
- int indexDirection = it.next().number() >= 0 ? 1 : -1;
- _expectedDirection.push_back(indexDirection * scanDirection);
+ for (size_t i = 0; i < _bounds->fields.size(); ++i) {
+ if (0 == _bounds->fields[i].intervals.size()) {
+ return false;
}
+ out->keySuffix[i] = &_bounds->fields[i].intervals[0].start;
+ out->suffixInclusive[i] = _bounds->fields[i].intervals[0].startInclusive;
}
- bool IndexBoundsChecker::getStartSeekPoint(IndexSeekPoint* out) {
- out->prefixLen = 0;
- out->prefixExclusive = false;
- out->keySuffix.resize(_bounds->fields.size());
- out->suffixInclusive.resize(_bounds->fields.size());
+ return true;
+}
+
+bool IndexBoundsChecker::findLeftmostProblem(const vector<BSONElement>& keyValues,
+ size_t* where,
+ Location* what) {
+ // For each field in the index key, see if it's in the interval it should be.
+ for (size_t i = 0; i < _curInterval.size(); ++i) {
+ const OrderedIntervalList& field = _bounds->fields[i];
+ const Interval& currentInterval = field.intervals[_curInterval[i]];
+ Location cmp = intervalCmp(currentInterval, keyValues[i], _expectedDirection[i]);
+
+ // If it's not in the interval we think it is...
+ if (0 != cmp) {
+ *where = i;
+ *what = cmp;
+ return true;
+ }
+ }
- for (size_t i = 0; i < _bounds->fields.size(); ++i) {
- if (0 == _bounds->fields[i].intervals.size()) {
- return false;
- }
- out->keySuffix[i] = &_bounds->fields[i].intervals[0].start;
- out->suffixInclusive[i] = _bounds->fields[i].intervals[0].startInclusive;
+ return false;
+}
+
+bool IndexBoundsChecker::spaceLeftToAdvance(size_t fieldsToCheck,
+ const vector<BSONElement>& keyValues) {
+ // Check end conditions. Since we need to move the keys before
+ // firstNonContainedField forward, let's make sure that those fields are not at the
+ // end of their bounds.
+ for (size_t i = 0; i < fieldsToCheck; ++i) {
+ // Field 'i' isn't at its last interval. There's possibly a key we could move forward
+ // to, either in the current interval or the next one.
+ if (_curInterval[i] != _bounds->fields[i].intervals.size() - 1) {
+ return true;
}
- return true;
- }
+ // Field 'i' is at its last interval.
+ const Interval& ival = _bounds->fields[i].intervals[_curInterval[i]];
- bool IndexBoundsChecker::findLeftmostProblem(const vector<BSONElement>& keyValues,
- size_t* where,
- Location* what) {
- // For each field in the index key, see if it's in the interval it should be.
- for (size_t i = 0; i < _curInterval.size(); ++i) {
- const OrderedIntervalList& field = _bounds->fields[i];
- const Interval& currentInterval = field.intervals[_curInterval[i]];
- Location cmp = intervalCmp(currentInterval, keyValues[i], _expectedDirection[i]);
-
- // If it's not in the interval we think it is...
- if (0 != cmp) {
- *where = i;
- *what = cmp;
- return true;
- }
+ // We're OK if it's an open interval. There are an infinite number of keys between any
+ // key and the end point...
+ if (!ival.endInclusive) {
+ return true;
}
- return false;
+ // If it's a closed interval, we're fine so long as we haven't hit the end point of
+ // the interval.
+ if (-_expectedDirection[i] == sgn(keyValues[i].woCompare(ival.end, false))) {
+ return true;
+ }
}
- bool IndexBoundsChecker::spaceLeftToAdvance(size_t fieldsToCheck,
- const vector<BSONElement>& keyValues) {
- // Check end conditions. Since we need to move the keys before
- // firstNonContainedField forward, let's make sure that those fields are not at the
- // end of their bounds.
- for (size_t i = 0; i < fieldsToCheck; ++i) {
- // Field 'i' isn't at its last interval. There's possibly a key we could move forward
- // to, either in the current interval or the next one.
- if (_curInterval[i] != _bounds->fields[i].intervals.size() - 1) {
- return true;
- }
+ return false;
+}
+
+bool IndexBoundsChecker::isValidKey(const BSONObj& key) {
+ BSONObjIterator it(key);
+ size_t curOil = 0;
+ while (it.more()) {
+ BSONElement elt = it.next();
+ size_t whichInterval;
+ Location loc = findIntervalForField(
+ elt, _bounds->fields[curOil], _expectedDirection[curOil], &whichInterval);
+ if (WITHIN != loc) {
+ return false;
+ }
+ ++curOil;
+ }
+ return true;
+}
+
+IndexBoundsChecker::KeyState IndexBoundsChecker::checkKey(const BSONObj& key, IndexSeekPoint* out) {
+ verify(_curInterval.size() > 0);
+ out->keySuffix.resize(_curInterval.size());
+ out->suffixInclusive.resize(_curInterval.size());
+
+ // It's useful later to go from a field number to the value for that field. Store these.
+ // TODO: on optimization pass, populate the vector as-needed and keep the vector around as a
+ // member variable
+ vector<BSONElement> keyValues;
+ BSONObjIterator keyIt(key);
+ while (keyIt.more()) {
+ keyValues.push_back(keyIt.next());
+ }
+ verify(keyValues.size() == _curInterval.size());
- // Field 'i' is at its last interval.
- const Interval& ival = _bounds->fields[i].intervals[_curInterval[i]];
+ size_t firstNonContainedField;
+ Location orientation;
- // We're OK if it's an open interval. There are an infinite number of keys between any
- // key and the end point...
- if (!ival.endInclusive) {
- return true;
- }
+ if (!findLeftmostProblem(keyValues, &firstNonContainedField, &orientation)) {
+ // All fields in the index are within the current interval. Caller can use the key.
+ return VALID;
+ }
- // If it's a closed interval, we're fine so long as we haven't hit the end point of
- // the interval.
- if (-_expectedDirection[i] == sgn(keyValues[i].woCompare(ival.end, false))) {
- return true;
- }
+ // Field number 'firstNonContainedField' of the index key is before its current interval.
+ if (BEHIND == orientation) {
+ // It's behind our current interval, but our current interval could be wrong. Start all
+ // intervals from firstNonContainedField to the right over...
+ for (size_t i = firstNonContainedField; i < _curInterval.size(); ++i) {
+ _curInterval[i] = 0;
}
- return false;
- }
-
- bool IndexBoundsChecker::isValidKey(const BSONObj& key) {
- BSONObjIterator it(key);
- size_t curOil = 0;
- while (it.more()) {
- BSONElement elt = it.next();
- size_t whichInterval;
- Location loc = findIntervalForField(elt, _bounds->fields[curOil], _expectedDirection[curOil], &whichInterval);
- if (WITHIN != loc) {
- return false;
- }
- ++curOil;
+ // ...and try again. This call modifies 'orientation', so we may check its value again
+ // in the clause below if field number 'firstNonContainedField' isn't in its first
+ // interval.
+ if (!findLeftmostProblem(keyValues, &firstNonContainedField, &orientation)) {
+ return VALID;
}
- return true;
}
- IndexBoundsChecker::KeyState IndexBoundsChecker::checkKey(const BSONObj& key,
- IndexSeekPoint* out) {
- verify(_curInterval.size() > 0);
- out->keySuffix.resize(_curInterval.size());
- out->suffixInclusive.resize(_curInterval.size());
-
- // It's useful later to go from a field number to the value for that field. Store these.
- // TODO: on optimization pass, populate the vector as-needed and keep the vector around as a
- // member variable
- vector<BSONElement> keyValues;
- BSONObjIterator keyIt(key);
- while (keyIt.more()) {
- keyValues.push_back(keyIt.next());
+ // Field number 'firstNonContainedField' of the index key is before all current intervals.
+ if (BEHIND == orientation) {
+ // Tell the caller to move forward to the start of the current interval.
+ out->keyPrefix = key.getOwned();
+ out->prefixLen = firstNonContainedField;
+ out->prefixExclusive = false;
+
+ for (size_t j = firstNonContainedField; j < _curInterval.size(); ++j) {
+ const OrderedIntervalList& oil = _bounds->fields[j];
+ out->keySuffix[j] = &oil.intervals[_curInterval[j]].start;
+ out->suffixInclusive[j] = oil.intervals[_curInterval[j]].startInclusive;
}
- verify(keyValues.size() == _curInterval.size());
- size_t firstNonContainedField;
- Location orientation;
+ return MUST_ADVANCE;
+ }
- if (!findLeftmostProblem(keyValues, &firstNonContainedField, &orientation)) {
- // All fields in the index are within the current interval. Caller can use the key.
- return VALID;
- }
+ verify(AHEAD == orientation);
+
+ // Field number 'firstNonContainedField' of the index key is after the interval we think it's
+ // in. Fields 0 through 'firstNonContained-1' are within their current intervals and we can
+ // ignore them.
+ while (firstNonContainedField < _curInterval.size()) {
+ // Find the interval that contains our field.
+ size_t newIntervalForField;
+
+ Location where = findIntervalForField(keyValues[firstNonContainedField],
+ _bounds->fields[firstNonContainedField],
+ _expectedDirection[firstNonContainedField],
+ &newIntervalForField);
+
+ if (WITHIN == where) {
+ // Found a new interval for field firstNonContainedField. Move our internal choice
+ // of interval to that.
+ _curInterval[firstNonContainedField] = newIntervalForField;
+ // Let's find valid intervals for fields to the right.
+ ++firstNonContainedField;
+ } else if (BEHIND == where) {
+ // firstNonContained field is between the intervals (newIntervalForField-1) and
+ // newIntervalForField. We have to tell the caller to move forward until he at
+ // least hits our new current interval.
+ _curInterval[firstNonContainedField] = newIntervalForField;
+
+ // All other fields to the right start at their first interval.
+ for (size_t i = firstNonContainedField + 1; i < _curInterval.size(); ++i) {
+ _curInterval[i] = 0;
+ }
- // Field number 'firstNonContainedField' of the index key is before its current interval.
- if (BEHIND == orientation) {
- // It's behind our current interval, but our current interval could be wrong. Start all
- // intervals from firstNonContainedField to the right over...
+ out->keyPrefix = key.getOwned();
+ out->prefixLen = firstNonContainedField;
+ out->prefixExclusive = false;
for (size_t i = firstNonContainedField; i < _curInterval.size(); ++i) {
- _curInterval[i] = 0;
+ const OrderedIntervalList& oil = _bounds->fields[i];
+ out->keySuffix[i] = &oil.intervals[_curInterval[i]].start;
+ out->suffixInclusive[i] = oil.intervals[_curInterval[i]].startInclusive;
}
- // ...and try again. This call modifies 'orientation', so we may check its value again
- // in the clause below if field number 'firstNonContainedField' isn't in its first
- // interval.
- if (!findLeftmostProblem(keyValues, &firstNonContainedField, &orientation)) {
- return VALID;
+ return MUST_ADVANCE;
+ } else {
+ verify(AHEAD == where);
+ // Field number 'firstNonContainedField' cannot possibly be placed into an interval,
+ // as it is already past its last possible interval. The caller must move forward
+ // to a key with a greater value for the previous field.
+
+ // If all fields to the left have hit the end of their intervals, we can't ask them
+ // to move forward and we should stop iterating.
+ if (!spaceLeftToAdvance(firstNonContainedField, keyValues)) {
+ return DONE;
}
- }
- // Field number 'firstNonContainedField' of the index key is before all current intervals.
- if (BEHIND == orientation) {
- // Tell the caller to move forward to the start of the current interval.
out->keyPrefix = key.getOwned();
out->prefixLen = firstNonContainedField;
- out->prefixExclusive = false;
+ out->prefixExclusive = true;
- for (size_t j = firstNonContainedField; j < _curInterval.size(); ++j) {
- const OrderedIntervalList& oil = _bounds->fields[j];
- out->keySuffix[j] = &oil.intervals[_curInterval[j]].start;
- out->suffixInclusive[j] = oil.intervals[_curInterval[j]].startInclusive;
+ for (size_t i = firstNonContainedField; i < _curInterval.size(); ++i) {
+ _curInterval[i] = 0;
}
+ // If movePastKeyElts is true, we don't examine any fields after the keyEltsToUse
+ // fields of the key. As such we don't populate the out/incOut.
return MUST_ADVANCE;
}
-
- verify(AHEAD == orientation);
-
- // Field number 'firstNonContainedField' of the index key is after interval we think it's
- // in. Fields 0 through 'firstNonContained-1' are within their current intervals and we can
- // ignore them.
- while (firstNonContainedField < _curInterval.size()) {
- // Find the interval that contains our field.
- size_t newIntervalForField;
-
- Location where = findIntervalForField(keyValues[firstNonContainedField],
- _bounds->fields[firstNonContainedField],
- _expectedDirection[firstNonContainedField],
- &newIntervalForField);
-
- if (WITHIN == where) {
- // Found a new interval for field firstNonContainedField. Move our internal choice
- // of interval to that.
- _curInterval[firstNonContainedField] = newIntervalForField;
- // Let's find valid intervals for fields to the right.
- ++firstNonContainedField;
- }
- else if (BEHIND == where) {
- // firstNonContained field is between the intervals (newIntervalForField-1) and
- // newIntervalForField. We have to tell the caller to move forward until he at
- // least hits our new current interval.
- _curInterval[firstNonContainedField] = newIntervalForField;
-
- // All other fields to the right start at their first interval.
- for (size_t i = firstNonContainedField + 1; i < _curInterval.size(); ++i) {
- _curInterval[i] = 0;
- }
-
- out->keyPrefix = key.getOwned();
- out->prefixLen = firstNonContainedField;
- out->prefixExclusive = false;
- for (size_t i = firstNonContainedField; i < _curInterval.size(); ++i) {
- const OrderedIntervalList& oil = _bounds->fields[i];
- out->keySuffix[i] = &oil.intervals[_curInterval[i]].start;
- out->suffixInclusive[i] = oil.intervals[_curInterval[i]].startInclusive;
- }
-
- return MUST_ADVANCE;
- }
- else {
- verify (AHEAD == where);
- // Field number 'firstNonContainedField' cannot possibly be placed into an interval,
- // as it is already past its last possible interval. The caller must move forward
- // to a key with a greater value for the previous field.
-
- // If all fields to the left have hit the end of their intervals, we can't ask them
- // to move forward and we should stop iterating.
- if (!spaceLeftToAdvance(firstNonContainedField, keyValues)) {
- return DONE;
- }
-
- out->keyPrefix = key.getOwned();
- out->prefixLen = firstNonContainedField;
- out->prefixExclusive = true;
-
- for (size_t i = firstNonContainedField; i < _curInterval.size(); ++i) {
- _curInterval[i] = 0;
- }
-
- // If movePastKeyElts is true, we don't examine any fields after the keyEltsToUse
- // fields of the key. As such we don't populate the out/incOut.
- return MUST_ADVANCE;
- }
- }
-
- verify(firstNonContainedField == _curInterval.size());
- return VALID;
}
- namespace {
-
- /**
- * Returns true if key (first member of pair) is AHEAD of interval
- * along 'direction' (second member of pair).
- */
- bool isKeyAheadOfInterval(const Interval& interval,
- const std::pair<BSONElement, int>& keyAndDirection) {
- const BSONElement& elt = keyAndDirection.first;
- int expectedDirection = keyAndDirection.second;
- IndexBoundsChecker::Location where = intervalCmp(interval, elt, expectedDirection);
- return IndexBoundsChecker::AHEAD == where;
- }
+ verify(firstNonContainedField == _curInterval.size());
+ return VALID;
+}
- } // namespace
-
- // static
- IndexBoundsChecker::Location IndexBoundsChecker::findIntervalForField(const BSONElement& elt,
- const OrderedIntervalList& oil, const int expectedDirection, size_t* newIntervalIndex) {
- // Binary search for interval.
- // Intervals are ordered in the same direction as our keys.
- // Key behind all intervals: [BEHIND, ..., BEHIND]
- // Key ahead of all intervals: [AHEAD, ..., AHEAD]
- // Key within one interval: [AHEAD, ..., WITHIN, BEHIND, ...]
- // Key not in any inteval: [AHEAD, ..., AHEAD, BEHIND, ...]
-
- // Find left-most BEHIND/WITHIN interval.
- vector<Interval>::const_iterator i =
- std::lower_bound(oil.intervals.begin(), oil.intervals.end(),
- std::make_pair(elt, expectedDirection), isKeyAheadOfInterval);
-
- // Key ahead of all intervals.
- if (i == oil.intervals.end()) {
- return AHEAD;
- }
+namespace {
- // Found either interval containing key or left-most BEHIND interval.
- *newIntervalIndex = std::distance(oil.intervals.begin(), i);
+/**
+ * Returns true if key (first member of pair) is AHEAD of interval
+ * along 'direction' (second member of pair).
+ */
+bool isKeyAheadOfInterval(const Interval& interval,
+ const std::pair<BSONElement, int>& keyAndDirection) {
+ const BSONElement& elt = keyAndDirection.first;
+ int expectedDirection = keyAndDirection.second;
+ IndexBoundsChecker::Location where = intervalCmp(interval, elt, expectedDirection);
+ return IndexBoundsChecker::AHEAD == where;
+}
+
+} // namespace
+
+// static
+IndexBoundsChecker::Location IndexBoundsChecker::findIntervalForField(
+ const BSONElement& elt,
+ const OrderedIntervalList& oil,
+ const int expectedDirection,
+ size_t* newIntervalIndex) {
+ // Binary search for interval.
+ // Intervals are ordered in the same direction as our keys.
+ // Key behind all intervals: [BEHIND, ..., BEHIND]
+ // Key ahead of all intervals: [AHEAD, ..., AHEAD]
+ // Key within one interval: [AHEAD, ..., WITHIN, BEHIND, ...]
+ // Key not in any interval: [AHEAD, ..., AHEAD, BEHIND, ...]
+
+ // Find left-most BEHIND/WITHIN interval.
+ vector<Interval>::const_iterator i = std::lower_bound(oil.intervals.begin(),
+ oil.intervals.end(),
+ std::make_pair(elt, expectedDirection),
+ isKeyAheadOfInterval);
+
+ // Key ahead of all intervals.
+ if (i == oil.intervals.end()) {
+ return AHEAD;
+ }
- // Additional check to determine if interval contains key.
- Location where = intervalCmp(*i, elt, expectedDirection);
- invariant(BEHIND == where || WITHIN == where);
+ // Found either interval containing key or left-most BEHIND interval.
+ *newIntervalIndex = std::distance(oil.intervals.begin(), i);
- return where;
- }
+ // Additional check to determine if interval contains key.
+ Location where = intervalCmp(*i, elt, expectedDirection);
+ invariant(BEHIND == where || WITHIN == where);
+
+ return where;
+}
} // namespace mongo
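
To make the complement() walk above concrete, a small sketch (illustrative only; Interval's two-element BSONObj constructor and the inclusivity flags are used exactly as in the loop above):

    // Sketch: complement of { [3, 6), [8, 10] } on an ascending OIL.
    OrderedIntervalList oil("a");
    oil.intervals.push_back(Interval(BSON("" << 3 << "" << 6), true, false));   // [3, 6)
    oil.intervals.push_back(Interval(BSON("" << 8 << "" << 10), true, true));   // [8, 10]
    oil.complement();
    // oil.intervals is now { [MinKey, 3), [6, 8), (10, MaxKey] }, i.e. the example
    // given for complement() in index_bounds.h.
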
diff --git a/src/mongo/db/query/index_bounds.h b/src/mongo/db/query/index_bounds.h
index d1e140f3932..f70f75def8b 100644
--- a/src/mongo/db/query/index_bounds.h
+++ b/src/mongo/db/query/index_bounds.h
@@ -37,201 +37,204 @@
namespace mongo {
+/**
+ * An ordered list of intervals for one field.
+ */
+struct OrderedIntervalList {
+ OrderedIntervalList() {}
+ OrderedIntervalList(const std::string& n) : name(n) {}
+
+ // Must be ordered according to the index order.
+ std::vector<Interval> intervals;
+
+ // TODO: We could drop this. Only used in IndexBounds::isValidFor.
+ std::string name;
+
+ bool isValidFor(int expectedOrientation) const;
+ std::string toString() const;
+
/**
- * An ordered list of intervals for one field.
+ * Complements the OIL. Used by the index bounds builder in order
+ * to create index bounds for $not predicates.
+ *
+ * Assumes the OIL is increasing, and therefore must be called prior to
+ * alignBounds(...).
+ *
+ * Example:
+ * The complement of [3, 6), [8, 10] is [MinKey, 3), [6, 8), (10, MaxKey],
+ * where this OIL has direction==1.
*/
- struct OrderedIntervalList {
- OrderedIntervalList() { }
- OrderedIntervalList(const std::string& n) : name(n) { }
-
- // Must be ordered according to the index order.
- std::vector<Interval> intervals;
-
- // TODO: We could drop this. Only used in IndexBounds::isValidFor.
- std::string name;
-
- bool isValidFor(int expectedOrientation) const;
- std::string toString() const;
-
- /**
- * Complements the OIL. Used by the index bounds builder in order
- * to create index bounds for $not predicates.
- *
- * Assumes the OIL is increasing, and therefore must be called prior to
- * alignBounds(...).
- *
- * Example:
- * The complement of [3, 6), [8, 10] is [MinKey, 3), [6, 8), (20, MaxKey],
- * where this OIL has direction==1.
- */
- void complement();
- };
+ void complement();
+};
+
+/**
+ * Tied to an index. Permissible values for all fields in the index. Requires the index to
+ * interpret. Previously known as FieldRangeVector.
+ */
+struct IndexBounds {
+ IndexBounds() : isSimpleRange(false), endKeyInclusive(false) {}
+
+ // For each indexed field, the values that the field is allowed to take on.
+ std::vector<OrderedIntervalList> fields;
+
+ // Debugging check.
+ // We must have as many fields as the key pattern does.
+ // The fields must be oriented in the direction we'd encounter them given the indexing
+ // direction (the value of the field in keyPattern) and index traversal direction provided
+ // by 'direction'.
+ //
+ // An example: [7, 20]
+ // We can traverse this forward if indexed ascending
+ // We can traverse this backwards if indexed descending.
+ bool isValidFor(const BSONObj& keyPattern, int direction);
+
+ // Methods below used for debugging purpose only. Do not use outside testing code.
+ size_t size() const;
+ std::string getFieldName(size_t i) const;
+ size_t getNumIntervals(size_t i) const;
+ Interval getInterval(size_t i, size_t j) const;
+ std::string toString() const;
/**
- * Tied to an index. Permissible values for all fields in the index. Requires the index to
- * interpret. Previously known as FieldRangeVector.
+ * BSON format for explain. The format is an array of strings for each field.
+ * Each string represents an interval. The strings use "[" and "]" if the interval
+ * bounds are inclusive, and "(" / ")" if exclusive.
+ *
+ * Ex.
+ * {a: ["[1, 1]", "(3, 10)"], b: ["[Infinity, 10)"] }
*/
- struct IndexBounds {
- IndexBounds() : isSimpleRange(false), endKeyInclusive(false) { }
-
- // For each indexed field, the values that the field is allowed to take on.
- std::vector<OrderedIntervalList> fields;
-
- // Debugging check.
- // We must have as many fields the key pattern does.
- // The fields must be oriented in the direction we'd encounter them given the indexing
- // direction (the value of the field in keyPattern) and index traversal direction provided
- // by 'direction'.
- //
- // An example: [7, 20]
- // We can traverse this forward if indexed ascending
- // We can traverse this backwards if indexed descending.
- bool isValidFor(const BSONObj& keyPattern, int direction);
-
- // Methods below used for debugging purpose only. Do not use outside testing code.
- size_t size() const;
- std::string getFieldName(size_t i) const;
- size_t getNumIntervals(size_t i) const;
- Interval getInterval(size_t i, size_t j) const;
- std::string toString() const;
-
- /**
- * BSON format for explain. The format is an array of strings for each field.
- * Each string represents an interval. The strings use "[" and "]" if the interval
- * bounds are inclusive, and "(" / ")" if exclusive.
- *
- * Ex.
- * {a: ["[1, 1]", "(3, 10)"], b: ["[Infinity, 10)"] }
- */
- BSONObj toBSON() const;
-
- // TODO: we use this for max/min scan. Consider migrating that.
- bool isSimpleRange;
- BSONObj startKey;
- BSONObj endKey;
- bool endKeyInclusive;
+ BSONObj toBSON() const;
+
+ // TODO: we use this for max/min scan. Consider migrating that.
+ bool isSimpleRange;
+ BSONObj startKey;
+ BSONObj endKey;
+ bool endKeyInclusive;
+};
+
+/**
+ * A helper used by IndexScan to navigate an index.
+ */
+class IndexBoundsChecker {
+public:
+ /**
+ * keyPattern is the index that we're iterating over.
+ * bounds are the bounds we're allowed to iterate over.
+ * direction is the direction we're moving over the index, 1 or -1.
+ *
+ * Bounds not owned by us.
+ */
+ IndexBoundsChecker(const IndexBounds* bounds, const BSONObj& keyPattern, int direction);
+
+
+ /**
+ * Get the IndexSeekPoint that we should start with.
+ *
+ * Returns false if there are no possible index entries that match the bounds. In this case
+ * there is no valid start point to seek to so out will not be filled out and the caller
+ * should emit no results.
+ */
+ bool getStartSeekPoint(IndexSeekPoint* out);
+
+ /**
+ * The states of a key from an index scan. See checkKey below.
+ */
+ enum KeyState {
+ VALID,
+ MUST_ADVANCE,
+ DONE,
};
/**
- * A helper used by IndexScan to navigate an index.
+ * Is 'key' a valid key? Note that this differs from checkKey, which assumes that it
+ * receives keys in sorted order.
+ */
+ bool isValidKey(const BSONObj& key);
+
+ /**
+ * This function checks if the key is within the bounds we're iterating over and updates any
+ * internal state required to efficiently determine if the key is within our bounds.
+ *
+ * Possible outcomes:
+ *
+ * 1. The key is in our bounds. Returns VALID. Caller can use the data associated with the
+ * key.
+ *
+ * 2. The key is not in our bounds but has not exceeded the maximum value in our bounds.
+ * Returns MUST_ADVANCE. Caller must advance to the query provided in the out parameters
+ * and call checkKey again.
+ *
+ * 3. The key is past our bounds. Returns DONE. No further keys will satisfy the bounds
+ * and the caller should stop.
+ *
+ * keyEltsToUse, movePastKeyElts, out, and incOut must all be non-NULL.
+ * out and incOut must already be resized to have as many elements as the key has fields.
+ *
+ * In parameters:
+ * currentKey is the index key.
+ *
+ * Out parameter only valid if we return MUST_ADVANCE.
*/
- class IndexBoundsChecker {
- public:
- /**
- * keyPattern is the index that we're iterating over.
- * bounds are the bounds we're allowed to iterate over.
- * direction is the direction we're moving over the index, 1 or -1.
- *
- * Bounds not owned by us.
- */
- IndexBoundsChecker(const IndexBounds* bounds, const BSONObj& keyPattern, int direction);
-
-
- /**
- * Get the IndexSeekPoint that we should with.
- *
- * Returns false if there are no possible index entries that match the bounds. In this case
- * there is no valid start point to seek to so out will not be filled out and the caller
- * should emit no results.
- */
- bool getStartSeekPoint(IndexSeekPoint* out);
-
- /**
- * The states of a key from an index scan. See checkKey below.
- */
- enum KeyState {
- VALID,
- MUST_ADVANCE,
- DONE,
- };
-
- /**
- * Is 'key' a valid key? Note that this differs from checkKey, which assumes that it
- * receives keys in sorted order.
- */
- bool isValidKey(const BSONObj& key);
-
- /**
- * This function checks if the key is within the bounds we're iterating over and updates any
- * internal state required to efficiently determine if the key is within our bounds.
- *
- * Possible outcomes:
- *
- * 1. The key is in our bounds. Returns VALID. Caller can use the data associated with the
- * key.
- *
- * 2. The key is not in our bounds but has not exceeded the maximum value in our bounds.
- * Returns MUST_ADVANCE. Caller must advance to the query provided in the out parameters
- * and call checkKey again.
- *
- * 3. The key is past our bounds. Returns DONE. No further keys will satisfy the bounds
- * and the caller should stop.
- *
- * keyEltsToUse, movePastKeyElts, out, and incOut must all be non-NULL.
- * out and incOut must already be resized to have as many elements as the key has fields.
- *
- * In parameters:
- * currentKey is the index key.
- *
- * Out parameter only valid if we return MUST_ADVANCE.
- */
- KeyState checkKey(const BSONObj& currentKey, IndexSeekPoint* query);
-
- /**
- * Relative position of a key to an interval.
- * Exposed for testing only.
- */
- enum Location {
- BEHIND = -1,
- WITHIN = 0,
- AHEAD = 1,
- };
-
- /**
- * If 'elt' is in any interval, return WITHIN and set 'newIntervalIndex' to the index of the
- * interval in the ordered interval list.
- *
- * If 'elt' is not in any interval but could be advanced to be in one, return BEHIND and set
- * 'newIntervalIndex' to the index of the interval that 'elt' could be advanced to.
- *
- * If 'elt' cannot be advanced to any interval, return AHEAD.
- *
- * Exposed for testing only.
- *
- * TODO(efficiency): Start search from a given index.
- */
- static Location findIntervalForField(const BSONElement &elt, const OrderedIntervalList& oil,
- const int expectedDirection, size_t* newIntervalIndex);
-
- private:
- /**
- * Find the first field in the key that isn't within the interval we think it is. Returns
- * false if every field is in the interval we think it is. Returns true and populates out
- * parameters if a field isn't in the interval we think it is.
- *
- * Out parameters set if we return true:
- * 'where' is the leftmost field that isn't in the interval we think it is.
- * 'what' is the orientation of the field with respect to that interval.
- */
- bool findLeftmostProblem(const std::vector<BSONElement>& keyValues, size_t* where,
- Location* what);
-
- /**
- * Returns true if it's possible to advance any of the first 'fieldsToCheck' fields of the
- * index key and still be within valid index bounds.
- *
- * keyValues are the elements of the index key in order.
- */
- bool spaceLeftToAdvance(size_t fieldsToCheck, const std::vector<BSONElement>& keyValues);
-
- // The actual bounds. Must outlive this object. Not owned by us.
- const IndexBounds* _bounds;
-
- // For each field, which interval are we currently in?
- std::vector<size_t> _curInterval;
-
- // Direction of scan * direction of indexing.
- std::vector<int> _expectedDirection;
+ KeyState checkKey(const BSONObj& currentKey, IndexSeekPoint* query);
+
+ /**
+ * Relative position of a key to an interval.
+ * Exposed for testing only.
+ */
+ enum Location {
+ BEHIND = -1,
+ WITHIN = 0,
+ AHEAD = 1,
};
+ /**
+ * If 'elt' is in any interval, return WITHIN and set 'newIntervalIndex' to the index of the
+ * interval in the ordered interval list.
+ *
+ * If 'elt' is not in any interval but could be advanced to be in one, return BEHIND and set
+ * 'newIntervalIndex' to the index of the interval that 'elt' could be advanced to.
+ *
+ * If 'elt' cannot be advanced to any interval, return AHEAD.
+ *
+ * Exposed for testing only.
+ *
+ * TODO(efficiency): Start search from a given index.
+ */
+ static Location findIntervalForField(const BSONElement& elt,
+ const OrderedIntervalList& oil,
+ const int expectedDirection,
+ size_t* newIntervalIndex);
+
+private:
+ /**
+ * Find the first field in the key that isn't within the interval we think it is. Returns
+ * false if every field is in the interval we think it is. Returns true and populates out
+ * parameters if a field isn't in the interval we think it is.
+ *
+ * Out parameters set if we return true:
+ * 'where' is the leftmost field that isn't in the interval we think it is.
+ * 'what' is the orientation of the field with respect to that interval.
+ */
+ bool findLeftmostProblem(const std::vector<BSONElement>& keyValues,
+ size_t* where,
+ Location* what);
+
+ /**
+ * Returns true if it's possible to advance any of the first 'fieldsToCheck' fields of the
+ * index key and still be within valid index bounds.
+ *
+ * keyValues are the elements of the index key in order.
+ */
+ bool spaceLeftToAdvance(size_t fieldsToCheck, const std::vector<BSONElement>& keyValues);
+
+ // The actual bounds. Must outlive this object. Not owned by us.
+ const IndexBounds* _bounds;
+
+ // For each field, which interval are we currently in?
+ std::vector<size_t> _curInterval;
+
+ // Direction of scan * direction of indexing.
+ std::vector<int> _expectedDirection;
+};
+
} // namespace mongo
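
A minimal usage sketch of the checker interface above (schematic only; the scan loop is shown as comments rather than as an actual index scan stage):

    // Sketch: a single ascending field 'a' restricted to [3, 6).
    IndexBounds bounds;
    bounds.fields.push_back(OrderedIntervalList("a"));
    bounds.fields[0].intervals.push_back(Interval(BSON("" << 3 << "" << 6), true, false));

    IndexBoundsChecker checker(&bounds, BSON("a" << 1), /*direction*/ 1);

    IndexSeekPoint seekPoint;
    if (!checker.getStartSeekPoint(&seekPoint)) {
        // Some field has no intervals: nothing can match, so emit no results.
    }

    // For each key produced by the index scan, in index order:
    //   switch (checker.checkKey(currentKey, &seekPoint)) {
    //       case IndexBoundsChecker::VALID:        use the entry;        break;
    //       case IndexBoundsChecker::MUST_ADVANCE: seek to 'seekPoint';  break;
    //       case IndexBoundsChecker::DONE:         stop scanning;        break;
    //   }
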
diff --git a/src/mongo/db/query/index_bounds_builder.cpp b/src/mongo/db/query/index_bounds_builder.cpp
index 8df448edcf5..92bff9f8d39 100644
--- a/src/mongo/db/query/index_bounds_builder.cpp
+++ b/src/mongo/db/query/index_bounds_builder.cpp
@@ -48,33 +48,32 @@
namespace mongo {
- string IndexBoundsBuilder::simpleRegex(const char* regex, const char* flags,
- BoundsTightness* tightnessOut) {
- string r = "";
- *tightnessOut = IndexBoundsBuilder::INEXACT_COVERED;
-
- bool multilineOK;
- if ( regex[0] == '\\' && regex[1] == 'A') {
- multilineOK = true;
- regex += 2;
- }
- else if (regex[0] == '^') {
- multilineOK = false;
- regex += 1;
- }
- else {
- return r;
- }
+string IndexBoundsBuilder::simpleRegex(const char* regex,
+ const char* flags,
+ BoundsTightness* tightnessOut) {
+ string r = "";
+ *tightnessOut = IndexBoundsBuilder::INEXACT_COVERED;
+
+ bool multilineOK;
+ if (regex[0] == '\\' && regex[1] == 'A') {
+ multilineOK = true;
+ regex += 2;
+ } else if (regex[0] == '^') {
+ multilineOK = false;
+ regex += 1;
+ } else {
+ return r;
+ }
- // A regex with the "|" character is never considered a simple regular expression.
- if (StringData(regex).find('|') != std::string::npos) {
- return "";
- }
+ // A regex with the "|" character is never considered a simple regular expression.
+ if (StringData(regex).find('|') != std::string::npos) {
+ return "";
+ }
- bool extended = false;
- while (*flags) {
- switch (*(flags++)) {
- case 'm': // multiline
+ bool extended = false;
+ while (*flags) {
+ switch (*(flags++)) {
+ case 'm': // multiline
if (multilineOK)
continue;
else
@@ -83,964 +82,912 @@ namespace mongo {
// Single-line mode specified. This just changes the behavior of the '.'
// character to match every character instead of every character except '\n'.
continue;
- case 'x': // extended
+ case 'x': // extended
extended = true;
break;
default:
- return r; // cant use index
- }
+                return r;  // can't use index
}
+ }
- mongoutils::str::stream ss;
+ mongoutils::str::stream ss;
- while(*regex) {
- char c = *(regex++);
+ while (*regex) {
+ char c = *(regex++);
- // We should have bailed out early above if '|' is in the regex.
- invariant(c != '|');
+ // We should have bailed out early above if '|' is in the regex.
+ invariant(c != '|');
- if ( c == '*' || c == '?' ) {
- // These are the only two symbols that make the last char optional
- r = ss;
- r = r.substr( 0 , r.size() - 1 );
- return r; //breaking here fails with /^a?/
- }
- else if (c == '\\') {
- c = *(regex++);
- if (c == 'Q'){
- // \Q...\E quotes everything inside
- while (*regex) {
- c = (*regex++);
- if (c == '\\' && (*regex == 'E')){
- regex++; //skip the 'E'
- break; // go back to start of outer loop
- }
- else {
- ss << c; // character should match itself
- }
+ if (c == '*' || c == '?') {
+ // These are the only two symbols that make the last char optional
+ r = ss;
+ r = r.substr(0, r.size() - 1);
+ return r; // breaking here fails with /^a?/
+ } else if (c == '\\') {
+ c = *(regex++);
+ if (c == 'Q') {
+ // \Q...\E quotes everything inside
+ while (*regex) {
+ c = (*regex++);
+ if (c == '\\' && (*regex == 'E')) {
+ regex++; // skip the 'E'
+ break; // go back to start of outer loop
+ } else {
+ ss << c; // character should match itself
}
}
- else if ((c >= 'A' && c <= 'Z') ||
- (c >= 'a' && c <= 'z') ||
- (c >= '0' && c <= '0') ||
- (c == '\0')) {
- // don't know what to do with these
- r = ss;
- break;
- }
- else {
- // slash followed by non-alphanumeric represents the following char
- ss << c;
- }
- }
- else if (strchr("^$.[()+{", c)) {
- // list of "metacharacters" from man pcrepattern
+ } else if ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '0') ||
+ (c == '\0')) {
+ // don't know what to do with these
r = ss;
break;
- }
- else if (extended && c == '#') {
- // comment
- r = ss;
- break;
- }
- else if (extended && isspace(c)) {
- continue;
- }
- else {
- // self-matching char
+ } else {
+ // slash followed by non-alphanumeric represents the following char
ss << c;
}
- }
-
- if ( r.empty() && *regex == 0 ) {
+ } else if (strchr("^$.[()+{", c)) {
+ // list of "metacharacters" from man pcrepattern
+ r = ss;
+ break;
+ } else if (extended && c == '#') {
+ // comment
r = ss;
- *tightnessOut = r.empty() ? IndexBoundsBuilder::INEXACT_COVERED : IndexBoundsBuilder::EXACT;
+ break;
+ } else if (extended && isspace(c)) {
+ continue;
+ } else {
+ // self-matching char
+ ss << c;
}
-
- return r;
}
-
- // static
- void IndexBoundsBuilder::allValuesForField(const BSONElement& elt, OrderedIntervalList* out) {
- // ARGH, BSONValue would make this shorter.
- BSONObjBuilder bob;
- bob.appendMinKey("");
- bob.appendMaxKey("");
- out->name = elt.fieldName();
- out->intervals.push_back(makeRangeInterval(bob.obj(), true, true));
+ if (r.empty() && *regex == 0) {
+ r = ss;
+ *tightnessOut = r.empty() ? IndexBoundsBuilder::INEXACT_COVERED : IndexBoundsBuilder::EXACT;
}
- Interval IndexBoundsBuilder::allValues() {
- BSONObjBuilder bob;
- bob.appendMinKey("");
- bob.appendMaxKey("");
- return makeRangeInterval(bob.obj(), true, true);
+ return r;
+}
+
+
+// static
+void IndexBoundsBuilder::allValuesForField(const BSONElement& elt, OrderedIntervalList* out) {
+ // ARGH, BSONValue would make this shorter.
+ BSONObjBuilder bob;
+ bob.appendMinKey("");
+ bob.appendMaxKey("");
+ out->name = elt.fieldName();
+ out->intervals.push_back(makeRangeInterval(bob.obj(), true, true));
+}
+
+Interval IndexBoundsBuilder::allValues() {
+ BSONObjBuilder bob;
+ bob.appendMinKey("");
+ bob.appendMaxKey("");
+ return makeRangeInterval(bob.obj(), true, true);
+}
+
+bool IntervalComparison(const Interval& lhs, const Interval& rhs) {
+ int wo = lhs.start.woCompare(rhs.start, false);
+ if (0 != wo) {
+ return wo < 0;
}
- bool IntervalComparison(const Interval& lhs, const Interval& rhs) {
- int wo = lhs.start.woCompare(rhs.start, false);
- if (0 != wo) {
- return wo < 0;
- }
-
- // The start and end are equal.
- // Strict weak requires irreflexivity which implies that equivalence returns false.
- if (lhs.startInclusive == rhs.startInclusive) { return false; }
-
- // Put the bound that's inclusive to the left.
- return lhs.startInclusive;
+ // The start and end are equal.
+ // Strict weak requires irreflexivity which implies that equivalence returns false.
+ if (lhs.startInclusive == rhs.startInclusive) {
+ return false;
}
- // static
- void IndexBoundsBuilder::translateAndIntersect(const MatchExpression* expr,
- const BSONElement& elt,
- const IndexEntry& index,
- OrderedIntervalList* oilOut,
- BoundsTightness* tightnessOut) {
- OrderedIntervalList arg;
- translate(expr, elt, index, &arg, tightnessOut);
-
- // translate outputs arg in sorted order. intersectize assumes that its arguments are
- // sorted.
- intersectize(arg, oilOut);
- }
+ // Put the bound that's inclusive to the left.
+ return lhs.startInclusive;
+}
- // static
- void IndexBoundsBuilder::translateAndUnion(const MatchExpression* expr,
+// static
+void IndexBoundsBuilder::translateAndIntersect(const MatchExpression* expr,
const BSONElement& elt,
const IndexEntry& index,
OrderedIntervalList* oilOut,
BoundsTightness* tightnessOut) {
- OrderedIntervalList arg;
- translate(expr, elt, index, &arg, tightnessOut);
-
- // Append the new intervals to oilOut.
- oilOut->intervals.insert(oilOut->intervals.end(), arg.intervals.begin(),
- arg.intervals.end());
-
- // Union the appended intervals with the existing ones.
- unionize(oilOut);
+ OrderedIntervalList arg;
+ translate(expr, elt, index, &arg, tightnessOut);
+
+ // translate outputs arg in sorted order. intersectize assumes that its arguments are
+ // sorted.
+ intersectize(arg, oilOut);
+}
+
+// static
+void IndexBoundsBuilder::translateAndUnion(const MatchExpression* expr,
+ const BSONElement& elt,
+ const IndexEntry& index,
+ OrderedIntervalList* oilOut,
+ BoundsTightness* tightnessOut) {
+ OrderedIntervalList arg;
+ translate(expr, elt, index, &arg, tightnessOut);
+
+ // Append the new intervals to oilOut.
+ oilOut->intervals.insert(oilOut->intervals.end(), arg.intervals.begin(), arg.intervals.end());
+
+ // Union the appended intervals with the existing ones.
+ unionize(oilOut);
+}
+
+bool typeMatch(const BSONObj& obj) {
+ BSONObjIterator it(obj);
+ verify(it.more());
+ BSONElement first = it.next();
+ verify(it.more());
+ BSONElement second = it.next();
+ return first.canonicalType() == second.canonicalType();
+}
+
+// static
+void IndexBoundsBuilder::translate(const MatchExpression* expr,
+ const BSONElement& elt,
+ const IndexEntry& index,
+ OrderedIntervalList* oilOut,
+ BoundsTightness* tightnessOut) {
+ // We expect that the OIL we are constructing starts out empty.
+ invariant(oilOut->intervals.empty());
+
+ oilOut->name = elt.fieldName();
+
+ bool isHashed = false;
+ if (mongoutils::str::equals("hashed", elt.valuestrsafe())) {
+ isHashed = true;
}
- bool typeMatch(const BSONObj& obj) {
- BSONObjIterator it(obj);
- verify(it.more());
- BSONElement first = it.next();
- verify(it.more());
- BSONElement second = it.next();
- return first.canonicalType() == second.canonicalType();
+ if (isHashed) {
+ verify(MatchExpression::EQ == expr->matchType() ||
+ MatchExpression::MATCH_IN == expr->matchType());
}
- // static
- void IndexBoundsBuilder::translate(const MatchExpression* expr,
- const BSONElement& elt,
- const IndexEntry& index,
- OrderedIntervalList* oilOut,
- BoundsTightness* tightnessOut) {
- // We expect that the OIL we are constructing starts out empty.
- invariant(oilOut->intervals.empty());
+ if (MatchExpression::ELEM_MATCH_VALUE == expr->matchType()) {
+ OrderedIntervalList acc;
+ translate(expr->getChild(0), elt, index, &acc, tightnessOut);
- oilOut->name = elt.fieldName();
-
- bool isHashed = false;
- if (mongoutils::str::equals("hashed", elt.valuestrsafe())) {
- isHashed = true;
+ for (size_t i = 1; i < expr->numChildren(); ++i) {
+ OrderedIntervalList next;
+ BoundsTightness tightness;
+ translate(expr->getChild(i), elt, index, &next, &tightness);
+ intersectize(next, &acc);
}
- if (isHashed) {
- verify(MatchExpression::EQ == expr->matchType()
- || MatchExpression::MATCH_IN == expr->matchType());
+ for (size_t i = 0; i < acc.intervals.size(); ++i) {
+ oilOut->intervals.push_back(acc.intervals[i]);
}
- if (MatchExpression::ELEM_MATCH_VALUE == expr->matchType()) {
- OrderedIntervalList acc;
- translate(expr->getChild(0), elt, index, &acc, tightnessOut);
-
- for (size_t i = 1; i < expr->numChildren(); ++i) {
- OrderedIntervalList next;
- BoundsTightness tightness;
- translate(expr->getChild(i), elt, index, &next, &tightness);
- intersectize(next, &acc);
- }
-
- for (size_t i = 0; i < acc.intervals.size(); ++i) {
- oilOut->intervals.push_back(acc.intervals[i]);
- }
+ if (!oilOut->intervals.empty()) {
+ std::sort(oilOut->intervals.begin(), oilOut->intervals.end(), IntervalComparison);
+ }
- if (!oilOut->intervals.empty()) {
- std::sort(oilOut->intervals.begin(), oilOut->intervals.end(), IntervalComparison);
- }
+ // $elemMatch value requires an array.
+ // Scalars and directly nested objects are not matched with $elemMatch.
+ // We can't tell if a multi-key index key is derived from an array field.
+ // Therefore, a fetch is required.
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ } else if (MatchExpression::NOT == expr->matchType()) {
+ // A NOT is indexed by virtue of its child. If we're here then the NOT's child
+ // must be a kind of node for which we can index negations. It can't be things like
+ // $mod, $regex, or $type.
+ MatchExpression* child = expr->getChild(0);
+
+ // If we have a NOT -> EXISTS, we must handle separately.
+ if (MatchExpression::EXISTS == child->matchType()) {
+ // We should never try to use a sparse index for $exists:false.
+ invariant(!index.sparse);
+ BSONObjBuilder bob;
+ bob.appendNull("");
+ bob.appendNull("");
+ BSONObj dataObj = bob.obj();
+ oilOut->intervals.push_back(makeRangeInterval(dataObj, true, true));
- // $elemMatch value requires an array.
- // Scalars and directly nested objects are not matched with $elemMatch.
- // We can't tell if a multi-key index key is derived from an array field.
- // Therefore, a fetch is required.
*tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ return;
}
- else if (MatchExpression::NOT == expr->matchType()) {
- // A NOT is indexed by virtue of its child. If we're here then the NOT's child
- // must be a kind of node for which we can index negations. It can't be things like
- // $mod, $regex, or $type.
- MatchExpression* child = expr->getChild(0);
-
- // If we have a NOT -> EXISTS, we must handle separately.
- if (MatchExpression::EXISTS == child->matchType()) {
- // We should never try to use a sparse index for $exists:false.
- invariant(!index.sparse);
- BSONObjBuilder bob;
- bob.appendNull("");
- bob.appendNull("");
- BSONObj dataObj = bob.obj();
- oilOut->intervals.push_back(makeRangeInterval(dataObj, true, true));
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- return;
- }
+ translate(child, elt, index, oilOut, tightnessOut);
+ oilOut->complement();
- translate(child, elt, index, oilOut, tightnessOut);
- oilOut->complement();
-
- // If the index is multikey, it doesn't matter what the tightness
- // of the child is, we must return INEXACT_FETCH. Consider a multikey
- // index on 'a' with document {a: [1, 2, 3]} and query {a: {$ne: 3}}.
- // If we treated the bounds [MinKey, 3), (3, MaxKey] as exact, then
- // we would erroneously return the document!
- if (index.multikey) {
- *tightnessOut = INEXACT_FETCH;
- }
+ // If the index is multikey, it doesn't matter what the tightness
+ // of the child is, we must return INEXACT_FETCH. Consider a multikey
+ // index on 'a' with document {a: [1, 2, 3]} and query {a: {$ne: 3}}.
+ // If we treated the bounds [MinKey, 3), (3, MaxKey] as exact, then
+ // we would erroneously return the document!
+ if (index.multikey) {
+ *tightnessOut = INEXACT_FETCH;
}
- else if (MatchExpression::EXISTS == expr->matchType()) {
- // We only handle the {$exists:true} case, as {$exists:false}
- // will have been translated to {$not:{ $exists:true }}.
- //
- // Documents with a missing value are stored *as if* they were
- // explicitly given the value 'null'. Given:
- // X = { b : 1 }
- // Y = { a : null, b : 1 }
- // X and Y look identical from within a standard index on { a : 1 }.
- // HOWEVER a sparse index on { a : 1 } will treat X and Y differently,
- // storing Y and not storing X.
- //
- // We can safely use an index in the following cases:
- // {a:{ $exists:true }} - normal index helps, but we must still fetch
- // {a:{ $exists:true }} - sparse index is exact
- // {a:{ $exists:false }} - normal index requires a fetch
- // {a:{ $exists:false }} - sparse indexes cannot be used at all.
- //
- // Noted in SERVER-12869, in case this ever changes some day.
- if (index.sparse) {
- oilOut->intervals.push_back(allValues());
- // A sparse, compound index on { a:1, b:1 } will include entries
- // for all of the following documents:
- // { a:1 }, { b:1 }, { a:1, b:1 }
- // So we must use INEXACT bounds in this case.
- if ( 1 < index.keyPattern.nFields() ) {
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
- else {
- *tightnessOut = IndexBoundsBuilder::EXACT;
- }
- }
- else {
- oilOut->intervals.push_back(allValues());
+ } else if (MatchExpression::EXISTS == expr->matchType()) {
+ // We only handle the {$exists:true} case, as {$exists:false}
+ // will have been translated to {$not:{ $exists:true }}.
+ //
+ // Documents with a missing value are stored *as if* they were
+ // explicitly given the value 'null'. Given:
+ // X = { b : 1 }
+ // Y = { a : null, b : 1 }
+ // X and Y look identical from within a standard index on { a : 1 }.
+ // HOWEVER a sparse index on { a : 1 } will treat X and Y differently,
+ // storing Y and not storing X.
+ //
+ // We can safely use an index in the following cases:
+ // {a:{ $exists:true }} - normal index helps, but we must still fetch
+ // {a:{ $exists:true }} - sparse index is exact
+ // {a:{ $exists:false }} - normal index requires a fetch
+ // {a:{ $exists:false }} - sparse indexes cannot be used at all.
+ //
+ // Noted in SERVER-12869, in case this ever changes some day.
+ if (index.sparse) {
+ oilOut->intervals.push_back(allValues());
+ // A sparse, compound index on { a:1, b:1 } will include entries
+ // for all of the following documents:
+ // { a:1 }, { b:1 }, { a:1, b:1 }
+ // So we must use INEXACT bounds in this case.
+ if (1 < index.keyPattern.nFields()) {
*tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
- }
- else if (MatchExpression::EQ == expr->matchType()) {
- const EqualityMatchExpression* node = static_cast<const EqualityMatchExpression*>(expr);
- translateEquality(node->getData(), isHashed, oilOut, tightnessOut);
- }
- else if (MatchExpression::LTE == expr->matchType()) {
- const LTEMatchExpression* node = static_cast<const LTEMatchExpression*>(expr);
- BSONElement dataElt = node->getData();
-
- // Everything is <= MaxKey.
- if (MaxKey == dataElt.type()) {
- oilOut->intervals.push_back(allValues());
+ } else {
*tightnessOut = IndexBoundsBuilder::EXACT;
- return;
- }
-
- // Only NaN is <= NaN.
- if (std::isnan(dataElt.numberDouble())) {
- double nan = dataElt.numberDouble();
- oilOut->intervals.push_back(makePointInterval(nan));
- *tightnessOut = IndexBoundsBuilder::EXACT;
- return;
- }
-
- BSONObjBuilder bob;
- // Use -infinity for one-sided numerical bounds
- if (dataElt.isNumber()) {
- bob.appendNumber("", -std::numeric_limits<double>::infinity());
- }
- else {
- bob.appendMinForType("", dataElt.type());
- }
- bob.appendAs(dataElt, "");
- BSONObj dataObj = bob.obj();
- verify(dataObj.isOwned());
- oilOut->intervals.push_back(makeRangeInterval(dataObj, typeMatch(dataObj), true));
-
- if (dataElt.isSimpleType() || dataElt.type() == BSONType::BinData) {
- *tightnessOut = IndexBoundsBuilder::EXACT;
- }
- else {
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
}
+ } else {
+ oilOut->intervals.push_back(allValues());
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
}
- else if (MatchExpression::LT == expr->matchType()) {
- const LTMatchExpression* node = static_cast<const LTMatchExpression*>(expr);
- BSONElement dataElt = node->getData();
+ } else if (MatchExpression::EQ == expr->matchType()) {
+ const EqualityMatchExpression* node = static_cast<const EqualityMatchExpression*>(expr);
+ translateEquality(node->getData(), isHashed, oilOut, tightnessOut);
+ } else if (MatchExpression::LTE == expr->matchType()) {
+ const LTEMatchExpression* node = static_cast<const LTEMatchExpression*>(expr);
+ BSONElement dataElt = node->getData();
- // Everything is <= MaxKey.
- if (MaxKey == dataElt.type()) {
- oilOut->intervals.push_back(allValues());
- *tightnessOut = IndexBoundsBuilder::EXACT;
- return;
- }
-
- // Nothing is < NaN.
- if (std::isnan(dataElt.numberDouble())) {
- *tightnessOut = IndexBoundsBuilder::EXACT;
- return;
- }
+ // Everything is <= MaxKey.
+ if (MaxKey == dataElt.type()) {
+ oilOut->intervals.push_back(allValues());
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ return;
+ }
- BSONObjBuilder bob;
- // Use -infinity for one-sided numerical bounds
- if (dataElt.isNumber()) {
- bob.appendNumber("", -std::numeric_limits<double>::infinity());
- }
- else {
- bob.appendMinForType("", dataElt.type());
- }
- bob.appendAs(dataElt, "");
- BSONObj dataObj = bob.obj();
- verify(dataObj.isOwned());
- Interval interval = makeRangeInterval(dataObj, typeMatch(dataObj), false);
+ // Only NaN is <= NaN.
+ if (std::isnan(dataElt.numberDouble())) {
+ double nan = dataElt.numberDouble();
+ oilOut->intervals.push_back(makePointInterval(nan));
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ return;
+ }
- // If the operand to LT is equal to the lower bound X, the interval [X, X) is invalid
- // and should not be added to the bounds.
- if (!interval.isNull()) {
- oilOut->intervals.push_back(interval);
- }
+ BSONObjBuilder bob;
+ // Use -infinity for one-sided numerical bounds
+ if (dataElt.isNumber()) {
+ bob.appendNumber("", -std::numeric_limits<double>::infinity());
+ } else {
+ bob.appendMinForType("", dataElt.type());
+ }
+ bob.appendAs(dataElt, "");
+ BSONObj dataObj = bob.obj();
+ verify(dataObj.isOwned());
+ oilOut->intervals.push_back(makeRangeInterval(dataObj, typeMatch(dataObj), true));
+
+ if (dataElt.isSimpleType() || dataElt.type() == BSONType::BinData) {
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ } else {
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ }
+ } else if (MatchExpression::LT == expr->matchType()) {
+ const LTMatchExpression* node = static_cast<const LTMatchExpression*>(expr);
+ BSONElement dataElt = node->getData();
- if (dataElt.isSimpleType() || dataElt.type() == BSONType::BinData) {
- *tightnessOut = IndexBoundsBuilder::EXACT;
- }
- else {
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
+ // Everything is <= MaxKey.
+ if (MaxKey == dataElt.type()) {
+ oilOut->intervals.push_back(allValues());
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ return;
}
- else if (MatchExpression::GT == expr->matchType()) {
- const GTMatchExpression* node = static_cast<const GTMatchExpression*>(expr);
- BSONElement dataElt = node->getData();
- // Everything is > MinKey.
- if (MinKey == dataElt.type()) {
- oilOut->intervals.push_back(allValues());
- *tightnessOut = IndexBoundsBuilder::EXACT;
- return;
- }
+ // Nothing is < NaN.
+ if (std::isnan(dataElt.numberDouble())) {
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ return;
+ }
- // Nothing is > NaN.
- if (std::isnan(dataElt.numberDouble())) {
- *tightnessOut = IndexBoundsBuilder::EXACT;
- return;
- }
+ BSONObjBuilder bob;
+ // Use -infinity for one-sided numerical bounds
+ if (dataElt.isNumber()) {
+ bob.appendNumber("", -std::numeric_limits<double>::infinity());
+ } else {
+ bob.appendMinForType("", dataElt.type());
+ }
+ bob.appendAs(dataElt, "");
+ BSONObj dataObj = bob.obj();
+ verify(dataObj.isOwned());
+ Interval interval = makeRangeInterval(dataObj, typeMatch(dataObj), false);
- BSONObjBuilder bob;
- bob.appendAs(node->getData(), "");
- if (dataElt.isNumber()) {
- bob.appendNumber("", std::numeric_limits<double>::infinity());
- }
- else {
- bob.appendMaxForType("", dataElt.type());
- }
- BSONObj dataObj = bob.obj();
- verify(dataObj.isOwned());
- Interval interval = makeRangeInterval(dataObj, false, typeMatch(dataObj));
+ // If the operand to LT is equal to the lower bound X, the interval [X, X) is invalid
+ // and should not be added to the bounds.
+ if (!interval.isNull()) {
+ oilOut->intervals.push_back(interval);
+ }
- // If the operand to GT is equal to the upper bound X, the interval (X, X] is invalid
- // and should not be added to the bounds.
- if (!interval.isNull()) {
- oilOut->intervals.push_back(interval);
- }
+ if (dataElt.isSimpleType() || dataElt.type() == BSONType::BinData) {
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ } else {
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ }
+ } else if (MatchExpression::GT == expr->matchType()) {
+ const GTMatchExpression* node = static_cast<const GTMatchExpression*>(expr);
+ BSONElement dataElt = node->getData();
- if (dataElt.isSimpleType() || dataElt.type() == BSONType::BinData) {
- *tightnessOut = IndexBoundsBuilder::EXACT;
- }
- else {
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
+ // Everything is > MinKey.
+ if (MinKey == dataElt.type()) {
+ oilOut->intervals.push_back(allValues());
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ return;
}
- else if (MatchExpression::GTE == expr->matchType()) {
- const GTEMatchExpression* node = static_cast<const GTEMatchExpression*>(expr);
- BSONElement dataElt = node->getData();
- // Everything is >= MinKey.
- if (MinKey == dataElt.type()) {
- oilOut->intervals.push_back(allValues());
- *tightnessOut = IndexBoundsBuilder::EXACT;
- return;
- }
+ // Nothing is > NaN.
+ if (std::isnan(dataElt.numberDouble())) {
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ return;
+ }
- // Only NaN is >= NaN.
- if (std::isnan(dataElt.numberDouble())) {
- double nan = dataElt.numberDouble();
- oilOut->intervals.push_back(makePointInterval(nan));
- *tightnessOut = IndexBoundsBuilder::EXACT;
- return;
- }
+ BSONObjBuilder bob;
+ bob.appendAs(node->getData(), "");
+ if (dataElt.isNumber()) {
+ bob.appendNumber("", std::numeric_limits<double>::infinity());
+ } else {
+ bob.appendMaxForType("", dataElt.type());
+ }
+ BSONObj dataObj = bob.obj();
+ verify(dataObj.isOwned());
+ Interval interval = makeRangeInterval(dataObj, false, typeMatch(dataObj));
- BSONObjBuilder bob;
- bob.appendAs(dataElt, "");
- if (dataElt.isNumber()) {
- bob.appendNumber("", std::numeric_limits<double>::infinity());
- }
- else {
- bob.appendMaxForType("", dataElt.type());
- }
- BSONObj dataObj = bob.obj();
- verify(dataObj.isOwned());
+ // If the operand to GT is equal to the upper bound X, the interval (X, X] is invalid
+ // and should not be added to the bounds.
+ if (!interval.isNull()) {
+ oilOut->intervals.push_back(interval);
+ }
- oilOut->intervals.push_back(makeRangeInterval(dataObj, true, typeMatch(dataObj)));
- if (dataElt.isSimpleType() || dataElt.type() == BSONType::BinData) {
- *tightnessOut = IndexBoundsBuilder::EXACT;
- }
- else {
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
+ if (dataElt.isSimpleType() || dataElt.type() == BSONType::BinData) {
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ } else {
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
}
- else if (MatchExpression::REGEX == expr->matchType()) {
- const RegexMatchExpression* rme = static_cast<const RegexMatchExpression*>(expr);
- translateRegex(rme, oilOut, tightnessOut);
+ } else if (MatchExpression::GTE == expr->matchType()) {
+ const GTEMatchExpression* node = static_cast<const GTEMatchExpression*>(expr);
+ BSONElement dataElt = node->getData();
+
+ // Everything is >= MinKey.
+ if (MinKey == dataElt.type()) {
+ oilOut->intervals.push_back(allValues());
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ return;
}
- else if (MatchExpression::MOD == expr->matchType()) {
- BSONObjBuilder bob;
- bob.appendMinForType("", NumberDouble);
- bob.appendMaxForType("", NumberDouble);
- BSONObj dataObj = bob.obj();
- verify(dataObj.isOwned());
- oilOut->intervals.push_back(makeRangeInterval(dataObj, true, true));
- *tightnessOut = IndexBoundsBuilder::INEXACT_COVERED;
+
+ // Only NaN is >= NaN.
+ if (std::isnan(dataElt.numberDouble())) {
+ double nan = dataElt.numberDouble();
+ oilOut->intervals.push_back(makePointInterval(nan));
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ return;
}
- else if (MatchExpression::TYPE_OPERATOR == expr->matchType()) {
- const TypeMatchExpression* tme = static_cast<const TypeMatchExpression*>(expr);
- BSONObjBuilder bob;
- bob.appendMinForType("", tme->getData());
- bob.appendMaxForType("", tme->getData());
- BSONObj dataObj = bob.obj();
- verify(dataObj.isOwned());
- oilOut->intervals.push_back(makeRangeInterval(dataObj, true, true));
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+
+ BSONObjBuilder bob;
+ bob.appendAs(dataElt, "");
+ if (dataElt.isNumber()) {
+ bob.appendNumber("", std::numeric_limits<double>::infinity());
+ } else {
+ bob.appendMaxForType("", dataElt.type());
}
- else if (MatchExpression::MATCH_IN == expr->matchType()) {
- const InMatchExpression* ime = static_cast<const InMatchExpression*>(expr);
- const ArrayFilterEntries& afr = ime->getData();
+ BSONObj dataObj = bob.obj();
+ verify(dataObj.isOwned());
+ oilOut->intervals.push_back(makeRangeInterval(dataObj, true, typeMatch(dataObj)));
+ if (dataElt.isSimpleType() || dataElt.type() == BSONType::BinData) {
*tightnessOut = IndexBoundsBuilder::EXACT;
+ } else {
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ }
+ } else if (MatchExpression::REGEX == expr->matchType()) {
+ const RegexMatchExpression* rme = static_cast<const RegexMatchExpression*>(expr);
+ translateRegex(rme, oilOut, tightnessOut);
+ } else if (MatchExpression::MOD == expr->matchType()) {
+ BSONObjBuilder bob;
+ bob.appendMinForType("", NumberDouble);
+ bob.appendMaxForType("", NumberDouble);
+ BSONObj dataObj = bob.obj();
+ verify(dataObj.isOwned());
+ oilOut->intervals.push_back(makeRangeInterval(dataObj, true, true));
+ *tightnessOut = IndexBoundsBuilder::INEXACT_COVERED;
+ } else if (MatchExpression::TYPE_OPERATOR == expr->matchType()) {
+ const TypeMatchExpression* tme = static_cast<const TypeMatchExpression*>(expr);
+ BSONObjBuilder bob;
+ bob.appendMinForType("", tme->getData());
+ bob.appendMaxForType("", tme->getData());
+ BSONObj dataObj = bob.obj();
+ verify(dataObj.isOwned());
+ oilOut->intervals.push_back(makeRangeInterval(dataObj, true, true));
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ } else if (MatchExpression::MATCH_IN == expr->matchType()) {
+ const InMatchExpression* ime = static_cast<const InMatchExpression*>(expr);
+ const ArrayFilterEntries& afr = ime->getData();
- // Create our various intervals.
+ *tightnessOut = IndexBoundsBuilder::EXACT;
- IndexBoundsBuilder::BoundsTightness tightness;
- for (BSONElementSet::iterator it = afr.equalities().begin();
- it != afr.equalities().end(); ++it) {
- translateEquality(*it, isHashed, oilOut, &tightness);
- if (tightness != IndexBoundsBuilder::EXACT) {
- *tightnessOut = tightness;
- }
- }
+ // Create our various intervals.
- for (size_t i = 0; i < afr.numRegexes(); ++i) {
- translateRegex(afr.regex(i), oilOut, &tightness);
- if (tightness != IndexBoundsBuilder::EXACT) {
- *tightnessOut = tightness;
- }
+ IndexBoundsBuilder::BoundsTightness tightness;
+ for (BSONElementSet::iterator it = afr.equalities().begin(); it != afr.equalities().end();
+ ++it) {
+ translateEquality(*it, isHashed, oilOut, &tightness);
+ if (tightness != IndexBoundsBuilder::EXACT) {
+ *tightnessOut = tightness;
}
+ }
- if (afr.hasNull()) {
- // A null index key does not always match a null query value so we must fetch the
- // doc and run a full comparison. See SERVER-4529.
- // TODO: Do we already set the tightnessOut by calling translateEquality?
- *tightnessOut = INEXACT_FETCH;
+ for (size_t i = 0; i < afr.numRegexes(); ++i) {
+ translateRegex(afr.regex(i), oilOut, &tightness);
+ if (tightness != IndexBoundsBuilder::EXACT) {
+ *tightnessOut = tightness;
}
+ }
- if (afr.hasEmptyArray()) {
- // Empty arrays are indexed as undefined.
- BSONObjBuilder undefinedBob;
- undefinedBob.appendUndefined("");
- oilOut->intervals.push_back(makePointInterval(undefinedBob.obj()));
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
+ if (afr.hasNull()) {
+ // A null index key does not always match a null query value so we must fetch the
+ // doc and run a full comparison. See SERVER-4529.
+ // TODO: Do we already set the tightnessOut by calling translateEquality?
+ *tightnessOut = INEXACT_FETCH;
+ }
- unionize(oilOut);
+ if (afr.hasEmptyArray()) {
+ // Empty arrays are indexed as undefined.
+ BSONObjBuilder undefinedBob;
+ undefinedBob.appendUndefined("");
+ oilOut->intervals.push_back(makePointInterval(undefinedBob.obj()));
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
}
- else if (MatchExpression::GEO == expr->matchType()) {
- const GeoMatchExpression* gme = static_cast<const GeoMatchExpression*>(expr);
+ unionize(oilOut);
+ } else if (MatchExpression::GEO == expr->matchType()) {
+ const GeoMatchExpression* gme = static_cast<const GeoMatchExpression*>(expr);
- if (mongoutils::str::equals("2dsphere", elt.valuestrsafe())) {
- verify(gme->getGeoExpression().getGeometry().hasS2Region());
- const S2Region& region = gme->getGeoExpression().getGeometry().getS2Region();
- ExpressionMapping::cover2dsphere(region, index.infoObj, oilOut);
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
- else if (mongoutils::str::equals("2d", elt.valuestrsafe())) {
- verify(gme->getGeoExpression().getGeometry().hasR2Region());
- const R2Region& region = gme->getGeoExpression().getGeometry().getR2Region();
+ if (mongoutils::str::equals("2dsphere", elt.valuestrsafe())) {
+ verify(gme->getGeoExpression().getGeometry().hasS2Region());
+ const S2Region& region = gme->getGeoExpression().getGeometry().getS2Region();
+ ExpressionMapping::cover2dsphere(region, index.infoObj, oilOut);
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ } else if (mongoutils::str::equals("2d", elt.valuestrsafe())) {
+ verify(gme->getGeoExpression().getGeometry().hasR2Region());
+ const R2Region& region = gme->getGeoExpression().getGeometry().getR2Region();
- ExpressionMapping::cover2d(region,
- index.infoObj,
- internalGeoPredicateQuery2DMaxCoveringCells,
- oilOut);
+ ExpressionMapping::cover2d(
+ region, index.infoObj, internalGeoPredicateQuery2DMaxCoveringCells, oilOut);
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
- else {
- warning() << "Planner error trying to build geo bounds for " << elt.toString()
- << " index element.";
- verify(0);
- }
- }
- else {
- warning() << "Planner error, trying to build bounds for expression: "
- << expr->toString() << endl;
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ } else {
+ warning() << "Planner error trying to build geo bounds for " << elt.toString()
+ << " index element.";
verify(0);
}
+ } else {
+ warning() << "Planner error, trying to build bounds for expression: " << expr->toString()
+ << endl;
+ verify(0);
}
-
- // static
- Interval IndexBoundsBuilder::makeRangeInterval(const BSONObj& obj, bool startInclusive,
- bool endInclusive) {
- Interval ret;
- ret._intervalData = obj;
- ret.startInclusive = startInclusive;
- ret.endInclusive = endInclusive;
- BSONObjIterator it(obj);
- verify(it.more());
- ret.start = it.next();
- verify(it.more());
- ret.end = it.next();
- return ret;
- }
-
- // static
- void IndexBoundsBuilder::intersectize(const OrderedIntervalList& arg,
- OrderedIntervalList* oilOut) {
- verify(arg.name == oilOut->name);
-
- size_t argidx = 0;
- const vector<Interval>& argiv = arg.intervals;
-
- size_t ividx = 0;
- vector<Interval>& iv = oilOut->intervals;
-
- vector<Interval> result;
-
- while (argidx < argiv.size() && ividx < iv.size()) {
- Interval::IntervalComparison cmp = argiv[argidx].compare(iv[ividx]);
-
- verify(Interval::INTERVAL_UNKNOWN != cmp);
-
- if (cmp == Interval::INTERVAL_PRECEDES
- || cmp == Interval::INTERVAL_PRECEDES_COULD_UNION) {
- // argiv is before iv. move argiv forward.
+}
+
+// static
+Interval IndexBoundsBuilder::makeRangeInterval(const BSONObj& obj,
+ bool startInclusive,
+ bool endInclusive) {
+ Interval ret;
+ ret._intervalData = obj;
+ ret.startInclusive = startInclusive;
+ ret.endInclusive = endInclusive;
+ BSONObjIterator it(obj);
+ verify(it.more());
+ ret.start = it.next();
+ verify(it.more());
+ ret.end = it.next();
+ return ret;
+}
+
+// static
+void IndexBoundsBuilder::intersectize(const OrderedIntervalList& arg, OrderedIntervalList* oilOut) {
+ verify(arg.name == oilOut->name);
+
+ size_t argidx = 0;
+ const vector<Interval>& argiv = arg.intervals;
+
+ size_t ividx = 0;
+ vector<Interval>& iv = oilOut->intervals;
+
+ vector<Interval> result;
+
+ while (argidx < argiv.size() && ividx < iv.size()) {
+ Interval::IntervalComparison cmp = argiv[argidx].compare(iv[ividx]);
+
+ verify(Interval::INTERVAL_UNKNOWN != cmp);
+
+ if (cmp == Interval::INTERVAL_PRECEDES || cmp == Interval::INTERVAL_PRECEDES_COULD_UNION) {
+ // argiv is before iv. move argiv forward.
+ ++argidx;
+ } else if (cmp == Interval::INTERVAL_SUCCEEDS) {
+ // iv is before argiv. move iv forward.
+ ++ividx;
+ } else {
+ // argiv[argidx] (cmpresults) iv[ividx]
+ Interval newInt = argiv[argidx];
+ newInt.intersect(iv[ividx], cmp);
+ result.push_back(newInt);
+
+ if (Interval::INTERVAL_EQUALS == cmp) {
++argidx;
- }
- else if (cmp == Interval::INTERVAL_SUCCEEDS) {
- // iv is before argiv. move iv forward.
++ividx;
- }
- else {
- // argiv[argidx] (cmpresults) iv[ividx]
- Interval newInt = argiv[argidx];
- newInt.intersect(iv[ividx], cmp);
- result.push_back(newInt);
-
- if (Interval::INTERVAL_EQUALS == cmp) {
- ++argidx;
- ++ividx;
- }
- else if (Interval::INTERVAL_WITHIN == cmp) {
- ++argidx;
- }
- else if (Interval::INTERVAL_CONTAINS == cmp) {
- ++ividx;
- }
- else if (Interval::INTERVAL_OVERLAPS_BEFORE == cmp) {
- ++argidx;
- }
- else if (Interval::INTERVAL_OVERLAPS_AFTER == cmp) {
- ++ividx;
- }
- else {
- verify(0);
- }
+ } else if (Interval::INTERVAL_WITHIN == cmp) {
+ ++argidx;
+ } else if (Interval::INTERVAL_CONTAINS == cmp) {
+ ++ividx;
+ } else if (Interval::INTERVAL_OVERLAPS_BEFORE == cmp) {
+ ++argidx;
+ } else if (Interval::INTERVAL_OVERLAPS_AFTER == cmp) {
+ ++ividx;
+ } else {
+ verify(0);
}
}
-
- oilOut->intervals.swap(result);
}
- // static
- void IndexBoundsBuilder::unionize(OrderedIntervalList* oilOut) {
- vector<Interval>& iv = oilOut->intervals;
-
- // This can happen.
- if (iv.empty()) { return; }
+ oilOut->intervals.swap(result);
+}
- // Step 1: sort.
- std::sort(iv.begin(), iv.end(), IntervalComparison);
+// static
+void IndexBoundsBuilder::unionize(OrderedIntervalList* oilOut) {
+ vector<Interval>& iv = oilOut->intervals;
- // Step 2: Walk through and merge.
- size_t i = 0;
- while (i < iv.size() - 1) {
- // Compare i with i + 1.
- Interval::IntervalComparison cmp = iv[i].compare(iv[i + 1]);
-
- // This means our sort didn't work.
- verify(Interval::INTERVAL_SUCCEEDS != cmp);
-
- // Intervals are correctly ordered.
- if (Interval::INTERVAL_PRECEDES == cmp) {
- // We can move to the next pair.
- ++i;
- }
- else if (Interval::INTERVAL_EQUALS == cmp || Interval::INTERVAL_WITHIN == cmp) {
- // Interval 'i' is equal to i+1, or is contained within i+1.
- // Remove interval i and don't move to the next value of 'i'.
- iv.erase(iv.begin() + i);
- }
- else if (Interval::INTERVAL_CONTAINS == cmp) {
- // Interval 'i' contains i+1, remove i+1 and don't move to the next value of 'i'.
- iv.erase(iv.begin() + i + 1);
- }
- else if (Interval::INTERVAL_OVERLAPS_BEFORE == cmp
- || Interval::INTERVAL_PRECEDES_COULD_UNION == cmp) {
- // We want to merge intervals i and i+1.
- // Interval 'i' starts before interval 'i+1'.
- BSONObjBuilder bob;
- bob.appendAs(iv[i].start, "");
- bob.appendAs(iv[i + 1].end, "");
- BSONObj data = bob.obj();
- bool startInclusive = iv[i].startInclusive;
- bool endInclusive = iv[i + 1].endInclusive;
- iv.erase(iv.begin() + i);
- // iv[i] is now the former iv[i + 1]
- iv[i] = makeRangeInterval(data, startInclusive, endInclusive);
- // Don't increment 'i'.
- }
- }
- }
-
- // static
- Interval IndexBoundsBuilder::makeRangeInterval(const string& start, const string& end,
- bool startInclusive, bool endInclusive) {
- BSONObjBuilder bob;
- bob.append("", start);
- bob.append("", end);
- return makeRangeInterval(bob.obj(), startInclusive, endInclusive);
+ // This can happen.
+ if (iv.empty()) {
+ return;
}
- // static
- Interval IndexBoundsBuilder::makePointInterval(const BSONObj& obj) {
- Interval ret;
- ret._intervalData = obj;
- ret.startInclusive = ret.endInclusive = true;
- ret.start = ret.end = obj.firstElement();
- return ret;
- }
-
- // static
- Interval IndexBoundsBuilder::makePointInterval(const string& str) {
- BSONObjBuilder bob;
- bob.append("", str);
- return makePointInterval(bob.obj());
- }
-
- // static
- Interval IndexBoundsBuilder::makePointInterval(double d) {
- BSONObjBuilder bob;
- bob.append("", d);
- return makePointInterval(bob.obj());
- }
-
- // static
- BSONObj IndexBoundsBuilder::objFromElement(const BSONElement& elt) {
- BSONObjBuilder bob;
- bob.appendAs(elt, "");
- return bob.obj();
- }
-
- // static
- void IndexBoundsBuilder::reverseInterval(Interval* ival) {
- BSONElement tmp = ival->start;
- ival->start = ival->end;
- ival->end = tmp;
-
- bool tmpInc = ival->startInclusive;
- ival->startInclusive = ival->endInclusive;
- ival->endInclusive = tmpInc;
- }
-
- // static
- void IndexBoundsBuilder::translateRegex(const RegexMatchExpression* rme,
- OrderedIntervalList* oilOut, BoundsTightness* tightnessOut) {
-
- const string start = simpleRegex(rme->getString().c_str(), rme->getFlags().c_str(), tightnessOut);
-
- // Note that 'tightnessOut' is set by simpleRegex above.
- if (!start.empty()) {
- string end = start;
- end[end.size() - 1]++;
- oilOut->intervals.push_back(makeRangeInterval(start, end, true, false));
- }
- else {
+ // Step 1: sort.
+ std::sort(iv.begin(), iv.end(), IntervalComparison);
+
+ // Step 2: Walk through and merge.
+ size_t i = 0;
+ while (i < iv.size() - 1) {
+ // Compare i with i + 1.
+ Interval::IntervalComparison cmp = iv[i].compare(iv[i + 1]);
+
+ // This means our sort didn't work.
+ verify(Interval::INTERVAL_SUCCEEDS != cmp);
+
+ // Intervals are correctly ordered.
+ if (Interval::INTERVAL_PRECEDES == cmp) {
+ // We can move to the next pair.
+ ++i;
+ } else if (Interval::INTERVAL_EQUALS == cmp || Interval::INTERVAL_WITHIN == cmp) {
+ // Interval 'i' is equal to i+1, or is contained within i+1.
+ // Remove interval i and don't move to the next value of 'i'.
+ iv.erase(iv.begin() + i);
+ } else if (Interval::INTERVAL_CONTAINS == cmp) {
+ // Interval 'i' contains i+1, remove i+1 and don't move to the next value of 'i'.
+ iv.erase(iv.begin() + i + 1);
+ } else if (Interval::INTERVAL_OVERLAPS_BEFORE == cmp ||
+ Interval::INTERVAL_PRECEDES_COULD_UNION == cmp) {
+ // We want to merge intervals i and i+1.
+ // Interval 'i' starts before interval 'i+1'.
BSONObjBuilder bob;
- bob.appendMinForType("", String);
- bob.appendMaxForType("", String);
- BSONObj dataObj = bob.obj();
- verify(dataObj.isOwned());
- oilOut->intervals.push_back(makeRangeInterval(dataObj, true, false));
+ bob.appendAs(iv[i].start, "");
+ bob.appendAs(iv[i + 1].end, "");
+ BSONObj data = bob.obj();
+ bool startInclusive = iv[i].startInclusive;
+ bool endInclusive = iv[i + 1].endInclusive;
+ iv.erase(iv.begin() + i);
+ // iv[i] is now the former iv[i + 1]
+ iv[i] = makeRangeInterval(data, startInclusive, endInclusive);
+ // Don't increment 'i'.
}
-
- // Regexes are after strings.
+ }
+}
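
For reference, a small sketch (not part of this patch) of the merge that unionize() above performs on overlapping intervals. The wrapper function and includes are illustrative assumptions; the builder calls are the public static helpers from index_bounds_builder.h.

#include "mongo/db/jsobj.h"
#include "mongo/db/query/index_bounds_builder.h"

namespace mongo {
namespace {

void unionizeSketch() {
    OrderedIntervalList oil("a");
    // Appended out of order and overlapping, as translateAndUnion() may leave them.
    oil.intervals.push_back(
        IndexBoundsBuilder::makeRangeInterval(BSON("" << 2 << "" << 5), true, true));
    oil.intervals.push_back(
        IndexBoundsBuilder::makeRangeInterval(BSON("" << 1 << "" << 3), true, true));

    // Step 1 sorts by start key, step 2 merges the overlap:
    // the list collapses to the single interval [1, 5].
    IndexBoundsBuilder::unionize(&oil);
}

}  // namespace
}  // namespace mongo
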
+
+// static
+Interval IndexBoundsBuilder::makeRangeInterval(const string& start,
+ const string& end,
+ bool startInclusive,
+ bool endInclusive) {
+ BSONObjBuilder bob;
+ bob.append("", start);
+ bob.append("", end);
+ return makeRangeInterval(bob.obj(), startInclusive, endInclusive);
+}
+
+// static
+Interval IndexBoundsBuilder::makePointInterval(const BSONObj& obj) {
+ Interval ret;
+ ret._intervalData = obj;
+ ret.startInclusive = ret.endInclusive = true;
+ ret.start = ret.end = obj.firstElement();
+ return ret;
+}
+
+// static
+Interval IndexBoundsBuilder::makePointInterval(const string& str) {
+ BSONObjBuilder bob;
+ bob.append("", str);
+ return makePointInterval(bob.obj());
+}
+
+// static
+Interval IndexBoundsBuilder::makePointInterval(double d) {
+ BSONObjBuilder bob;
+ bob.append("", d);
+ return makePointInterval(bob.obj());
+}
+
+// static
+BSONObj IndexBoundsBuilder::objFromElement(const BSONElement& elt) {
+ BSONObjBuilder bob;
+ bob.appendAs(elt, "");
+ return bob.obj();
+}
+
+// static
+void IndexBoundsBuilder::reverseInterval(Interval* ival) {
+ BSONElement tmp = ival->start;
+ ival->start = ival->end;
+ ival->end = tmp;
+
+ bool tmpInc = ival->startInclusive;
+ ival->startInclusive = ival->endInclusive;
+ ival->endInclusive = tmpInc;
+}
+
+// static
+void IndexBoundsBuilder::translateRegex(const RegexMatchExpression* rme,
+ OrderedIntervalList* oilOut,
+ BoundsTightness* tightnessOut) {
+ const string start =
+ simpleRegex(rme->getString().c_str(), rme->getFlags().c_str(), tightnessOut);
+
+ // Note that 'tightnessOut' is set by simpleRegex above.
+ if (!start.empty()) {
+ string end = start;
+ end[end.size() - 1]++;
+ oilOut->intervals.push_back(makeRangeInterval(start, end, true, false));
+ } else {
BSONObjBuilder bob;
- bob.appendRegex("", rme->getString(), rme->getFlags());
- oilOut->intervals.push_back(makePointInterval(bob.obj()));
+ bob.appendMinForType("", String);
+ bob.appendMaxForType("", String);
+ BSONObj dataObj = bob.obj();
+ verify(dataObj.isOwned());
+ oilOut->intervals.push_back(makeRangeInterval(dataObj, true, false));
}
- // static
- void IndexBoundsBuilder::translateEquality(const BSONElement& data, bool isHashed,
- OrderedIntervalList* oil, BoundsTightness* tightnessOut) {
- // We have to copy the data out of the parse tree and stuff it into the index
- // bounds. BSONValue will be useful here.
- if (Array != data.type()) {
- BSONObj dataObj;
- if (isHashed) {
- dataObj = ExpressionMapping::hash(data);
- }
- else {
- dataObj = objFromElement(data);
- }
+ // Regexes are after strings.
+ BSONObjBuilder bob;
+ bob.appendRegex("", rme->getString(), rme->getFlags());
+ oilOut->intervals.push_back(makePointInterval(bob.obj()));
+}
- verify(dataObj.isOwned());
- oil->intervals.push_back(makePointInterval(dataObj));
-
- if (dataObj.firstElement().isNull() || isHashed) {
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
- else {
- *tightnessOut = IndexBoundsBuilder::EXACT;
- }
- return;
+// static
+void IndexBoundsBuilder::translateEquality(const BSONElement& data,
+ bool isHashed,
+ OrderedIntervalList* oil,
+ BoundsTightness* tightnessOut) {
+ // We have to copy the data out of the parse tree and stuff it into the index
+ // bounds. BSONValue will be useful here.
+ if (Array != data.type()) {
+ BSONObj dataObj;
+ if (isHashed) {
+ dataObj = ExpressionMapping::hash(data);
+ } else {
+ dataObj = objFromElement(data);
}
- // If we're here, Array == data.type().
- //
- // Using arrays with hashed indices is currently not supported, so we don't have to worry
- // about that case.
- //
- // Arrays are indexed by either:
- //
- // 1. the first element if there is one. Note that using the first is arbitrary; we could
- // just as well use any array element.). If the query is {a: [1, 2, 3]}, for example, then
- // using the bounds [1, 1] for the multikey index will pick up every document containing the
- // array [1, 2, 3].
- //
- // 2. undefined if the array is empty.
- //
- // Also, arrays are indexed by:
- //
- // 3. the full array if it's inside of another array. We check for this so that the query
- // {a: [1, 2, 3]} will match documents like {a: [[1, 2, 3], 4, 5]}.
-
- // Case 3.
- oil->intervals.push_back(makePointInterval(objFromElement(data)));
+ verify(dataObj.isOwned());
+ oil->intervals.push_back(makePointInterval(dataObj));
- if (data.Obj().isEmpty()) {
- // Case 2.
- BSONObjBuilder undefinedBob;
- undefinedBob.appendUndefined("");
- oil->intervals.push_back(makePointInterval(undefinedBob.obj()));
- }
- else {
- // Case 1.
- BSONElement firstEl = data.Obj().firstElement();
- oil->intervals.push_back(makePointInterval(objFromElement(firstEl)));
+ if (dataObj.firstElement().isNull() || isHashed) {
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ } else {
+ *tightnessOut = IndexBoundsBuilder::EXACT;
}
+ return;
+ }
- std::sort(oil->intervals.begin(), oil->intervals.end(), IntervalComparison);
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ // If we're here, Array == data.type().
+ //
+ // Using arrays with hashed indices is currently not supported, so we don't have to worry
+ // about that case.
+ //
+ // Arrays are indexed by either:
+ //
+ // 1. the first element if there is one. Note that using the first is arbitrary; we could
+    // just as well use any array element. If the query is {a: [1, 2, 3]}, for example, then
+ // using the bounds [1, 1] for the multikey index will pick up every document containing the
+ // array [1, 2, 3].
+ //
+ // 2. undefined if the array is empty.
+ //
+ // Also, arrays are indexed by:
+ //
+ // 3. the full array if it's inside of another array. We check for this so that the query
+ // {a: [1, 2, 3]} will match documents like {a: [[1, 2, 3], 4, 5]}.
+
+ // Case 3.
+ oil->intervals.push_back(makePointInterval(objFromElement(data)));
+
+ if (data.Obj().isEmpty()) {
+ // Case 2.
+ BSONObjBuilder undefinedBob;
+ undefinedBob.appendUndefined("");
+ oil->intervals.push_back(makePointInterval(undefinedBob.obj()));
+ } else {
+ // Case 1.
+ BSONElement firstEl = data.Obj().firstElement();
+ oil->intervals.push_back(makePointInterval(objFromElement(firstEl)));
}
- // static
- void IndexBoundsBuilder::allValuesBounds(const BSONObj& keyPattern, IndexBounds* bounds) {
- bounds->fields.resize(keyPattern.nFields());
+ std::sort(oil->intervals.begin(), oil->intervals.end(), IntervalComparison);
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+}
- BSONObjIterator it(keyPattern);
- int field = 0;
- while (it.more()) {
- IndexBoundsBuilder::allValuesForField(it.next(), &bounds->fields[field]);
- ++field;
- }
+// static
+void IndexBoundsBuilder::allValuesBounds(const BSONObj& keyPattern, IndexBounds* bounds) {
+ bounds->fields.resize(keyPattern.nFields());
- alignBounds(bounds, keyPattern);
+ BSONObjIterator it(keyPattern);
+ int field = 0;
+ while (it.more()) {
+ IndexBoundsBuilder::allValuesForField(it.next(), &bounds->fields[field]);
+ ++field;
}
- // static
- void IndexBoundsBuilder::alignBounds(IndexBounds* bounds, const BSONObj& kp, int scanDir) {
- BSONObjIterator it(kp);
- size_t oilIdx = 0;
- while (it.more()) {
- BSONElement elt = it.next();
- int direction = (elt.numberInt() >= 0) ? 1 : -1;
- direction *= scanDir;
- if (-1 == direction) {
- vector<Interval>& iv = bounds->fields[oilIdx].intervals;
- // Step 1: reverse the list.
- std::reverse(iv.begin(), iv.end());
- // Step 2: reverse each interval.
- for (size_t i = 0; i < iv.size(); ++i) {
- iv[i].reverse();
- }
- }
- ++oilIdx;
- }
-
- if (!bounds->isValidFor(kp, scanDir)) {
- log() << "INVALID BOUNDS: " << bounds->toString() << endl
- << "kp = " << kp.toString() << endl
- << "scanDir = " << scanDir << endl;
- verify(0);
- }
+ alignBounds(bounds, keyPattern);
+}
+
+// static
+void IndexBoundsBuilder::alignBounds(IndexBounds* bounds, const BSONObj& kp, int scanDir) {
+ BSONObjIterator it(kp);
+ size_t oilIdx = 0;
+ while (it.more()) {
+ BSONElement elt = it.next();
+ int direction = (elt.numberInt() >= 0) ? 1 : -1;
+ direction *= scanDir;
+ if (-1 == direction) {
+ vector<Interval>& iv = bounds->fields[oilIdx].intervals;
+ // Step 1: reverse the list.
+ std::reverse(iv.begin(), iv.end());
+ // Step 2: reverse each interval.
+ for (size_t i = 0; i < iv.size(); ++i) {
+ iv[i].reverse();
+ }
+ }
+ ++oilIdx;
}
- // static
- bool IndexBoundsBuilder::isSingleInterval(const IndexBounds& bounds,
- BSONObj* startKey,
- bool* startKeyInclusive,
- BSONObj* endKey,
- bool* endKeyInclusive) {
- // We build our start/end keys as we go.
- BSONObjBuilder startBob;
- BSONObjBuilder endBob;
-
- // The start and end keys are inclusive unless we have a non-point interval, in which case
- // we take the inclusivity from there.
- *startKeyInclusive = true;
- *endKeyInclusive = true;
-
- size_t fieldNo = 0;
-
- // First, we skip over point intervals.
- for (; fieldNo < bounds.fields.size(); ++fieldNo) {
- const OrderedIntervalList& oil = bounds.fields[fieldNo];
- // A point interval requires just one interval...
- if (1 != oil.intervals.size()) {
- break;
- }
- if (!oil.intervals[0].isPoint()) {
- break;
- }
- // Since it's a point, start == end.
- startBob.append(oil.intervals[0].start);
- endBob.append(oil.intervals[0].end);
- }
-
- if (fieldNo >= bounds.fields.size()) {
- // All our intervals are points. We count for all values of one field.
- *startKey = startBob.obj();
- *endKey = endBob.obj();
- return true;
- }
-
- // After point intervals we can have exactly one non-point interval.
- const OrderedIntervalList& nonPoint = bounds.fields[fieldNo];
- if (1 != nonPoint.intervals.size()) {
- return false;
- }
+ if (!bounds->isValidFor(kp, scanDir)) {
+ log() << "INVALID BOUNDS: " << bounds->toString() << endl
+ << "kp = " << kp.toString() << endl
+ << "scanDir = " << scanDir << endl;
+ verify(0);
+ }
+}
+
+// static
+bool IndexBoundsBuilder::isSingleInterval(const IndexBounds& bounds,
+ BSONObj* startKey,
+ bool* startKeyInclusive,
+ BSONObj* endKey,
+ bool* endKeyInclusive) {
+ // We build our start/end keys as we go.
+ BSONObjBuilder startBob;
+ BSONObjBuilder endBob;
+
+ // The start and end keys are inclusive unless we have a non-point interval, in which case
+ // we take the inclusivity from there.
+ *startKeyInclusive = true;
+ *endKeyInclusive = true;
+
+ size_t fieldNo = 0;
+
+ // First, we skip over point intervals.
+ for (; fieldNo < bounds.fields.size(); ++fieldNo) {
+ const OrderedIntervalList& oil = bounds.fields[fieldNo];
+ // A point interval requires just one interval...
+ if (1 != oil.intervals.size()) {
+ break;
+ }
+ if (!oil.intervals[0].isPoint()) {
+ break;
+ }
+ // Since it's a point, start == end.
+ startBob.append(oil.intervals[0].start);
+ endBob.append(oil.intervals[0].end);
+ }
- // Add the non-point interval to our builder and set the inclusivity from it.
- startBob.append(nonPoint.intervals[0].start);
- *startKeyInclusive = nonPoint.intervals[0].startInclusive;
- endBob.append(nonPoint.intervals[0].end);
- *endKeyInclusive = nonPoint.intervals[0].endInclusive;
-
- ++fieldNo;
-
- // Get some "all values" intervals for comparison's sake.
- // TODO: make static?
- Interval minMax = IndexBoundsBuilder::allValues();
- Interval maxMin = minMax;
- maxMin.reverse();
-
- // And after the non-point interval we can have any number of "all values" intervals.
- for (; fieldNo < bounds.fields.size(); ++fieldNo) {
- const OrderedIntervalList& oil = bounds.fields[fieldNo];
- // "All Values" is just one point.
- if (1 != oil.intervals.size()) {
- break;
- }
+ if (fieldNo >= bounds.fields.size()) {
+ // All our intervals are points. We count for all values of one field.
+ *startKey = startBob.obj();
+ *endKey = endBob.obj();
+ return true;
+ }
- // Must be min->max or max->min.
- if (oil.intervals[0].equals(minMax)) {
- // As an example for the logic below, consider the index {a:1, b:1} and a count for
- // {a: {$gt: 2}}. Our start key isn't inclusive (as it's $gt: 2) and looks like
- // {"":2} so far. If we move to the key greater than {"":2, "": MaxKey} we will get
- // the first value of 'a' that is greater than 2.
- if (!*startKeyInclusive) {
- startBob.appendMaxKey("");
- }
- else {
- // In this case, consider the index {a:1, b:1} and a count for {a:{$gte: 2}}.
- // We want to look at all values where a is 2, so our start key is {"":2,
- // "":MinKey}.
- startBob.appendMinKey("");
- }
+ // After point intervals we can have exactly one non-point interval.
+ const OrderedIntervalList& nonPoint = bounds.fields[fieldNo];
+ if (1 != nonPoint.intervals.size()) {
+ return false;
+ }
- // Same deal as above. Consider the index {a:1, b:1} and a count for {a: {$lt: 2}}.
- // Our end key isn't inclusive as ($lt: 2) and looks like {"":2} so far. We can't
- // look at any values where a is 2 so we have to stop at {"":2, "": MinKey} as
- // that's the smallest key where a is still 2.
- if (!*endKeyInclusive) {
- endBob.appendMinKey("");
- }
- else {
- endBob.appendMaxKey("");
- }
- }
- else if (oil.intervals[0].equals(maxMin)) {
- // The reasoning here is the same as above but with the directions reversed.
- if (!*startKeyInclusive) {
- startBob.appendMinKey("");
- }
- else {
- startBob.appendMaxKey("");
- }
- if (!*endKeyInclusive) {
- endBob.appendMaxKey("");
- }
- else {
- endBob.appendMinKey("");
- }
- }
- else {
- // No dice.
- break;
- }
+ // Add the non-point interval to our builder and set the inclusivity from it.
+ startBob.append(nonPoint.intervals[0].start);
+ *startKeyInclusive = nonPoint.intervals[0].startInclusive;
+ endBob.append(nonPoint.intervals[0].end);
+ *endKeyInclusive = nonPoint.intervals[0].endInclusive;
+
+ ++fieldNo;
+
+ // Get some "all values" intervals for comparison's sake.
+ // TODO: make static?
+ Interval minMax = IndexBoundsBuilder::allValues();
+ Interval maxMin = minMax;
+ maxMin.reverse();
+
+ // And after the non-point interval we can have any number of "all values" intervals.
+ for (; fieldNo < bounds.fields.size(); ++fieldNo) {
+ const OrderedIntervalList& oil = bounds.fields[fieldNo];
+        // "All Values" is just one interval.
+ if (1 != oil.intervals.size()) {
+ break;
+ }
+
+ // Must be min->max or max->min.
+ if (oil.intervals[0].equals(minMax)) {
+ // As an example for the logic below, consider the index {a:1, b:1} and a count for
+ // {a: {$gt: 2}}. Our start key isn't inclusive (as it's $gt: 2) and looks like
+ // {"":2} so far. If we move to the key greater than {"":2, "": MaxKey} we will get
+ // the first value of 'a' that is greater than 2.
+ if (!*startKeyInclusive) {
+ startBob.appendMaxKey("");
+ } else {
+ // In this case, consider the index {a:1, b:1} and a count for {a:{$gte: 2}}.
+ // We want to look at all values where a is 2, so our start key is {"":2,
+ // "":MinKey}.
+ startBob.appendMinKey("");
+ }
+
+ // Same deal as above. Consider the index {a:1, b:1} and a count for {a: {$lt: 2}}.
+ // Our end key isn't inclusive as ($lt: 2) and looks like {"":2} so far. We can't
+ // look at any values where a is 2 so we have to stop at {"":2, "": MinKey} as
+ // that's the smallest key where a is still 2.
+ if (!*endKeyInclusive) {
+ endBob.appendMinKey("");
+ } else {
+ endBob.appendMaxKey("");
+ }
+ } else if (oil.intervals[0].equals(maxMin)) {
+ // The reasoning here is the same as above but with the directions reversed.
+ if (!*startKeyInclusive) {
+ startBob.appendMinKey("");
+ } else {
+ startBob.appendMaxKey("");
+ }
+ if (!*endKeyInclusive) {
+ endBob.appendMaxKey("");
+ } else {
+ endBob.appendMinKey("");
+ }
+ } else {
+ // No dice.
+ break;
}
+ }
- if (fieldNo >= bounds.fields.size()) {
- *startKey = startBob.obj();
- *endKey = endBob.obj();
- return true;
- }
- else {
- return false;
- }
+ if (fieldNo >= bounds.fields.size()) {
+ *startKey = startBob.obj();
+ *endKey = endBob.obj();
+ return true;
+ } else {
+ return false;
}
+}
} // namespace mongo
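The reformatted isSingleInterval() above accepts bounds shaped as a run of point (equality)
intervals, then at most one non-point interval, then nothing but "all values" intervals, and
only then emits a single start/end key pair. The following is a minimal standalone sketch of
just that shape test; SimpleInterval, SimpleField, and hasSingleIntervalShape are hypothetical
stand-ins for illustration only, and it omits the MinKey/MaxKey key construction and the
minMax/maxMin direction handling done by the real function.

#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical stand-ins for mongo::Interval / mongo::IndexBounds, for illustration only.
struct SimpleInterval {
    bool isPoint;      // start == end, both endpoints inclusive
    bool isAllValues;  // spans MinKey -> MaxKey (or the reverse)
};

struct SimpleField {
    std::vector<SimpleInterval> intervals;  // one OrderedIntervalList's worth of intervals
};

// Mirrors the shape check in IndexBoundsBuilder::isSingleInterval(): leading point
// intervals, then at most one non-point interval, then only "all values" fields.
bool hasSingleIntervalShape(const std::vector<SimpleField>& fields) {
    std::size_t i = 0;
    // 1. Any number of leading equality (point) fields.
    while (i < fields.size() && fields[i].intervals.size() == 1 && fields[i].intervals[0].isPoint) {
        ++i;
    }
    if (i == fields.size()) {
        return true;
    }
    // 2. Exactly one non-point interval may follow.
    if (fields[i].intervals.size() != 1) {
        return false;
    }
    ++i;
    // 3. Every remaining field must contribute a single "all values" interval.
    for (; i < fields.size(); ++i) {
        if (fields[i].intervals.size() != 1 || !fields[i].intervals[0].isAllValues) {
            return false;
        }
    }
    return true;
}

int main() {
    SimpleField a, b;
    a.intervals.push_back({true, false});   // equality on the first field
    b.intervals.push_back({false, false});  // one range on the second field
    std::vector<SimpleField> compound = {a, b};

    SimpleField c;
    c.intervals.push_back({false, false});  // two disjoint ranges on one field,
    c.intervals.push_back({false, false});  // e.g. produced by an $in
    std::vector<SimpleField> multi = {c};

    std::cout << std::boolalpha << hasSingleIntervalShape(compound) << " "  // true
              << hasSingleIntervalShape(multi) << "\n";                     // false
    return 0;
}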
diff --git a/src/mongo/db/query/index_bounds_builder.h b/src/mongo/db/query/index_bounds_builder.h
index 094e8a4dbc1..785f78feb6d 100644
--- a/src/mongo/db/query/index_bounds_builder.h
+++ b/src/mongo/db/query/index_bounds_builder.h
@@ -36,161 +36,158 @@
namespace mongo {
+/**
+ * Translates expressions over fields into bounds on an index.
+ */
+class IndexBoundsBuilder {
+public:
+ /**
+ * Describes various degrees of precision with which predicates can be evaluated based
+ * on the index bounds.
+ *
+ * The integer values of the enum are significant, and are assigned in order of
+ * increasing tightness. These values are used when we need to do comparison between two
+ * BoundsTightness values. Such comparisons can answer questions such as "Does predicate
+ * X have tighter or looser bounds than predicate Y?".
+ */
+ enum BoundsTightness {
+ // Index bounds are inexact, and a fetch is required.
+ INEXACT_FETCH = 0,
+
+        // Index bounds are inexact, but no fetch is required.
+ INEXACT_COVERED = 1,
+
+ // Index bounds are exact.
+ EXACT = 2
+ };
+
+ /**
+     * Populate the provided O.I.L. with one interval that goes from MinKey to MaxKey (or vice-versa
+ * depending on the index direction).
+ */
+ static void allValuesForField(const BSONElement& elt, OrderedIntervalList* out);
+
/**
- * Translates expressions over fields into bounds on an index.
+ * Turn the MatchExpression in 'expr' into a set of index bounds. The field that 'expr' is
+ * concerned with is indexed according to the keypattern element 'elt' from index 'index'.
+ *
+ * If 'expr' is elemMatch, the index tag is affixed to a child.
+ *
+ * The expression must be a predicate over one field. That is, expr->isLeaf() or
+ * expr->isArray() must be true, and expr->isLogical() must be false.
*/
- class IndexBoundsBuilder {
- public:
- /**
- * Describes various degrees of precision with which predicates can be evaluated based
- * on the index bounds.
- *
- * The integer values of the enum are significant, and are assigned in order of
- * increasing tightness. These values are used when we need to do comparison between two
- * BoundsTightness values. Such comparisons can answer questions such as "Does predicate
- * X have tighter or looser bounds than predicate Y?".
- */
- enum BoundsTightness {
- // Index bounds are inexact, and a fetch is required.
- INEXACT_FETCH = 0,
-
- // Index bounds are inexact, but no fetch is required
- INEXACT_COVERED = 1,
-
- // Index bounds are exact.
- EXACT = 2
- };
-
- /**
- * Populate the provided O.I.L. with one interval goes from MinKey to MaxKey (or vice-versa
- * depending on the index direction).
- */
- static void allValuesForField(const BSONElement& elt, OrderedIntervalList* out);
-
- /**
- * Turn the MatchExpression in 'expr' into a set of index bounds. The field that 'expr' is
- * concerned with is indexed according to the keypattern element 'elt' from index 'index'.
- *
- * If 'expr' is elemMatch, the index tag is affixed to a child.
- *
- * The expression must be a predicate over one field. That is, expr->isLeaf() or
- * expr->isArray() must be true, and expr->isLogical() must be false.
- */
- static void translate(const MatchExpression* expr,
- const BSONElement& elt,
- const IndexEntry& index,
- OrderedIntervalList* oilOut,
- BoundsTightness* tightnessOut);
-
- /**
- * Creates bounds for 'expr' (indexed according to 'elt'). Intersects those bounds
- * with the bounds in oilOut, which is an in/out parameter.
- */
- static void translateAndIntersect(const MatchExpression* expr,
- const BSONElement& elt,
- const IndexEntry& index,
- OrderedIntervalList* oilOut,
- BoundsTightness* tightnessOut);
-
- /**
- * Creates bounds for 'expr' (indexed according to 'elt'). Unions those bounds
- * with the bounds in oilOut, which is an in/out parameter.
- */
- static void translateAndUnion(const MatchExpression* expr,
+ static void translate(const MatchExpression* expr,
+ const BSONElement& elt,
+ const IndexEntry& index,
+ OrderedIntervalList* oilOut,
+ BoundsTightness* tightnessOut);
+
+ /**
+ * Creates bounds for 'expr' (indexed according to 'elt'). Intersects those bounds
+ * with the bounds in oilOut, which is an in/out parameter.
+ */
+ static void translateAndIntersect(const MatchExpression* expr,
const BSONElement& elt,
const IndexEntry& index,
OrderedIntervalList* oilOut,
BoundsTightness* tightnessOut);
- /**
- * Make a range interval from the provided object.
- * The object must have exactly two fields. The first field is the start, the second the
- * end.
- * The two inclusive flags indicate whether or not the start/end fields are included in the
- * interval (closed interval if included, open if not).
- */
- static Interval makeRangeInterval(const BSONObj& obj,
- bool startInclusive,
- bool endInclusive);
-
- static Interval makeRangeInterval(const std::string& start,
- const std::string& end,
- bool startInclusive,
- bool endInclusive);
-
- /**
- * Make a point interval from the provided object.
- * The object must have exactly one field which is the value of the point interval.
- */
- static Interval makePointInterval(const BSONObj& obj);
- static Interval makePointInterval(const std::string& str);
- static Interval makePointInterval(double d);
-
- /**
- * Since we have no BSONValue we must make an object that's a copy of a piece of another
- * object.
- */
- static BSONObj objFromElement(const BSONElement& elt);
-
- /**
- * Swap start/end in the provided interval.
- */
- static void reverseInterval(Interval* ival);
-
- /**
- * Copied almost verbatim from db/queryutil.cpp.
- *
- * returns a std::string that when used as a matcher, would match a super set of regex()
- *
- * returns "" for complex regular expressions
- *
- * used to optimize queries in some simple regex cases that start with '^'
- */
- static std::string simpleRegex(const char* regex,
- const char* flags,
+ /**
+ * Creates bounds for 'expr' (indexed according to 'elt'). Unions those bounds
+ * with the bounds in oilOut, which is an in/out parameter.
+ */
+ static void translateAndUnion(const MatchExpression* expr,
+ const BSONElement& elt,
+ const IndexEntry& index,
+ OrderedIntervalList* oilOut,
BoundsTightness* tightnessOut);
- /**
- * Returns an Interval from minKey to maxKey
- */
- static Interval allValues();
+ /**
+ * Make a range interval from the provided object.
+ * The object must have exactly two fields. The first field is the start, the second the
+ * end.
+ * The two inclusive flags indicate whether or not the start/end fields are included in the
+ * interval (closed interval if included, open if not).
+ */
+ static Interval makeRangeInterval(const BSONObj& obj, bool startInclusive, bool endInclusive);
+
+ static Interval makeRangeInterval(const std::string& start,
+ const std::string& end,
+ bool startInclusive,
+ bool endInclusive);
+
+ /**
+ * Make a point interval from the provided object.
+ * The object must have exactly one field which is the value of the point interval.
+ */
+ static Interval makePointInterval(const BSONObj& obj);
+ static Interval makePointInterval(const std::string& str);
+ static Interval makePointInterval(double d);
+
+ /**
+ * Since we have no BSONValue we must make an object that's a copy of a piece of another
+ * object.
+ */
+ static BSONObj objFromElement(const BSONElement& elt);
+
+ /**
+ * Swap start/end in the provided interval.
+ */
+ static void reverseInterval(Interval* ival);
- static void translateRegex(const RegexMatchExpression* rme,
- OrderedIntervalList* oil,
+ /**
+ * Copied almost verbatim from db/queryutil.cpp.
+ *
+     * returns a std::string that, when used as a matcher, would match a superset of regex()
+ *
+ * returns "" for complex regular expressions
+ *
+ * used to optimize queries in some simple regex cases that start with '^'
+ */
+ static std::string simpleRegex(const char* regex,
+ const char* flags,
BoundsTightness* tightnessOut);
- static void translateEquality(const BSONElement& data,
- bool isHashed,
- OrderedIntervalList* oil,
- BoundsTightness* tightnessOut);
+ /**
+ * Returns an Interval from minKey to maxKey
+ */
+ static Interval allValues();
- static void unionize(OrderedIntervalList* oilOut);
- static void intersectize(const OrderedIntervalList& arg,
- OrderedIntervalList* oilOut);
-
- /**
- * Fills out 'bounds' with the bounds for an index scan over all values of the
- * index described by 'keyPattern' in the default forward direction.
- */
- static void allValuesBounds(const BSONObj& keyPattern, IndexBounds* bounds);
-
- /**
- * Assumes each OIL in 'bounds' is increasing.
- *
- * Aligns OILs (and bounds) according to the 'kp' direction * the scanDir.
- */
- static void alignBounds(IndexBounds* bounds, const BSONObj& kp, int scanDir = 1);
-
- /**
- * Returns 'true' if the bounds 'bounds' can be represented as one interval between
- * 'startKey' and 'endKey'. Inclusivity of each bound is set through the relevant
- * (name)KeyInclusive parameter. Returns 'false' if otherwise.
- */
- static bool isSingleInterval(const IndexBounds& bounds,
- BSONObj* startKey,
- bool* startKeyInclusive,
- BSONObj* endKey,
- bool* endKeyInclusive);
- };
+ static void translateRegex(const RegexMatchExpression* rme,
+ OrderedIntervalList* oil,
+ BoundsTightness* tightnessOut);
+
+ static void translateEquality(const BSONElement& data,
+ bool isHashed,
+ OrderedIntervalList* oil,
+ BoundsTightness* tightnessOut);
+
+ static void unionize(OrderedIntervalList* oilOut);
+ static void intersectize(const OrderedIntervalList& arg, OrderedIntervalList* oilOut);
+
+ /**
+ * Fills out 'bounds' with the bounds for an index scan over all values of the
+ * index described by 'keyPattern' in the default forward direction.
+ */
+ static void allValuesBounds(const BSONObj& keyPattern, IndexBounds* bounds);
+
+ /**
+ * Assumes each OIL in 'bounds' is increasing.
+ *
+ * Aligns OILs (and bounds) according to the 'kp' direction * the scanDir.
+ */
+ static void alignBounds(IndexBounds* bounds, const BSONObj& kp, int scanDir = 1);
+
+ /**
+ * Returns 'true' if the bounds 'bounds' can be represented as one interval between
+ * 'startKey' and 'endKey'. Inclusivity of each bound is set through the relevant
+     * (name)KeyInclusive parameter. Returns 'false' otherwise.
+ */
+ static bool isSingleInterval(const IndexBounds& bounds,
+ BSONObj* startKey,
+ bool* startKeyInclusive,
+ BSONObj* endKey,
+ bool* endKeyInclusive);
+};
} // namespace mongo
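One consequence of the BoundsTightness ordering documented above is that a plain integer
comparison answers the "tighter or looser" question mentioned in the comment. Below is a
minimal standalone sketch, with a local copy of the enum values and a hypothetical
isTighterThan() helper that is not part of IndexBoundsBuilder.

#include <iostream>

// Local copy of the IndexBoundsBuilder::BoundsTightness values, for illustration only.
enum BoundsTightness {
    INEXACT_FETCH = 0,    // bounds are inexact and a fetch is required
    INEXACT_COVERED = 1,  // bounds are inexact, but no fetch is required
    EXACT = 2             // bounds are exact
};

// Hypothetical helper: because the enum values increase with tightness, an ordinary
// integer comparison tells whether one predicate's bounds are tighter than another's.
bool isTighterThan(BoundsTightness x, BoundsTightness y) {
    return x > y;
}

int main() {
    std::cout << std::boolalpha
              << isTighterThan(EXACT, INEXACT_COVERED) << "\n"           // true
              << isTighterThan(INEXACT_FETCH, INEXACT_COVERED) << "\n";  // false
    return 0;
}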
diff --git a/src/mongo/db/query/index_bounds_builder_test.cpp b/src/mongo/db/query/index_bounds_builder_test.cpp
index 1b349b0515a..cf8129c016d 100644
--- a/src/mongo/db/query/index_bounds_builder_test.cpp
+++ b/src/mongo/db/query/index_bounds_builder_test.cpp
@@ -42,1353 +42,1373 @@ using namespace mongo;
namespace {
- using std::unique_ptr;
- using std::numeric_limits;
- using std::string;
- using std::vector;
-
- double numberMin = -numeric_limits<double>::max();
- double numberMax = numeric_limits<double>::max();
- double negativeInfinity = -numeric_limits<double>::infinity();
- double positiveInfinity = numeric_limits<double>::infinity();
-
- /**
- * Utility function to create MatchExpression
- */
- MatchExpression* parseMatchExpression(const BSONObj& obj) {
- StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
- ASSERT_TRUE(status.isOK());
- MatchExpression* expr(status.getValue());
- return expr;
- }
+using std::unique_ptr;
+using std::numeric_limits;
+using std::string;
+using std::vector;
- /**
- * Given a list of queries in 'toUnion', translate into index bounds and return
- * the union of these bounds in the out-parameter 'oilOut'.
- */
- void testTranslateAndUnion(const vector<BSONObj>& toUnion, OrderedIntervalList* oilOut,
- IndexBoundsBuilder::BoundsTightness* tightnessOut) {
+double numberMin = -numeric_limits<double>::max();
+double numberMax = numeric_limits<double>::max();
+double negativeInfinity = -numeric_limits<double>::infinity();
+double positiveInfinity = numeric_limits<double>::infinity();
- IndexEntry testIndex = IndexEntry(BSONObj());
-
- for (vector<BSONObj>::const_iterator it = toUnion.begin();
- it != toUnion.end();
- ++it) {
- unique_ptr<MatchExpression> expr(parseMatchExpression(*it));
- BSONElement elt = it->firstElement();
- if (toUnion.begin() == it) {
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, oilOut, tightnessOut);
- }
- else {
- IndexBoundsBuilder::translateAndUnion(expr.get(), elt, testIndex, oilOut, tightnessOut);
- }
- }
- }
+/**
+ * Utility function to create MatchExpression
+ */
+MatchExpression* parseMatchExpression(const BSONObj& obj) {
+ StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
+ ASSERT_TRUE(status.isOK());
+ MatchExpression* expr(status.getValue());
+ return expr;
+}
- /**
- * Given a list of queries in 'toUnion', translate into index bounds and return
- * the intersection of these bounds in the out-parameter 'oilOut'.
- */
- void testTranslateAndIntersect(const vector<BSONObj>& toIntersect, OrderedIntervalList* oilOut,
- IndexBoundsBuilder::BoundsTightness* tightnessOut) {
-
- IndexEntry testIndex = IndexEntry(BSONObj());
-
- for (vector<BSONObj>::const_iterator it = toIntersect.begin();
- it != toIntersect.end();
- ++it) {
- unique_ptr<MatchExpression> expr(parseMatchExpression(*it));
- BSONElement elt = it->firstElement();
- if (toIntersect.begin() == it) {
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, oilOut, tightnessOut);
- }
- else {
- IndexBoundsBuilder::translateAndIntersect(expr.get(), elt, testIndex, oilOut, tightnessOut);
- }
+/**
+ * Given a list of queries in 'toUnion', translate into index bounds and return
+ * the union of these bounds in the out-parameter 'oilOut'.
+ */
+void testTranslateAndUnion(const vector<BSONObj>& toUnion,
+ OrderedIntervalList* oilOut,
+ IndexBoundsBuilder::BoundsTightness* tightnessOut) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+
+ for (vector<BSONObj>::const_iterator it = toUnion.begin(); it != toUnion.end(); ++it) {
+ unique_ptr<MatchExpression> expr(parseMatchExpression(*it));
+ BSONElement elt = it->firstElement();
+ if (toUnion.begin() == it) {
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, oilOut, tightnessOut);
+ } else {
+ IndexBoundsBuilder::translateAndUnion(expr.get(), elt, testIndex, oilOut, tightnessOut);
}
}
+}
- /**
- * 'constraints' is a vector of BSONObj's representing match expressions, where
- * each filter is paired with a boolean. If the boolean is true, then the filter's
- * index bounds should be intersected with the other constraints; if false, then
- * they should be unioned. The resulting bounds are returned in the
- * out-parameter 'oilOut'.
- */
- void testTranslate(const vector< std::pair<BSONObj, bool> >& constraints,
- OrderedIntervalList* oilOut,
- IndexBoundsBuilder::BoundsTightness* tightnessOut) {
-
- IndexEntry testIndex = IndexEntry(BSONObj());
-
- for (vector< std::pair<BSONObj, bool> >::const_iterator it = constraints.begin();
- it != constraints.end();
- ++it) {
- BSONObj obj = it->first;
- bool isIntersect = it->second;
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- if (constraints.begin() == it) {
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, oilOut, tightnessOut);
- }
- else if (isIntersect) {
- IndexBoundsBuilder::translateAndIntersect(expr.get(), elt, testIndex, oilOut, tightnessOut);
- }
- else {
- IndexBoundsBuilder::translateAndUnion(expr.get(), elt, testIndex, oilOut, tightnessOut);
- }
+/**
+ * Given a list of queries in 'toIntersect', translate into index bounds and return
+ * the intersection of these bounds in the out-parameter 'oilOut'.
+ */
+void testTranslateAndIntersect(const vector<BSONObj>& toIntersect,
+ OrderedIntervalList* oilOut,
+ IndexBoundsBuilder::BoundsTightness* tightnessOut) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+
+ for (vector<BSONObj>::const_iterator it = toIntersect.begin(); it != toIntersect.end(); ++it) {
+ unique_ptr<MatchExpression> expr(parseMatchExpression(*it));
+ BSONElement elt = it->firstElement();
+ if (toIntersect.begin() == it) {
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, oilOut, tightnessOut);
+ } else {
+ IndexBoundsBuilder::translateAndIntersect(
+ expr.get(), elt, testIndex, oilOut, tightnessOut);
}
}
+}
- /**
- * run isSingleInterval and return the result to calling test.
- */
- bool testSingleInterval(IndexBounds bounds) {
- BSONObj startKey;
- bool startKeyIn;
- BSONObj endKey;
- bool endKeyIn;
- return IndexBoundsBuilder::isSingleInterval( bounds,
- &startKey,
- &startKeyIn,
- &endKey,
- &endKeyIn );
- }
-
- //
- // $elemMatch value
- // Example: {a: {$elemMatch: {$gt: 2}}}
- //
-
- TEST(IndexBoundsBuilderTest, TranslateElemMatchValue) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- // Bounds generated should be the same as the embedded expression
- // except for the tightness.
- BSONObj obj = fromjson("{a: {$elemMatch: {$gt: 2}}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 2, '': Infinity}"), false, true)));
- ASSERT(tightness == IndexBoundsBuilder::INEXACT_FETCH);
- }
-
- //
- // Comparison operators ($lte, $lt, $gt, $gte, $eq)
- //
-
- TEST(IndexBoundsBuilderTest, TranslateLteNumber) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$lte: 1}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': -Infinity, '': 1}"), true, true)));
- ASSERT(tightness == IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLteNumberMin) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = BSON("a" << BSON("$lte" << numberMin));
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(BSON("" << negativeInfinity << "" << numberMin), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLteNegativeInfinity) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$lte: -Infinity}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': -Infinity, '': -Infinity}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLtNumber) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$lt: 1}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': -Infinity, '': 1}"), true, false)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLtNumberMin) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = BSON("a" << BSON("$lt" << numberMin));
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(BSON("" << negativeInfinity << "" << numberMin), true, false)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLtNegativeInfinity) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$lt: -Infinity}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 0U);
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLtDate) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = BSON("a" << LT << Date_t::fromMillisSinceEpoch(5000));
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': true, '': new Date(5000)}"), false, false)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGtNumber) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gt: 1}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 1, '': Infinity}"), false, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGtNumberMax) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = BSON("a" << BSON("$gt" << numberMax));
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(BSON("" << numberMax << "" << positiveInfinity), false, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGtPositiveInfinity) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gt: Infinity}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 0U);
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGteNumber) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gte: 1}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 1, '': Infinity}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGteNumberMax) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = BSON("a" << BSON("$gte" << numberMax));
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(BSON("" << numberMax << "" << positiveInfinity), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGtePositiveInfinity) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gte: Infinity}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': Infinity, '': Infinity}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGtString) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gt: 'abc'}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 'abc', '': {}}"), false, false)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateEqualNan) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: NaN}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': NaN, '': NaN}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLtNan) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$lt: NaN}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 0U);
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLteNan) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$lte: NaN}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': NaN, '': NaN}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGtNan) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gt: NaN}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 0U);
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGteNan) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gte: NaN}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': NaN, '': NaN}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateEqual) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = BSON("a" << 4);
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 4, '': 4}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateArrayEqualBasic) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: [1, 2, 3]}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 1, '': 1}"), true, true)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': [1, 2, 3], '': [1, 2, 3]}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateIn) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$in: [8, 44, -1, -3]}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 4U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': -3, '': -3}"), true, true)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': -1, '': -1}"), true, true)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[2].compare(
- Interval(fromjson("{'': 8, '': 8}"), true, true)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[3].compare(
- Interval(fromjson("{'': 44, '': 44}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateInArray) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$in: [[1], 2]}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 3U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 1, '': 1}"), true, true)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': 2, '': 2}"), true, true)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[2].compare(
- Interval(fromjson("{'': [1], '': [1]}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLteBinData) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$lte: {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA',"
- "$type: '00'}}}");
- std::unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQ(oil.name, "a");
- ASSERT_EQ(oil.intervals.size(), 1U);
- ASSERT_EQ(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': {$binary: '', $type: '00'},"
- "'': {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}"),
- true, true)));
- ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLtBinData) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$lt: {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA',"
- "$type: '00'}}}");
- std::unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQ(oil.name, "a");
- ASSERT_EQ(oil.intervals.size(), 1U);
- ASSERT_EQ(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': {$binary: '', $type: '00'},"
- "'': {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}"),
- true, false)));
- ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGtBinData) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gt: {$binary: '////////////////////////////',"
- "$type: '00'}}}");
- std::unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQ(oil.name, "a");
- ASSERT_EQ(oil.intervals.size(), 1U);
- ASSERT_EQ(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': {$binary: '////////////////////////////', $type: '00'},"
- "'': ObjectId('000000000000000000000000')}"),
- false, false)));
- ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGteBinData) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gte: {$binary: '////////////////////////////',"
- "$type: '00'}}}");
- std::unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQ(oil.name, "a");
- ASSERT_EQ(oil.intervals.size(), 1U);
- ASSERT_EQ(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': {$binary: '////////////////////////////', $type: '00'},"
- "'': ObjectId('000000000000000000000000')}"),
- true, false)));
- ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
- }
-
- //
- // $exists tests
- //
-
- TEST(IndexBoundsBuilderTest, ExistsTrue) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$exists: true}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
- oil.intervals[0].compare(IndexBoundsBuilder::allValues()));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH);
- }
-
- TEST(IndexBoundsBuilderTest, ExistsFalse) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$exists: false}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': null, '': null}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH);
- }
-
- TEST(IndexBoundsBuilderTest, ExistsTrueSparse) {
- IndexEntry testIndex = IndexEntry(BSONObj(),
- false, // multikey
- true, // sparse
- false, // unique
- "exists_true_sparse",
- nullptr, // filterExpr
- BSONObj());
- BSONObj obj = fromjson("{a: {$exists: true}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
- oil.intervals[0].compare(IndexBoundsBuilder::allValues()));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- //
- // Union tests
- //
-
- TEST(IndexBoundsBuilderTest, UnionTwoLt) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toUnion;
- toUnion.push_back(fromjson("{a: {$lt: 1}}"));
- toUnion.push_back(fromjson("{a: {$lt: 5}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndUnion(toUnion, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': -Infinity, '': 5}"), true, false)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, UnionDupEq) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toUnion;
- toUnion.push_back(fromjson("{a: 1}"));
- toUnion.push_back(fromjson("{a: 5}"));
- toUnion.push_back(fromjson("{a: 1}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndUnion(toUnion, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 1, '': 1}"), true, true)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': 5, '': 5}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, UnionGtLt) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toUnion;
- toUnion.push_back(fromjson("{a: {$gt: 1}}"));
- toUnion.push_back(fromjson("{a: {$lt: 3}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndUnion(toUnion, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': -Infinity, '': Infinity}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, UnionTwoEmptyRanges) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector< std::pair<BSONObj, bool> > constraints;
- constraints.push_back(std::make_pair(fromjson("{a: {$gt: 1}}"), true));
- constraints.push_back(std::make_pair(fromjson("{a: {$lte: 0}}"), true));
- constraints.push_back(std::make_pair(fromjson("{a: {$in:[]}}"), false));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslate(constraints, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 0U);
- }
-
- //
- // Intersection tests
- //
-
- TEST(IndexBoundsBuilderTest, IntersectTwoLt) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toIntersect;
- toIntersect.push_back(fromjson("{a: {$lt: 1}}"));
- toIntersect.push_back(fromjson("{a: {$lt: 5}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndIntersect(toIntersect, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': -Infinity, '': 1}"), true, false)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, IntersectEqGte) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toIntersect;
- toIntersect.push_back(fromjson("{a: 1}}"));
- toIntersect.push_back(fromjson("{a: {$gte: 1}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndIntersect(toIntersect, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 1, '': 1}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, IntersectGtLte) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toIntersect;
- toIntersect.push_back(fromjson("{a: {$gt: 0}}"));
- toIntersect.push_back(fromjson("{a: {$lte: 10}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndIntersect(toIntersect, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 0, '': 10}"), false, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, IntersectGtIn) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toIntersect;
- toIntersect.push_back(fromjson("{a: {$gt: 4}}"));
- toIntersect.push_back(fromjson("{a: {$in: [1,2,3,4,5,6]}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndIntersect(toIntersect, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 5, '': 5}"), true, true)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': 6, '': 6}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, IntersectionIsPointInterval) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toIntersect;
- toIntersect.push_back(fromjson("{a: {$gte: 1}}"));
- toIntersect.push_back(fromjson("{a: {$lte: 1}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndIntersect(toIntersect, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 1, '': 1}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, IntersectFullyContained) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toIntersect;
- toIntersect.push_back(fromjson("{a: {$gt: 5}}"));
- toIntersect.push_back(fromjson("{a: {$lt: 15}}"));
- toIntersect.push_back(fromjson("{a: {$gte: 6}}"));
- toIntersect.push_back(fromjson("{a: {$lte: 13}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndIntersect(toIntersect, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 6, '': 13}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, EmptyIntersection) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toIntersect;
- toIntersect.push_back(fromjson("{a: 1}}"));
- toIntersect.push_back(fromjson("{a: {$gte: 2}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndIntersect(toIntersect, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 0U);
- }
-
- //
- // $mod
- //
-
- TEST(IndexBoundsBuilderTest, TranslateMod) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$mod: [2, 0]}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(BSON("" << numberMin << "" << numberMax), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- //
- // Test simpleRegex
- //
-
- TEST(SimpleRegexTest, RootedLine) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("^foo", "", &tightness);
- ASSERT_EQUALS(prefix, "foo");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(SimpleRegexTest, RootedString) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("\\Afoo", "", &tightness);
- ASSERT_EQUALS(prefix, "foo");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(SimpleRegexTest, RootedOptionalFirstChar) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("^f?oo", "", &tightness);
- ASSERT_EQUALS(prefix, "");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(SimpleRegexTest, RootedOptionalSecondChar) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("^fz?oo", "", &tightness);
- ASSERT_EQUALS(prefix, "f");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(SimpleRegexTest, RootedMultiline) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("^foo", "m", &tightness);
- ASSERT_EQUALS(prefix, "");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(SimpleRegexTest, RootedStringMultiline) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("\\Afoo", "m", &tightness);
- ASSERT_EQUALS(prefix, "foo");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(SimpleRegexTest, RootedCaseInsensitiveMulti) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("\\Afoo", "mi", &tightness);
- ASSERT_EQUALS(prefix, "");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(SimpleRegexTest, RootedComplex) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "\\Af \t\vo\n\ro \\ \\# #comment", "mx", &tightness);
- ASSERT_EQUALS(prefix, "foo #");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(SimpleRegexTest, RootedLiteral) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^\\Qasdf\\E", "", &tightness);
- ASSERT_EQUALS(prefix, "asdf");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(SimpleRegexTest, RootedLiteralWithExtra) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^\\Qasdf\\E.*", "", &tightness);
- ASSERT_EQUALS(prefix, "asdf");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(SimpleRegexTest, RootedLiteralNoEnd) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^\\Qasdf", "", &tightness);
- ASSERT_EQUALS(prefix, "asdf");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(SimpleRegexTest, RootedLiteralBackslash) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^\\Qasdf\\\\E", "", &tightness);
- ASSERT_EQUALS(prefix, "asdf\\");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(SimpleRegexTest, RootedLiteralDotStar) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^\\Qas.*df\\E", "", &tightness);
- ASSERT_EQUALS(prefix, "as.*df");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(SimpleRegexTest, RootedLiteralNestedEscape) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^\\Qas\\Q[df\\E", "", &tightness);
- ASSERT_EQUALS(prefix, "as\\Q[df");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(SimpleRegexTest, RootedLiteralNestedEscapeEnd) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^\\Qas\\E\\\\E\\Q$df\\E", "", &tightness);
- ASSERT_EQUALS(prefix, "as\\E$df");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- // A regular expression with the "|" character is not considered simple. See SERVER-15235.
- TEST(SimpleRegexTest, PipeCharacterDisallowed) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^(a(a|$)|b", "", &tightness);
- ASSERT_EQUALS(prefix, "");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(SimpleRegexTest, PipeCharacterDisallowed2) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^(a(a|$)|^b", "", &tightness);
- ASSERT_EQUALS(prefix, "");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- // SERVER-9035
- TEST(SimpleRegexTest, RootedSingleLineMode) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("^foo", "s", &tightness);
- ASSERT_EQUALS(prefix, "foo");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- // SERVER-9035
- TEST(SimpleRegexTest, NonRootedSingleLineMode) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("foo", "s", &tightness);
- ASSERT_EQUALS(prefix, "");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- // SERVER-9035
- TEST(SimpleRegexTest, RootedComplexSingleLineMode) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "\\Af \t\vo\n\ro \\ \\# #comment", "msx", &tightness);
- ASSERT_EQUALS(prefix, "foo #");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- //
- // Regex bounds
- //
-
- TEST(IndexBoundsBuilderTest, SimpleNonPrefixRegex) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: /foo/}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': '', '': {}}"), true, false)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': /foo/, '': /foo/}"), true, true)));
- ASSERT(tightness == IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(IndexBoundsBuilderTest, NonSimpleRegexWithPipe) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: /^foo.*|bar/}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': '', '': {}}"), true, false)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': /^foo.*|bar/, '': /^foo.*|bar/}"), true, true)));
- ASSERT(tightness == IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(IndexBoundsBuilderTest, SimpleRegexSingleLineMode) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: /^foo/s}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 'foo', '': 'fop'}"), true, false)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': /^foo/s, '': /^foo/s}"), true, true)));
- ASSERT(tightness == IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, SimplePrefixRegex) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: /^foo/}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 'foo', '': 'fop'}"), true, false)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': /^foo/, '': /^foo/}"), true, true)));
- ASSERT(tightness == IndexBoundsBuilder::EXACT);
- }
-
- //
- // isSingleInterval
- //
-
- TEST(IndexBoundsBuilderTest, SingleFieldEqualityInterval) {
- // Equality on a single field is a single interval.
- OrderedIntervalList oil("a");
- IndexBounds bounds;
- oil.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
- bounds.fields.push_back(oil);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, SingleIntervalSingleFieldInterval) {
- // Single interval on a single field is a single interval.
- OrderedIntervalList oil("a");
- IndexBounds bounds;
- oil.intervals.push_back(Interval(fromjson("{ '':5, '':Infinity }"), true, true));
- bounds.fields.push_back(oil);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, MultipleIntervalsSingleFieldInterval) {
- // Multiple intervals on a single field is not a single interval.
- OrderedIntervalList oil("a");
- IndexBounds bounds;
- oil.intervals.push_back(Interval(fromjson( "{ '':4, '':5 }" ), true, true));
- oil.intervals.push_back(Interval(fromjson( "{ '':7, '':Infinity }" ), true, true));
- bounds.fields.push_back(oil);
- ASSERT(!testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, EqualityTwoFieldsInterval) {
- // Equality on two fields is a compound single interval.
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
- oil_b.intervals.push_back(Interval(BSON("" << 6 << "" << 6), true, true));
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, EqualityFirstFieldSingleIntervalSecondFieldInterval) {
- // Equality on first field and single interval on second field
- // is a compound single interval.
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
- oil_b.intervals.push_back(Interval(fromjson( "{ '':6, '':Infinity }" ), true, true));
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, SingleIntervalFirstAndSecondFieldsInterval) {
- // Single interval on first field and single interval on second field is
- // not a compound single interval.
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(fromjson( "{ '':-Infinity, '':5 }" ), true, true));
- oil_b.intervals.push_back(Interval(fromjson( "{ '':6, '':Infinity }" ), true, true));
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- ASSERT(!testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, MultipleIntervalsTwoFieldsInterval) {
- // Multiple intervals on two fields is not a compound single interval.
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(BSON( "" << 4 << "" << 4 ), true, true));
- oil_a.intervals.push_back(Interval(BSON( "" << 5 << "" << 5 ), true, true));
- oil_b.intervals.push_back(Interval(BSON( "" << 7 << "" << 7 ), true, true));
- oil_b.intervals.push_back(Interval(BSON( "" << 8 << "" << 8 ), true, true));
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- ASSERT(!testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, MissingSecondFieldInterval) {
- // when second field is not specified, still a compound single interval
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(BSON( "" << 5 << "" << 5 ), true, true));
- oil_b.intervals.push_back(IndexBoundsBuilder::allValues());
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, EqualityTwoFieldsIntervalThirdInterval) {
- // Equality on first two fields and single interval on third is a
- // compound single interval.
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- OrderedIntervalList oil_c("c");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(BSON( "" << 5 << "" << 5 ), true, true));
- oil_b.intervals.push_back(Interval(BSON( "" << 6 << "" << 6 ), true, true));
- oil_c.intervals.push_back(Interval(fromjson( "{ '':7, '':Infinity }" ), true, true));
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- bounds.fields.push_back(oil_c);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, EqualitySingleIntervalMissingInterval) {
- // Equality, then Single Interval, then missing is a compound single interval
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- OrderedIntervalList oil_c("c");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(BSON( "" << 5 << "" << 5 ), true, true));
- oil_b.intervals.push_back(Interval(fromjson( "{ '':7, '':Infinity }" ), true, true));
- oil_c.intervals.push_back(IndexBoundsBuilder::allValues());
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- bounds.fields.push_back(oil_c);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, EqualitySingleMissingMissingInterval) {
- // Equality, then single interval, then missing, then missing,
- // is a compound single interval
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- OrderedIntervalList oil_c("c");
- OrderedIntervalList oil_d("d");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(BSON( "" << 5 << "" << 5 ), true, true));
- oil_b.intervals.push_back(Interval(fromjson( "{ '':7, '':Infinity }" ), true, true));
- oil_c.intervals.push_back(IndexBoundsBuilder::allValues());
- oil_d.intervals.push_back(IndexBoundsBuilder::allValues());
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- bounds.fields.push_back(oil_c);
- bounds.fields.push_back(oil_d);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, EqualitySingleMissingMissingMixedInterval) {
- // Equality, then single interval, then missing, then missing, with mixed order
- // fields is a compound single interval.
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- OrderedIntervalList oil_c("c");
- OrderedIntervalList oil_d("d");
- IndexBounds bounds;
- Interval allValues = IndexBoundsBuilder::allValues();
- oil_a.intervals.push_back(Interval(BSON( "" << 5 << "" << 5 ), true, true));
- oil_b.intervals.push_back(Interval(fromjson( "{ '':7, '':Infinity }" ), true, true));
- oil_c.intervals.push_back(allValues);
- IndexBoundsBuilder::reverseInterval(&allValues);
- oil_d.intervals.push_back(allValues);
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- bounds.fields.push_back(oil_c);
- bounds.fields.push_back(oil_d);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, EqualitySingleMissingSingleInterval) {
- // Equality, then single interval, then missing, then single interval is not
- // a compound single interval.
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- OrderedIntervalList oil_c("c");
- OrderedIntervalList oil_d("d");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(BSON( "" << 5 << "" << 5 ), true, true));
- oil_b.intervals.push_back(Interval(fromjson( "{ '':7, '':Infinity }" ), true, true));
- oil_c.intervals.push_back(IndexBoundsBuilder::allValues());
- oil_d.intervals.push_back(Interval(fromjson( "{ '':1, '':Infinity }" ), true, true));
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- bounds.fields.push_back(oil_c);
- bounds.fields.push_back(oil_d);
- ASSERT(!testSingleInterval(bounds));
- }
-
- //
- // Complementing bounds for negations
- //
-
- /**
- * Get a BSONObj which represents the interval from
- * MinKey to 'end'.
- */
- BSONObj minKeyIntObj(int end) {
- BSONObjBuilder bob;
- bob.appendMinKey("");
- bob.appendNumber("", end);
- return bob.obj();
- }
-
- /**
- * Get a BSONObj which represents the interval from
- * 'start' to MaxKey.
- */
- BSONObj maxKeyIntObj(int start) {
- BSONObjBuilder bob;
- bob.appendNumber("", start);
- bob.appendMaxKey("");
- return bob.obj();
- }
-
- // Expected oil: [MinKey, 3), (3, MaxKey]
- TEST(IndexBoundsBuilderTest, SimpleNE) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = BSON("a" << BSON("$ne" << 3));
+/**
+ * 'constraints' is a vector of BSONObjs representing match expressions, where
+ * each filter is paired with a boolean. If the boolean is true, then the filter's
+ * index bounds should be intersected with the other constraints; if false, then
+ * they should be unioned. The resulting bounds are returned in the
+ * out-parameter 'oilOut'.
+ */
+void testTranslate(const vector<std::pair<BSONObj, bool>>& constraints,
+ OrderedIntervalList* oilOut,
+ IndexBoundsBuilder::BoundsTightness* tightnessOut) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+
+ for (vector<std::pair<BSONObj, bool>>::const_iterator it = constraints.begin();
+ it != constraints.end();
+ ++it) {
+ BSONObj obj = it->first;
+ bool isIntersect = it->second;
unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(minKeyIntObj(3), true, false)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(maxKeyIntObj(3), false, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, IntersectWithNE) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toIntersect;
- toIntersect.push_back(fromjson("{a: {$gt: 1}}"));
- toIntersect.push_back(fromjson("{a: {$ne: 2}}}"));
- toIntersect.push_back(fromjson("{a: {$lte: 6}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndIntersect(toIntersect, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(BSON("" << 1 << "" << 2), false, false)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(BSON("" << 2 << "" << 6), false, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, UnionizeWithNE) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toUnionize;
- toUnionize.push_back(fromjson("{a: {$ne: 3}}"));
- toUnionize.push_back(fromjson("{a: {$ne: 4}}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndUnion(toUnionize, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- IndexBoundsBuilder::allValues()));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+ if (constraints.begin() == it) {
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, oilOut, tightnessOut);
+ } else if (isIntersect) {
+ IndexBoundsBuilder::translateAndIntersect(
+ expr.get(), elt, testIndex, oilOut, tightnessOut);
+ } else {
+ IndexBoundsBuilder::translateAndUnion(expr.get(), elt, testIndex, oilOut, tightnessOut);
+ }
}
+}
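
A minimal usage sketch for the helper above (illustrative only; it assumes the surrounding test fixtures and headers are in scope, and it mirrors the pattern of the UnionTwoEmptyRanges test later in this file). The first constraint seeds the bounds via translate(); each subsequent pair either intersects (true) or unions (false) into the same OrderedIntervalList.

    vector<std::pair<BSONObj, bool>> constraints;
    constraints.push_back(std::make_pair(fromjson("{a: {$gt: 1}}"), true));  // seeds (1, Infinity]
    constraints.push_back(std::make_pair(fromjson("{a: {$lt: 5}}"), true));  // intersect -> (1, 5)
    constraints.push_back(std::make_pair(fromjson("{a: 9}"), false));        // union -> (1, 5), [9, 9]
    OrderedIntervalList oil;
    IndexBoundsBuilder::BoundsTightness tightness;
    testTranslate(constraints, &oil, &tightness);
    // Expected: oil.name == "a" with two intervals, (1, 5) followed by [9, 9].
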
- // Test $type bounds for Code BSON type.
- TEST(IndexBoundsBuilderTest, CodeTypeBounds) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$type: 13}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
-
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
-
- // Build the expected interval.
- BSONObjBuilder bob;
- bob.appendCode("", "");
- bob.appendCodeWScope("", "", BSONObj());
- BSONObj expectedInterval = bob.obj();
-
- // Check the output of translate().
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(expectedInterval, true, true)));
- ASSERT(tightness == IndexBoundsBuilder::INEXACT_FETCH);
- }
+/**
+ * Run isSingleInterval() on 'bounds' and return the result to the calling test.
+ */
+bool testSingleInterval(IndexBounds bounds) {
+ BSONObj startKey;
+ bool startKeyIn;
+ BSONObj endKey;
+ bool endKeyIn;
+ return IndexBoundsBuilder::isSingleInterval(bounds, &startKey, &startKeyIn, &endKey, &endKeyIn);
+}
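
For reference, a sketch of how a caller might exercise this wrapper directly (illustrative only; the expected out-parameter values are inferred from the single-field point-interval case tested below):

    OrderedIntervalList oil("a");
    oil.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
    IndexBounds bounds;
    bounds.fields.push_back(oil);
    BSONObj startKey, endKey;
    bool startKeyInclusive, endKeyInclusive;
    bool isSingle = IndexBoundsBuilder::isSingleInterval(
        bounds, &startKey, &startKeyInclusive, &endKey, &endKeyInclusive);
    // Expected: isSingle is true, with startKey and endKey both {"": 5} and both endpoints inclusive.
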
+
+//
+// $elemMatch value
+// Example: {a: {$elemMatch: {$gt: 2}}}
+//
+
+TEST(IndexBoundsBuilderTest, TranslateElemMatchValue) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ // Bounds generated should be the same as the embedded expression
+ // except for the tightness.
+ BSONObj obj = fromjson("{a: {$elemMatch: {$gt: 2}}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 2, '': Infinity}"), false, true)));
+ ASSERT(tightness == IndexBoundsBuilder::INEXACT_FETCH);
+}
+
+//
+// Comparison operators ($lte, $lt, $gt, $gte, $eq)
+//
+
+TEST(IndexBoundsBuilderTest, TranslateLteNumber) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$lte: 1}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': -Infinity, '': 1}"), true, true)));
+ ASSERT(tightness == IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLteNumberMin) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = BSON("a" << BSON("$lte" << numberMin));
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(BSON("" << negativeInfinity << "" << numberMin), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLteNegativeInfinity) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$lte: -Infinity}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': -Infinity, '': -Infinity}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLtNumber) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$lt: 1}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': -Infinity, '': 1}"), true, false)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLtNumberMin) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = BSON("a" << BSON("$lt" << numberMin));
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(BSON("" << negativeInfinity << "" << numberMin), true, false)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLtNegativeInfinity) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$lt: -Infinity}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 0U);
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLtDate) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = BSON("a" << LT << Date_t::fromMillisSinceEpoch(5000));
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(fromjson("{'': true, '': new Date(5000)}"), false, false)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGtNumber) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$gt: 1}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 1, '': Infinity}"), false, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGtNumberMax) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = BSON("a" << BSON("$gt" << numberMax));
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(BSON("" << numberMax << "" << positiveInfinity), false, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGtPositiveInfinity) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$gt: Infinity}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 0U);
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGteNumber) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$gte: 1}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 1, '': Infinity}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGteNumberMax) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = BSON("a" << BSON("$gte" << numberMax));
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(BSON("" << numberMax << "" << positiveInfinity), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGtePositiveInfinity) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$gte: Infinity}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': Infinity, '': Infinity}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGtString) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$gt: 'abc'}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 'abc', '': {}}"), false, false)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateEqualNan) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: NaN}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': NaN, '': NaN}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLtNan) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$lt: NaN}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 0U);
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLteNan) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$lte: NaN}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': NaN, '': NaN}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGtNan) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$gt: NaN}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 0U);
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGteNan) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$gte: NaN}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': NaN, '': NaN}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateEqual) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = BSON("a" << 4);
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 4, '': 4}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateArrayEqualBasic) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: [1, 2, 3]}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 1, '': 1}"), true, true)));
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(fromjson("{'': [1, 2, 3], '': [1, 2, 3]}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateIn) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$in: [8, 44, -1, -3]}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 4U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': -3, '': -3}"), true, true)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(fromjson("{'': -1, '': -1}"), true, true)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[2].compare(Interval(fromjson("{'': 8, '': 8}"), true, true)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[3].compare(Interval(fromjson("{'': 44, '': 44}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateInArray) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$in: [[1], 2]}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 3U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 1, '': 1}"), true, true)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(fromjson("{'': 2, '': 2}"), true, true)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[2].compare(Interval(fromjson("{'': [1], '': [1]}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLteBinData) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson(
+ "{a: {$lte: {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA',"
+ "$type: '00'}}}");
+ std::unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQ(oil.name, "a");
+ ASSERT_EQ(oil.intervals.size(), 1U);
+ ASSERT_EQ(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(fromjson(
+ "{'': {$binary: '', $type: '00'},"
+ "'': {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}"),
+ true,
+ true)));
+ ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLtBinData) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson(
+ "{a: {$lt: {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA',"
+ "$type: '00'}}}");
+ std::unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQ(oil.name, "a");
+ ASSERT_EQ(oil.intervals.size(), 1U);
+ ASSERT_EQ(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(fromjson(
+ "{'': {$binary: '', $type: '00'},"
+ "'': {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}"),
+ true,
+ false)));
+ ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGtBinData) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson(
+ "{a: {$gt: {$binary: '////////////////////////////',"
+ "$type: '00'}}}");
+ std::unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQ(oil.name, "a");
+ ASSERT_EQ(oil.intervals.size(), 1U);
+ ASSERT_EQ(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(fromjson(
+ "{'': {$binary: '////////////////////////////', $type: '00'},"
+ "'': ObjectId('000000000000000000000000')}"),
+ false,
+ false)));
+ ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGteBinData) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson(
+ "{a: {$gte: {$binary: '////////////////////////////',"
+ "$type: '00'}}}");
+ std::unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQ(oil.name, "a");
+ ASSERT_EQ(oil.intervals.size(), 1U);
+ ASSERT_EQ(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(fromjson(
+ "{'': {$binary: '////////////////////////////', $type: '00'},"
+ "'': ObjectId('000000000000000000000000')}"),
+ true,
+ false)));
+ ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
+}
+
+//
+// $exists tests
+//
+
+TEST(IndexBoundsBuilderTest, ExistsTrue) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$exists: true}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(IndexBoundsBuilder::allValues()));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH);
+}
+
+TEST(IndexBoundsBuilderTest, ExistsFalse) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$exists: false}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': null, '': null}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH);
+}
+
+TEST(IndexBoundsBuilderTest, ExistsTrueSparse) {
+ IndexEntry testIndex = IndexEntry(BSONObj(),
+ false, // multikey
+ true, // sparse
+ false, // unique
+ "exists_true_sparse",
+ nullptr, // filterExpr
+ BSONObj());
+ BSONObj obj = fromjson("{a: {$exists: true}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(IndexBoundsBuilder::allValues()));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+//
+// Union tests
+//
+
+TEST(IndexBoundsBuilderTest, UnionTwoLt) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toUnion;
+ toUnion.push_back(fromjson("{a: {$lt: 1}}"));
+ toUnion.push_back(fromjson("{a: {$lt: 5}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndUnion(toUnion, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': -Infinity, '': 5}"), true, false)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, UnionDupEq) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toUnion;
+ toUnion.push_back(fromjson("{a: 1}"));
+ toUnion.push_back(fromjson("{a: 5}"));
+ toUnion.push_back(fromjson("{a: 1}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndUnion(toUnion, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 1, '': 1}"), true, true)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(fromjson("{'': 5, '': 5}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, UnionGtLt) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toUnion;
+ toUnion.push_back(fromjson("{a: {$gt: 1}}"));
+ toUnion.push_back(fromjson("{a: {$lt: 3}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndUnion(toUnion, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': -Infinity, '': Infinity}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, UnionTwoEmptyRanges) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<std::pair<BSONObj, bool>> constraints;
+ constraints.push_back(std::make_pair(fromjson("{a: {$gt: 1}}"), true));
+ constraints.push_back(std::make_pair(fromjson("{a: {$lte: 0}}"), true));
+ constraints.push_back(std::make_pair(fromjson("{a: {$in:[]}}"), false));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslate(constraints, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 0U);
+}
+
+//
+// Intersection tests
+//
+
+TEST(IndexBoundsBuilderTest, IntersectTwoLt) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toIntersect;
+ toIntersect.push_back(fromjson("{a: {$lt: 1}}"));
+ toIntersect.push_back(fromjson("{a: {$lt: 5}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndIntersect(toIntersect, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': -Infinity, '': 1}"), true, false)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, IntersectEqGte) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toIntersect;
+ toIntersect.push_back(fromjson("{a: 1}}"));
+ toIntersect.push_back(fromjson("{a: {$gte: 1}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndIntersect(toIntersect, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 1, '': 1}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, IntersectGtLte) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toIntersect;
+ toIntersect.push_back(fromjson("{a: {$gt: 0}}"));
+ toIntersect.push_back(fromjson("{a: {$lte: 10}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndIntersect(toIntersect, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 0, '': 10}"), false, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, IntersectGtIn) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toIntersect;
+ toIntersect.push_back(fromjson("{a: {$gt: 4}}"));
+ toIntersect.push_back(fromjson("{a: {$in: [1,2,3,4,5,6]}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndIntersect(toIntersect, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 5, '': 5}"), true, true)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(fromjson("{'': 6, '': 6}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, IntersectionIsPointInterval) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toIntersect;
+ toIntersect.push_back(fromjson("{a: {$gte: 1}}"));
+ toIntersect.push_back(fromjson("{a: {$lte: 1}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndIntersect(toIntersect, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 1, '': 1}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, IntersectFullyContained) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toIntersect;
+ toIntersect.push_back(fromjson("{a: {$gt: 5}}"));
+ toIntersect.push_back(fromjson("{a: {$lt: 15}}"));
+ toIntersect.push_back(fromjson("{a: {$gte: 6}}"));
+ toIntersect.push_back(fromjson("{a: {$lte: 13}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndIntersect(toIntersect, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 6, '': 13}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, EmptyIntersection) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toIntersect;
+ toIntersect.push_back(fromjson("{a: 1}}"));
+ toIntersect.push_back(fromjson("{a: {$gte: 2}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndIntersect(toIntersect, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 0U);
+}
+
+//
+// $mod
+//
+
+TEST(IndexBoundsBuilderTest, TranslateMod) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$mod: [2, 0]}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(BSON("" << numberMin << "" << numberMax), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+//
+// Test simpleRegex
+//
+
+TEST(SimpleRegexTest, RootedLine) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^foo", "", &tightness);
+ ASSERT_EQUALS(prefix, "foo");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(SimpleRegexTest, RootedString) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("\\Afoo", "", &tightness);
+ ASSERT_EQUALS(prefix, "foo");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(SimpleRegexTest, RootedOptionalFirstChar) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^f?oo", "", &tightness);
+ ASSERT_EQUALS(prefix, "");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(SimpleRegexTest, RootedOptionalSecondChar) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^fz?oo", "", &tightness);
+ ASSERT_EQUALS(prefix, "f");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(SimpleRegexTest, RootedMultiline) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^foo", "m", &tightness);
+ ASSERT_EQUALS(prefix, "");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(SimpleRegexTest, RootedStringMultiline) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("\\Afoo", "m", &tightness);
+ ASSERT_EQUALS(prefix, "foo");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(SimpleRegexTest, RootedCaseInsensitiveMulti) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("\\Afoo", "mi", &tightness);
+ ASSERT_EQUALS(prefix, "");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(SimpleRegexTest, RootedComplex) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix =
+ IndexBoundsBuilder::simpleRegex("\\Af \t\vo\n\ro \\ \\# #comment", "mx", &tightness);
+ ASSERT_EQUALS(prefix, "foo #");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(SimpleRegexTest, RootedLiteral) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^\\Qasdf\\E", "", &tightness);
+ ASSERT_EQUALS(prefix, "asdf");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(SimpleRegexTest, RootedLiteralWithExtra) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^\\Qasdf\\E.*", "", &tightness);
+ ASSERT_EQUALS(prefix, "asdf");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(SimpleRegexTest, RootedLiteralNoEnd) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^\\Qasdf", "", &tightness);
+ ASSERT_EQUALS(prefix, "asdf");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(SimpleRegexTest, RootedLiteralBackslash) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^\\Qasdf\\\\E", "", &tightness);
+ ASSERT_EQUALS(prefix, "asdf\\");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(SimpleRegexTest, RootedLiteralDotStar) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^\\Qas.*df\\E", "", &tightness);
+ ASSERT_EQUALS(prefix, "as.*df");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(SimpleRegexTest, RootedLiteralNestedEscape) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^\\Qas\\Q[df\\E", "", &tightness);
+ ASSERT_EQUALS(prefix, "as\\Q[df");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(SimpleRegexTest, RootedLiteralNestedEscapeEnd) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^\\Qas\\E\\\\E\\Q$df\\E", "", &tightness);
+ ASSERT_EQUALS(prefix, "as\\E$df");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+// A regular expression with the "|" character is not considered simple. See SERVER-15235.
+TEST(SimpleRegexTest, PipeCharacterDisallowed) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^(a(a|$)|b", "", &tightness);
+ ASSERT_EQUALS(prefix, "");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(SimpleRegexTest, PipeCharacterDisallowed2) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^(a(a|$)|^b", "", &tightness);
+ ASSERT_EQUALS(prefix, "");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+// SERVER-9035
+TEST(SimpleRegexTest, RootedSingleLineMode) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^foo", "s", &tightness);
+ ASSERT_EQUALS(prefix, "foo");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+// SERVER-9035
+TEST(SimpleRegexTest, NonRootedSingleLineMode) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("foo", "s", &tightness);
+ ASSERT_EQUALS(prefix, "");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+// SERVER-9035
+TEST(SimpleRegexTest, RootedComplexSingleLineMode) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix =
+ IndexBoundsBuilder::simpleRegex("\\Af \t\vo\n\ro \\ \\# #comment", "msx", &tightness);
+ ASSERT_EQUALS(prefix, "foo #");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+//
+// Regex bounds
+//
+
+TEST(IndexBoundsBuilderTest, SimpleNonPrefixRegex) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: /foo/}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': '', '': {}}"), true, false)));
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(fromjson("{'': /foo/, '': /foo/}"), true, true)));
+ ASSERT(tightness == IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(IndexBoundsBuilderTest, NonSimpleRegexWithPipe) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: /^foo.*|bar/}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': '', '': {}}"), true, false)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(
+ Interval(fromjson("{'': /^foo.*|bar/, '': /^foo.*|bar/}"), true, true)));
+ ASSERT(tightness == IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(IndexBoundsBuilderTest, SimpleRegexSingleLineMode) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: /^foo/s}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 'foo', '': 'fop'}"), true, false)));
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(fromjson("{'': /^foo/s, '': /^foo/s}"), true, true)));
+ ASSERT(tightness == IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, SimplePrefixRegex) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: /^foo/}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 'foo', '': 'fop'}"), true, false)));
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(fromjson("{'': /^foo/, '': /^foo/}"), true, true)));
+ ASSERT(tightness == IndexBoundsBuilder::EXACT);
+}
+
+//
+// isSingleInterval
+//
+
+TEST(IndexBoundsBuilderTest, SingleFieldEqualityInterval) {
+ // Equality on a single field is a single interval.
+ OrderedIntervalList oil("a");
+ IndexBounds bounds;
+ oil.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ bounds.fields.push_back(oil);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, SingleIntervalSingleFieldInterval) {
+ // Single interval on a single field is a single interval.
+ OrderedIntervalList oil("a");
+ IndexBounds bounds;
+ oil.intervals.push_back(Interval(fromjson("{ '':5, '':Infinity }"), true, true));
+ bounds.fields.push_back(oil);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, MultipleIntervalsSingleFieldInterval) {
+ // Multiple intervals on a single field is not a single interval.
+ OrderedIntervalList oil("a");
+ IndexBounds bounds;
+ oil.intervals.push_back(Interval(fromjson("{ '':4, '':5 }"), true, true));
+ oil.intervals.push_back(Interval(fromjson("{ '':7, '':Infinity }"), true, true));
+ bounds.fields.push_back(oil);
+ ASSERT(!testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, EqualityTwoFieldsInterval) {
+ // Equality on two fields is a compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(Interval(BSON("" << 6 << "" << 6), true, true));
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, EqualityFirstFieldSingleIntervalSecondFieldInterval) {
+ // Equality on first field and single interval on second field
+ // is a compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(Interval(fromjson("{ '':6, '':Infinity }"), true, true));
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, SingleIntervalFirstAndSecondFieldsInterval) {
+ // Single interval on first field and single interval on second field is
+ // not a compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(fromjson("{ '':-Infinity, '':5 }"), true, true));
+ oil_b.intervals.push_back(Interval(fromjson("{ '':6, '':Infinity }"), true, true));
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ ASSERT(!testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, MultipleIntervalsTwoFieldsInterval) {
+ // Multiple intervals on two fields is not a compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(BSON("" << 4 << "" << 4), true, true));
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(Interval(BSON("" << 7 << "" << 7), true, true));
+ oil_b.intervals.push_back(Interval(BSON("" << 8 << "" << 8), true, true));
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ ASSERT(!testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, MissingSecondFieldInterval) {
+ // When the second field is not specified, the bounds still form a compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(IndexBoundsBuilder::allValues());
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, EqualityTwoFieldsIntervalThirdInterval) {
+ // Equality on first two fields and single interval on third is a
+ // compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ OrderedIntervalList oil_c("c");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(Interval(BSON("" << 6 << "" << 6), true, true));
+ oil_c.intervals.push_back(Interval(fromjson("{ '':7, '':Infinity }"), true, true));
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ bounds.fields.push_back(oil_c);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, EqualitySingleIntervalMissingInterval) {
+ // Equality, then single interval, then missing is a compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ OrderedIntervalList oil_c("c");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(Interval(fromjson("{ '':7, '':Infinity }"), true, true));
+ oil_c.intervals.push_back(IndexBoundsBuilder::allValues());
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ bounds.fields.push_back(oil_c);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, EqualitySingleMissingMissingInterval) {
+ // Equality, then single interval, then missing, then missing,
+ // is a compound single interval
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ OrderedIntervalList oil_c("c");
+ OrderedIntervalList oil_d("d");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(Interval(fromjson("{ '':7, '':Infinity }"), true, true));
+ oil_c.intervals.push_back(IndexBoundsBuilder::allValues());
+ oil_d.intervals.push_back(IndexBoundsBuilder::allValues());
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ bounds.fields.push_back(oil_c);
+ bounds.fields.push_back(oil_d);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, EqualitySingleMissingMissingMixedInterval) {
+ // Equality, then single interval, then missing, then missing, with mixed order
+ // fields is a compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ OrderedIntervalList oil_c("c");
+ OrderedIntervalList oil_d("d");
+ IndexBounds bounds;
+ Interval allValues = IndexBoundsBuilder::allValues();
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(Interval(fromjson("{ '':7, '':Infinity }"), true, true));
+ oil_c.intervals.push_back(allValues);
+ IndexBoundsBuilder::reverseInterval(&allValues);
+ oil_d.intervals.push_back(allValues);
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ bounds.fields.push_back(oil_c);
+ bounds.fields.push_back(oil_d);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, EqualitySingleMissingSingleInterval) {
+ // Equality, then single interval, then missing, then single interval is not
+ // a compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ OrderedIntervalList oil_c("c");
+ OrderedIntervalList oil_d("d");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(Interval(fromjson("{ '':7, '':Infinity }"), true, true));
+ oil_c.intervals.push_back(IndexBoundsBuilder::allValues());
+ oil_d.intervals.push_back(Interval(fromjson("{ '':1, '':Infinity }"), true, true));
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ bounds.fields.push_back(oil_c);
+ bounds.fields.push_back(oil_d);
+ ASSERT(!testSingleInterval(bounds));
+}
+
+//
+// Complementing bounds for negations
+//
- // Test $type bounds for Code With Scoped BSON type.
- TEST(IndexBoundsBuilderTest, CodeWithScopeTypeBounds) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$type: 15}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
+/**
+ * Get a BSONObj which represents the interval from
+ * MinKey to 'end'.
+ */
+BSONObj minKeyIntObj(int end) {
+ BSONObjBuilder bob;
+ bob.appendMinKey("");
+ bob.appendNumber("", end);
+ return bob.obj();
+}
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
-
- // Build the expected interval.
- BSONObjBuilder bob;
- bob.appendCodeWScope("", "", BSONObj());
- bob.appendMaxKey("");
- BSONObj expectedInterval = bob.obj();
-
- // Check the output of translate().
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(expectedInterval, true, true)));
- ASSERT(tightness == IndexBoundsBuilder::INEXACT_FETCH);
- }
+/**
+ * Get a BSONObj which represents the interval from
+ * 'start' to MaxKey.
+ */
+BSONObj maxKeyIntObj(int start) {
+ BSONObjBuilder bob;
+ bob.appendNumber("", start);
+ bob.appendMaxKey("");
+ return bob.obj();
+}
+
+// Expected oil: [MinKey, 3), (3, MaxKey]
+TEST(IndexBoundsBuilderTest, SimpleNE) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = BSON("a" << BSON("$ne" << 3));
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(minKeyIntObj(3), true, false)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(maxKeyIntObj(3), false, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
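+// Expected oil: (1, 2), (2, 6]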
+TEST(IndexBoundsBuilderTest, IntersectWithNE) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toIntersect;
+ toIntersect.push_back(fromjson("{a: {$gt: 1}}"));
+ toIntersect.push_back(fromjson("{a: {$ne: 2}}}"));
+ toIntersect.push_back(fromjson("{a: {$lte: 6}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndIntersect(toIntersect, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(BSON("" << 1 << "" << 2), false, false)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(BSON("" << 2 << "" << 6), false, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
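+// Expected oil: [MinKey, MaxKey]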
+TEST(IndexBoundsBuilderTest, UnionizeWithNE) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toUnionize;
+ toUnionize.push_back(fromjson("{a: {$ne: 3}}"));
+ toUnionize.push_back(fromjson("{a: {$ne: 4}}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndUnion(toUnionize, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(IndexBoundsBuilder::allValues()));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+// Test $type bounds for the Code BSON type.
+TEST(IndexBoundsBuilderTest, CodeTypeBounds) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$type: 13}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+
+ // Build the expected interval.
+ BSONObjBuilder bob;
+ bob.appendCode("", "");
+ bob.appendCodeWScope("", "", BSONObj());
+ BSONObj expectedInterval = bob.obj();
+
+ // Check the output of translate().
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(expectedInterval, true, true)));
+ ASSERT(tightness == IndexBoundsBuilder::INEXACT_FETCH);
+}
+
+// Test $type bounds for the Code With Scope BSON type.
+TEST(IndexBoundsBuilderTest, CodeWithScopeTypeBounds) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$type: 15}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+
+ // Build the expected interval.
+ BSONObjBuilder bob;
+ bob.appendCodeWScope("", "", BSONObj());
+ bob.appendMaxKey("");
+ BSONObj expectedInterval = bob.obj();
+
+ // Check the output of translate().
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(expectedInterval, true, true)));
+ ASSERT(tightness == IndexBoundsBuilder::INEXACT_FETCH);
+}
} // namespace
diff --git a/src/mongo/db/query/index_bounds_test.cpp b/src/mongo/db/query/index_bounds_test.cpp
index d5cc470b3af..d1613ca0c94 100644
--- a/src/mongo/db/query/index_bounds_test.cpp
+++ b/src/mongo/db/query/index_bounds_test.cpp
@@ -42,659 +42,661 @@ using namespace mongo;
namespace {
- //
- // Validation
- //
-
- TEST(IndexBoundsTest, ValidBasic) {
- OrderedIntervalList list("foo");
- list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
- IndexBounds bounds;
- bounds.fields.push_back(list);
-
- // Go forwards with data indexed forwards.
- ASSERT(bounds.isValidFor(BSON("foo" << 1), 1));
- // Go backwards with data indexed backwards.
- ASSERT(bounds.isValidFor(BSON("foo" << -1), -1));
- // Bounds are not oriented along the direction of traversal.
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), -1));
-
- // Bounds must match the index exactly.
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1 << "bar" << 1), 1));
- ASSERT_FALSE(bounds.isValidFor(BSON("bar" << 1), 1));
- }
-
- TEST(IndexBoundsTest, ValidTwoFields) {
- OrderedIntervalList list("foo");
- list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
- IndexBounds bounds;
- bounds.fields.push_back(list);
-
- // Let's add another field
- OrderedIntervalList otherList("bar");
- otherList.intervals.push_back(Interval(BSON("" << 0 << "" << 3), true, true));
- bounds.fields.push_back(otherList);
-
- // These are OK.
- ASSERT(bounds.isValidFor(BSON("foo" << 1 << "bar" << 1), 1));
- ASSERT(bounds.isValidFor(BSON("foo" << -1 << "bar" << -1), -1));
-
- // Direction(s) don't match.
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << -1 << "bar" << 1), -1));
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1 << "bar" << -1), -1));
-
- // Index doesn't match.
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), 1));
- ASSERT_FALSE(bounds.isValidFor(BSON("bar" << 1 << "foo" << 1), 1));
- }
-
- TEST(IndexBoundsTest, ValidIntervalsInOrder) {
- OrderedIntervalList list("foo");
- // Whether navigated forward or backward, there's no valid ordering for these two intervals.
- list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
- list.intervals.push_back(Interval(BSON("" << 0 << "" << 5), true, true));
- IndexBounds bounds;
- bounds.fields.push_back(list);
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), 1));
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << -1), 1));
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), -1));
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << -1), -1));
- }
-
- TEST(IndexBoundsTest, ValidNoOverlappingIntervals) {
- OrderedIntervalList list("foo");
- // overlapping intervals not allowed.
- list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
- list.intervals.push_back(Interval(BSON("" << 19 << "" << 25), true, true));
- IndexBounds bounds;
- bounds.fields.push_back(list);
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), 1));
- }
-
- TEST(IndexBoundsTest, ValidOverlapOnlyWhenBothOpen) {
- OrderedIntervalList list("foo");
- list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, false));
- list.intervals.push_back(Interval(BSON("" << 20 << "" << 25), false, true));
- IndexBounds bounds;
- bounds.fields.push_back(list);
- ASSERT(bounds.isValidFor(BSON("foo" << 1), 1));
- }
-
- //
- // Tests for OrderedIntervalList::complement()
- //
-
- /**
- * Get a BSONObj which represents the interval from
- * MinKey to 'end'.
- */
- BSONObj minKeyIntObj(int end) {
- BSONObjBuilder bob;
- bob.appendMinKey("");
- bob.appendNumber("", end);
- return bob.obj();
- }
-
- /**
- * Get a BSONObj which represents the interval from
- * 'start' to MaxKey.
- */
- BSONObj maxKeyIntObj(int start) {
- BSONObjBuilder bob;
- bob.appendNumber("", start);
- bob.appendMaxKey("");
- return bob.obj();
- }
-
- /**
- * Get a BSONObj which represents the interval
- * [MinKey, MaxKey].
- */
- BSONObj allValues() {
- BSONObjBuilder bob;
- bob.appendMinKey("");
- bob.appendMaxKey("");
- return bob.obj();
- }
-
- /**
- * Test that if we complement the OIL twice,
- * we get back the original OIL.
- */
- void testDoubleComplement(const OrderedIntervalList* oil) {
- OrderedIntervalList clone;
- for (size_t i = 0; i < oil->intervals.size(); ++i) {
- clone.intervals.push_back(oil->intervals[i]);
- }
-
- clone.complement();
- clone.complement();
-
- ASSERT_EQUALS(oil->intervals.size(), clone.intervals.size());
- for (size_t i = 0; i < oil->intervals.size(); ++i) {
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
- oil->intervals[i].compare(clone.intervals[i]));
- }
- }
-
- // Complement of empty is [MinKey, MaxKey]
- TEST(IndexBoundsTest, ComplementEmptyOil) {
- OrderedIntervalList oil;
- testDoubleComplement(&oil);
- oil.complement();
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(allValues(), true, true)));
- }
-
- // Complement of [MinKey, MaxKey] is empty
- TEST(IndexBoundsTest, ComplementAllValues) {
- OrderedIntervalList oil;
- oil.intervals.push_back(Interval(allValues(), true, true));
- testDoubleComplement(&oil);
- oil.complement();
- ASSERT_EQUALS(oil.intervals.size(), 0U);
- }
-
- // Complement of [MinKey, 3), [5, MaxKey) is
- // [3, 5), [MaxKey, MaxKey].
- TEST(IndexBoundsTest, ComplementRanges) {
- OrderedIntervalList oil;
- oil.intervals.push_back(Interval(minKeyIntObj(3), true, false));
- oil.intervals.push_back(Interval(maxKeyIntObj(5), true, false));
- testDoubleComplement(&oil);
- oil.complement();
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(BSON("" << 3 << "" << 5), true, false)));
-
- // Make the interval [MaxKey, MaxKey].
- BSONObjBuilder bob;
- bob.appendMaxKey("");
- bob.appendMaxKey("");
- BSONObj maxKeyInt = bob.obj();
-
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(maxKeyInt, true, true)));
- }
-
- // Complement of (MinKey, 3), (3, MaxKey) is
- // [MinKey, MinKey], [3, 3], [MaxKey, MaxKey].
- TEST(IndexBoundsTest, ComplementRanges2) {
- OrderedIntervalList oil;
- oil.intervals.push_back(Interval(minKeyIntObj(3), false, false));
- oil.intervals.push_back(Interval(maxKeyIntObj(3), false, false));
- testDoubleComplement(&oil);
- oil.complement();
- ASSERT_EQUALS(oil.intervals.size(), 3U);
-
- // First interval is [MinKey, MinKey]
- BSONObjBuilder minBob;
- minBob.appendMinKey("");
- minBob.appendMinKey("");
- BSONObj minObj = minBob.obj();
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(minObj, true, true)));
-
- // Second is [3, 3]
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(BSON("" << 3 << "" << 3), true, true)));
-
- // Third is [MaxKey, MaxKey]
- BSONObjBuilder maxBob;
- maxBob.appendMaxKey("");
- maxBob.appendMaxKey("");
- BSONObj maxObj = maxBob.obj();
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[2].compare(
- Interval(maxObj, true, true)));
- }
-
- //
- // Iteration over
- //
-
- TEST(IndexBoundsCheckerTest, StartKey) {
- OrderedIntervalList fooList("foo");
- fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
-
- OrderedIntervalList barList("bar");
- barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, false));
-
- IndexBounds bounds;
- bounds.fields.push_back(fooList);
- bounds.fields.push_back(barList);
- IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
-
- IndexSeekPoint seekPoint;
- it.getStartSeekPoint(&seekPoint);
-
- ASSERT_EQUALS(seekPoint.keySuffix[0]->numberInt(), 7);
- ASSERT_EQUALS(seekPoint.suffixInclusive[0], true);
- ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 0);
- ASSERT_EQUALS(seekPoint.suffixInclusive[1], false);
- }
-
- TEST(IndexBoundsCheckerTest, CheckEnd) {
- OrderedIntervalList fooList("foo");
- fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
- fooList.intervals.push_back(Interval(BSON("" << 21 << "" << 30), true, false));
-
- OrderedIntervalList barList("bar");
- barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, false));
-
- IndexBounds bounds;
- bounds.fields.push_back(fooList);
- bounds.fields.push_back(barList);
- IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
-
- IndexSeekPoint seekPoint;
- IndexBoundsChecker::KeyState state;
-
- // Start at something in our range.
- state = it.checkKey(BSON("" << 7 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // Second field moves past the end, but we're not done, since there's still an interval in
- // the previous field that the key hasn't advanced to.
- state = it.checkKey(BSON("" << 20 << "" << 5), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT(seekPoint.prefixExclusive);
-
- // The next index key is in the second interval for 'foo' and there is a valid interval for
- // 'bar'.
- state = it.checkKey(BSON("" << 22 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // The next index key is very close to the end of the open interval for foo, and it's past
- // the interval for 'bar'. Since the interval for foo is open, we are asked to move
- // forward, since we possibly could.
- state = it.checkKey(BSON("" << 29.9 << "" << 5), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT(seekPoint.prefixExclusive);
- }
-
- TEST(IndexBoundsCheckerTest, MoveIntervalForwardToNextInterval) {
- OrderedIntervalList fooList("foo");
- fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
- fooList.intervals.push_back(Interval(BSON("" << 21 << "" << 30), true, false));
-
- OrderedIntervalList barList("bar");
- barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, false));
-
- IndexBounds bounds;
- bounds.fields.push_back(fooList);
- bounds.fields.push_back(barList);
- IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
-
- IndexSeekPoint seekPoint;
- IndexBoundsChecker::KeyState state;
-
- // Start at something in our range.
- state = it.checkKey(BSON("" << 7 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // "foo" moves between two intervals.
- state = it.checkKey(BSON("" << 20.5 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 0);
- // Should be told to move exactly to the next interval's beginning.
- ASSERT_EQUALS(seekPoint.prefixExclusive, false);
- ASSERT_EQUALS(seekPoint.keySuffix[0]->numberInt(), 21);
- ASSERT_EQUALS(seekPoint.suffixInclusive[0], true);
- ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 0);
- ASSERT_EQUALS(seekPoint.suffixInclusive[1], false);
- }
+//
+// Validation
+//
+
+TEST(IndexBoundsTest, ValidBasic) {
+ OrderedIntervalList list("foo");
+ list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+ IndexBounds bounds;
+ bounds.fields.push_back(list);
+
+ // Go forwards with data indexed forwards.
+ ASSERT(bounds.isValidFor(BSON("foo" << 1), 1));
+ // Go backwards with data indexed backwards.
+ ASSERT(bounds.isValidFor(BSON("foo" << -1), -1));
+ // Bounds are not oriented along the direction of traversal.
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), -1));
+
+ // Bounds must match the index exactly.
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1 << "bar" << 1), 1));
+ ASSERT_FALSE(bounds.isValidFor(BSON("bar" << 1), 1));
+}
+
+TEST(IndexBoundsTest, ValidTwoFields) {
+ OrderedIntervalList list("foo");
+ list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+ IndexBounds bounds;
+ bounds.fields.push_back(list);
+
+ // Let's add another field
+ OrderedIntervalList otherList("bar");
+ otherList.intervals.push_back(Interval(BSON("" << 0 << "" << 3), true, true));
+ bounds.fields.push_back(otherList);
+
+ // These are OK.
+ ASSERT(bounds.isValidFor(BSON("foo" << 1 << "bar" << 1), 1));
+ ASSERT(bounds.isValidFor(BSON("foo" << -1 << "bar" << -1), -1));
+
+ // Direction(s) don't match.
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << -1 << "bar" << 1), -1));
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1 << "bar" << -1), -1));
+
+ // Index doesn't match.
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), 1));
+ ASSERT_FALSE(bounds.isValidFor(BSON("bar" << 1 << "foo" << 1), 1));
+}
+
+TEST(IndexBoundsTest, ValidIntervalsInOrder) {
+ OrderedIntervalList list("foo");
+ // Whether navigated forward or backward, there's no valid ordering for these two intervals.
+ list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+ list.intervals.push_back(Interval(BSON("" << 0 << "" << 5), true, true));
+ IndexBounds bounds;
+ bounds.fields.push_back(list);
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), 1));
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << -1), 1));
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), -1));
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << -1), -1));
+}
+
+TEST(IndexBoundsTest, ValidNoOverlappingIntervals) {
+ OrderedIntervalList list("foo");
+    // Overlapping intervals are not allowed.
+ list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+ list.intervals.push_back(Interval(BSON("" << 19 << "" << 25), true, true));
+ IndexBounds bounds;
+ bounds.fields.push_back(list);
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), 1));
+}
+
+TEST(IndexBoundsTest, ValidOverlapOnlyWhenBothOpen) {
+ OrderedIntervalList list("foo");
+ list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, false));
+ list.intervals.push_back(Interval(BSON("" << 20 << "" << 25), false, true));
+ IndexBounds bounds;
+ bounds.fields.push_back(list);
+ ASSERT(bounds.isValidFor(BSON("foo" << 1), 1));
+}
+
+//
+// Tests for OrderedIntervalList::complement()
+//
- TEST(IndexBoundsCheckerTest, MoveIntervalForwardManyIntervals) {
- OrderedIntervalList fooList("foo");
- fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
- fooList.intervals.push_back(Interval(BSON("" << 21 << "" << 30), true, false));
- fooList.intervals.push_back(Interval(BSON("" << 31 << "" << 40), true, false));
- fooList.intervals.push_back(Interval(BSON("" << 41 << "" << 50), true, false));
-
- IndexBounds bounds;
- bounds.fields.push_back(fooList);
- IndexBoundsChecker it(&bounds, BSON("foo" << 1), 1);
-
- IndexSeekPoint seekPoint;
- IndexBoundsChecker::KeyState state;
-
- // Start at something in our range.
- state = it.checkKey(BSON("" << 7), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+/**
+ * Get a BSONObj which represents the interval from
+ * MinKey to 'end'.
+ */
+BSONObj minKeyIntObj(int end) {
+ BSONObjBuilder bob;
+ bob.appendMinKey("");
+ bob.appendNumber("", end);
+ return bob.obj();
+}
- // "foo" moves forward a few intervals.
- state = it.checkKey(BSON("" << 42), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
- }
+/**
+ * Get a BSONObj which represents the interval from
+ * 'start' to MaxKey.
+ */
+BSONObj maxKeyIntObj(int start) {
+ BSONObjBuilder bob;
+ bob.appendNumber("", start);
+ bob.appendMaxKey("");
+ return bob.obj();
+}
- TEST(IndexBoundsCheckerTest, SimpleCheckKey) {
- OrderedIntervalList fooList("foo");
- fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
-
- OrderedIntervalList barList("bar");
- barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, true));
-
- IndexBounds bounds;
- bounds.fields.push_back(fooList);
- bounds.fields.push_back(barList);
- IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
-
- IndexSeekPoint seekPoint;
- IndexBoundsChecker::KeyState state;
-
- // Start at something in our range.
- state = it.checkKey(BSON("" << 7 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // The rightmost key is past the range. We should be told to move past the key before the
- // one whose interval we exhausted.
- state = it.checkKey(BSON("" << 7 << "" << 5.00001), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT_EQUALS(seekPoint.prefixExclusive, true);
-
- // Move a little forward, but note that the rightmost key isn't in the interval yet.
- state = it.checkKey(BSON("" << 7.2 << "" << 0), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT_EQUALS(seekPoint.prefixExclusive, false);
- ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 0);
- ASSERT_EQUALS(seekPoint.suffixInclusive[1], false);
-
- // Move to the edge of both intervals, 20,5
- state = it.checkKey(BSON("" << 20 << "" << 5), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // And a little beyond.
- state = it.checkKey(BSON("" << 20 << "" << 5.1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::DONE);
- }
+/**
+ * Get a BSONObj which represents the interval
+ * [MinKey, MaxKey].
+ */
+BSONObj allValues() {
+ BSONObjBuilder bob;
+ bob.appendMinKey("");
+ bob.appendMaxKey("");
+ return bob.obj();
+}
- TEST(IndexBoundsCheckerTest, FirstKeyMovedIsOKSecondKeyMustMove) {
- OrderedIntervalList fooList("foo");
- fooList.intervals.push_back(Interval(BSON("" << 0 << "" << 9), true, true));
- fooList.intervals.push_back(Interval(BSON("" << 10 << "" << 20), true, true));
-
- OrderedIntervalList barList("bar");
- barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, true));
-
- IndexBounds bounds;
- bounds.fields.push_back(fooList);
- bounds.fields.push_back(barList);
- IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
-
- IndexSeekPoint seekPoint;
- IndexBoundsChecker::KeyState state;
-
- // Start at something in our range.
- state = it.checkKey(BSON("" << 0 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // First key moves to next interval, second key needs to be advanced.
- state = it.checkKey(BSON("" << 10 << "" << -1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT_EQUALS(seekPoint.prefixExclusive, false);
- ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 0);
- ASSERT_EQUALS(seekPoint.suffixInclusive[1], false);
+/**
+ * Test that if we complement the OIL twice,
+ * we get back the original OIL.
+ */
+void testDoubleComplement(const OrderedIntervalList* oil) {
+ OrderedIntervalList clone;
+ for (size_t i = 0; i < oil->intervals.size(); ++i) {
+ clone.intervals.push_back(oil->intervals[i]);
}
- TEST(IndexBoundsCheckerTest, SecondIntervalMustRewind) {
- OrderedIntervalList first("first");
- first.intervals.push_back(Interval(BSON("" << 25 << "" << 30), true, true));
-
- OrderedIntervalList second("second");
- second.intervals.push_back(Interval(BSON("" << 0 << "" << 0), true, true));
- second.intervals.push_back(Interval(BSON("" << 9 << "" << 9), true, true));
-
- IndexBounds bounds;
- bounds.fields.push_back(first);
- bounds.fields.push_back(second);
-
- BSONObj idx = BSON("first" << 1 << "second" << 1);
- ASSERT(bounds.isValidFor(idx, 1));
- IndexBoundsChecker it(&bounds, idx, 1);
-
- IndexSeekPoint seekPoint;
- IndexBoundsChecker::KeyState state;
-
- state = it.checkKey(BSON("" << 25 << "" << 0), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- state = it.checkKey(BSON("" << 25 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT_EQUALS(seekPoint.prefixExclusive, false);
- ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 9);
- ASSERT_EQUALS(seekPoint.suffixInclusive[1], true);
-
- state = it.checkKey(BSON("" << 25 << "" << 9), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+ clone.complement();
+ clone.complement();
- // First key moved forward. The second key moved back to a valid state but it's behind
- // the interval that the checker thought it was in.
- state = it.checkKey(BSON("" << 26 << "" << 0), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+ ASSERT_EQUALS(oil->intervals.size(), clone.intervals.size());
+ for (size_t i = 0; i < oil->intervals.size(); ++i) {
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil->intervals[i].compare(clone.intervals[i]));
}
+}
+
+// Complement of empty is [MinKey, MaxKey]
+TEST(IndexBoundsTest, ComplementEmptyOil) {
+ OrderedIntervalList oil;
+ testDoubleComplement(&oil);
+ oil.complement();
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(allValues(), true, true)));
+}
+
+// Complement of [MinKey, MaxKey] is empty
+TEST(IndexBoundsTest, ComplementAllValues) {
+ OrderedIntervalList oil;
+ oil.intervals.push_back(Interval(allValues(), true, true));
+ testDoubleComplement(&oil);
+ oil.complement();
+ ASSERT_EQUALS(oil.intervals.size(), 0U);
+}
+
+// Complement of [MinKey, 3), [5, MaxKey) is
+// [3, 5), [MaxKey, MaxKey].
+TEST(IndexBoundsTest, ComplementRanges) {
+ OrderedIntervalList oil;
+ oil.intervals.push_back(Interval(minKeyIntObj(3), true, false));
+ oil.intervals.push_back(Interval(maxKeyIntObj(5), true, false));
+ testDoubleComplement(&oil);
+ oil.complement();
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(BSON("" << 3 << "" << 5), true, false)));
+
+ // Make the interval [MaxKey, MaxKey].
+ BSONObjBuilder bob;
+ bob.appendMaxKey("");
+ bob.appendMaxKey("");
+ BSONObj maxKeyInt = bob.obj();
+
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(maxKeyInt, true, true)));
+}
+
+// Complement of (MinKey, 3), (3, MaxKey) is
+// [MinKey, MinKey], [3, 3], [MaxKey, MaxKey].
+TEST(IndexBoundsTest, ComplementRanges2) {
+ OrderedIntervalList oil;
+ oil.intervals.push_back(Interval(minKeyIntObj(3), false, false));
+ oil.intervals.push_back(Interval(maxKeyIntObj(3), false, false));
+ testDoubleComplement(&oil);
+ oil.complement();
+ ASSERT_EQUALS(oil.intervals.size(), 3U);
+
+ // First interval is [MinKey, MinKey]
+ BSONObjBuilder minBob;
+ minBob.appendMinKey("");
+ minBob.appendMinKey("");
+ BSONObj minObj = minBob.obj();
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(minObj, true, true)));
+
+ // Second is [3, 3]
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(BSON("" << 3 << "" << 3), true, true)));
+
+ // Third is [MaxKey, MaxKey]
+ BSONObjBuilder maxBob;
+ maxBob.appendMaxKey("");
+ maxBob.appendMaxKey("");
+ BSONObj maxObj = maxBob.obj();
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[2].compare(Interval(maxObj, true, true)));
+}
+
+//
+// Iteration over index bounds (IndexBoundsChecker)
+//
+
+TEST(IndexBoundsCheckerTest, StartKey) {
+ OrderedIntervalList fooList("foo");
+ fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+
+ OrderedIntervalList barList("bar");
+ barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, false));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(fooList);
+ bounds.fields.push_back(barList);
+ IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
+
+ IndexSeekPoint seekPoint;
+ it.getStartSeekPoint(&seekPoint);
+
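+    // The start seek point takes the first endpoint of each field's first interval:
+    // 7 (inclusive) for 'foo' and 0 (exclusive) for 'bar'.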
+ ASSERT_EQUALS(seekPoint.keySuffix[0]->numberInt(), 7);
+ ASSERT_EQUALS(seekPoint.suffixInclusive[0], true);
+ ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 0);
+ ASSERT_EQUALS(seekPoint.suffixInclusive[1], false);
+}
+
+TEST(IndexBoundsCheckerTest, CheckEnd) {
+ OrderedIntervalList fooList("foo");
+ fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+ fooList.intervals.push_back(Interval(BSON("" << 21 << "" << 30), true, false));
+
+ OrderedIntervalList barList("bar");
+ barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, false));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(fooList);
+ bounds.fields.push_back(barList);
+ IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
+
+ IndexSeekPoint seekPoint;
+ IndexBoundsChecker::KeyState state;
+
+ // Start at something in our range.
+ state = it.checkKey(BSON("" << 7 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // Second field moves past the end, but we're not done, since there's still an interval in
+ // the previous field that the key hasn't advanced to.
+ state = it.checkKey(BSON("" << 20 << "" << 5), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT(seekPoint.prefixExclusive);
+
+ // The next index key is in the second interval for 'foo' and there is a valid interval for
+ // 'bar'.
+ state = it.checkKey(BSON("" << 22 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // The next index key is very close to the end of the open interval for foo, and it's past
+ // the interval for 'bar'. Since the interval for foo is open, we are asked to move
+ // forward, since we possibly could.
+ state = it.checkKey(BSON("" << 29.9 << "" << 5), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT(seekPoint.prefixExclusive);
+}
+
+TEST(IndexBoundsCheckerTest, MoveIntervalForwardToNextInterval) {
+ OrderedIntervalList fooList("foo");
+ fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+ fooList.intervals.push_back(Interval(BSON("" << 21 << "" << 30), true, false));
+
+ OrderedIntervalList barList("bar");
+ barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, false));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(fooList);
+ bounds.fields.push_back(barList);
+ IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
+
+ IndexSeekPoint seekPoint;
+ IndexBoundsChecker::KeyState state;
+
+ // Start at something in our range.
+ state = it.checkKey(BSON("" << 7 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // "foo" moves between two intervals.
+ state = it.checkKey(BSON("" << 20.5 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 0);
+ // Should be told to move exactly to the next interval's beginning.
+ ASSERT_EQUALS(seekPoint.prefixExclusive, false);
+ ASSERT_EQUALS(seekPoint.keySuffix[0]->numberInt(), 21);
+ ASSERT_EQUALS(seekPoint.suffixInclusive[0], true);
+ ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 0);
+ ASSERT_EQUALS(seekPoint.suffixInclusive[1], false);
+}
+
+TEST(IndexBoundsCheckerTest, MoveIntervalForwardManyIntervals) {
+ OrderedIntervalList fooList("foo");
+ fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+ fooList.intervals.push_back(Interval(BSON("" << 21 << "" << 30), true, false));
+ fooList.intervals.push_back(Interval(BSON("" << 31 << "" << 40), true, false));
+ fooList.intervals.push_back(Interval(BSON("" << 41 << "" << 50), true, false));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(fooList);
+ IndexBoundsChecker it(&bounds, BSON("foo" << 1), 1);
+
+ IndexSeekPoint seekPoint;
+ IndexBoundsChecker::KeyState state;
+
+ // Start at something in our range.
+ state = it.checkKey(BSON("" << 7), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // "foo" moves forward a few intervals.
+ state = it.checkKey(BSON("" << 42), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+}
+
+TEST(IndexBoundsCheckerTest, SimpleCheckKey) {
+ OrderedIntervalList fooList("foo");
+ fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+
+ OrderedIntervalList barList("bar");
+ barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, true));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(fooList);
+ bounds.fields.push_back(barList);
+ IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
+
+ IndexSeekPoint seekPoint;
+ IndexBoundsChecker::KeyState state;
+
+ // Start at something in our range.
+ state = it.checkKey(BSON("" << 7 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // The rightmost key is past the range. We should be told to move past the key before the
+ // one whose interval we exhausted.
+ state = it.checkKey(BSON("" << 7 << "" << 5.00001), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT_EQUALS(seekPoint.prefixExclusive, true);
+
+ // Move a little forward, but note that the rightmost key isn't in the interval yet.
+ state = it.checkKey(BSON("" << 7.2 << "" << 0), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT_EQUALS(seekPoint.prefixExclusive, false);
+ ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 0);
+ ASSERT_EQUALS(seekPoint.suffixInclusive[1], false);
+
+ // Move to the edge of both intervals, 20,5
+ state = it.checkKey(BSON("" << 20 << "" << 5), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // And a little beyond.
+ state = it.checkKey(BSON("" << 20 << "" << 5.1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::DONE);
+}
+
+TEST(IndexBoundsCheckerTest, FirstKeyMovedIsOKSecondKeyMustMove) {
+ OrderedIntervalList fooList("foo");
+ fooList.intervals.push_back(Interval(BSON("" << 0 << "" << 9), true, true));
+ fooList.intervals.push_back(Interval(BSON("" << 10 << "" << 20), true, true));
+
+ OrderedIntervalList barList("bar");
+ barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, true));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(fooList);
+ bounds.fields.push_back(barList);
+ IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
+
+ IndexSeekPoint seekPoint;
+ IndexBoundsChecker::KeyState state;
+
+ // Start at something in our range.
+ state = it.checkKey(BSON("" << 0 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // First key moves to next interval, second key needs to be advanced.
+ state = it.checkKey(BSON("" << 10 << "" << -1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT_EQUALS(seekPoint.prefixExclusive, false);
+ ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 0);
+ ASSERT_EQUALS(seekPoint.suffixInclusive[1], false);
+}
+
+TEST(IndexBoundsCheckerTest, SecondIntervalMustRewind) {
+ OrderedIntervalList first("first");
+ first.intervals.push_back(Interval(BSON("" << 25 << "" << 30), true, true));
+
+ OrderedIntervalList second("second");
+ second.intervals.push_back(Interval(BSON("" << 0 << "" << 0), true, true));
+ second.intervals.push_back(Interval(BSON("" << 9 << "" << 9), true, true));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(first);
+ bounds.fields.push_back(second);
+
+ BSONObj idx = BSON("first" << 1 << "second" << 1);
+ ASSERT(bounds.isValidFor(idx, 1));
+ IndexBoundsChecker it(&bounds, idx, 1);
+
+ IndexSeekPoint seekPoint;
+ IndexBoundsChecker::KeyState state;
+
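+    // Start inside both intervals: 'first' is within [25, 30] and 'second' hits the point [0, 0].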
+ state = it.checkKey(BSON("" << 25 << "" << 0), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ state = it.checkKey(BSON("" << 25 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT_EQUALS(seekPoint.prefixExclusive, false);
+ ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 9);
+ ASSERT_EQUALS(seekPoint.suffixInclusive[1], true);
+
+ state = it.checkKey(BSON("" << 25 << "" << 9), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // First key moved forward. The second key moved back to a valid state but it's behind
+ // the interval that the checker thought it was in.
+ state = it.checkKey(BSON("" << 26 << "" << 0), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+}
+
+TEST(IndexBoundsCheckerTest, SimpleCheckKeyBackwards) {
+ OrderedIntervalList fooList("foo");
+ fooList.intervals.push_back(Interval(BSON("" << 20 << "" << 7), true, true));
+
+ OrderedIntervalList barList("bar");
+ barList.intervals.push_back(Interval(BSON("" << 5 << "" << 0), true, false));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(fooList);
+ bounds.fields.push_back(barList);
+
+ BSONObj idx = BSON("foo" << -1 << "bar" << -1);
+ ASSERT(bounds.isValidFor(idx, 1));
+ IndexBoundsChecker it(&bounds, idx, 1);
+
+ IndexSeekPoint seekPoint;
+ IndexBoundsChecker::KeyState state;
+
+ // Start at something in our range.
+ state = it.checkKey(BSON("" << 20 << "" << 5), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // The rightmost key is past the range. We should be told to move past the key before the
+ // one whose interval we exhausted.
+ state = it.checkKey(BSON("" << 20 << "" << 0), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT_EQUALS(seekPoint.prefixExclusive, true);
+
+ // Move a little forward, but note that the rightmost key isn't in the interval yet.
+ state = it.checkKey(BSON("" << 19 << "" << 6), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT_EQUALS(seekPoint.prefixExclusive, false);
+ ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 5);
+ ASSERT_EQUALS(seekPoint.suffixInclusive[1], true);
+
+ // Move to the edge of both intervals
+ state = it.checkKey(BSON("" << 7 << "" << 0.01), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // And a little beyond.
+ state = it.checkKey(BSON("" << 7 << "" << 0), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::DONE);
+}
+
+TEST(IndexBoundsCheckerTest, CheckEndBackwards) {
+ OrderedIntervalList fooList("foo");
+ fooList.intervals.push_back(Interval(BSON("" << 30 << "" << 21), true, true));
+ fooList.intervals.push_back(Interval(BSON("" << 20 << "" << 7), true, false));
+
+ OrderedIntervalList barList("bar");
+ barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, false));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(fooList);
+ bounds.fields.push_back(barList);
+
+ BSONObj idx = BSON("foo" << 1 << "bar" << -1);
+ ASSERT(bounds.isValidFor(idx, -1));
+ IndexBoundsChecker it(&bounds, idx, -1);
+
+ IndexSeekPoint seekPoint;
+ IndexBoundsChecker::KeyState state;
+
+ // Start at something in our range.
+ state = it.checkKey(BSON("" << 30 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // Second field moves past the end, but we're not done, since there's still an interval in
+ // the previous field that the key hasn't advanced to.
+ state = it.checkKey(BSON("" << 30 << "" << 5), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT(seekPoint.prefixExclusive);
+
+ // The next index key is in the second interval for 'foo' and there is a valid interval for
+ // 'bar'.
+ state = it.checkKey(BSON("" << 20 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // The next index key is very close to the end of the open interval for foo, and it's past
+ // the interval for 'bar'. Since the interval for foo is open, we are asked to move
+ // forward, since we possibly could.
+ state = it.checkKey(BSON("" << 7.001 << "" << 5), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT(seekPoint.prefixExclusive);
+}
+
+//
+// IndexBoundsChecker::findIntervalForField
+//
- TEST(IndexBoundsCheckerTest, SimpleCheckKeyBackwards) {
- OrderedIntervalList fooList("foo");
- fooList.intervals.push_back(Interval(BSON("" << 20 << "" << 7), true, true));
-
- OrderedIntervalList barList("bar");
- barList.intervals.push_back(Interval(BSON("" << 5 << "" << 0), true, false));
-
- IndexBounds bounds;
- bounds.fields.push_back(fooList);
- bounds.fields.push_back(barList);
-
- BSONObj idx = BSON("foo" << -1 << "bar" << -1);
- ASSERT(bounds.isValidFor(idx, 1));
- IndexBoundsChecker it(&bounds, idx, 1);
-
- IndexSeekPoint seekPoint;
- IndexBoundsChecker::KeyState state;
-
- // Start at something in our range.
- state = it.checkKey(BSON("" << 20 << "" << 5), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // The rightmost key is past the range. We should be told to move past the key before the
- // one whose interval we exhausted.
- state = it.checkKey(BSON("" << 20 << "" << 0), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT_EQUALS(seekPoint.prefixExclusive, true);
-
- // Move a little forward, but note that the rightmost key isn't in the interval yet.
- state = it.checkKey(BSON("" << 19 << "" << 6), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT_EQUALS(seekPoint.prefixExclusive, false);
- ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 5);
- ASSERT_EQUALS(seekPoint.suffixInclusive[1], true);
-
- // Move to the edge of both intervals
- state = it.checkKey(BSON("" << 7 << "" << 0.01), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // And a little beyond.
- state = it.checkKey(BSON("" << 7 << "" << 0), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::DONE);
- }
-
- TEST(IndexBoundsCheckerTest, CheckEndBackwards) {
- OrderedIntervalList fooList("foo");
- fooList.intervals.push_back(Interval(BSON("" << 30 << "" << 21), true, true));
- fooList.intervals.push_back(Interval(BSON("" << 20 << "" << 7), true, false));
-
- OrderedIntervalList barList("bar");
- barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, false));
-
- IndexBounds bounds;
- bounds.fields.push_back(fooList);
- bounds.fields.push_back(barList);
-
- BSONObj idx = BSON("foo" << 1 << "bar" << -1);
- ASSERT(bounds.isValidFor(idx, -1));
- IndexBoundsChecker it(&bounds, idx, -1);
-
- IndexSeekPoint seekPoint;
- IndexBoundsChecker::KeyState state;
-
- // Start at something in our range.
- state = it.checkKey(BSON("" << 30 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // Second field moves past the end, but we're not done, since there's still an interval in
- // the previous field that the key hasn't advanced to.
- state = it.checkKey(BSON("" << 30 << "" << 5), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT(seekPoint.prefixExclusive);
-
- // The next index key is in the second interval for 'foo' and there is a valid interval for
- // 'bar'.
- state = it.checkKey(BSON("" << 20 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // The next index key is very close to the end of the open interval for foo, and it's past
- // the interval for 'bar'. Since the interval for foo is open, we are asked to move
- // forward, since we possibly could.
- state = it.checkKey(BSON("" << 7.001 << "" << 5), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT(seekPoint.prefixExclusive);
+/**
+ * Returns string representation of IndexBoundsChecker::Location.
+ */
+std::string toString(IndexBoundsChecker::Location location) {
+ switch (location) {
+ case IndexBoundsChecker::BEHIND:
+ return "BEHIND";
+ case IndexBoundsChecker::WITHIN:
+ return "WITHIN";
+ case IndexBoundsChecker::AHEAD:
+ return "AHEAD";
}
+ invariant(0);
+}
- //
- // IndexBoundsChecker::findIntervalForField
- //
-
- /**
- * Returns string representation of IndexBoundsChecker::Location.
- */
- std::string toString(IndexBoundsChecker::Location location) {
- switch(location) {
- case IndexBoundsChecker::BEHIND: return "BEHIND";
- case IndexBoundsChecker::WITHIN: return "WITHIN";
- case IndexBoundsChecker::AHEAD: return "AHEAD";
- }
- invariant(0);
+/**
+ * Test function for findIntervalForField.
+ * Constructs a list of point intervals from 'points' and searches for 'key'
+ * using findIntervalForField(). Verifies expected location and index (if expectedLocation
+ * is BEHIND or WITHIN).
+ * 'points' is provided in BSON format: {points: [pt1, pt2, pt4, ...]
+ */
+void testFindIntervalForField(int key,
+ const BSONObj& pointsObj,
+ const int expectedDirection,
+ IndexBoundsChecker::Location expectedLocation,
+ size_t expectedIntervalIndex) {
+ // Create key BSONElement.
+ BSONObj keyObj = BSON("" << key);
+ BSONElement keyElt = keyObj.firstElement();
+
+ // Construct point intervals.
+ OrderedIntervalList oil("foo");
+ BSONObjIterator i(pointsObj.getObjectField("points"));
+ while (i.more()) {
+ BSONElement e = i.next();
+ int j = e.numberInt();
+ oil.intervals.push_back(Interval(BSON("" << j << "" << j), true, true));
}
-
- /**
- * Test function for findIntervalForField.
- * Constructs a list of point intervals from 'points' and searches for 'key'
- * using findIntervalForField(). Verifies expected location and index (if expectedLocation
- * is BEHIND or WITHIN).
- * 'points' is provided in BSON format: {points: [pt1, pt2, pt4, ...]
- */
- void testFindIntervalForField(int key, const BSONObj& pointsObj, const int expectedDirection,
- IndexBoundsChecker::Location expectedLocation,
- size_t expectedIntervalIndex) {
- // Create key BSONElement.
- BSONObj keyObj = BSON("" << key);
- BSONElement keyElt = keyObj.firstElement();
-
- // Construct point intervals.
- OrderedIntervalList oil("foo");
- BSONObjIterator i(pointsObj.getObjectField("points"));
- while (i.more()) {
- BSONElement e = i.next();
- int j = e.numberInt();
- oil.intervals.push_back(Interval(BSON("" << j << "" << j), true, true));
- }
- size_t intervalIndex = 0;
- IndexBoundsChecker::Location location =
- IndexBoundsChecker::findIntervalForField(keyElt, oil, expectedDirection, &intervalIndex);
- if (expectedLocation != location) {
- mongoutils::str::stream ss;
- ss << "Unexpected location from findIntervalForField: key=" << keyElt
- << "; intervals=" << oil.toString() << "; direction=" << expectedDirection
- << ". Expected: " << toString(expectedLocation)
- << ". Actual: " << toString(location);
- FAIL(ss);
- }
- // Check interval index if location is BEHIND or WITHIN.
- if ((IndexBoundsChecker::BEHIND == expectedLocation ||
- IndexBoundsChecker::WITHIN == expectedLocation) &&
- expectedIntervalIndex != intervalIndex) {
- mongoutils::str::stream ss;
- ss << "Unexpected interval index from findIntervalForField: key=" << keyElt
- << "; intervals=" << oil.toString() << "; direction=" << expectedDirection
- << "; location= " << toString(location)
- << ". Expected: " << expectedIntervalIndex
- << ". Actual: " << intervalIndex;
- FAIL(ss);
- }
+ size_t intervalIndex = 0;
+ IndexBoundsChecker::Location location =
+ IndexBoundsChecker::findIntervalForField(keyElt, oil, expectedDirection, &intervalIndex);
+ if (expectedLocation != location) {
+ mongoutils::str::stream ss;
+ ss << "Unexpected location from findIntervalForField: key=" << keyElt
+ << "; intervals=" << oil.toString() << "; direction=" << expectedDirection
+ << ". Expected: " << toString(expectedLocation) << ". Actual: " << toString(location);
+ FAIL(ss);
}
-
- TEST(IndexBoundsCheckerTest, FindIntervalForField) {
- // No intervals
- BSONObj pointsObj = fromjson("{points: []}");
- testFindIntervalForField(5, pointsObj, 1, IndexBoundsChecker::AHEAD, 0U);
- testFindIntervalForField(5, pointsObj, -1, IndexBoundsChecker::AHEAD, 0U);
-
- // One interval
- pointsObj = fromjson("{points: [5]}");
- testFindIntervalForField(4, pointsObj, 1, IndexBoundsChecker::BEHIND, 0U);
- testFindIntervalForField(5, pointsObj, 1, IndexBoundsChecker::WITHIN, 0U);
- testFindIntervalForField(6, pointsObj, 1, IndexBoundsChecker::AHEAD, 0U);
-
- // One interval - reverse direction
- pointsObj = fromjson("{points: [5]}");
- testFindIntervalForField(6, pointsObj, -1, IndexBoundsChecker::BEHIND, 0U);
- testFindIntervalForField(5, pointsObj, -1, IndexBoundsChecker::WITHIN, 0U);
- testFindIntervalForField(4, pointsObj, -1, IndexBoundsChecker::AHEAD, 0U);
-
- // Two intervals
- // Verifies off-by-one handling in upper bound of binary search.
- pointsObj = fromjson("{points: [5, 7]}");
- testFindIntervalForField(4, pointsObj, 1, IndexBoundsChecker::BEHIND, 0U);
- testFindIntervalForField(5, pointsObj, 1, IndexBoundsChecker::WITHIN, 0U);
- testFindIntervalForField(6, pointsObj, 1, IndexBoundsChecker::BEHIND, 1U);
- testFindIntervalForField(7, pointsObj, 1, IndexBoundsChecker::WITHIN, 1U);
- testFindIntervalForField(8, pointsObj, 1, IndexBoundsChecker::AHEAD, 0U);
-
- // Two intervals - reverse direction
- // Verifies off-by-one handling in upper bound of binary search.
- pointsObj = fromjson("{points: [7, 5]}");
- testFindIntervalForField(8, pointsObj, -1, IndexBoundsChecker::BEHIND, 0U);
- testFindIntervalForField(7, pointsObj, -1, IndexBoundsChecker::WITHIN, 0U);
- testFindIntervalForField(6, pointsObj, -1, IndexBoundsChecker::BEHIND, 1U);
- testFindIntervalForField(5, pointsObj, -1, IndexBoundsChecker::WITHIN, 1U);
- testFindIntervalForField(4, pointsObj, -1, IndexBoundsChecker::AHEAD, 0U);
-
- // Multiple intervals - odd number of intervals.
- pointsObj = fromjson("{points: [1, 3, 5, 7, 9]}");
- testFindIntervalForField(0, pointsObj, 1, IndexBoundsChecker::BEHIND, 0U);
- testFindIntervalForField(1, pointsObj, 1, IndexBoundsChecker::WITHIN, 0U);
- testFindIntervalForField(2, pointsObj, 1, IndexBoundsChecker::BEHIND, 1U);
- testFindIntervalForField(3, pointsObj, 1, IndexBoundsChecker::WITHIN, 1U);
- testFindIntervalForField(4, pointsObj, 1, IndexBoundsChecker::BEHIND, 2U);
- testFindIntervalForField(5, pointsObj, 1, IndexBoundsChecker::WITHIN, 2U);
- testFindIntervalForField(6, pointsObj, 1, IndexBoundsChecker::BEHIND, 3U);
- testFindIntervalForField(7, pointsObj, 1, IndexBoundsChecker::WITHIN, 3U);
- testFindIntervalForField(8, pointsObj, 1, IndexBoundsChecker::BEHIND, 4U);
- testFindIntervalForField(9, pointsObj, 1, IndexBoundsChecker::WITHIN, 4U);
- testFindIntervalForField(10, pointsObj, 1, IndexBoundsChecker::AHEAD, 0U);
-
- // Multiple intervals - even number of intervals, reverse direction
- // Interval order has to match direction.
- pointsObj = fromjson("{points: [7, 5, 3, 1]}");
- testFindIntervalForField(8, pointsObj, -1, IndexBoundsChecker::BEHIND, 0U);
- testFindIntervalForField(7, pointsObj, -1, IndexBoundsChecker::WITHIN, 0U);
- testFindIntervalForField(6, pointsObj, -1, IndexBoundsChecker::BEHIND, 1U);
- testFindIntervalForField(5, pointsObj, -1, IndexBoundsChecker::WITHIN, 1U);
- testFindIntervalForField(4, pointsObj, -1, IndexBoundsChecker::BEHIND, 2U);
- testFindIntervalForField(3, pointsObj, -1, IndexBoundsChecker::WITHIN, 2U);
- testFindIntervalForField(2, pointsObj, -1, IndexBoundsChecker::BEHIND, 3U);
- testFindIntervalForField(1, pointsObj, -1, IndexBoundsChecker::WITHIN, 3U);
- testFindIntervalForField(0, pointsObj, -1, IndexBoundsChecker::AHEAD, 0U);
+ // Check interval index if location is BEHIND or WITHIN.
+ if ((IndexBoundsChecker::BEHIND == expectedLocation ||
+ IndexBoundsChecker::WITHIN == expectedLocation) &&
+ expectedIntervalIndex != intervalIndex) {
+ mongoutils::str::stream ss;
+ ss << "Unexpected interval index from findIntervalForField: key=" << keyElt
+ << "; intervals=" << oil.toString() << "; direction=" << expectedDirection
+ << "; location= " << toString(location) << ". Expected: " << expectedIntervalIndex
+ << ". Actual: " << intervalIndex;
+ FAIL(ss);
}
+}
+
+TEST(IndexBoundsCheckerTest, FindIntervalForField) {
+ // No intervals
+ BSONObj pointsObj = fromjson("{points: []}");
+ testFindIntervalForField(5, pointsObj, 1, IndexBoundsChecker::AHEAD, 0U);
+ testFindIntervalForField(5, pointsObj, -1, IndexBoundsChecker::AHEAD, 0U);
+
+ // One interval
+ pointsObj = fromjson("{points: [5]}");
+ testFindIntervalForField(4, pointsObj, 1, IndexBoundsChecker::BEHIND, 0U);
+ testFindIntervalForField(5, pointsObj, 1, IndexBoundsChecker::WITHIN, 0U);
+ testFindIntervalForField(6, pointsObj, 1, IndexBoundsChecker::AHEAD, 0U);
+
+ // One interval - reverse direction
+ pointsObj = fromjson("{points: [5]}");
+ testFindIntervalForField(6, pointsObj, -1, IndexBoundsChecker::BEHIND, 0U);
+ testFindIntervalForField(5, pointsObj, -1, IndexBoundsChecker::WITHIN, 0U);
+ testFindIntervalForField(4, pointsObj, -1, IndexBoundsChecker::AHEAD, 0U);
+
+ // Two intervals
+ // Verifies off-by-one handling in upper bound of binary search.
+ pointsObj = fromjson("{points: [5, 7]}");
+ testFindIntervalForField(4, pointsObj, 1, IndexBoundsChecker::BEHIND, 0U);
+ testFindIntervalForField(5, pointsObj, 1, IndexBoundsChecker::WITHIN, 0U);
+ testFindIntervalForField(6, pointsObj, 1, IndexBoundsChecker::BEHIND, 1U);
+ testFindIntervalForField(7, pointsObj, 1, IndexBoundsChecker::WITHIN, 1U);
+ testFindIntervalForField(8, pointsObj, 1, IndexBoundsChecker::AHEAD, 0U);
+
+ // Two intervals - reverse direction
+ // Verifies off-by-one handling in upper bound of binary search.
+ pointsObj = fromjson("{points: [7, 5]}");
+ testFindIntervalForField(8, pointsObj, -1, IndexBoundsChecker::BEHIND, 0U);
+ testFindIntervalForField(7, pointsObj, -1, IndexBoundsChecker::WITHIN, 0U);
+ testFindIntervalForField(6, pointsObj, -1, IndexBoundsChecker::BEHIND, 1U);
+ testFindIntervalForField(5, pointsObj, -1, IndexBoundsChecker::WITHIN, 1U);
+ testFindIntervalForField(4, pointsObj, -1, IndexBoundsChecker::AHEAD, 0U);
+
+ // Multiple intervals - odd number of intervals.
+ pointsObj = fromjson("{points: [1, 3, 5, 7, 9]}");
+ testFindIntervalForField(0, pointsObj, 1, IndexBoundsChecker::BEHIND, 0U);
+ testFindIntervalForField(1, pointsObj, 1, IndexBoundsChecker::WITHIN, 0U);
+ testFindIntervalForField(2, pointsObj, 1, IndexBoundsChecker::BEHIND, 1U);
+ testFindIntervalForField(3, pointsObj, 1, IndexBoundsChecker::WITHIN, 1U);
+ testFindIntervalForField(4, pointsObj, 1, IndexBoundsChecker::BEHIND, 2U);
+ testFindIntervalForField(5, pointsObj, 1, IndexBoundsChecker::WITHIN, 2U);
+ testFindIntervalForField(6, pointsObj, 1, IndexBoundsChecker::BEHIND, 3U);
+ testFindIntervalForField(7, pointsObj, 1, IndexBoundsChecker::WITHIN, 3U);
+ testFindIntervalForField(8, pointsObj, 1, IndexBoundsChecker::BEHIND, 4U);
+ testFindIntervalForField(9, pointsObj, 1, IndexBoundsChecker::WITHIN, 4U);
+ testFindIntervalForField(10, pointsObj, 1, IndexBoundsChecker::AHEAD, 0U);
+
+ // Multiple intervals - even number of intervals, reverse direction
+ // Interval order has to match direction.
+ pointsObj = fromjson("{points: [7, 5, 3, 1]}");
+ testFindIntervalForField(8, pointsObj, -1, IndexBoundsChecker::BEHIND, 0U);
+ testFindIntervalForField(7, pointsObj, -1, IndexBoundsChecker::WITHIN, 0U);
+ testFindIntervalForField(6, pointsObj, -1, IndexBoundsChecker::BEHIND, 1U);
+ testFindIntervalForField(5, pointsObj, -1, IndexBoundsChecker::WITHIN, 1U);
+ testFindIntervalForField(4, pointsObj, -1, IndexBoundsChecker::BEHIND, 2U);
+ testFindIntervalForField(3, pointsObj, -1, IndexBoundsChecker::WITHIN, 2U);
+ testFindIntervalForField(2, pointsObj, -1, IndexBoundsChecker::BEHIND, 3U);
+ testFindIntervalForField(1, pointsObj, -1, IndexBoundsChecker::WITHIN, 3U);
+ testFindIntervalForField(0, pointsObj, -1, IndexBoundsChecker::AHEAD, 0U);
+}
} // namespace
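For readers skimming the test: findIntervalForField reports BEHIND when the key sorts before the interval at the returned index, WITHIN when the key falls inside that interval, and AHEAD when the key sorts past every interval (the index is then meaningless, which is why the test passes 0U in that case). The following standalone sketch, not the actual IndexBoundsChecker binary search, reproduces that classification for the forward-direction, point-interval cases exercised above:

    #include <cstddef>
    #include <vector>

    enum Location { BEHIND, WITHIN, AHEAD };

    // With points [5, 7]: 4 -> BEHIND/0, 5 -> WITHIN/0, 6 -> BEHIND/1, 7 -> WITHIN/1, 8 -> AHEAD.
    Location findIntervalForPoint(int key, const std::vector<int>& points, std::size_t* indexOut) {
        for (std::size_t i = 0; i < points.size(); ++i) {
            if (key < points[i]) {
                *indexOut = i;
                return BEHIND;   // key sorts before interval i
            }
            if (key == points[i]) {
                *indexOut = i;
                return WITHIN;   // key falls inside interval i
            }
        }
        return AHEAD;            // key sorts past every interval; *indexOut is left untouched
    }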
diff --git a/src/mongo/db/query/index_entry.cpp b/src/mongo/db/query/index_entry.cpp
index 78a0d1efc4a..13153465387 100644
--- a/src/mongo/db/query/index_entry.cpp
+++ b/src/mongo/db/query/index_entry.cpp
@@ -34,33 +34,33 @@
namespace mongo {
- std::string IndexEntry::toString() const {
- StringBuilder sb;
- sb << "kp: " << keyPattern;
+std::string IndexEntry::toString() const {
+ StringBuilder sb;
+ sb << "kp: " << keyPattern;
- if (multikey) {
- sb << " multikey";
- }
-
- if (sparse) {
- sb << " sparse";
- }
+ if (multikey) {
+ sb << " multikey";
+ }
- if (unique) {
- sb << " unique";
- }
+ if (sparse) {
+ sb << " sparse";
+ }
- sb << " name: '" << name << "'";
+ if (unique) {
+ sb << " unique";
+ }
- if (filterExpr) {
- sb << " filterExpr: " << filterExpr->toString();
- }
+ sb << " name: '" << name << "'";
- if (!infoObj.isEmpty()) {
- sb << " io: " << infoObj;
- }
+ if (filterExpr) {
+ sb << " filterExpr: " << filterExpr->toString();
+ }
- return sb.str();
+ if (!infoObj.isEmpty()) {
+ sb << " io: " << infoObj;
}
+ return sb.str();
+}
+
} // namespace mongo
diff --git a/src/mongo/db/query/index_entry.h b/src/mongo/db/query/index_entry.h
index c27be6bb3bd..9a785abef8b 100644
--- a/src/mongo/db/query/index_entry.h
+++ b/src/mongo/db/query/index_entry.h
@@ -36,90 +36,87 @@
namespace mongo {
- class MatchExpression;
+class MatchExpression;
+
+/**
+ * This name sucks, but every name involving 'index' is used somewhere.
+ */
+struct IndexEntry {
+ /**
+ * Use this constructor if you're making an IndexEntry from the catalog.
+ */
+ IndexEntry(const BSONObj& kp,
+ const std::string& accessMethod,
+ bool mk,
+ bool sp,
+ bool unq,
+ const std::string& n,
+ const MatchExpression* fe,
+ const BSONObj& io)
+ : keyPattern(kp),
+ multikey(mk),
+ sparse(sp),
+ unique(unq),
+ name(n),
+ filterExpr(fe),
+ infoObj(io) {
+ type = IndexNames::nameToType(accessMethod);
+ }
/**
- * This name sucks, but every name involving 'index' is used somewhere.
+ * For testing purposes only.
*/
- struct IndexEntry {
- /**
- * Use this constructor if you're making an IndexEntry from the catalog.
- */
- IndexEntry(const BSONObj& kp,
- const std::string& accessMethod,
- bool mk,
- bool sp,
- bool unq,
- const std::string& n,
- const MatchExpression* fe,
- const BSONObj& io)
- : keyPattern(kp),
- multikey(mk),
- sparse(sp),
- unique(unq),
- name(n),
- filterExpr(fe),
- infoObj(io) {
-
- type = IndexNames::nameToType(accessMethod);
- }
-
- /**
- * For testing purposes only.
- */
- IndexEntry(const BSONObj& kp,
- bool mk,
- bool sp,
- bool unq,
- const std::string& n,
- const MatchExpression* fe,
- const BSONObj& io)
- : keyPattern(kp),
- multikey(mk),
- sparse(sp),
- unique(unq),
- name(n),
- filterExpr(fe),
- infoObj(io) {
-
- type = IndexNames::nameToType(IndexNames::findPluginName(keyPattern));
- }
-
- /**
- * For testing purposes only.
- */
- IndexEntry(const BSONObj& kp)
- : keyPattern(kp),
- multikey(false),
- sparse(false),
- unique(false),
- name("test_foo"),
- filterExpr(nullptr),
- infoObj(BSONObj()) {
-
- type = IndexNames::nameToType(IndexNames::findPluginName(keyPattern));
- }
-
- BSONObj keyPattern;
-
- bool multikey;
-
- bool sparse;
-
- bool unique;
-
- std::string name;
-
- const MatchExpression* filterExpr;
-
- // Geo indices have extra parameters. We need those available to plan correctly.
- BSONObj infoObj;
-
- // What type of index is this? (What access method can we use on the index described
- // by the keyPattern?)
- IndexType type;
-
- std::string toString() const;
- };
+ IndexEntry(const BSONObj& kp,
+ bool mk,
+ bool sp,
+ bool unq,
+ const std::string& n,
+ const MatchExpression* fe,
+ const BSONObj& io)
+ : keyPattern(kp),
+ multikey(mk),
+ sparse(sp),
+ unique(unq),
+ name(n),
+ filterExpr(fe),
+ infoObj(io) {
+ type = IndexNames::nameToType(IndexNames::findPluginName(keyPattern));
+ }
+
+ /**
+ * For testing purposes only.
+ */
+ IndexEntry(const BSONObj& kp)
+ : keyPattern(kp),
+ multikey(false),
+ sparse(false),
+ unique(false),
+ name("test_foo"),
+ filterExpr(nullptr),
+ infoObj(BSONObj()) {
+ type = IndexNames::nameToType(IndexNames::findPluginName(keyPattern));
+ }
+
+ BSONObj keyPattern;
+
+ bool multikey;
+
+ bool sparse;
+
+ bool unique;
+
+ std::string name;
+
+ const MatchExpression* filterExpr;
+
+ // Geo indices have extra parameters. We need those available to plan correctly.
+ BSONObj infoObj;
+
+ // What type of index is this? (What access method can we use on the index described
+ // by the keyPattern?)
+ IndexType type;
+
+ std::string toString() const;
+};
} // namespace mongo
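As a usage note, the testing-only constructors above derive 'type' from the key pattern via IndexNames::findPluginName, so a plain {field: 1} pattern resolves to an ordinary btree entry with multikey, sparse, and unique all false. A minimal sketch, assuming the mongo build tree (the BSON macro is assumed to come from the usual jsobj.h include):

    #include "mongo/db/jsobj.h"
    #include "mongo/db/query/index_entry.h"

    mongo::IndexEntry makeTestEntry() {
        // No special plugin suffix in the key pattern, so 'type' resolves to the btree type.
        return mongo::IndexEntry(BSON("a" << 1 << "b" << 1));
    }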
diff --git a/src/mongo/db/query/index_tag.cpp b/src/mongo/db/query/index_tag.cpp
index 7b81a23f556..c1dceec6b02 100644
--- a/src/mongo/db/query/index_tag.cpp
+++ b/src/mongo/db/query/index_tag.cpp
@@ -35,81 +35,79 @@
namespace mongo {
- // TODO: Move out of the enumerator and into the planner.
-
- const size_t IndexTag::kNoIndex = std::numeric_limits<size_t>::max();
-
- void tagForSort(MatchExpression* tree) {
- if (!Indexability::nodeCanUseIndexOnOwnField(tree)) {
- size_t myTagValue = IndexTag::kNoIndex;
- for (size_t i = 0; i < tree->numChildren(); ++i) {
- MatchExpression* child = tree->getChild(i);
- tagForSort(child);
- IndexTag* childTag = static_cast<IndexTag*>(child->getTag());
- if (NULL != childTag) {
- myTagValue = std::min(myTagValue, childTag->index);
- }
- }
- if (myTagValue != IndexTag::kNoIndex) {
- tree->setTag(new IndexTag(myTagValue));
+// TODO: Move out of the enumerator and into the planner.
+
+const size_t IndexTag::kNoIndex = std::numeric_limits<size_t>::max();
+
+void tagForSort(MatchExpression* tree) {
+ if (!Indexability::nodeCanUseIndexOnOwnField(tree)) {
+ size_t myTagValue = IndexTag::kNoIndex;
+ for (size_t i = 0; i < tree->numChildren(); ++i) {
+ MatchExpression* child = tree->getChild(i);
+ tagForSort(child);
+ IndexTag* childTag = static_cast<IndexTag*>(child->getTag());
+ if (NULL != childTag) {
+ myTagValue = std::min(myTagValue, childTag->index);
}
}
+ if (myTagValue != IndexTag::kNoIndex) {
+ tree->setTag(new IndexTag(myTagValue));
+ }
}
+}
- bool TagComparison(const MatchExpression* lhs, const MatchExpression* rhs) {
- IndexTag* lhsTag = static_cast<IndexTag*>(lhs->getTag());
- size_t lhsValue = (NULL == lhsTag) ? IndexTag::kNoIndex : lhsTag->index;
- size_t lhsPos = (NULL == lhsTag) ? IndexTag::kNoIndex : lhsTag->pos;
+bool TagComparison(const MatchExpression* lhs, const MatchExpression* rhs) {
+ IndexTag* lhsTag = static_cast<IndexTag*>(lhs->getTag());
+ size_t lhsValue = (NULL == lhsTag) ? IndexTag::kNoIndex : lhsTag->index;
+ size_t lhsPos = (NULL == lhsTag) ? IndexTag::kNoIndex : lhsTag->pos;
- IndexTag* rhsTag = static_cast<IndexTag*>(rhs->getTag());
- size_t rhsValue = (NULL == rhsTag) ? IndexTag::kNoIndex : rhsTag->index;
- size_t rhsPos = (NULL == rhsTag) ? IndexTag::kNoIndex : rhsTag->pos;
+ IndexTag* rhsTag = static_cast<IndexTag*>(rhs->getTag());
+ size_t rhsValue = (NULL == rhsTag) ? IndexTag::kNoIndex : rhsTag->index;
+ size_t rhsPos = (NULL == rhsTag) ? IndexTag::kNoIndex : rhsTag->pos;
- // First, order on indices.
- if (lhsValue != rhsValue) {
- // This relies on kNoIndex being larger than every other possible index.
- return lhsValue < rhsValue;
- }
-
- // Next, order so that if there's a GEO_NEAR it's first.
- if (MatchExpression::GEO_NEAR == lhs->matchType()) {
- return true;
- }
- else if (MatchExpression::GEO_NEAR == rhs->matchType()) {
- return false;
- }
+ // First, order on indices.
+ if (lhsValue != rhsValue) {
+ // This relies on kNoIndex being larger than every other possible index.
+ return lhsValue < rhsValue;
+ }
- // Ditto text.
- if (MatchExpression::TEXT == lhs->matchType()) {
- return true;
- }
- else if (MatchExpression::TEXT == rhs->matchType()) {
- return false;
- }
+ // Next, order so that if there's a GEO_NEAR it's first.
+ if (MatchExpression::GEO_NEAR == lhs->matchType()) {
+ return true;
+ } else if (MatchExpression::GEO_NEAR == rhs->matchType()) {
+ return false;
+ }
- // Next, order so that the first field of a compound index appears first.
- if (lhsPos != rhsPos) {
- return lhsPos < rhsPos;
- }
+ // Ditto text.
+ if (MatchExpression::TEXT == lhs->matchType()) {
+ return true;
+ } else if (MatchExpression::TEXT == rhs->matchType()) {
+ return false;
+ }
- // Next, order on fields.
- int cmp = lhs->path().compare(rhs->path());
- if (0 != cmp) {
- return 0;
- }
+ // Next, order so that the first field of a compound index appears first.
+ if (lhsPos != rhsPos) {
+ return lhsPos < rhsPos;
+ }
- // Finally, order on expression type.
- return lhs->matchType() < rhs->matchType();
+ // Next, order on fields.
+ int cmp = lhs->path().compare(rhs->path());
+ if (0 != cmp) {
+ return 0;
}
- void sortUsingTags(MatchExpression* tree) {
- for (size_t i = 0; i < tree->numChildren(); ++i) {
- sortUsingTags(tree->getChild(i));
- }
- std::vector<MatchExpression*>* children = tree->getChildVector();
- if (NULL != children) {
- std::sort(children->begin(), children->end(), TagComparison);
- }
+ // Finally, order on expression type.
+ return lhs->matchType() < rhs->matchType();
+}
+
+void sortUsingTags(MatchExpression* tree) {
+ for (size_t i = 0; i < tree->numChildren(); ++i) {
+ sortUsingTags(tree->getChild(i));
+ }
+ std::vector<MatchExpression*>* children = tree->getChildVector();
+ if (NULL != children) {
+ std::sort(children->begin(), children->end(), TagComparison);
}
+}
-} // namespace mongo
+} // namespace mongo
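A quick illustration of how the two functions cooperate (a sketch only, assuming the mongo headers; parseMatchExpression() is a hypothetical stand-in for whatever builds the tree): tagForSort labels each interior node with the smallest index number used anywhere beneath it, and sortUsingTags then reorders children so predicates answered by the same index end up adjacent.

    #include <memory>

    #include "mongo/db/matcher/expression.h"
    #include "mongo/db/query/index_tag.h"

    using namespace mongo;

    void tagExample() {
        // Hypothetical: an $and over {a: 1} and {b: 1} with leaves already tagged by the enumerator.
        std::unique_ptr<MatchExpression> root = parseMatchExpression("{a: 1, b: 1}");
        root->getChild(0)->setTag(new IndexTag(2));   // 'a' predicate answered by index #2
        root->getChild(1)->setTag(new IndexTag(0));   // 'b' predicate answered by index #0

        tagForSort(root.get());     // the $and now carries IndexTag(0), the minimum of its children
        sortUsingTags(root.get());  // children reordered so the index #0 predicate comes first
    }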
diff --git a/src/mongo/db/query/index_tag.h b/src/mongo/db/query/index_tag.h
index 9fb115c818d..3980aa72f1b 100644
--- a/src/mongo/db/query/index_tag.h
+++ b/src/mongo/db/query/index_tag.h
@@ -35,98 +35,98 @@
namespace mongo {
- // output from enumerator to query planner
- class IndexTag : public MatchExpression::TagData {
- public:
- static const size_t kNoIndex;
-
- IndexTag() : index(kNoIndex), pos(0) {}
- IndexTag(size_t i) : index(i), pos(0) { }
- IndexTag(size_t i, size_t p) : index(i), pos(p) { }
-
- virtual ~IndexTag() { }
-
- virtual void debugString(StringBuilder* builder) const {
- *builder << " || Selected Index #" << index << " pos " << pos;
+// output from enumerator to query planner
+class IndexTag : public MatchExpression::TagData {
+public:
+ static const size_t kNoIndex;
+
+ IndexTag() : index(kNoIndex), pos(0) {}
+ IndexTag(size_t i) : index(i), pos(0) {}
+ IndexTag(size_t i, size_t p) : index(i), pos(p) {}
+
+ virtual ~IndexTag() {}
+
+ virtual void debugString(StringBuilder* builder) const {
+ *builder << " || Selected Index #" << index << " pos " << pos;
+ }
+
+ virtual MatchExpression::TagData* clone() const {
+ return new IndexTag(index, pos);
+ }
+
+ // What index should we try to use for this leaf?
+ size_t index;
+
+ // What position are we in the index? (Compound.)
+ size_t pos;
+};
+
+// used internally
+class RelevantTag : public MatchExpression::TagData {
+public:
+ RelevantTag() : elemMatchExpr(NULL), pathPrefix("") {}
+
+ std::vector<size_t> first;
+ std::vector<size_t> notFirst;
+
+ // We don't know the full path from a node unless we keep notes as we traverse from the
+ // root. We do this once and store it.
+ // TODO: Do a FieldRef / StringData pass.
+ // TODO: We might want this inside of the MatchExpression.
+ std::string path;
+
+ // Points to the innermost containing $elemMatch. If this tag is
+ // attached to an expression not contained in an $elemMatch, then
+ // 'elemMatchExpr' is NULL. Not owned here.
+ MatchExpression* elemMatchExpr;
+
+ // If not contained inside an elemMatch, 'pathPrefix' contains the
+ // part of 'path' prior to the first dot. For example, if 'path' is
+ // "a.b.c", then 'pathPrefix' is "a". If 'path' is just "a", then
+ // 'pathPrefix' is also "a".
+ //
+ // If tagging a predicate contained in an $elemMatch, 'pathPrefix'
+ // holds the prefix of the path *inside* the $elemMatch. If this
+ // tags predicate {a: {$elemMatch: {"b.c": {$gt: 1}}}}, then
+ // 'pathPrefix' is "b".
+ //
+ // Used by the plan enumerator to make sure that we never
+ // compound two predicates sharing a path prefix.
+ std::string pathPrefix;
+
+ virtual void debugString(StringBuilder* builder) const {
+ *builder << " || First: ";
+ for (size_t i = 0; i < first.size(); ++i) {
+ *builder << first[i] << " ";
}
-
- virtual MatchExpression::TagData* clone() const {
- return new IndexTag(index, pos);
+ *builder << "notFirst: ";
+ for (size_t i = 0; i < notFirst.size(); ++i) {
+ *builder << notFirst[i] << " ";
}
+ *builder << "full path: " << path;
+ }
- // What index should we try to use for this leaf?
- size_t index;
-
- // What position are we in the index? (Compound.)
- size_t pos;
- };
-
- // used internally
- class RelevantTag : public MatchExpression::TagData {
- public:
- RelevantTag() : elemMatchExpr(NULL), pathPrefix("") { }
-
- std::vector<size_t> first;
- std::vector<size_t> notFirst;
-
- // We don't know the full path from a node unless we keep notes as we traverse from the
- // root. We do this once and store it.
- // TODO: Do a FieldRef / StringData pass.
- // TODO: We might want this inside of the MatchExpression.
- std::string path;
-
- // Points to the innermost containing $elemMatch. If this tag is
- // attached to an expression not contained in an $elemMatch, then
- // 'elemMatchExpr' is NULL. Not owned here.
- MatchExpression* elemMatchExpr;
-
- // If not contained inside an elemMatch, 'pathPrefix' contains the
- // part of 'path' prior to the first dot. For example, if 'path' is
- // "a.b.c", then 'pathPrefix' is "a". If 'path' is just "a", then
- // 'pathPrefix' is also "a".
- //
- // If tagging a predicate contained in an $elemMatch, 'pathPrefix'
- // holds the prefix of the path *inside* the $elemMatch. If this
- // tags predicate {a: {$elemMatch: {"b.c": {$gt: 1}}}}, then
- // 'pathPrefix' is "b".
- //
- // Used by the plan enumerator to make sure that we never
- // compound two predicates sharing a path prefix.
- std::string pathPrefix;
-
- virtual void debugString(StringBuilder* builder) const {
- *builder << " || First: ";
- for (size_t i = 0; i < first.size(); ++i) {
- *builder << first[i] << " ";
- }
- *builder << "notFirst: ";
- for (size_t i = 0; i < notFirst.size(); ++i) {
- *builder << notFirst[i] << " ";
- }
- *builder << "full path: " << path;
- }
+ virtual MatchExpression::TagData* clone() const {
+ RelevantTag* ret = new RelevantTag();
+ ret->first = first;
+ ret->notFirst = notFirst;
+ return ret;
+ }
+};
- virtual MatchExpression::TagData* clone() const {
- RelevantTag* ret = new RelevantTag();
- ret->first = first;
- ret->notFirst = notFirst;
- return ret;
- }
- };
-
- /**
- * Tags each node of the tree with the lowest numbered index that the sub-tree rooted at that
- * node uses.
- *
- * Nodes that satisfy Indexability::nodeCanUseIndexOnOwnField are already tagged if there
- * exists an index that that node can use.
- */
- void tagForSort(MatchExpression* tree);
-
- /**
- * Sorts the tree using its IndexTag(s). Nodes that use the same index are adjacent to one
- * another.
- */
- void sortUsingTags(MatchExpression* tree);
-
-} // namespace mongo
+/**
+ * Tags each node of the tree with the lowest numbered index that the sub-tree rooted at that
+ * node uses.
+ *
+ * Nodes that satisfy Indexability::nodeCanUseIndexOnOwnField are already tagged if there
+ * exists an index that that node can use.
+ */
+void tagForSort(MatchExpression* tree);
+
+/**
+ * Sorts the tree using its IndexTag(s). Nodes that use the same index are adjacent to one
+ * another.
+ */
+void sortUsingTags(MatchExpression* tree);
+
+} // namespace mongo
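The pathPrefix bookkeeping documented above boils down to "everything before the first dot" of the relevant path (the path inside the $elemMatch when there is one). A standalone sketch of that rule, not mongo code:

    #include <string>

    // "a.b.c" -> "a"; "a" -> "a" (no dot: the whole path is its own prefix).
    std::string pathPrefixOf(const std::string& path) {
        return path.substr(0, path.find('.'));
    }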
diff --git a/src/mongo/db/query/indexability.h b/src/mongo/db/query/indexability.h
index f1f25ced1c6..a68bf3f328a 100644
--- a/src/mongo/db/query/indexability.h
+++ b/src/mongo/db/query/indexability.h
@@ -32,127 +32,123 @@
namespace mongo {
+/**
+ * Logic for how indices can be used with an expression.
+ */
+class Indexability {
+public:
/**
- * Logic for how indices can be used with an expression.
+ * Is an index over me->path() useful?
+ * This is the same thing as being sargable, if you have a RDBMS background.
*/
- class Indexability {
- public:
- /**
- * Is an index over me->path() useful?
- * This is the same thing as being sargable, if you have a RDBMS background.
- */
- static bool nodeCanUseIndexOnOwnField(const MatchExpression* me) {
- if (me->path().empty()) {
- return false;
- }
-
- if (arrayUsesIndexOnOwnField(me)) {
- return true;
- }
+ static bool nodeCanUseIndexOnOwnField(const MatchExpression* me) {
+ if (me->path().empty()) {
+ return false;
+ }
- return isIndexOnOwnFieldTypeNode(me);
+ if (arrayUsesIndexOnOwnField(me)) {
+ return true;
}
- /**
- * This array operator doesn't have any children with fields and can use an index.
- *
- * Example: a: {$elemMatch: {$gte: 1, $lte: 1}}.
- */
- static bool arrayUsesIndexOnOwnField(const MatchExpression* me) {
- if (!me->isArray()) {
- return false;
- }
+ return isIndexOnOwnFieldTypeNode(me);
+ }
- if (MatchExpression::ELEM_MATCH_VALUE != me->matchType()) {
- return false;
- }
+ /**
+ * This array operator doesn't have any children with fields and can use an index.
+ *
+ * Example: a: {$elemMatch: {$gte: 1, $lte: 1}}.
+ */
+ static bool arrayUsesIndexOnOwnField(const MatchExpression* me) {
+ if (!me->isArray()) {
+ return false;
+ }
+
+ if (MatchExpression::ELEM_MATCH_VALUE != me->matchType()) {
+ return false;
+ }
- // We have an ELEM_MATCH_VALUE expression. In order to be
- // considered "indexable" all children of the ELEM_MATCH_VALUE
- // must be "indexable" type expressions as well.
- for (size_t i = 0; i < me->numChildren(); i++) {
- MatchExpression* child = me->getChild(i);
-
- // Special case for NOT: If the child is a NOT, then it's the thing below
- // the NOT that we care about.
- if (MatchExpression::NOT == child->matchType()) {
- MatchExpression* notChild = child->getChild(0);
-
- if (MatchExpression::MOD == notChild->matchType() ||
- MatchExpression::REGEX == notChild->matchType() ||
- MatchExpression::TYPE_OPERATOR == notChild->matchType()) {
- // We can't index negations of this kind of expression node.
- return false;
- }
-
- // It's the child of the NOT that we check for indexability.
- if (!isIndexOnOwnFieldTypeNode(notChild)) {
- return false;
- }
-
- // Special handling for NOT has already been done; don't fall through.
- continue;
+ // We have an ELEM_MATCH_VALUE expression. In order to be
+ // considered "indexable" all children of the ELEM_MATCH_VALUE
+ // must be "indexable" type expressions as well.
+ for (size_t i = 0; i < me->numChildren(); i++) {
+ MatchExpression* child = me->getChild(i);
+
+ // Special case for NOT: If the child is a NOT, then it's the thing below
+ // the NOT that we care about.
+ if (MatchExpression::NOT == child->matchType()) {
+ MatchExpression* notChild = child->getChild(0);
+
+ if (MatchExpression::MOD == notChild->matchType() ||
+ MatchExpression::REGEX == notChild->matchType() ||
+ MatchExpression::TYPE_OPERATOR == notChild->matchType()) {
+ // We can't index negations of this kind of expression node.
+ return false;
}
- if (!isIndexOnOwnFieldTypeNode(child)) {
+ // It's the child of the NOT that we check for indexability.
+ if (!isIndexOnOwnFieldTypeNode(notChild)) {
return false;
}
+
+ // Special handling for NOT has already been done; don't fall through.
+ continue;
}
- // The entire ELEM_MATCH_VALUE is indexable since every one of its children
- // is indexable.
- return true;
+ if (!isIndexOnOwnFieldTypeNode(child)) {
+ return false;
+ }
}
- /**
- * Certain array operators require that the field for that operator is prepended
- * to all fields in that operator's children.
- *
- * Example: a: {$elemMatch: {b:1, c:1}}.
- */
- static bool arrayUsesIndexOnChildren(const MatchExpression* me) {
- return me->isArray() && MatchExpression::ELEM_MATCH_OBJECT == me->matchType();
- }
+ // The entire ELEM_MATCH_VALUE is indexable since every one of its children
+ // is indexable.
+ return true;
+ }
- /**
- * Returns true if 'me' is a NOT, and the child of the NOT can use
- * an index on its own field.
- */
- static bool isBoundsGeneratingNot(const MatchExpression* me) {
- return MatchExpression::NOT == me->matchType() &&
- nodeCanUseIndexOnOwnField(me->getChild(0));
- }
+ /**
+ * Certain array operators require that the field for that operator is prepended
+ * to all fields in that operator's children.
+ *
+ * Example: a: {$elemMatch: {b:1, c:1}}.
+ */
+ static bool arrayUsesIndexOnChildren(const MatchExpression* me) {
+ return me->isArray() && MatchExpression::ELEM_MATCH_OBJECT == me->matchType();
+ }
- /**
- * Returns true if either 'me' is a bounds generating NOT,
- * or 'me' can use an index on its own field.
- */
- static bool isBoundsGenerating(const MatchExpression* me) {
- return isBoundsGeneratingNot(me) || nodeCanUseIndexOnOwnField(me);
- }
+ /**
+ * Returns true if 'me' is a NOT, and the child of the NOT can use
+ * an index on its own field.
+ */
+ static bool isBoundsGeneratingNot(const MatchExpression* me) {
+ return MatchExpression::NOT == me->matchType() &&
+ nodeCanUseIndexOnOwnField(me->getChild(0));
+ }
- private:
- /**
- * Returns true if 'me' is "sargable" but is not a negation and
- * is not an array node such as ELEM_MATCH_VALUE.
- *
- * Used as a helper for nodeCanUseIndexOnOwnField().
- */
- static bool isIndexOnOwnFieldTypeNode(const MatchExpression* me) {
- return me->matchType() == MatchExpression::LTE
- || me->matchType() == MatchExpression::LT
- || me->matchType() == MatchExpression::EQ
- || me->matchType() == MatchExpression::GT
- || me->matchType() == MatchExpression::GTE
- || me->matchType() == MatchExpression::REGEX
- || me->matchType() == MatchExpression::MOD
- || me->matchType() == MatchExpression::MATCH_IN
- || me->matchType() == MatchExpression::TYPE_OPERATOR
- || me->matchType() == MatchExpression::GEO
- || me->matchType() == MatchExpression::GEO_NEAR
- || me->matchType() == MatchExpression::EXISTS
- || me->matchType() == MatchExpression::TEXT;
- }
- };
+ /**
+ * Returns true if either 'me' is a bounds generating NOT,
+ * or 'me' can use an index on its own field.
+ */
+ static bool isBoundsGenerating(const MatchExpression* me) {
+ return isBoundsGeneratingNot(me) || nodeCanUseIndexOnOwnField(me);
+ }
+
+private:
+ /**
+ * Returns true if 'me' is "sargable" but is not a negation and
+ * is not an array node such as ELEM_MATCH_VALUE.
+ *
+ * Used as a helper for nodeCanUseIndexOnOwnField().
+ */
+ static bool isIndexOnOwnFieldTypeNode(const MatchExpression* me) {
+ return me->matchType() == MatchExpression::LTE || me->matchType() == MatchExpression::LT ||
+ me->matchType() == MatchExpression::EQ || me->matchType() == MatchExpression::GT ||
+ me->matchType() == MatchExpression::GTE || me->matchType() == MatchExpression::REGEX ||
+ me->matchType() == MatchExpression::MOD ||
+ me->matchType() == MatchExpression::MATCH_IN ||
+ me->matchType() == MatchExpression::TYPE_OPERATOR ||
+ me->matchType() == MatchExpression::GEO ||
+ me->matchType() == MatchExpression::GEO_NEAR ||
+ me->matchType() == MatchExpression::EXISTS || me->matchType() == MatchExpression::TEXT;
+ }
+};
} // namespace mongo
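Tying the rules above together (sketch only, assuming the mongo headers; parseMatchExpression() is a hypothetical helper assumed to hand back the predicate node itself rather than an enclosing $and): a plain comparison leaf is sargable on its own field, while an ELEM_MATCH_VALUE is sargable only if every child, after unwrapping any NOT, is one of the indexable leaf types.

    std::unique_ptr<MatchExpression> gte = parseMatchExpression("{a: {$gte: 5}}");
    bool leafOk = Indexability::nodeCanUseIndexOnOwnField(gte.get());   // true: GTE on path 'a'

    std::unique_ptr<MatchExpression> em =
        parseMatchExpression("{a: {$elemMatch: {$gte: 1, $lte: 5}}}");
    bool arrayOk = Indexability::arrayUsesIndexOnOwnField(em.get());    // true: all children indexable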
diff --git a/src/mongo/db/query/internal_plans.cpp b/src/mongo/db/query/internal_plans.cpp
index 9a38020d00a..fe44395f021 100644
--- a/src/mongo/db/query/internal_plans.cpp
+++ b/src/mongo/db/query/internal_plans.cpp
@@ -40,91 +40,80 @@
namespace mongo {
- // static
- PlanExecutor* InternalPlanner::collectionScan(OperationContext* txn,
- StringData ns,
- Collection* collection,
- const Direction direction,
- const RecordId startLoc) {
- WorkingSet* ws = new WorkingSet();
-
- if (NULL == collection) {
- EOFStage* eof = new EOFStage();
- PlanExecutor* exec;
- // Takes ownership of 'ws' and 'eof'.
- Status execStatus = PlanExecutor::make(txn,
- ws,
- eof,
- ns.toString(),
- PlanExecutor::YIELD_MANUAL,
- &exec);
- invariant(execStatus.isOK());
- return exec;
- }
-
- invariant( ns == collection->ns().ns() );
-
- CollectionScanParams params;
- params.collection = collection;
- params.start = startLoc;
-
- if (FORWARD == direction) {
- params.direction = CollectionScanParams::FORWARD;
- }
- else {
- params.direction = CollectionScanParams::BACKWARD;
- }
-
- CollectionScan* cs = new CollectionScan(txn, params, ws, NULL);
+// static
+PlanExecutor* InternalPlanner::collectionScan(OperationContext* txn,
+ StringData ns,
+ Collection* collection,
+ const Direction direction,
+ const RecordId startLoc) {
+ WorkingSet* ws = new WorkingSet();
+
+ if (NULL == collection) {
+ EOFStage* eof = new EOFStage();
PlanExecutor* exec;
- // Takes ownership of 'ws' and 'cs'.
- Status execStatus = PlanExecutor::make(txn,
- ws,
- cs,
- collection,
- PlanExecutor::YIELD_MANUAL,
- &exec);
+ // Takes ownership of 'ws' and 'eof'.
+ Status execStatus =
+ PlanExecutor::make(txn, ws, eof, ns.toString(), PlanExecutor::YIELD_MANUAL, &exec);
invariant(execStatus.isOK());
return exec;
}
- // static
- PlanExecutor* InternalPlanner::indexScan(OperationContext* txn,
- const Collection* collection,
- const IndexDescriptor* descriptor,
- const BSONObj& startKey, const BSONObj& endKey,
- bool endKeyInclusive, Direction direction,
- int options) {
- invariant(collection);
- invariant(descriptor);
-
- IndexScanParams params;
- params.descriptor = descriptor;
- params.direction = direction;
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = startKey;
- params.bounds.endKey = endKey;
- params.bounds.endKeyInclusive = endKeyInclusive;
-
- WorkingSet* ws = new WorkingSet();
- IndexScan* ix = new IndexScan(txn, params, ws, NULL);
-
- PlanStage* root = ix;
-
- if (IXSCAN_FETCH & options) {
- root = new FetchStage(txn, ws, root, NULL, collection);
- }
+ invariant(ns == collection->ns().ns());
- PlanExecutor* exec;
- // Takes ownership of 'ws' and 'root'.
- Status execStatus = PlanExecutor::make(txn,
- ws,
- root,
- collection,
- PlanExecutor::YIELD_MANUAL,
- &exec);
- invariant(execStatus.isOK());
- return exec;
+ CollectionScanParams params;
+ params.collection = collection;
+ params.start = startLoc;
+
+ if (FORWARD == direction) {
+ params.direction = CollectionScanParams::FORWARD;
+ } else {
+ params.direction = CollectionScanParams::BACKWARD;
}
+ CollectionScan* cs = new CollectionScan(txn, params, ws, NULL);
+ PlanExecutor* exec;
+ // Takes ownership of 'ws' and 'cs'.
+ Status execStatus =
+ PlanExecutor::make(txn, ws, cs, collection, PlanExecutor::YIELD_MANUAL, &exec);
+ invariant(execStatus.isOK());
+ return exec;
+}
+
+// static
+PlanExecutor* InternalPlanner::indexScan(OperationContext* txn,
+ const Collection* collection,
+ const IndexDescriptor* descriptor,
+ const BSONObj& startKey,
+ const BSONObj& endKey,
+ bool endKeyInclusive,
+ Direction direction,
+ int options) {
+ invariant(collection);
+ invariant(descriptor);
+
+ IndexScanParams params;
+ params.descriptor = descriptor;
+ params.direction = direction;
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = startKey;
+ params.bounds.endKey = endKey;
+ params.bounds.endKeyInclusive = endKeyInclusive;
+
+ WorkingSet* ws = new WorkingSet();
+ IndexScan* ix = new IndexScan(txn, params, ws, NULL);
+
+ PlanStage* root = ix;
+
+ if (IXSCAN_FETCH & options) {
+ root = new FetchStage(txn, ws, root, NULL, collection);
+ }
+
+ PlanExecutor* exec;
+ // Takes ownership of 'ws' and 'root'.
+ Status execStatus =
+ PlanExecutor::make(txn, ws, root, collection, PlanExecutor::YIELD_MANUAL, &exec);
+ invariant(execStatus.isOK());
+ return exec;
+}
+
} // namespace mongo
diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h
index d9e763828ca..3b21e3a4f1e 100644
--- a/src/mongo/db/query/internal_plans.h
+++ b/src/mongo/db/query/internal_plans.h
@@ -33,52 +33,53 @@
namespace mongo {
- class BSONObj;
- class Collection;
- class IndexDescriptor;
- class OperationContext;
- class PlanExecutor;
+class BSONObj;
+class Collection;
+class IndexDescriptor;
+class OperationContext;
+class PlanExecutor;
- /**
- * The internal planner is a one-stop shop for "off-the-shelf" plans. Most internal procedures
- * that do not require advanced queries could be served by plans already in here.
- */
- class InternalPlanner {
- public:
- enum Direction {
- FORWARD = 1,
- BACKWARD = -1,
- };
-
- enum IndexScanOptions {
- // The client is interested in the default outputs of an index scan: BSONObj of the key,
- // RecordId of the record that's indexed. The client does its own fetching if required.
- IXSCAN_DEFAULT = 0,
+/**
+ * The internal planner is a one-stop shop for "off-the-shelf" plans. Most internal procedures
+ * that do not require advanced queries could be served by plans already in here.
+ */
+class InternalPlanner {
+public:
+ enum Direction {
+ FORWARD = 1,
+ BACKWARD = -1,
+ };
- // The client wants the fetched object and the RecordId that refers to it. Delegating
- // the fetch to the runner allows fetching outside of a lock.
- IXSCAN_FETCH = 1,
- };
+ enum IndexScanOptions {
+ // The client is interested in the default outputs of an index scan: BSONObj of the key,
+ // RecordId of the record that's indexed. The client does its own fetching if required.
+ IXSCAN_DEFAULT = 0,
- /**
- * Return a collection scan. Caller owns pointer.
- */
- static PlanExecutor* collectionScan(OperationContext* txn,
- StringData ns,
- Collection* collection,
- const Direction direction = FORWARD,
- const RecordId startLoc = RecordId());
+ // The client wants the fetched object and the RecordId that refers to it. Delegating
+ // the fetch to the runner allows fetching outside of a lock.
+ IXSCAN_FETCH = 1,
+ };
- /**
- * Return an index scan. Caller owns returned pointer.
- */
- static PlanExecutor* indexScan(OperationContext* txn,
- const Collection* collection,
- const IndexDescriptor* descriptor,
- const BSONObj& startKey, const BSONObj& endKey,
- bool endKeyInclusive, Direction direction = FORWARD,
- int options = 0);
+ /**
+ * Return a collection scan. Caller owns pointer.
+ */
+ static PlanExecutor* collectionScan(OperationContext* txn,
+ StringData ns,
+ Collection* collection,
+ const Direction direction = FORWARD,
+ const RecordId startLoc = RecordId());
- };
+ /**
+ * Return an index scan. Caller owns returned pointer.
+ */
+ static PlanExecutor* indexScan(OperationContext* txn,
+ const Collection* collection,
+ const IndexDescriptor* descriptor,
+ const BSONObj& startKey,
+ const BSONObj& endKey,
+ bool endKeyInclusive,
+ Direction direction = FORWARD,
+ int options = 0);
+};
} // namespace mongo
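A usage sketch for the API above (assumes the mongo build; txn, coll, descriptor, startKey, and endKey are placeholders for an OperationContext, Collection, IndexDescriptor, and key bounds obtained elsewhere). The caller owns the returned executor, so it goes straight into a smart pointer:

    // Forward collection scan from the start of the collection.
    std::unique_ptr<PlanExecutor> collExec(
        InternalPlanner::collectionScan(txn, coll->ns().ns(), coll, InternalPlanner::FORWARD));

    // Index scan that also fetches the documents (IXSCAN_FETCH) instead of
    // returning only keys and RecordIds.
    std::unique_ptr<PlanExecutor> ixExec(InternalPlanner::indexScan(txn,
                                                                    coll,
                                                                    descriptor,
                                                                    startKey,
                                                                    endKey,
                                                                    true /* endKeyInclusive */,
                                                                    InternalPlanner::FORWARD,
                                                                    InternalPlanner::IXSCAN_FETCH));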
diff --git a/src/mongo/db/query/interval.cpp b/src/mongo/db/query/interval.cpp
index cdbb7abbb6d..2f35dc2eccb 100644
--- a/src/mongo/db/query/interval.cpp
+++ b/src/mongo/db/query/interval.cpp
@@ -30,167 +30,164 @@
namespace mongo {
- using std::string;
-
- Interval::Interval()
- : _intervalData(BSONObj()), start(BSONElement()), startInclusive(false), end(BSONElement()),
- endInclusive(false) { }
-
- Interval::Interval(BSONObj base, bool si, bool ei) {
- init(base, si, ei);
- }
-
- void Interval::init(BSONObj base, bool si, bool ei) {
- verify(base.nFields() >= 2);
-
- _intervalData = base.getOwned();
- BSONObjIterator it(_intervalData);
- start = it.next();
- end = it.next();
- startInclusive = si;
- endInclusive = ei;
+using std::string;
+
+Interval::Interval()
+ : _intervalData(BSONObj()),
+ start(BSONElement()),
+ startInclusive(false),
+ end(BSONElement()),
+ endInclusive(false) {}
+
+Interval::Interval(BSONObj base, bool si, bool ei) {
+ init(base, si, ei);
+}
+
+void Interval::init(BSONObj base, bool si, bool ei) {
+ verify(base.nFields() >= 2);
+
+ _intervalData = base.getOwned();
+ BSONObjIterator it(_intervalData);
+ start = it.next();
+ end = it.next();
+ startInclusive = si;
+ endInclusive = ei;
+}
+
+bool Interval::isEmpty() const {
+ return _intervalData.nFields() == 0;
+}
+
+bool Interval::isPoint() const {
+ return startInclusive && endInclusive && 0 == start.woCompare(end, false);
+}
+
+bool Interval::isNull() const {
+ return (!startInclusive || !endInclusive) && 0 == start.woCompare(end, false);
+}
+
+//
+// Comparison
+//
+
+bool Interval::equals(const Interval& other) const {
+ if (this->startInclusive != other.startInclusive) {
+ return false;
}
- bool Interval::isEmpty() const {
- return _intervalData.nFields() == 0;
+ if (this->endInclusive != other.endInclusive) {
+ return false;
}
- bool Interval::isPoint() const {
- return startInclusive && endInclusive && 0 == start.woCompare(end, false);
+ int res = this->start.woCompare(other.start, false);
+ if (res != 0) {
+ return false;
}
- bool Interval::isNull() const {
- return (!startInclusive || !endInclusive) && 0 == start.woCompare(end, false);
+ res = this->end.woCompare(other.end, false);
+ if (res != 0) {
+ return false;
}
- //
- // Comparison
- //
-
- bool Interval::equals(const Interval& other) const {
- if (this->startInclusive != other.startInclusive) {
- return false;
- }
+ return true;
+}
- if (this->endInclusive != other.endInclusive) {
- return false;
- }
+bool Interval::intersects(const Interval& other) const {
+ int res = this->start.woCompare(other.end, false);
+ if (res > 0) {
+ return false;
+ } else if (res == 0 && (!this->startInclusive || !other.endInclusive)) {
+ return false;
+ }
- int res = this->start.woCompare(other.start, false);
- if (res != 0) {
- return false;
- }
+ res = other.start.woCompare(this->end, false);
+ if (res > 0) {
+ return false;
+ } else if (res == 0 && (!other.startInclusive || !this->endInclusive)) {
+ return false;
+ }
- res = this->end.woCompare(other.end, false);
- if (res != 0) {
- return false;
- }
+ return true;
+}
- return true;
+bool Interval::within(const Interval& other) const {
+ int res = this->start.woCompare(other.start, false);
+ if (res < 0) {
+ return false;
+ } else if (res == 0 && this->startInclusive && !other.startInclusive) {
+ return false;
}
- bool Interval::intersects(const Interval& other) const {
- int res = this->start.woCompare(other.end, false);
- if (res > 0) {
- return false;
- }
- else if (res == 0 && (!this->startInclusive || !other.endInclusive)) {
- return false;
- }
+ res = this->end.woCompare(other.end, false);
+ if (res > 0) {
+ return false;
+ } else if (res == 0 && this->endInclusive && !other.endInclusive) {
+ return false;
+ }
- res = other.start.woCompare(this->end, false);
- if (res > 0) {
- return false;
- }
- else if (res == 0 && (!other.startInclusive || !this->endInclusive)) {
- return false;
- }
+ return true;
+}
+/** Returns true if the start of 'this' comes before the start of 'other'. */
+bool Interval::precedes(const Interval& other) const {
+ int res = this->start.woCompare(other.start, false);
+ if (res < 0) {
+ return true;
+ } else if (res == 0 && this->startInclusive && !other.startInclusive) {
return true;
}
+ return false;
+}
- bool Interval::within(const Interval& other) const {
- int res = this->start.woCompare(other.start, false);
- if (res < 0) {
- return false;
- }
- else if (res == 0 && this->startInclusive && !other.startInclusive) {
- return false;
- }
-
- res = this->end.woCompare(other.end, false);
- if (res > 0) {
- return false;
- }
- else if (res == 0 && this->endInclusive && !other.endInclusive) {
- return false;
- }
- return true;
- }
+Interval::IntervalComparison Interval::compare(const Interval& other) const {
+ //
+ // Intersect cases
+ //
- /** Returns true if the start of comes before the start of other */
- bool Interval::precedes(const Interval& other) const {
- int res = this->start.woCompare(other.start, false);
- if (res < 0) {
- return true;
+ if (this->intersects(other)) {
+ if (this->equals(other)) {
+ return INTERVAL_EQUALS;
}
- else if (res == 0 && this->startInclusive && !other.startInclusive) {
- return true;
+ if (this->within(other)) {
+ return INTERVAL_WITHIN;
}
- return false;
- }
-
-
- Interval::IntervalComparison Interval::compare(const Interval& other) const {
- //
- // Intersect cases
- //
-
- if (this->intersects(other)) {
- if (this->equals(other)) {
- return INTERVAL_EQUALS;
- }
- if (this->within(other)) {
- return INTERVAL_WITHIN;
- }
- if (other.within(*this)) {
- return INTERVAL_CONTAINS;
- }
- if (this->precedes(other)) {
- return INTERVAL_OVERLAPS_BEFORE;
- }
- return INTERVAL_OVERLAPS_AFTER;
+ if (other.within(*this)) {
+ return INTERVAL_CONTAINS;
}
-
- //
- // Non-intersect cases
- //
-
if (this->precedes(other)) {
- // It's not possible for both endInclusive and other.startInclusive to be true because
- // the bounds would intersect. Refer to section on "Intersect cases" above.
- if ((endInclusive || other.startInclusive) && 0 == end.woCompare(other.start, false)) {
- return INTERVAL_PRECEDES_COULD_UNION;
- }
- return INTERVAL_PRECEDES;
+ return INTERVAL_OVERLAPS_BEFORE;
}
-
- return INTERVAL_SUCCEEDS;
+ return INTERVAL_OVERLAPS_AFTER;
}
//
- // Mutation: Union and Intersection
+ // Non-intersect cases
//
- void Interval::intersect(const Interval& other, IntervalComparison cmp) {
- if (cmp == INTERVAL_UNKNOWN) {
- cmp = this->compare(other);
+ if (this->precedes(other)) {
+ // It's not possible for both endInclusive and other.startInclusive to be true because
+ // the bounds would intersect. Refer to section on "Intersect cases" above.
+ if ((endInclusive || other.startInclusive) && 0 == end.woCompare(other.start, false)) {
+ return INTERVAL_PRECEDES_COULD_UNION;
}
+ return INTERVAL_PRECEDES;
+ }
- BSONObjBuilder builder;
- switch (cmp) {
+ return INTERVAL_SUCCEEDS;
+}
+//
+// Mutation: Union and Intersection
+//
+
+void Interval::intersect(const Interval& other, IntervalComparison cmp) {
+ if (cmp == INTERVAL_UNKNOWN) {
+ cmp = this->compare(other);
+ }
+
+ BSONObjBuilder builder;
+ switch (cmp) {
case INTERVAL_EQUALS:
case INTERVAL_WITHIN:
break;
@@ -220,17 +217,16 @@ namespace mongo {
default:
verify(false);
- }
}
+}
- void Interval::combine(const Interval& other, IntervalComparison cmp) {
- if (cmp == INTERVAL_UNKNOWN) {
- cmp = this->compare(other);
- }
-
- BSONObjBuilder builder;
- switch (cmp) {
+void Interval::combine(const Interval& other, IntervalComparison cmp) {
+ if (cmp == INTERVAL_UNKNOWN) {
+ cmp = this->compare(other);
+ }
+ BSONObjBuilder builder;
+ switch (cmp) {
case INTERVAL_EQUALS:
case INTERVAL_CONTAINS:
break;
@@ -257,62 +253,62 @@ namespace mongo {
default:
verify(false);
- }
- }
-
- void Interval::reverse() {
- std::swap(start, end);
- std::swap(startInclusive, endInclusive);
}
+}
- //
- // Debug info
- //
+void Interval::reverse() {
+ std::swap(start, end);
+ std::swap(startInclusive, endInclusive);
+}
- // static
- string Interval::cmpstr(IntervalComparison c) {
- if (c == INTERVAL_EQUALS) {
- return "INTERVAL_EQUALS";
- }
+//
+// Debug info
+//
- // 'this' contains the other interval.
- if (c == INTERVAL_CONTAINS) {
- return "INTERVAL_CONTAINS";
- }
+// static
+string Interval::cmpstr(IntervalComparison c) {
+ if (c == INTERVAL_EQUALS) {
+ return "INTERVAL_EQUALS";
+ }
- // 'this' is contained by the other interval.
- if (c == INTERVAL_WITHIN) {
- return "INTERVAL_WITHIN";
- }
+ // 'this' contains the other interval.
+ if (c == INTERVAL_CONTAINS) {
+ return "INTERVAL_CONTAINS";
+ }
- // The two intervals intersect and 'this' is before the other interval.
- if (c == INTERVAL_OVERLAPS_BEFORE) {
- return "INTERVAL_OVERLAPS_BEFORE";
- }
+ // 'this' is contained by the other interval.
+ if (c == INTERVAL_WITHIN) {
+ return "INTERVAL_WITHIN";
+ }
- // The two intervals intersect and 'this is after the other interval.
- if (c == INTERVAL_OVERLAPS_AFTER) {
- return "INTERVAL_OVERLAPS_AFTER";
- }
+ // The two intervals intersect and 'this' is before the other interval.
+ if (c == INTERVAL_OVERLAPS_BEFORE) {
+ return "INTERVAL_OVERLAPS_BEFORE";
+ }
- // There is no intersection.
- if (c == INTERVAL_PRECEDES) {
- return "INTERVAL_PRECEDES";
- }
+ // The two intervals intersect and 'this' is after the other interval.
+ if (c == INTERVAL_OVERLAPS_AFTER) {
+ return "INTERVAL_OVERLAPS_AFTER";
+ }
- if (c == INTERVAL_PRECEDES_COULD_UNION) {
- return "INTERVAL_PRECEDES_COULD_UNION";
- }
+ // There is no intersection.
+ if (c == INTERVAL_PRECEDES) {
+ return "INTERVAL_PRECEDES";
+ }
- if (c == INTERVAL_SUCCEEDS) {
- return "INTERVAL_SUCCEEDS";
- }
+ if (c == INTERVAL_PRECEDES_COULD_UNION) {
+ return "INTERVAL_PRECEDES_COULD_UNION";
+ }
- if (c == INTERVAL_UNKNOWN) {
- return "INTERVAL_UNKNOWN";
- }
+ if (c == INTERVAL_SUCCEEDS) {
+ return "INTERVAL_SUCCEEDS";
+ }
- return "NO IDEA DUDE";
+ if (c == INTERVAL_UNKNOWN) {
+ return "INTERVAL_UNKNOWN";
}
-} // namespace mongo
+ return "NO IDEA DUDE";
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/query/interval.h b/src/mongo/db/query/interval.h
index e4f82da8088..88309d33e05 100644
--- a/src/mongo/db/query/interval.h
+++ b/src/mongo/db/query/interval.h
@@ -33,164 +33,161 @@
namespace mongo {
- /** A range of values for one field. */
- struct Interval {
-
- // No BSONValue means we have to keep a BSONObj and pointers (BSONElement) into it.
- // 'start' may not point at the first field in _intervalData.
- // 'end' may not point at the last field in _intervalData.
- // 'start' and 'end' may point at the same field.
- BSONObj _intervalData;
-
- // Start and End must be ordered according to the index order.
- BSONElement start;
- bool startInclusive;
-
- BSONElement end;
- bool endInclusive;
-
- /** Creates an empty interval */
- Interval();
-
- std::string toString() const {
- mongoutils::str::stream ss;
- if (startInclusive) {
- ss << "[";
- }
- else {
- ss << "(";
- }
- // false means omit the field name
- ss << start.toString(false);
- ss << ", ";
- ss << end.toString(false);
- if (endInclusive) {
- ss << "]";
- }
- else {
- ss << ")";
- }
- return ss;
+/** A range of values for one field. */
+struct Interval {
+ // No BSONValue means we have to keep a BSONObj and pointers (BSONElement) into it.
+ // 'start' may not point at the first field in _intervalData.
+ // 'end' may not point at the last field in _intervalData.
+ // 'start' and 'end' may point at the same field.
+ BSONObj _intervalData;
+
+ // Start and End must be ordered according to the index order.
+ BSONElement start;
+ bool startInclusive;
+
+ BSONElement end;
+ bool endInclusive;
+
+ /** Creates an empty interval */
+ Interval();
+
+ std::string toString() const {
+ mongoutils::str::stream ss;
+ if (startInclusive) {
+ ss << "[";
+ } else {
+ ss << "(";
}
+ // false means omit the field name
+ ss << start.toString(false);
+ ss << ", ";
+ ss << end.toString(false);
+ if (endInclusive) {
+ ss << "]";
+ } else {
+ ss << ")";
+ }
+ return ss;
+ }
- /**
- * Creates an interval that starts at the first field of 'base' and ends at the second
- * field of 'base'. (In other words, 'base' is a bsonobj with at least two elements, of
- * which we don't care about field names.)
- *
- * The interval's extremities are closed or not depending on whether
- * 'start'/'endIncluded' are true or not.
- */
- Interval(BSONObj base, bool startIncluded, bool endIncluded);
-
- /** Sets the current interval to the given values (see constructor) */
- void init(BSONObj base, bool startIncluded, bool endIncluded);
-
- /**
- * Returns true if an empty-constructed interval hasn't been init()-ialized yet
- */
- bool isEmpty() const;
-
- /**
- * Does this interval represent exactly one point?
- */
- bool isPoint() const;
-
- /**
- * Returns true if start is same as end and interval is open at either end
- */
- bool isNull() const;
-
+ /**
+ * Creates an interval that starts at the first field of 'base' and ends at the second
+ * field of 'base'. (In other words, 'base' is a bsonobj with at least two elements, of
+ * which we don't care about field names.)
+ *
+ * The interval's extremities are closed or not depending on whether
+ * 'startIncluded'/'endIncluded' are true or not.
+ */
+ Interval(BSONObj base, bool startIncluded, bool endIncluded);
+
+ /** Sets the current interval to the given values (see constructor) */
+ void init(BSONObj base, bool startIncluded, bool endIncluded);
+
+ /**
+ * Returns true if an empty-constructed interval hasn't been init()-ialized yet
+ */
+ bool isEmpty() const;
+
+ /**
+ * Does this interval represent exactly one point?
+ */
+ bool isPoint() const;
+
+ /**
+ * Returns true if start is same as end and interval is open at either end
+ */
+ bool isNull() const;
+
+ //
+ // Comparison with other intervals
+ //
+
+ /**
+ * Returns true if 'this' is the same interval as 'other'
+ */
+ bool equals(const Interval& other) const;
+
+ /**
+ * Returns true if 'this' overlaps with 'other', false otherwise.
+ */
+ bool intersects(const Interval& rhs) const;
+
+ /**
+ * Returns true if 'this' is within 'other', false otherwise.
+ */
+ bool within(const Interval& other) const;
+
+ /**
+ * Returns true if 'this' is located before 'other', false otherwise.
+ */
+ bool precedes(const Interval& other) const;
+
+ /** Returns how 'this' compares to 'other' */
+ enum IntervalComparison {
//
- // Comparison with other intervals
+ // There is some intersection.
//
- /**
- * Returns true if 'this' is the same interval as 'other'
- */
- bool equals(const Interval& other) const;
-
- /**
- * Returns true if 'this' overlaps with 'other', false otherwise.
- */
- bool intersects(const Interval& rhs) const;
+ // The two intervals are *exactly* equal.
+ INTERVAL_EQUALS,
- /**
- * Returns true if 'this' is within 'other', false otherwise.
- */
- bool within(const Interval& other) const;
+ // 'this' contains the other interval.
+ INTERVAL_CONTAINS,
- /**
- * Returns true if 'this' is located before 'other', false otherwise.
- */
- bool precedes(const Interval& other) const;
+ // 'this' is contained by the other interval.
+ INTERVAL_WITHIN,
- /** Returns how 'this' compares to 'other' */
- enum IntervalComparison {
- //
- // There is some intersection.
- //
+ // The two intervals intersect and 'this' is before the other interval.
+ INTERVAL_OVERLAPS_BEFORE,
- // The two intervals are *exactly* equal.
- INTERVAL_EQUALS,
+ // The two intervals intersect and 'this' is after the other interval.
+ INTERVAL_OVERLAPS_AFTER,
- // 'this' contains the other interval.
- INTERVAL_CONTAINS,
-
- // 'this' is contained by the other interval.
- INTERVAL_WITHIN,
-
- // The two intervals intersect and 'this' is before the other interval.
- INTERVAL_OVERLAPS_BEFORE,
+ //
+ // There is no intersection.
+ //
- // The two intervals intersect and 'this is after the other interval.
- INTERVAL_OVERLAPS_AFTER,
+ INTERVAL_PRECEDES,
- //
- // There is no intersection.
- //
+ // This happens if we have [a,b) [b,c]
+ INTERVAL_PRECEDES_COULD_UNION,
- INTERVAL_PRECEDES,
+ INTERVAL_SUCCEEDS,
- // This happens if we have [a,b) [b,c]
- INTERVAL_PRECEDES_COULD_UNION,
+ INTERVAL_UNKNOWN
+ };
- INTERVAL_SUCCEEDS,
+ IntervalComparison compare(const Interval& other) const;
- INTERVAL_UNKNOWN
- };
+ /**
+ * toString for IntervalComparison
+ */
+ static std::string cmpstr(IntervalComparison c);
- IntervalComparison compare(const Interval& other) const;
+ //
+ // Mutation of intervals
+ //
- /**
- * toString for IntervalComparison
- */
- static std::string cmpstr(IntervalComparison c);
+ /**
+ * Swap start and end points of interval.
+ */
+ void reverse();
- //
- // Mutation of intervals
- //
+ /**
+ * Updates 'this' with the intersection of 'this' and 'other'. If 'this' and 'other'
+ * have been compare()d before, that result can be optionally passed in 'cmp'
+ */
+ void intersect(const Interval& other, IntervalComparison cmp = INTERVAL_UNKNOWN);
- /**
- * Swap start and end points of interval.
- */
- void reverse();
-
- /**
- * Updates 'this' with the intersection of 'this' and 'other'. If 'this' and 'other'
- * have been compare()d before, that result can be optionally passed in 'cmp'
- */
- void intersect(const Interval& other, IntervalComparison cmp = INTERVAL_UNKNOWN);
-
- /**
- * Updates 'this" with the union of 'this' and 'other'. If 'this' and 'other' have
- * been compare()d before, that result can be optionaly passed in 'cmp'.
- */
- void combine(const Interval& other, IntervalComparison cmp = INTERVAL_UNKNOWN);
- };
+ /**
+ * Updates 'this" with the union of 'this' and 'other'. If 'this' and 'other' have
+ * been compare()d before, that result can be optionaly passed in 'cmp'.
+ */
+ void combine(const Interval& other, IntervalComparison cmp = INTERVAL_UNKNOWN);
+};
- inline bool operator==(const Interval& lhs, const Interval& rhs) {
- return lhs.compare(rhs) == Interval::INTERVAL_EQUALS;
- }
+inline bool operator==(const Interval& lhs, const Interval& rhs) {
+ return lhs.compare(rhs) == Interval::INTERVAL_EQUALS;
+}
-} // namespace mongo
+} // namespace mongo
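The subtle value in the comparison taxonomy above is INTERVAL_PRECEDES_COULD_UNION: the two intervals are disjoint but share an endpoint that one side includes, so they can be merged into a single interval. A minimal sketch of compare() feeding combine(), assuming the mongo headers:

    Interval left(BSON("" << 0 << "" << 10), true, false);    // [0, 10)
    Interval right(BSON("" << 10 << "" << 20), true, true);   // [10, 20]

    Interval::IntervalComparison cmp = left.compare(right);   // INTERVAL_PRECEDES_COULD_UNION
    left.combine(right, cmp);                                  // 'left' now spans [0, 20]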
diff --git a/src/mongo/db/query/interval_test.cpp b/src/mongo/db/query/interval_test.cpp
index 62c4b815e53..d9e829a254b 100644
--- a/src/mongo/db/query/interval_test.cpp
+++ b/src/mongo/db/query/interval_test.cpp
@@ -33,264 +33,264 @@
namespace {
- using mongo::BSONObj;
- using mongo::Interval;
+using mongo::BSONObj;
+using mongo::Interval;
- //
- // Comparison
- //
+//
+// Comparison
+//
- TEST(Comparison, Equality) {
- Interval a(BSON("" << 0 << "" << 10), true, true);
- ASSERT_EQUALS(a.compare(a), Interval::INTERVAL_EQUALS);
+TEST(Comparison, Equality) {
+ Interval a(BSON("" << 0 << "" << 10), true, true);
+ ASSERT_EQUALS(a.compare(a), Interval::INTERVAL_EQUALS);
- Interval b(BSON("" << 0 << "" << 10), true, false);
- ASSERT_NOT_EQUALS(a.compare(b), Interval::INTERVAL_EQUALS);
+ Interval b(BSON("" << 0 << "" << 10), true, false);
+ ASSERT_NOT_EQUALS(a.compare(b), Interval::INTERVAL_EQUALS);
- Interval c(BSON("" << 0 << "" << 10), false, true);
- ASSERT_NOT_EQUALS(a.compare(c), Interval::INTERVAL_EQUALS);
+ Interval c(BSON("" << 0 << "" << 10), false, true);
+ ASSERT_NOT_EQUALS(a.compare(c), Interval::INTERVAL_EQUALS);
- Interval d(BSON("" << 0 << "" << 11), true, true);
- ASSERT_NOT_EQUALS(a.compare(d), Interval::INTERVAL_EQUALS);
+ Interval d(BSON("" << 0 << "" << 11), true, true);
+ ASSERT_NOT_EQUALS(a.compare(d), Interval::INTERVAL_EQUALS);
- Interval e(BSON("" << 1 << "" << 10), true, true);
- ASSERT_NOT_EQUALS(a.compare(e), Interval::INTERVAL_EQUALS);
- }
+ Interval e(BSON("" << 1 << "" << 10), true, true);
+ ASSERT_NOT_EQUALS(a.compare(e), Interval::INTERVAL_EQUALS);
+}
- TEST(Comparison, Contains) {
- Interval a(BSON("" << 0 << "" << 10), true, true);
- Interval b(BSON("" << 1 << "" << 9), true, true);
- ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_CONTAINS);
+TEST(Comparison, Contains) {
+ Interval a(BSON("" << 0 << "" << 10), true, true);
+ Interval b(BSON("" << 1 << "" << 9), true, true);
+ ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_CONTAINS);
- Interval c(BSON("" << 0 << "" << 10), true, false);
- ASSERT_EQUALS(a.compare(c), Interval::INTERVAL_CONTAINS);
+ Interval c(BSON("" << 0 << "" << 10), true, false);
+ ASSERT_EQUALS(a.compare(c), Interval::INTERVAL_CONTAINS);
- Interval d(BSON("" << 0 << "" << 10), false, true);
- ASSERT_EQUALS(a.compare(d), Interval::INTERVAL_CONTAINS);
+ Interval d(BSON("" << 0 << "" << 10), false, true);
+ ASSERT_EQUALS(a.compare(d), Interval::INTERVAL_CONTAINS);
- Interval e(BSON("" << 0 << "" << 11), false, true);
- ASSERT_NOT_EQUALS(a.compare(e), Interval::INTERVAL_CONTAINS);
- }
+ Interval e(BSON("" << 0 << "" << 11), false, true);
+ ASSERT_NOT_EQUALS(a.compare(e), Interval::INTERVAL_CONTAINS);
+}
- TEST(Comparison, Within) {
- Interval a(BSON("" << 0 << "" << 10), true, true);
- ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_WITHIN);
+TEST(Comparison, Within) {
+ Interval a(BSON("" << 0 << "" << 10), true, true);
+ ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_WITHIN);
- Interval b(BSON("" << 1 << "" << 9), true, true);
- ASSERT_EQUALS(b.compare(a), Interval::INTERVAL_WITHIN);
+ Interval b(BSON("" << 1 << "" << 9), true, true);
+ ASSERT_EQUALS(b.compare(a), Interval::INTERVAL_WITHIN);
- Interval c(BSON("" << 0 << "" << 10), true, false);
- ASSERT_EQUALS(c.compare(a), Interval::INTERVAL_WITHIN);
+ Interval c(BSON("" << 0 << "" << 10), true, false);
+ ASSERT_EQUALS(c.compare(a), Interval::INTERVAL_WITHIN);
- Interval d(BSON("" << 0 << "" << 10), false, true);
- ASSERT_EQUALS(d.compare(a), Interval::INTERVAL_WITHIN);
+ Interval d(BSON("" << 0 << "" << 10), false, true);
+ ASSERT_EQUALS(d.compare(a), Interval::INTERVAL_WITHIN);
- Interval e(BSON("" << 0 << "" << 11), false, true);
- ASSERT_NOT_EQUALS(e.compare(a), Interval::INTERVAL_CONTAINS);
- }
+ Interval e(BSON("" << 0 << "" << 11), false, true);
+ ASSERT_NOT_EQUALS(e.compare(a), Interval::INTERVAL_CONTAINS);
+}
- TEST(Comparison, OverlapsBefore) {
- Interval a(BSON("" << 1 << "" << 9), true, false);
- ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_OVERLAPS_BEFORE);
+TEST(Comparison, OverlapsBefore) {
+ Interval a(BSON("" << 1 << "" << 9), true, false);
+ ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_OVERLAPS_BEFORE);
- Interval b(BSON("" << 1 << "" << 9), false, true);
- ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_OVERLAPS_BEFORE);
+ Interval b(BSON("" << 1 << "" << 9), false, true);
+ ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_OVERLAPS_BEFORE);
- Interval c(BSON("" << 1 << "" << 9), false, false);
- ASSERT_NOT_EQUALS(a.compare(c), Interval::INTERVAL_OVERLAPS_BEFORE);
+ Interval c(BSON("" << 1 << "" << 9), false, false);
+ ASSERT_NOT_EQUALS(a.compare(c), Interval::INTERVAL_OVERLAPS_BEFORE);
- Interval d(BSON("" << 2 << "" << 10), true, true);
- ASSERT_EQUALS(a.compare(d), Interval::INTERVAL_OVERLAPS_BEFORE);
+ Interval d(BSON("" << 2 << "" << 10), true, true);
+ ASSERT_EQUALS(a.compare(d), Interval::INTERVAL_OVERLAPS_BEFORE);
- Interval e(BSON("" << 0 << "" << 9), true, false);
- ASSERT_NOT_EQUALS(a.compare(e), Interval::INTERVAL_OVERLAPS_BEFORE);
+ Interval e(BSON("" << 0 << "" << 9), true, false);
+ ASSERT_NOT_EQUALS(a.compare(e), Interval::INTERVAL_OVERLAPS_BEFORE);
- Interval f(BSON("" << 0 << "" << 8), true, false);
- ASSERT_NOT_EQUALS(a.compare(f), Interval::INTERVAL_OVERLAPS_BEFORE);
- }
+ Interval f(BSON("" << 0 << "" << 8), true, false);
+ ASSERT_NOT_EQUALS(a.compare(f), Interval::INTERVAL_OVERLAPS_BEFORE);
+}
- TEST(Comparison, OverlapsAfter) {
- Interval a(BSON("" << 1 << "" << 9), false, true);
- ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_OVERLAPS_AFTER);
+TEST(Comparison, OverlapsAfter) {
+ Interval a(BSON("" << 1 << "" << 9), false, true);
+ ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_OVERLAPS_AFTER);
- Interval b(BSON("" << 1 << "" << 9), true, false);
- ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_OVERLAPS_AFTER);
+ Interval b(BSON("" << 1 << "" << 9), true, false);
+ ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_OVERLAPS_AFTER);
- Interval c(BSON("" << 1 << "" << 9), true, true);
- ASSERT_NOT_EQUALS(a.compare(c), Interval::INTERVAL_OVERLAPS_AFTER);
+ Interval c(BSON("" << 1 << "" << 9), true, true);
+ ASSERT_NOT_EQUALS(a.compare(c), Interval::INTERVAL_OVERLAPS_AFTER);
- Interval d(BSON("" << 2 << "" << 10), true, true);
- ASSERT_NOT_EQUALS(a.compare(d), Interval::INTERVAL_OVERLAPS_AFTER);
+ Interval d(BSON("" << 2 << "" << 10), true, true);
+ ASSERT_NOT_EQUALS(a.compare(d), Interval::INTERVAL_OVERLAPS_AFTER);
- Interval e(BSON("" << 0 << "" << 9), true, false);
- ASSERT_EQUALS(a.compare(e), Interval::INTERVAL_OVERLAPS_AFTER);
- }
+ Interval e(BSON("" << 0 << "" << 9), true, false);
+ ASSERT_EQUALS(a.compare(e), Interval::INTERVAL_OVERLAPS_AFTER);
+}
- TEST(Comparison, Precedes) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_PRECEDES);
+TEST(Comparison, Precedes) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_PRECEDES);
- Interval b(BSON("" << 0 << "" << 10), true, true);
- ASSERT_NOT_EQUALS(b.compare(a), Interval::INTERVAL_PRECEDES);
+ Interval b(BSON("" << 0 << "" << 10), true, true);
+ ASSERT_NOT_EQUALS(b.compare(a), Interval::INTERVAL_PRECEDES);
- Interval c(BSON("" << 0 << "" << 10), true, false);
- ASSERT_EQUALS(c.compare(a), Interval::INTERVAL_PRECEDES_COULD_UNION);
+ Interval c(BSON("" << 0 << "" << 10), true, false);
+ ASSERT_EQUALS(c.compare(a), Interval::INTERVAL_PRECEDES_COULD_UNION);
- Interval d(BSON("" << 0 << "" << 9), true, true);
- ASSERT_EQUALS(d.compare(a), Interval::INTERVAL_PRECEDES);
+ Interval d(BSON("" << 0 << "" << 9), true, true);
+ ASSERT_EQUALS(d.compare(a), Interval::INTERVAL_PRECEDES);
- Interval e(BSON("" << 5 << "" << 15), true, true);
- ASSERT_NOT_EQUALS(e.compare(a), Interval::INTERVAL_PRECEDES);
+ Interval e(BSON("" << 5 << "" << 15), true, true);
+ ASSERT_NOT_EQUALS(e.compare(a), Interval::INTERVAL_PRECEDES);
- Interval f(BSON("" << 5 << "" << 20), true, false);
- ASSERT_NOT_EQUALS(f.compare(a), Interval::INTERVAL_PRECEDES);
- }
+ Interval f(BSON("" << 5 << "" << 20), true, false);
+ ASSERT_NOT_EQUALS(f.compare(a), Interval::INTERVAL_PRECEDES);
+}
- TEST(Comparison, PrecedesCouldUnion) {
- Interval a(BSON("" << 10 << "" << 20), false, true);
- ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_PRECEDES);
+TEST(Comparison, PrecedesCouldUnion) {
+ Interval a(BSON("" << 10 << "" << 20), false, true);
+ ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_PRECEDES);
- Interval b(BSON("" << 0 << "" << 10), true, false);
- ASSERT_EQUALS(b.compare(a), Interval::INTERVAL_PRECEDES);
+ Interval b(BSON("" << 0 << "" << 10), true, false);
+ ASSERT_EQUALS(b.compare(a), Interval::INTERVAL_PRECEDES);
- Interval c(BSON("" << 0 << "" << 10), true, true);
- ASSERT_EQUALS(c.compare(a), Interval::INTERVAL_PRECEDES_COULD_UNION);
- }
+ Interval c(BSON("" << 0 << "" << 10), true, true);
+ ASSERT_EQUALS(c.compare(a), Interval::INTERVAL_PRECEDES_COULD_UNION);
+}
- TEST(Comparison, Succeds) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_SUCCEEDS);
+TEST(Comparison, Succeds) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_SUCCEEDS);
- Interval b(BSON("" << 20 << "" << 30), true, true);
- ASSERT_NOT_EQUALS(b.compare(a), Interval::INTERVAL_SUCCEEDS);
-
- Interval c(BSON("" << 20 << "" << 30), false, true);
- ASSERT_EQUALS(c.compare(a), Interval::INTERVAL_SUCCEEDS);
-
- Interval d(BSON("" << 21 << "" << 30), true, true);
- ASSERT_EQUALS(d.compare(a), Interval::INTERVAL_SUCCEEDS);
-
- Interval e(BSON("" << 15 << "" << 30), true, true);
- ASSERT_NOT_EQUALS(e.compare(a), Interval::INTERVAL_SUCCEEDS);
- }
-
- //
- // intersection
- //
-
- TEST(Intersection, Equals) {
- BSONObj itv = BSON("" << 10 << "" << 20);
- Interval a(itv, true, true);
- a.intersect(a);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Intersection, Contains) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- BSONObj itv = BSON("" << 11 << "" << 19);
- Interval b(itv, true, true);
- a.intersect(b);
- ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Intersection, Within) {
- BSONObj itv = BSON("" << 10 << "" << 20);
- Interval a(itv, true, true);
- Interval b(BSON("" << 9 << "" << 21), true, true);
- a.intersect(b);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Intersection, OverlapsBefore) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 15 << "" << 25), true, true);
- a.intersect(b);
-
- BSONObj itv = BSON("" << 15 << "" << 20);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Intersection, OverlapsAfter) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 5 << "" << 15), true, true);
- a.intersect(b);
-
- BSONObj itv = BSON("" << 10 << "" << 15);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Intersection, Procedes) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 0 << "" << 5), true, true);
- a.intersect(b);
-
- ASSERT_TRUE(a.isEmpty());
- }
-
- TEST(Intersection, Succeds) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 25 << "" << 30), true, true);
- a.intersect(b);
-
- ASSERT_TRUE(a.isEmpty());
- }
-
- //
- // combine (union)
- //
-
- TEST(Union, Equals) {
- BSONObj itv = BSON("" << 10 << "" << 20);
- Interval a(itv, true, true);
- a.combine(a);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Union, Contains) {
- BSONObj itv = BSON("" << 10 << "" << 20);
- Interval a(itv, true, true);
- Interval b(BSON("" << 11 << "" << 19), true, true);
- a.combine(b);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Union, Within) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 9 << "" << 21), true, true);
- a.combine(b);
- ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Union, OverlapsBefore) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 15 << "" << 25), true, true);
- a.combine(b);
- BSONObj itv = BSON("" << 10 << "" << 25);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Union, OverlapsAfter) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 5 << "" << 15), true, true);
- a.combine(b);
- BSONObj itv = BSON("" << 5 << "" << 20);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Union, Precedes) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 20 << "" << 30), true, true);
- a.combine(b);
- BSONObj itv = BSON("" << 10 << "" << 30);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Union, Succeds) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 0 << "" << 5), true, true);
- a.combine(b);
- BSONObj itv = BSON("" << 0 << "" << 20);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
-} // unnamed namespace
+ Interval b(BSON("" << 20 << "" << 30), true, true);
+ ASSERT_NOT_EQUALS(b.compare(a), Interval::INTERVAL_SUCCEEDS);
+
+ Interval c(BSON("" << 20 << "" << 30), false, true);
+ ASSERT_EQUALS(c.compare(a), Interval::INTERVAL_SUCCEEDS);
+
+ Interval d(BSON("" << 21 << "" << 30), true, true);
+ ASSERT_EQUALS(d.compare(a), Interval::INTERVAL_SUCCEEDS);
+
+ Interval e(BSON("" << 15 << "" << 30), true, true);
+ ASSERT_NOT_EQUALS(e.compare(a), Interval::INTERVAL_SUCCEEDS);
+}
+
+//
+// intersection
+//
+
+TEST(Intersection, Equals) {
+ BSONObj itv = BSON("" << 10 << "" << 20);
+ Interval a(itv, true, true);
+ a.intersect(a);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Intersection, Contains) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ BSONObj itv = BSON("" << 11 << "" << 19);
+ Interval b(itv, true, true);
+ a.intersect(b);
+ ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Intersection, Within) {
+ BSONObj itv = BSON("" << 10 << "" << 20);
+ Interval a(itv, true, true);
+ Interval b(BSON("" << 9 << "" << 21), true, true);
+ a.intersect(b);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Intersection, OverlapsBefore) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 15 << "" << 25), true, true);
+ a.intersect(b);
+
+ BSONObj itv = BSON("" << 15 << "" << 20);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Intersection, OverlapsAfter) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 5 << "" << 15), true, true);
+ a.intersect(b);
+
+ BSONObj itv = BSON("" << 10 << "" << 15);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Intersection, Procedes) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 0 << "" << 5), true, true);
+ a.intersect(b);
+
+ ASSERT_TRUE(a.isEmpty());
+}
+
+TEST(Intersection, Succeds) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 25 << "" << 30), true, true);
+ a.intersect(b);
+
+ ASSERT_TRUE(a.isEmpty());
+}
+
+//
+// combine (union)
+//
+
+TEST(Union, Equals) {
+ BSONObj itv = BSON("" << 10 << "" << 20);
+ Interval a(itv, true, true);
+ a.combine(a);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Union, Contains) {
+ BSONObj itv = BSON("" << 10 << "" << 20);
+ Interval a(itv, true, true);
+ Interval b(BSON("" << 11 << "" << 19), true, true);
+ a.combine(b);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Union, Within) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 9 << "" << 21), true, true);
+ a.combine(b);
+ ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Union, OverlapsBefore) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 15 << "" << 25), true, true);
+ a.combine(b);
+ BSONObj itv = BSON("" << 10 << "" << 25);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Union, OverlapsAfter) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 5 << "" << 15), true, true);
+ a.combine(b);
+ BSONObj itv = BSON("" << 5 << "" << 20);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Union, Precedes) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 20 << "" << 30), true, true);
+ a.combine(b);
+ BSONObj itv = BSON("" << 10 << "" << 30);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Union, Succeds) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 0 << "" << 5), true, true);
+ a.combine(b);
+ BSONObj itv = BSON("" << 0 << "" << 20);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+} // unnamed namespace
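A minimal sketch of the Interval semantics these tests pin down (the hunk above only reindents them, and the sketch assumes nothing beyond the Interval class and BSON builder the tests already use): the two trailing bool constructor arguments are start/end inclusivity, compare() classifies how the receiver relates to its argument, and intersect()/combine() mutate the receiver in place.

    // Sketch only -- mirrors assertions made by the tests above.
    Interval a(BSON("" << 10 << "" << 20), true, true);   // [10, 20]
    Interval b(BSON("" << 0 << "" << 10), true, false);   // [0, 10), adjacent to a
    // b.compare(a) == Interval::INTERVAL_PRECEDES_COULD_UNION   (Comparison, Precedes)

    Interval x(BSON("" << 10 << "" << 20), true, true);
    Interval y(BSON("" << 15 << "" << 25), true, true);
    x.intersect(y);   // x is now [15, 20]                       (Intersection, OverlapsBefore)

    Interval p(BSON("" << 10 << "" << 20), true, true);
    Interval q(BSON("" << 20 << "" << 30), true, true);
    p.combine(q);     // p is now [10, 30]                       (Union, Precedes)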
diff --git a/src/mongo/db/query/lite_parsed_query.cpp b/src/mongo/db/query/lite_parsed_query.cpp
index 1b2ce795eb0..6b8a25abe48 100644
--- a/src/mongo/db/query/lite_parsed_query.cpp
+++ b/src/mongo/db/query/lite_parsed_query.cpp
@@ -41,922 +41,884 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
+using std::unique_ptr;
+using std::string;
- const string LiteParsedQuery::cmdOptionMaxTimeMS("maxTimeMS");
- const string LiteParsedQuery::queryOptionMaxTimeMS("$maxTimeMS");
+const string LiteParsedQuery::cmdOptionMaxTimeMS("maxTimeMS");
+const string LiteParsedQuery::queryOptionMaxTimeMS("$maxTimeMS");
- const string LiteParsedQuery::metaTextScore("textScore");
- const string LiteParsedQuery::metaGeoNearDistance("geoNearDistance");
- const string LiteParsedQuery::metaGeoNearPoint("geoNearPoint");
- const string LiteParsedQuery::metaRecordId("recordId");
- const string LiteParsedQuery::metaIndexKey("indexKey");
+const string LiteParsedQuery::metaTextScore("textScore");
+const string LiteParsedQuery::metaGeoNearDistance("geoNearDistance");
+const string LiteParsedQuery::metaGeoNearPoint("geoNearPoint");
+const string LiteParsedQuery::metaRecordId("recordId");
+const string LiteParsedQuery::metaIndexKey("indexKey");
- const int LiteParsedQuery::kDefaultBatchSize = 101;
+const int LiteParsedQuery::kDefaultBatchSize = 101;
namespace {
- Status checkFieldType(const BSONElement& el, BSONType type) {
- if (type != el.type()) {
- str::stream ss;
- ss << "Failed to parse: " << el.toString() << ". "
- << "'" << el.fieldName() << "' field must be of BSON type "
- << typeName(type) << ".";
- return Status(ErrorCodes::FailedToParse, ss);
- }
+Status checkFieldType(const BSONElement& el, BSONType type) {
+ if (type != el.type()) {
+ str::stream ss;
+ ss << "Failed to parse: " << el.toString() << ". "
+ << "'" << el.fieldName() << "' field must be of BSON type " << typeName(type) << ".";
+ return Status(ErrorCodes::FailedToParse, ss);
+ }
- return Status::OK();
- }
-
- // Find command field names.
- const char kCmdName[] = "find";
- const char kFilterField[] = "filter";
- const char kProjectionField[] = "projection";
- const char kSortField[] = "sort";
- const char kHintField[] = "hint";
- const char kSkipField[] = "skip";
- const char kLimitField[] = "limit";
- const char kBatchSizeField[] = "batchSize";
- const char kSingleBatchField[] = "singleBatch";
- const char kCommentField[] = "comment";
- const char kMaxScanField[] = "maxScan";
- const char kMaxField[] = "max";
- const char kMinField[] = "min";
- const char kReturnKeyField[] = "returnKey";
- const char kShowRecordIdField[] = "showRecordId";
- const char kSnapshotField[] = "snapshot";
- const char kTailableField[] = "tailable";
- const char kOplogReplayField[] = "oplogReplay";
- const char kNoCursorTimeoutField[] = "noCursorTimeout";
- const char kAwaitDataField[] = "awaitData";
- const char kPartialField[] = "partial";
-
-} // namespace
-
- // static
- StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(
- const NamespaceString& nss,
- const BSONObj& cmdObj,
- bool isExplain) {
-
- unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery());
- pq->_ns = nss.ns();
- pq->_fromCommand = true;
- pq->_explain = isExplain;
-
- // Parse the command BSON by looping through one element at a time.
- BSONObjIterator it(cmdObj);
- while (it.more()) {
- BSONElement el = it.next();
- const char* fieldName = el.fieldName();
- if (str::equals(fieldName, kCmdName)) {
- Status status = checkFieldType(el, String);
- if (!status.isOK()) {
- return status;
- }
+ return Status::OK();
+}
+
+// Find command field names.
+const char kCmdName[] = "find";
+const char kFilterField[] = "filter";
+const char kProjectionField[] = "projection";
+const char kSortField[] = "sort";
+const char kHintField[] = "hint";
+const char kSkipField[] = "skip";
+const char kLimitField[] = "limit";
+const char kBatchSizeField[] = "batchSize";
+const char kSingleBatchField[] = "singleBatch";
+const char kCommentField[] = "comment";
+const char kMaxScanField[] = "maxScan";
+const char kMaxField[] = "max";
+const char kMinField[] = "min";
+const char kReturnKeyField[] = "returnKey";
+const char kShowRecordIdField[] = "showRecordId";
+const char kSnapshotField[] = "snapshot";
+const char kTailableField[] = "tailable";
+const char kOplogReplayField[] = "oplogReplay";
+const char kNoCursorTimeoutField[] = "noCursorTimeout";
+const char kAwaitDataField[] = "awaitData";
+const char kPartialField[] = "partial";
+
+} // namespace
+
+// static
+StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(
+ const NamespaceString& nss, const BSONObj& cmdObj, bool isExplain) {
+ unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery());
+ pq->_ns = nss.ns();
+ pq->_fromCommand = true;
+ pq->_explain = isExplain;
+
+ // Parse the command BSON by looping through one element at a time.
+ BSONObjIterator it(cmdObj);
+ while (it.more()) {
+ BSONElement el = it.next();
+ const char* fieldName = el.fieldName();
+ if (str::equals(fieldName, kCmdName)) {
+ Status status = checkFieldType(el, String);
+ if (!status.isOK()) {
+ return status;
+ }
+ } else if (str::equals(fieldName, kFilterField)) {
+ Status status = checkFieldType(el, Object);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ pq->_filter = el.Obj().getOwned();
+ } else if (str::equals(fieldName, kProjectionField)) {
+ Status status = checkFieldType(el, Object);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ pq->_proj = el.Obj().getOwned();
+ } else if (str::equals(fieldName, kSortField)) {
+ Status status = checkFieldType(el, Object);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ // Sort document normalization.
+ BSONObj sort = el.Obj().getOwned();
+ if (!isValidSortOrder(sort)) {
+ return Status(ErrorCodes::BadValue, "bad sort specification");
+ }
+
+ pq->_sort = sort;
+ } else if (str::equals(fieldName, kHintField)) {
+ BSONObj hintObj;
+ if (Object == el.type()) {
+ hintObj = cmdObj["hint"].Obj().getOwned();
+ } else if (String == el.type()) {
+ hintObj = el.wrap("$hint");
+ } else {
+ return Status(ErrorCodes::FailedToParse,
+ "hint must be either a string or nested object");
}
- else if (str::equals(fieldName, kFilterField)) {
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
- pq->_filter = el.Obj().getOwned();
+ pq->_hint = hintObj;
+ } else if (str::equals(fieldName, kSkipField)) {
+ if (!el.isNumber()) {
+ str::stream ss;
+ ss << "Failed to parse: " << cmdObj.toString() << ". "
+ << "'skip' field must be numeric.";
+ return Status(ErrorCodes::FailedToParse, ss);
}
- else if (str::equals(fieldName, kProjectionField)) {
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
- pq->_proj = el.Obj().getOwned();
+ int skip = el.numberInt();
+ if (skip < 0) {
+ return Status(ErrorCodes::BadValue, "skip value must be non-negative");
}
- else if (str::equals(fieldName, kSortField)) {
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
- // Sort document normalization.
- BSONObj sort = el.Obj().getOwned();
- if (!isValidSortOrder(sort)) {
- return Status(ErrorCodes::BadValue, "bad sort specification");
- }
-
- pq->_sort = sort;
+ pq->_skip = skip;
+ } else if (str::equals(fieldName, kLimitField)) {
+ if (!el.isNumber()) {
+ str::stream ss;
+ ss << "Failed to parse: " << cmdObj.toString() << ". "
+ << "'limit' field must be numeric.";
+ return Status(ErrorCodes::FailedToParse, ss);
}
- else if (str::equals(fieldName, kHintField)) {
- BSONObj hintObj;
- if (Object == el.type()) {
- hintObj = cmdObj["hint"].Obj().getOwned();
- }
- else if (String == el.type()) {
- hintObj = el.wrap("$hint");
- }
- else {
- return Status(ErrorCodes::FailedToParse,
- "hint must be either a string or nested object");
- }
- pq->_hint = hintObj;
+ int limit = el.numberInt();
+ if (limit <= 0) {
+ return Status(ErrorCodes::BadValue, "limit value must be positive");
}
- else if (str::equals(fieldName, kSkipField)) {
- if (!el.isNumber()) {
- str::stream ss;
- ss << "Failed to parse: " << cmdObj.toString() << ". "
- << "'skip' field must be numeric.";
- return Status(ErrorCodes::FailedToParse, ss);
- }
- int skip = el.numberInt();
- if (skip < 0) {
- return Status(ErrorCodes::BadValue, "skip value must be non-negative");
- }
-
- pq->_skip = skip;
+ pq->_limit = limit;
+ } else if (str::equals(fieldName, kBatchSizeField)) {
+ if (!el.isNumber()) {
+ str::stream ss;
+ ss << "Failed to parse: " << cmdObj.toString() << ". "
+ << "'batchSize' field must be numeric.";
+ return Status(ErrorCodes::FailedToParse, ss);
}
- else if (str::equals(fieldName, kLimitField)) {
- if (!el.isNumber()) {
- str::stream ss;
- ss << "Failed to parse: " << cmdObj.toString() << ". "
- << "'limit' field must be numeric.";
- return Status(ErrorCodes::FailedToParse, ss);
- }
- int limit = el.numberInt();
- if (limit <= 0) {
- return Status(ErrorCodes::BadValue, "limit value must be positive");
- }
-
- pq->_limit = limit;
+ int batchSize = el.numberInt();
+ if (batchSize < 0) {
+ return Status(ErrorCodes::BadValue, "batchSize value must be non-negative");
}
- else if (str::equals(fieldName, kBatchSizeField)) {
- if (!el.isNumber()) {
- str::stream ss;
- ss << "Failed to parse: " << cmdObj.toString() << ". "
- << "'batchSize' field must be numeric.";
- return Status(ErrorCodes::FailedToParse, ss);
- }
-
- int batchSize = el.numberInt();
- if (batchSize < 0) {
- return Status(ErrorCodes::BadValue, "batchSize value must be non-negative");
- }
- pq->_batchSize = batchSize;
+ pq->_batchSize = batchSize;
+ } else if (str::equals(fieldName, kSingleBatchField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kSingleBatchField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_wantMore = !el.boolean();
+ pq->_wantMore = !el.boolean();
+ } else if (str::equals(fieldName, kCommentField)) {
+ Status status = checkFieldType(el, String);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kCommentField)) {
- Status status = checkFieldType(el, String);
- if (!status.isOK()) {
- return status;
- }
- pq->_comment = el.str();
+ pq->_comment = el.str();
+ } else if (str::equals(fieldName, kMaxScanField)) {
+ if (!el.isNumber()) {
+ str::stream ss;
+ ss << "Failed to parse: " << cmdObj.toString() << ". "
+ << "'maxScan' field must be numeric.";
+ return Status(ErrorCodes::FailedToParse, ss);
}
- else if (str::equals(fieldName, kMaxScanField)) {
- if (!el.isNumber()) {
- str::stream ss;
- ss << "Failed to parse: " << cmdObj.toString() << ". "
- << "'maxScan' field must be numeric.";
- return Status(ErrorCodes::FailedToParse, ss);
- }
- int maxScan = el.numberInt();
- if (maxScan < 0) {
- return Status(ErrorCodes::BadValue, "maxScan value must be non-negative");
- }
-
- pq->_maxScan = maxScan;
+ int maxScan = el.numberInt();
+ if (maxScan < 0) {
+ return Status(ErrorCodes::BadValue, "maxScan value must be non-negative");
}
- else if (str::equals(fieldName, cmdOptionMaxTimeMS.c_str())) {
- StatusWith<int> maxTimeMS = parseMaxTimeMS(el);
- if (!maxTimeMS.isOK()) {
- return maxTimeMS.getStatus();
- }
- pq->_maxTimeMS = maxTimeMS.getValue();
+ pq->_maxScan = maxScan;
+ } else if (str::equals(fieldName, cmdOptionMaxTimeMS.c_str())) {
+ StatusWith<int> maxTimeMS = parseMaxTimeMS(el);
+ if (!maxTimeMS.isOK()) {
+ return maxTimeMS.getStatus();
}
- else if (str::equals(fieldName, kMinField)) {
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
- pq->_min = el.Obj().getOwned();
+ pq->_maxTimeMS = maxTimeMS.getValue();
+ } else if (str::equals(fieldName, kMinField)) {
+ Status status = checkFieldType(el, Object);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kMaxField)) {
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
- pq->_max = el.Obj().getOwned();
+ pq->_min = el.Obj().getOwned();
+ } else if (str::equals(fieldName, kMaxField)) {
+ Status status = checkFieldType(el, Object);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kReturnKeyField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_returnKey = el.boolean();
+ pq->_max = el.Obj().getOwned();
+ } else if (str::equals(fieldName, kReturnKeyField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kShowRecordIdField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_showRecordId = el.boolean();
+ pq->_returnKey = el.boolean();
+ } else if (str::equals(fieldName, kShowRecordIdField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kSnapshotField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_snapshot = el.boolean();
+ pq->_showRecordId = el.boolean();
+ } else if (str::equals(fieldName, kSnapshotField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, "$readPreference")) {
- pq->_hasReadPref = true;
- }
- else if (str::equals(fieldName, kTailableField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_tailable = el.boolean();
+ pq->_snapshot = el.boolean();
+ } else if (str::equals(fieldName, "$readPreference")) {
+ pq->_hasReadPref = true;
+ } else if (str::equals(fieldName, kTailableField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, "slaveOk")) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_slaveOk = el.boolean();
+ pq->_tailable = el.boolean();
+ } else if (str::equals(fieldName, "slaveOk")) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kOplogReplayField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_oplogReplay = el.boolean();
+ pq->_slaveOk = el.boolean();
+ } else if (str::equals(fieldName, kOplogReplayField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kNoCursorTimeoutField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_noCursorTimeout = el.boolean();
+ pq->_oplogReplay = el.boolean();
+ } else if (str::equals(fieldName, kNoCursorTimeoutField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kAwaitDataField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_awaitData = el.boolean();
+ pq->_noCursorTimeout = el.boolean();
+ } else if (str::equals(fieldName, kAwaitDataField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kPartialField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_partial = el.boolean();
- }
- else if (str::equals(fieldName, "options")) {
- // 3.0.x versions of the shell may generate an explain of a find command with an
- // 'options' field. We accept this only if the 'options' field is empty so that
- // the shell's explain implementation is forwards compatible.
- //
- // TODO: Remove for 3.4.
- if (!pq->isExplain()) {
- return Status(ErrorCodes::FailedToParse,
- "Field 'options' is only allowed for explain.");
- }
-
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
-
- BSONObj optionsObj = el.Obj();
- if (!optionsObj.isEmpty()) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "Failed to parse options: "
- << optionsObj.toString() << ". "
- << "You may need to update your shell or driver.");
- }
- }
- else if (str::equals(fieldName,
- repl::ReadAfterOpTimeArgs::kRootFieldName.c_str())) {
- // read after optime parsing is handled elsewhere.
- continue;
+ pq->_awaitData = el.boolean();
+ } else if (str::equals(fieldName, kPartialField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else {
+
+ pq->_partial = el.boolean();
+ } else if (str::equals(fieldName, "options")) {
+ // 3.0.x versions of the shell may generate an explain of a find command with an
+ // 'options' field. We accept this only if the 'options' field is empty so that
+ // the shell's explain implementation is forwards compatible.
+ //
+ // TODO: Remove for 3.4.
+ if (!pq->isExplain()) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Failed to parse: " << cmdObj.toString() << ". "
- << "Unrecognized field '" << fieldName << "'.");
+ "Field 'options' is only allowed for explain.");
}
- }
- pq->addMetaProjection();
+ Status status = checkFieldType(el, Object);
+ if (!status.isOK()) {
+ return status;
+ }
- Status validateStatus = pq->validateFindCmd();
- if (!validateStatus.isOK()) {
- return validateStatus;
+ BSONObj optionsObj = el.Obj();
+ if (!optionsObj.isEmpty()) {
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "Failed to parse options: " << optionsObj.toString()
+ << ". "
+ << "You may need to update your shell or driver.");
+ }
+ } else if (str::equals(fieldName, repl::ReadAfterOpTimeArgs::kRootFieldName.c_str())) {
+ // read after optime parsing is handled elsewhere.
+ continue;
+ } else {
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "Failed to parse: " << cmdObj.toString() << ". "
+ << "Unrecognized field '" << fieldName << "'.");
}
+ }
- return std::move(pq);
- }
-
- // static
- StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeAsOpQuery(const string& ns,
- int ntoskip,
- int ntoreturn,
- int queryOptions,
- const BSONObj& query,
- const BSONObj& proj,
- const BSONObj& sort,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot,
- bool explain) {
- unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery());
- pq->_sort = sort.getOwned();
- pq->_hint = hint.getOwned();
- pq->_min = minObj.getOwned();
- pq->_max = maxObj.getOwned();
- pq->_snapshot = snapshot;
- pq->_explain = explain;
-
- Status status = pq->init(ns, ntoskip, ntoreturn, queryOptions, query, proj, false);
- if (!status.isOK()) {
- return status;
- }
+ pq->addMetaProjection();
- return std::move(pq);
+ Status validateStatus = pq->validateFindCmd();
+ if (!validateStatus.isOK()) {
+ return validateStatus;
}
- // static
- StatusWith<unique_ptr<LiteParsedQuery>>
- LiteParsedQuery::makeAsFindCmd(const NamespaceString& ns,
- const BSONObj& query,
- boost::optional<int> limit) {
- unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery());
+ return std::move(pq);
+}
+
+// static
+StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeAsOpQuery(const string& ns,
+ int ntoskip,
+ int ntoreturn,
+ int queryOptions,
+ const BSONObj& query,
+ const BSONObj& proj,
+ const BSONObj& sort,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot,
+ bool explain) {
+ unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery());
+ pq->_sort = sort.getOwned();
+ pq->_hint = hint.getOwned();
+ pq->_min = minObj.getOwned();
+ pq->_max = maxObj.getOwned();
+ pq->_snapshot = snapshot;
+ pq->_explain = explain;
+
+ Status status = pq->init(ns, ntoskip, ntoreturn, queryOptions, query, proj, false);
+ if (!status.isOK()) {
+ return status;
+ }
- pq->_fromCommand = true;
- pq->_ns = ns.ns();
- pq->_filter = query.getOwned();
+ return std::move(pq);
+}
- if (limit) {
- if (limit <= 0) {
- return Status(ErrorCodes::BadValue, "limit value must be positive");
- }
-
- pq->_limit = std::move(limit);
- }
+// static
+StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeAsFindCmd(const NamespaceString& ns,
+ const BSONObj& query,
+ boost::optional<int> limit) {
+ unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery());
- pq->addMetaProjection();
+ pq->_fromCommand = true;
+ pq->_ns = ns.ns();
+ pq->_filter = query.getOwned();
- Status validateStatus = pq->validateFindCmd();
- if (!validateStatus.isOK()) {
- return validateStatus;
+ if (limit) {
+ if (limit <= 0) {
+ return Status(ErrorCodes::BadValue, "limit value must be positive");
}
- return std::move(pq);
+ pq->_limit = std::move(limit);
}
- BSONObj LiteParsedQuery::asFindCommand() const {
- BSONObjBuilder bob;
+ pq->addMetaProjection();
- const NamespaceString nss(_ns);
- bob.append(kCmdName, nss.coll());
+ Status validateStatus = pq->validateFindCmd();
+ if (!validateStatus.isOK()) {
+ return validateStatus;
+ }
- if (!_filter.isEmpty()) {
- bob.append(kFilterField, _filter);
- }
+ return std::move(pq);
+}
- if (!_proj.isEmpty()) {
- bob.append(kProjectionField, _proj);
- }
+BSONObj LiteParsedQuery::asFindCommand() const {
+ BSONObjBuilder bob;
- if (!_sort.isEmpty()) {
- bob.append(kSortField, _sort);
- }
+ const NamespaceString nss(_ns);
+ bob.append(kCmdName, nss.coll());
- if (!_hint.isEmpty()) {
- bob.append(kHintField, _hint);
- }
+ if (!_filter.isEmpty()) {
+ bob.append(kFilterField, _filter);
+ }
- if (_skip > 0) {
- bob.append(kSkipField, _skip);
- }
+ if (!_proj.isEmpty()) {
+ bob.append(kProjectionField, _proj);
+ }
- if (_limit) {
- bob.append(kLimitField, *_limit);
- }
+ if (!_sort.isEmpty()) {
+ bob.append(kSortField, _sort);
+ }
- if (_batchSize) {
- bob.append(kBatchSizeField, *_batchSize);
- }
+ if (!_hint.isEmpty()) {
+ bob.append(kHintField, _hint);
+ }
- if (!_wantMore) {
- bob.append(kSingleBatchField, true);
- }
+ if (_skip > 0) {
+ bob.append(kSkipField, _skip);
+ }
- if (!_comment.empty()) {
- bob.append(kCommentField, _comment);
- }
+ if (_limit) {
+ bob.append(kLimitField, *_limit);
+ }
- if (_maxScan > 0) {
- bob.append(kMaxScanField, _maxScan);
- }
+ if (_batchSize) {
+ bob.append(kBatchSizeField, *_batchSize);
+ }
- if (_maxTimeMS > 0) {
- bob.append(cmdOptionMaxTimeMS, _maxTimeMS);
- }
+ if (!_wantMore) {
+ bob.append(kSingleBatchField, true);
+ }
- if (!_max.isEmpty()) {
- bob.append(kMaxField, _max);
- }
+ if (!_comment.empty()) {
+ bob.append(kCommentField, _comment);
+ }
- if (!_min.isEmpty()) {
- bob.append(kMinField, _min);
- }
+ if (_maxScan > 0) {
+ bob.append(kMaxScanField, _maxScan);
+ }
- if (_returnKey) {
- bob.append(kReturnKeyField, true);
- }
+ if (_maxTimeMS > 0) {
+ bob.append(cmdOptionMaxTimeMS, _maxTimeMS);
+ }
- if (_showRecordId) {
- bob.append(kShowRecordIdField, true);
- }
+ if (!_max.isEmpty()) {
+ bob.append(kMaxField, _max);
+ }
- if (_snapshot) {
- bob.append(kSnapshotField, true);
- }
+ if (!_min.isEmpty()) {
+ bob.append(kMinField, _min);
+ }
- if (_tailable) {
- bob.append(kTailableField, true);
- }
+ if (_returnKey) {
+ bob.append(kReturnKeyField, true);
+ }
- if (_oplogReplay) {
- bob.append(kOplogReplayField, true);
- }
+ if (_showRecordId) {
+ bob.append(kShowRecordIdField, true);
+ }
- if (_noCursorTimeout) {
- bob.append(kNoCursorTimeoutField, true);
- }
+ if (_snapshot) {
+ bob.append(kSnapshotField, true);
+ }
- if (_awaitData) {
- bob.append(kAwaitDataField, true);
- }
+ if (_tailable) {
+ bob.append(kTailableField, true);
+ }
- if (_partial) {
- bob.append(kPartialField, true);
- }
+ if (_oplogReplay) {
+ bob.append(kOplogReplayField, true);
+ }
- return bob.obj();
+ if (_noCursorTimeout) {
+ bob.append(kNoCursorTimeoutField, true);
}
- void LiteParsedQuery::addReturnKeyMetaProj() {
- BSONObjBuilder projBob;
- projBob.appendElements(_proj);
- // We use $$ because it's never going to show up in a user's projection.
- // The exact text doesn't matter.
- BSONObj indexKey = BSON("$$" <<
- BSON("$meta" << LiteParsedQuery::metaIndexKey));
- projBob.append(indexKey.firstElement());
- _proj = projBob.obj();
+ if (_awaitData) {
+ bob.append(kAwaitDataField, true);
}
- void LiteParsedQuery::addShowRecordIdMetaProj() {
- BSONObjBuilder projBob;
- projBob.appendElements(_proj);
- BSONObj metaRecordId = BSON("$recordId" <<
- BSON("$meta" << LiteParsedQuery::metaRecordId));
- projBob.append(metaRecordId.firstElement());
- _proj = projBob.obj();
+ if (_partial) {
+ bob.append(kPartialField, true);
}
- Status LiteParsedQuery::validate() const {
- // Min and Max objects must have the same fields.
- if (!_min.isEmpty() && !_max.isEmpty()) {
- if (!_min.isFieldNamePrefixOf(_max) ||
- (_min.nFields() != _max.nFields())) {
- return Status(ErrorCodes::BadValue, "min and max must have the same field names");
- }
+ return bob.obj();
+}
+
+void LiteParsedQuery::addReturnKeyMetaProj() {
+ BSONObjBuilder projBob;
+ projBob.appendElements(_proj);
+ // We use $$ because it's never going to show up in a user's projection.
+ // The exact text doesn't matter.
+ BSONObj indexKey = BSON("$$" << BSON("$meta" << LiteParsedQuery::metaIndexKey));
+ projBob.append(indexKey.firstElement());
+ _proj = projBob.obj();
+}
+
+void LiteParsedQuery::addShowRecordIdMetaProj() {
+ BSONObjBuilder projBob;
+ projBob.appendElements(_proj);
+ BSONObj metaRecordId = BSON("$recordId" << BSON("$meta" << LiteParsedQuery::metaRecordId));
+ projBob.append(metaRecordId.firstElement());
+ _proj = projBob.obj();
+}
+
+Status LiteParsedQuery::validate() const {
+ // Min and Max objects must have the same fields.
+ if (!_min.isEmpty() && !_max.isEmpty()) {
+ if (!_min.isFieldNamePrefixOf(_max) || (_min.nFields() != _max.nFields())) {
+ return Status(ErrorCodes::BadValue, "min and max must have the same field names");
}
+ }
- // Can't combine a normal sort and a $meta projection on the same field.
- BSONObjIterator projIt(_proj);
- while (projIt.more()) {
- BSONElement projElt = projIt.next();
- if (isTextScoreMeta(projElt)) {
- BSONElement sortElt = _sort[projElt.fieldName()];
- if (!sortElt.eoo() && !isTextScoreMeta(sortElt)) {
- return Status(ErrorCodes::BadValue,
- "can't have a non-$meta sort on a $meta projection");
- }
+ // Can't combine a normal sort and a $meta projection on the same field.
+ BSONObjIterator projIt(_proj);
+ while (projIt.more()) {
+ BSONElement projElt = projIt.next();
+ if (isTextScoreMeta(projElt)) {
+ BSONElement sortElt = _sort[projElt.fieldName()];
+ if (!sortElt.eoo() && !isTextScoreMeta(sortElt)) {
+ return Status(ErrorCodes::BadValue,
+ "can't have a non-$meta sort on a $meta projection");
}
}
+ }
- // All fields with a $meta sort must have a corresponding $meta projection.
- BSONObjIterator sortIt(_sort);
- while (sortIt.more()) {
- BSONElement sortElt = sortIt.next();
- if (isTextScoreMeta(sortElt)) {
- BSONElement projElt = _proj[sortElt.fieldName()];
- if (projElt.eoo() || !isTextScoreMeta(projElt)) {
- return Status(ErrorCodes::BadValue,
- "must have $meta projection for all $meta sort keys");
- }
+ // All fields with a $meta sort must have a corresponding $meta projection.
+ BSONObjIterator sortIt(_sort);
+ while (sortIt.more()) {
+ BSONElement sortElt = sortIt.next();
+ if (isTextScoreMeta(sortElt)) {
+ BSONElement projElt = _proj[sortElt.fieldName()];
+ if (projElt.eoo() || !isTextScoreMeta(projElt)) {
+ return Status(ErrorCodes::BadValue,
+ "must have $meta projection for all $meta sort keys");
}
}
+ }
- if (_snapshot) {
- if (!_sort.isEmpty()) {
- return Status(ErrorCodes::BadValue, "E12001 can't use sort with $snapshot");
- }
- if (!_hint.isEmpty()) {
- return Status(ErrorCodes::BadValue, "E12002 can't use hint with $snapshot");
- }
+ if (_snapshot) {
+ if (!_sort.isEmpty()) {
+ return Status(ErrorCodes::BadValue, "E12001 can't use sort with $snapshot");
+ }
+ if (!_hint.isEmpty()) {
+ return Status(ErrorCodes::BadValue, "E12002 can't use hint with $snapshot");
}
-
- return Status::OK();
}
- // static
- StatusWith<int> LiteParsedQuery::parseMaxTimeMSCommand(const BSONObj& cmdObj) {
- return parseMaxTimeMS(cmdObj[cmdOptionMaxTimeMS]);
+ return Status::OK();
+}
+
+// static
+StatusWith<int> LiteParsedQuery::parseMaxTimeMSCommand(const BSONObj& cmdObj) {
+ return parseMaxTimeMS(cmdObj[cmdOptionMaxTimeMS]);
+}
+
+// static
+StatusWith<int> LiteParsedQuery::parseMaxTimeMSQuery(const BSONObj& queryObj) {
+ return parseMaxTimeMS(queryObj[queryOptionMaxTimeMS]);
+}
+
+// static
+StatusWith<int> LiteParsedQuery::parseMaxTimeMS(const BSONElement& maxTimeMSElt) {
+ if (!maxTimeMSElt.eoo() && !maxTimeMSElt.isNumber()) {
+ return StatusWith<int>(
+ ErrorCodes::BadValue,
+ (StringBuilder() << maxTimeMSElt.fieldNameStringData() << " must be a number").str());
}
-
- // static
- StatusWith<int> LiteParsedQuery::parseMaxTimeMSQuery(const BSONObj& queryObj) {
- return parseMaxTimeMS(queryObj[queryOptionMaxTimeMS]);
+ long long maxTimeMSLongLong = maxTimeMSElt.safeNumberLong(); // returns 0 on EOO
+ if (maxTimeMSLongLong < 0 || maxTimeMSLongLong > INT_MAX) {
+ return StatusWith<int>(
+ ErrorCodes::BadValue,
+ (StringBuilder() << maxTimeMSElt.fieldNameStringData() << " is out of range").str());
}
-
- // static
- StatusWith<int> LiteParsedQuery::parseMaxTimeMS(const BSONElement& maxTimeMSElt) {
- if (!maxTimeMSElt.eoo() && !maxTimeMSElt.isNumber()) {
- return StatusWith<int>(ErrorCodes::BadValue,
- (StringBuilder()
- << maxTimeMSElt.fieldNameStringData()
- << " must be a number").str());
- }
- long long maxTimeMSLongLong = maxTimeMSElt.safeNumberLong(); // returns 0 on EOO
- if (maxTimeMSLongLong < 0 || maxTimeMSLongLong > INT_MAX) {
- return StatusWith<int>(ErrorCodes::BadValue,
- (StringBuilder()
- << maxTimeMSElt.fieldNameStringData()
- << " is out of range").str());
- }
- double maxTimeMSDouble = maxTimeMSElt.numberDouble();
- if (maxTimeMSElt.type() == mongo::NumberDouble
- && floor(maxTimeMSDouble) != maxTimeMSDouble) {
- return StatusWith<int>(ErrorCodes::BadValue,
- (StringBuilder()
- << maxTimeMSElt.fieldNameStringData()
- << " has non-integral value").str());
- }
- return StatusWith<int>(static_cast<int>(maxTimeMSLongLong));
+ double maxTimeMSDouble = maxTimeMSElt.numberDouble();
+ if (maxTimeMSElt.type() == mongo::NumberDouble && floor(maxTimeMSDouble) != maxTimeMSDouble) {
+ return StatusWith<int>(ErrorCodes::BadValue,
+ (StringBuilder() << maxTimeMSElt.fieldNameStringData()
+ << " has non-integral value").str());
}
+ return StatusWith<int>(static_cast<int>(maxTimeMSLongLong));
+}
- // static
- bool LiteParsedQuery::isTextScoreMeta(BSONElement elt) {
- // elt must be foo: {$meta: "textScore"}
- if (mongo::Object != elt.type()) {
- return false;
- }
- BSONObj metaObj = elt.Obj();
- BSONObjIterator metaIt(metaObj);
- // must have exactly 1 element
- if (!metaIt.more()) {
- return false;
- }
- BSONElement metaElt = metaIt.next();
- if (!str::equals("$meta", metaElt.fieldName())) {
- return false;
- }
- if (mongo::String != metaElt.type()) {
- return false;
- }
- if (LiteParsedQuery::metaTextScore != metaElt.valuestr()) {
- return false;
- }
- // must have exactly 1 element
- if (metaIt.more()) {
- return false;
- }
- return true;
+// static
+bool LiteParsedQuery::isTextScoreMeta(BSONElement elt) {
+ // elt must be foo: {$meta: "textScore"}
+ if (mongo::Object != elt.type()) {
+ return false;
}
+ BSONObj metaObj = elt.Obj();
+ BSONObjIterator metaIt(metaObj);
+ // must have exactly 1 element
+ if (!metaIt.more()) {
+ return false;
+ }
+ BSONElement metaElt = metaIt.next();
+ if (!str::equals("$meta", metaElt.fieldName())) {
+ return false;
+ }
+ if (mongo::String != metaElt.type()) {
+ return false;
+ }
+ if (LiteParsedQuery::metaTextScore != metaElt.valuestr()) {
+ return false;
+ }
+ // must have exactly 1 element
+ if (metaIt.more()) {
+ return false;
+ }
+ return true;
+}
- // static
- bool LiteParsedQuery::isRecordIdMeta(BSONElement elt) {
- // elt must be foo: {$meta: "recordId"}
- if (mongo::Object != elt.type()) {
- return false;
- }
- BSONObj metaObj = elt.Obj();
- BSONObjIterator metaIt(metaObj);
- // must have exactly 1 element
- if (!metaIt.more()) {
- return false;
- }
- BSONElement metaElt = metaIt.next();
- if (!str::equals("$meta", metaElt.fieldName())) {
- return false;
- }
- if (mongo::String != metaElt.type()) {
+// static
+bool LiteParsedQuery::isRecordIdMeta(BSONElement elt) {
+ // elt must be foo: {$meta: "recordId"}
+ if (mongo::Object != elt.type()) {
+ return false;
+ }
+ BSONObj metaObj = elt.Obj();
+ BSONObjIterator metaIt(metaObj);
+ // must have exactly 1 element
+ if (!metaIt.more()) {
+ return false;
+ }
+ BSONElement metaElt = metaIt.next();
+ if (!str::equals("$meta", metaElt.fieldName())) {
+ return false;
+ }
+ if (mongo::String != metaElt.type()) {
+ return false;
+ }
+ if (LiteParsedQuery::metaRecordId != metaElt.valuestr()) {
+ return false;
+ }
+ // must have exactly 1 element
+ if (metaIt.more()) {
+ return false;
+ }
+ return true;
+}
+
+// static
+bool LiteParsedQuery::isValidSortOrder(const BSONObj& sortObj) {
+ BSONObjIterator i(sortObj);
+ while (i.more()) {
+ BSONElement e = i.next();
+ // fieldNameSize() includes NULL terminator. For empty field name,
+ // we should be checking for 1 instead of 0.
+ if (1 == e.fieldNameSize()) {
return false;
}
- if (LiteParsedQuery::metaRecordId != metaElt.valuestr()) {
- return false;
+ if (isTextScoreMeta(e)) {
+ continue;
}
- // must have exactly 1 element
- if (metaIt.more()) {
+ long long n = e.safeNumberLong();
+ if (!(e.isNumber() && (n == -1LL || n == 1LL))) {
return false;
}
- return true;
}
-
- // static
- bool LiteParsedQuery::isValidSortOrder(const BSONObj& sortObj) {
- BSONObjIterator i(sortObj);
- while (i.more()) {
- BSONElement e = i.next();
- // fieldNameSize() includes NULL terminator. For empty field name,
- // we should be checking for 1 instead of 0.
- if (1 == e.fieldNameSize()) {
- return false;
- }
- if (isTextScoreMeta(e)) {
- continue;
- }
- long long n = e.safeNumberLong();
- if (!(e.isNumber() && (n == -1LL || n == 1LL))) {
- return false;
- }
- }
- return true;
- }
-
- // static
- bool LiteParsedQuery::isQueryIsolated(const BSONObj& query) {
- BSONObjIterator iter(query);
- while (iter.more()) {
- BSONElement elt = iter.next();
- if (str::equals(elt.fieldName(), "$isolated") && elt.trueValue())
- return true;
- if (str::equals(elt.fieldName(), "$atomic") && elt.trueValue())
- return true;
- }
- return false;
+ return true;
+}
+
+// static
+bool LiteParsedQuery::isQueryIsolated(const BSONObj& query) {
+ BSONObjIterator iter(query);
+ while (iter.more()) {
+ BSONElement elt = iter.next();
+ if (str::equals(elt.fieldName(), "$isolated") && elt.trueValue())
+ return true;
+ if (str::equals(elt.fieldName(), "$atomic") && elt.trueValue())
+ return true;
}
-
- //
- // Old LiteParsedQuery parsing code: SOON TO BE DEPRECATED.
- //
-
- // static
- StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::fromLegacyQueryMessage(
- const QueryMessage& qm) {
-
- unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery());
-
- Status status = pq->init(qm.ns,
- qm.ntoskip,
- qm.ntoreturn,
- qm.queryOptions,
- qm.query,
- qm.fields,
- true);
- if (!status.isOK()) {
- return status;
- }
-
- return std::move(pq);
+ return false;
+}
+
+//
+// Old LiteParsedQuery parsing code: SOON TO BE DEPRECATED.
+//
+
+// static
+StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::fromLegacyQueryMessage(
+ const QueryMessage& qm) {
+ unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery());
+
+ Status status =
+ pq->init(qm.ns, qm.ntoskip, qm.ntoreturn, qm.queryOptions, qm.query, qm.fields, true);
+ if (!status.isOK()) {
+ return status;
}
- Status LiteParsedQuery::init(const string& ns, int ntoskip, int ntoreturn, int queryOptions,
- const BSONObj& queryObj, const BSONObj& proj,
- bool fromQueryMessage) {
- _ns = ns;
- _skip = ntoskip;
- _proj = proj.getOwned();
+ return std::move(pq);
+}
+
+Status LiteParsedQuery::init(const string& ns,
+ int ntoskip,
+ int ntoreturn,
+ int queryOptions,
+ const BSONObj& queryObj,
+ const BSONObj& proj,
+ bool fromQueryMessage) {
+ _ns = ns;
+ _skip = ntoskip;
+ _proj = proj.getOwned();
+
+ if (ntoreturn) {
+ _batchSize = ntoreturn;
+ }
- if (ntoreturn) {
- _batchSize = ntoreturn;
- }
+ // Initialize flags passed as 'queryOptions' bit vector.
+ initFromInt(queryOptions);
- // Initialize flags passed as 'queryOptions' bit vector.
- initFromInt(queryOptions);
+ if (_skip < 0) {
+ return Status(ErrorCodes::BadValue, "bad skip value in query");
+ }
- if (_skip < 0) {
- return Status(ErrorCodes::BadValue, "bad skip value in query");
+ if (_batchSize && *_batchSize < 0) {
+ if (*_batchSize == std::numeric_limits<int>::min()) {
+ // _batchSize is negative but can't be negated.
+ return Status(ErrorCodes::BadValue, "bad limit value in query");
}
- if (_batchSize && *_batchSize < 0) {
- if (*_batchSize == std::numeric_limits<int>::min()) {
- // _batchSize is negative but can't be negated.
- return Status(ErrorCodes::BadValue, "bad limit value in query");
- }
+ // A negative number indicates that the cursor should be closed after the first batch.
+ _wantMore = false;
+ _batchSize = -*_batchSize;
+ }
- // A negative number indicates that the cursor should be closed after the first batch.
- _wantMore = false;
- _batchSize = -*_batchSize;
+ if (fromQueryMessage) {
+ BSONElement queryField = queryObj["query"];
+ if (!queryField.isABSONObj()) {
+ queryField = queryObj["$query"];
}
-
- if (fromQueryMessage) {
- BSONElement queryField = queryObj["query"];
- if (!queryField.isABSONObj()) { queryField = queryObj["$query"]; }
- if (queryField.isABSONObj()) {
- _filter = queryField.embeddedObject().getOwned();
- Status status = initFullQuery(queryObj);
- if (!status.isOK()) { return status; }
- }
- else {
- _filter = queryObj.getOwned();
+ if (queryField.isABSONObj()) {
+ _filter = queryField.embeddedObject().getOwned();
+ Status status = initFullQuery(queryObj);
+ if (!status.isOK()) {
+ return status;
}
- }
- else {
- // This is the debugging code path.
+ } else {
_filter = queryObj.getOwned();
}
-
- _hasReadPref = queryObj.hasField("$readPreference");
-
- if (!isValidSortOrder(_sort)) {
- return Status(ErrorCodes::BadValue, "bad sort specification");
- }
-
- return validate();
+ } else {
+ // This is the debugging code path.
+ _filter = queryObj.getOwned();
}
- Status LiteParsedQuery::initFullQuery(const BSONObj& top) {
- BSONObjIterator i(top);
-
- while (i.more()) {
- BSONElement e = i.next();
- const char* name = e.fieldName();
+ _hasReadPref = queryObj.hasField("$readPreference");
- if (0 == strcmp("$orderby", name) || 0 == strcmp("orderby", name)) {
- if (Object == e.type()) {
- _sort = e.embeddedObject().getOwned();
- }
- else if (Array == e.type()) {
- _sort = e.embeddedObject();
-
- // TODO: Is this ever used? I don't think so.
- // Quote:
- // This is for languages whose "objects" are not well ordered (JSON is well
- // ordered).
- // [ { a : ... } , { b : ... } ] -> { a : ..., b : ... }
- // note: this is slow, but that is ok as order will have very few pieces
- BSONObjBuilder b;
- char p[2] = "0";
-
- while (1) {
- BSONObj j = _sort.getObjectField(p);
- if (j.isEmpty()) { break; }
- BSONElement e = j.firstElement();
- if (e.eoo()) {
- return Status(ErrorCodes::BadValue, "bad order array");
- }
- if (!e.isNumber()) {
- return Status(ErrorCodes::BadValue, "bad order array [2]");
- }
- b.append(e);
- (*p)++;
- if (!(*p <= '9')) {
- return Status(ErrorCodes::BadValue, "too many ordering elements");
- }
- }
+ if (!isValidSortOrder(_sort)) {
+ return Status(ErrorCodes::BadValue, "bad sort specification");
+ }
- _sort = b.obj();
- }
- else {
- return Status(ErrorCodes::BadValue, "sort must be object or array");
- }
- }
- else if ('$' == *name) {
- name++;
- if (str::equals("explain", name)) {
- // Won't throw.
- _explain = e.trueValue();
- }
- else if (str::equals("snapshot", name)) {
- // Won't throw.
- _snapshot = e.trueValue();
- }
- else if (str::equals("min", name)) {
- if (!e.isABSONObj()) {
- return Status(ErrorCodes::BadValue, "$min must be a BSONObj");
+ return validate();
+}
+
+Status LiteParsedQuery::initFullQuery(const BSONObj& top) {
+ BSONObjIterator i(top);
+
+ while (i.more()) {
+ BSONElement e = i.next();
+ const char* name = e.fieldName();
+
+ if (0 == strcmp("$orderby", name) || 0 == strcmp("orderby", name)) {
+ if (Object == e.type()) {
+ _sort = e.embeddedObject().getOwned();
+ } else if (Array == e.type()) {
+ _sort = e.embeddedObject();
+
+ // TODO: Is this ever used? I don't think so.
+ // Quote:
+ // This is for languages whose "objects" are not well ordered (JSON is well
+ // ordered).
+ // [ { a : ... } , { b : ... } ] -> { a : ..., b : ... }
+ // note: this is slow, but that is ok as order will have very few pieces
+ BSONObjBuilder b;
+ char p[2] = "0";
+
+ while (1) {
+ BSONObj j = _sort.getObjectField(p);
+ if (j.isEmpty()) {
+ break;
}
- _min = e.embeddedObject().getOwned();
- }
- else if (str::equals("max", name)) {
- if (!e.isABSONObj()) {
- return Status(ErrorCodes::BadValue, "$max must be a BSONObj");
- }
- _max = e.embeddedObject().getOwned();
- }
- else if (str::equals("hint", name)) {
- if (e.isABSONObj()) {
- _hint = e.embeddedObject().getOwned();
- }
- else if (String == e.type()) {
- _hint = e.wrap();
+ BSONElement e = j.firstElement();
+ if (e.eoo()) {
+ return Status(ErrorCodes::BadValue, "bad order array");
}
- else {
- return Status(ErrorCodes::BadValue,
- "$hint must be either a string or nested object");
+ if (!e.isNumber()) {
+ return Status(ErrorCodes::BadValue, "bad order array [2]");
}
- }
- else if (str::equals("returnKey", name)) {
- // Won't throw.
- if (e.trueValue()) {
- _returnKey = true;
- addReturnKeyMetaProj();
- }
- }
- else if (str::equals("maxScan", name)) {
- // Won't throw.
- _maxScan = e.numberInt();
- }
- else if (str::equals("showDiskLoc", name)) {
- // Won't throw.
- if (e.trueValue()) {
- _showRecordId = true;
- addShowRecordIdMetaProj();
+ b.append(e);
+ (*p)++;
+ if (!(*p <= '9')) {
+ return Status(ErrorCodes::BadValue, "too many ordering elements");
}
}
- else if (str::equals("maxTimeMS", name)) {
- StatusWith<int> maxTimeMS = parseMaxTimeMS(e);
- if (!maxTimeMS.isOK()) {
- return maxTimeMS.getStatus();
- }
- _maxTimeMS = maxTimeMS.getValue();
+
+ _sort = b.obj();
+ } else {
+ return Status(ErrorCodes::BadValue, "sort must be object or array");
+ }
+ } else if ('$' == *name) {
+ name++;
+ if (str::equals("explain", name)) {
+ // Won't throw.
+ _explain = e.trueValue();
+ } else if (str::equals("snapshot", name)) {
+ // Won't throw.
+ _snapshot = e.trueValue();
+ } else if (str::equals("min", name)) {
+ if (!e.isABSONObj()) {
+ return Status(ErrorCodes::BadValue, "$min must be a BSONObj");
+ }
+ _min = e.embeddedObject().getOwned();
+ } else if (str::equals("max", name)) {
+ if (!e.isABSONObj()) {
+ return Status(ErrorCodes::BadValue, "$max must be a BSONObj");
+ }
+ _max = e.embeddedObject().getOwned();
+ } else if (str::equals("hint", name)) {
+ if (e.isABSONObj()) {
+ _hint = e.embeddedObject().getOwned();
+ } else if (String == e.type()) {
+ _hint = e.wrap();
+ } else {
+ return Status(ErrorCodes::BadValue,
+ "$hint must be either a string or nested object");
+ }
+ } else if (str::equals("returnKey", name)) {
+ // Won't throw.
+ if (e.trueValue()) {
+ _returnKey = true;
+ addReturnKeyMetaProj();
+ }
+ } else if (str::equals("maxScan", name)) {
+ // Won't throw.
+ _maxScan = e.numberInt();
+ } else if (str::equals("showDiskLoc", name)) {
+ // Won't throw.
+ if (e.trueValue()) {
+ _showRecordId = true;
+ addShowRecordIdMetaProj();
+ }
+ } else if (str::equals("maxTimeMS", name)) {
+ StatusWith<int> maxTimeMS = parseMaxTimeMS(e);
+ if (!maxTimeMS.isOK()) {
+ return maxTimeMS.getStatus();
}
+ _maxTimeMS = maxTimeMS.getValue();
}
}
-
- return Status::OK();
}
- int LiteParsedQuery::getOptions() const {
- int options = 0;
- if (_tailable) { options |= QueryOption_CursorTailable; }
- if (_slaveOk) { options |= QueryOption_SlaveOk; }
- if (_oplogReplay) { options |= QueryOption_OplogReplay; }
- if (_noCursorTimeout) { options |= QueryOption_NoCursorTimeout; }
- if (_awaitData) { options |= QueryOption_AwaitData; }
- if (_exhaust) { options |= QueryOption_Exhaust; }
- if (_partial) { options |= QueryOption_PartialResults; }
- return options;
- }
+ return Status::OK();
+}
- void LiteParsedQuery::initFromInt(int options) {
- _tailable = (options & QueryOption_CursorTailable) != 0;
- _slaveOk = (options & QueryOption_SlaveOk) != 0;
- _oplogReplay = (options & QueryOption_OplogReplay) != 0;
- _noCursorTimeout = (options & QueryOption_NoCursorTimeout) != 0;
- _awaitData = (options & QueryOption_AwaitData) != 0;
- _exhaust = (options & QueryOption_Exhaust) != 0;
- _partial = (options & QueryOption_PartialResults) != 0;
+int LiteParsedQuery::getOptions() const {
+ int options = 0;
+ if (_tailable) {
+ options |= QueryOption_CursorTailable;
}
-
- void LiteParsedQuery::addMetaProjection() {
- // We might need to update the projection object with a $meta projection.
- if (returnKey()) {
- addReturnKeyMetaProj();
- }
-
- if (showRecordId()) {
- addShowRecordIdMetaProj();
- }
+ if (_slaveOk) {
+ options |= QueryOption_SlaveOk;
+ }
+ if (_oplogReplay) {
+ options |= QueryOption_OplogReplay;
+ }
+ if (_noCursorTimeout) {
+ options |= QueryOption_NoCursorTimeout;
+ }
+ if (_awaitData) {
+ options |= QueryOption_AwaitData;
+ }
+ if (_exhaust) {
+ options |= QueryOption_Exhaust;
+ }
+ if (_partial) {
+ options |= QueryOption_PartialResults;
+ }
+ return options;
+}
+
+void LiteParsedQuery::initFromInt(int options) {
+ _tailable = (options & QueryOption_CursorTailable) != 0;
+ _slaveOk = (options & QueryOption_SlaveOk) != 0;
+ _oplogReplay = (options & QueryOption_OplogReplay) != 0;
+ _noCursorTimeout = (options & QueryOption_NoCursorTimeout) != 0;
+ _awaitData = (options & QueryOption_AwaitData) != 0;
+ _exhaust = (options & QueryOption_Exhaust) != 0;
+ _partial = (options & QueryOption_PartialResults) != 0;
+}
+
+void LiteParsedQuery::addMetaProjection() {
+ // We might need to update the projection object with a $meta projection.
+ if (returnKey()) {
+ addReturnKeyMetaProj();
}
- Status LiteParsedQuery::validateFindCmd() {
- if (isAwaitData() && !isTailable()) {
- return Status(ErrorCodes::BadValue, "Cannot set awaitData without tailable");
- }
+ if (showRecordId()) {
+ addShowRecordIdMetaProj();
+ }
+}
- return validate();
+Status LiteParsedQuery::validateFindCmd() {
+ if (isAwaitData() && !isTailable()) {
+ return Status(ErrorCodes::BadValue, "Cannot set awaitData without tailable");
}
-} // namespace mongo
+ return validate();
+}
+
+} // namespace mongo
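A minimal sketch of a command object the parser above accepts: makeFromFindCommand() walks the object one element at a time, type-checks every recognized field, and rejects unknown fields, while asFindCommand() serializes the parsed query back into an equivalent object. The field names are the constants declared in the anonymous namespace above; the values and the collection name are illustrative only, not taken from this change.

    // Hypothetical input, using only fields the parser above recognizes.
    BSONObj cmdObj = BSON("find" << "coll"
                                 << "filter" << BSON("x" << 1)
                                 << "sort" << BSON("y" << -1)
                                 << "limit" << 5            // must be positive
                                 << "batchSize" << 101      // must be non-negative
                                 << "singleBatch" << true   // stored as !_wantMore
                                 << "maxTimeMS" << 1000);   // numeric, integral, 0..INT_MAX
    // Given a NamespaceString nss for the target collection:
    //   auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, /*isExplain*/ false);
    // On success, result.getValue()->asFindCommand() produces an equivalent command object.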
diff --git a/src/mongo/db/query/lite_parsed_query.h b/src/mongo/db/query/lite_parsed_query.h
index de34ea0ffd6..f0744eeb782 100644
--- a/src/mongo/db/query/lite_parsed_query.h
+++ b/src/mongo/db/query/lite_parsed_query.h
@@ -35,245 +35,304 @@
namespace mongo {
- class NamespaceString;
- class QueryMessage;
- class Status;
- template<typename T> class StatusWith;
+class NamespaceString;
+class QueryMessage;
+class Status;
+template <typename T>
+class StatusWith;
+
+/**
+ * Parses the QueryMessage or find command received from the user and makes the various fields
+ * more easily accessible.
+ */
+class LiteParsedQuery {
+public:
+ /**
+     * Parses a find command object, 'cmdObj'. The caller must indicate whether this lite
+     * parsed query is an explained query via 'isExplain'.
+ *
+     * Returns a heap-allocated LiteParsedQuery on success, or an error if 'cmdObj' is not
+     * well formed.
+ */
+ static StatusWith<std::unique_ptr<LiteParsedQuery>> makeFromFindCommand(
+ const NamespaceString& nss, const BSONObj& cmdObj, bool isExplain);
+
+ /**
+     * Constructs a LiteParsedQuery object as though it is from a legacy QueryMessage.
+ */
+ static StatusWith<std::unique_ptr<LiteParsedQuery>> makeAsOpQuery(const std::string& ns,
+ int ntoskip,
+ int ntoreturn,
+ int queryoptions,
+ const BSONObj& query,
+ const BSONObj& proj,
+ const BSONObj& sort,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot,
+ bool explain);
+
+ /**
+     * Constructs a LiteParsedQuery object that can be used to serialize to a find command
+     * BSON object.
+ */
+ static StatusWith<std::unique_ptr<LiteParsedQuery>> makeAsFindCmd(const NamespaceString& ns,
+ const BSONObj& query,
+ boost::optional<int> limit);
+
+ /**
+ * Converts this LPQ into a find command.
+ */
+ BSONObj asFindCommand() const;
/**
- * Parses the QueryMessage or find command received from the user and makes the various fields
- * more easily accessible.
+ * Helper functions to parse maxTimeMS from a command object. Returns the contained value,
+     * or an error on parse failure. When passed an EOO-type element, returns 0 (special value
+ * for "allow to run indefinitely").
*/
- class LiteParsedQuery {
- public:
- /**
- * Parses a find command object, 'cmdObj'. Caller must indicate whether or not this lite
- * parsed query is an explained query or not via 'isExplain'.
- *
- * Returns a heap allocated LiteParsedQuery on success or an error if 'cmdObj' is not well
- * formed.
- */
- static StatusWith<std::unique_ptr<LiteParsedQuery>>
- makeFromFindCommand(const NamespaceString& nss, const BSONObj& cmdObj, bool isExplain);
-
- /**
- * Constructs a LiteParseQuery object as though it is from a legacy QueryMessage.
- */
- static StatusWith<std::unique_ptr<LiteParsedQuery>> makeAsOpQuery(const std::string& ns,
- int ntoskip,
- int ntoreturn,
- int queryoptions,
- const BSONObj& query,
- const BSONObj& proj,
- const BSONObj& sort,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot,
- bool explain);
-
- /**
- * Constructs a LiteParseQuery object that can be used to serialize to find command
- * BSON object.
- */
- static StatusWith<std::unique_ptr<LiteParsedQuery>>
- makeAsFindCmd(const NamespaceString& ns,
- const BSONObj& query,
- boost::optional<int> limit);
-
- /**
- * Converts this LPQ into a find command.
- */
- BSONObj asFindCommand() const;
-
- /**
- * Helper functions to parse maxTimeMS from a command object. Returns the contained value,
- * or an error on parsing fail. When passed an EOO-type element, returns 0 (special value
- * for "allow to run indefinitely").
- */
- static StatusWith<int> parseMaxTimeMSCommand(const BSONObj& cmdObj);
-
- /**
- * Same as parseMaxTimeMSCommand, but for a query object.
- */
- static StatusWith<int> parseMaxTimeMSQuery(const BSONObj& queryObj);
-
- /**
- * Helper function to identify text search sort key
- * Example: {a: {$meta: "textScore"}}
- */
- static bool isTextScoreMeta(BSONElement elt);
-
- /**
- * Helper function to identify recordId projection.
- *
- * Example: {a: {$meta: "recordId"}}.
- */
- static bool isRecordIdMeta(BSONElement elt);
-
- /**
- * Helper function to validate a sort object.
- * Returns true if each element satisfies one of:
- * 1. a number with value 1
- * 2. a number with value -1
- * 3. isTextScoreMeta
- */
- static bool isValidSortOrder(const BSONObj& sortObj);
-
- /**
- * Returns true if the query described by "query" should execute
- * at an elevated level of isolation (i.e., $isolated was specified).
- */
- static bool isQueryIsolated(const BSONObj& query);
-
- // Names of the maxTimeMS command and query option.
- static const std::string cmdOptionMaxTimeMS;
- static const std::string queryOptionMaxTimeMS;
-
- // Names of the $meta projection values.
- static const std::string metaTextScore;
- static const std::string metaGeoNearDistance;
- static const std::string metaGeoNearPoint;
- static const std::string metaRecordId;
- static const std::string metaIndexKey;
-
- const std::string& ns() const { return _ns; }
-
- const BSONObj& getFilter() const { return _filter; }
- const BSONObj& getProj() const { return _proj; }
- const BSONObj& getSort() const { return _sort; }
- const BSONObj& getHint() const { return _hint; }
-
- static const int kDefaultBatchSize;
-
- int getSkip() const { return _skip; }
- boost::optional<int> getLimit() const { return _limit; }
- boost::optional<int> getBatchSize() const { return _batchSize; }
- bool wantMore() const { return _wantMore; }
-
- bool isFromFindCommand() const { return _fromCommand; }
- bool isExplain() const { return _explain; }
-
- const std::string& getComment() const { return _comment; }
-
- int getMaxScan() const { return _maxScan; }
- int getMaxTimeMS() const { return _maxTimeMS; }
-
- const BSONObj& getMin() const { return _min; }
- const BSONObj& getMax() const { return _max; }
-
- bool returnKey() const { return _returnKey; }
- bool showRecordId() const { return _showRecordId; }
- bool isSnapshot() const { return _snapshot; }
- bool hasReadPref() const { return _hasReadPref; }
-
- bool isTailable() const { return _tailable; }
- bool isSlaveOk() const { return _slaveOk; }
- bool isOplogReplay() const { return _oplogReplay; }
- bool isNoCursorTimeout() const { return _noCursorTimeout; }
- bool isAwaitData() const { return _awaitData; }
- bool isExhaust() const { return _exhaust; }
- bool isPartial() const { return _partial; }
-
- /**
- * Return options as a bit vector.
- */
- int getOptions() const;
-
- //
- // Old parsing code: SOON TO BE DEPRECATED.
- //
-
- /**
- * Parse the provided QueryMessage and return a heap constructed LiteParsedQuery, which
- * represents it or an error.
- */
- static StatusWith<std::unique_ptr<LiteParsedQuery>> fromLegacyQueryMessage(
- const QueryMessage& qm);
-
- private:
- LiteParsedQuery() = default;
-
- /**
- * Parsing code calls this after construction of the LPQ is complete. There are additional
- * semantic properties that must be checked even if "lexically" the parse is OK.
- */
- Status validate() const;
-
- Status init(const std::string& ns, int ntoskip, int ntoreturn, int queryOptions,
- const BSONObj& queryObj, const BSONObj& proj, bool fromQueryMessage);
-
- Status initFullQuery(const BSONObj& top);
-
- static StatusWith<int> parseMaxTimeMS(const BSONElement& maxTimeMSElt);
-
- /**
- * Updates the projection object with a $meta projection for the returnKey option.
- */
- void addReturnKeyMetaProj();
-
- /**
- * Updates the projection object with a $meta projection for the showRecordId option.
- */
- void addShowRecordIdMetaProj();
-
- /**
- * Initializes options based on the value of the 'options' bit vector.
- *
- * This contains flags such as tailable, exhaust, and noCursorTimeout.
- */
- void initFromInt(int options);
-
- /**
- * Add the meta projection to this object if needed.
- */
- void addMetaProjection();
-
- /**
- * Returns OK if this is valid in the find command context.
- */
- Status validateFindCmd();
-
- std::string _ns;
-
- BSONObj _filter;
- BSONObj _proj;
- BSONObj _sort;
- // The hint provided, if any. If the hint was by index key pattern, the value of '_hint' is
- // the key pattern hinted. If the hint was by index name, the value of '_hint' is
- // {$hint: <String>}, where <String> is the index name hinted.
- BSONObj _hint;
-
- int _skip = 0;
- bool _wantMore = true;
-
- boost::optional<int> _limit;
- boost::optional<int> _batchSize;
-
- bool _fromCommand = false;
- bool _explain = false;
-
- std::string _comment;
-
- int _maxScan = 0;
- int _maxTimeMS = 0;
-
- BSONObj _min;
- BSONObj _max;
-
- bool _returnKey = false;
- bool _showRecordId = false;
- bool _snapshot = false;
- bool _hasReadPref = false;
-
- // Options that can be specified in the OP_QUERY 'flags' header.
- bool _tailable = false;
- bool _slaveOk = false;
- bool _oplogReplay = false;
- bool _noCursorTimeout = false;
- bool _awaitData = false;
- bool _exhaust = false;
- bool _partial = false;
- };
-
-} // namespace mongo
+ static StatusWith<int> parseMaxTimeMSCommand(const BSONObj& cmdObj);
+
+ /**
+ * Same as parseMaxTimeMSCommand, but for a query object.
+ */
+ static StatusWith<int> parseMaxTimeMSQuery(const BSONObj& queryObj);
+
+ /**
+     * Helper function to identify a text search sort key.
+ * Example: {a: {$meta: "textScore"}}
+ */
+ static bool isTextScoreMeta(BSONElement elt);
+
+ /**
+ * Helper function to identify recordId projection.
+ *
+ * Example: {a: {$meta: "recordId"}}.
+ */
+ static bool isRecordIdMeta(BSONElement elt);
+
+ /**
+ * Helper function to validate a sort object.
+ * Returns true if each element satisfies one of:
+ * 1. a number with value 1
+ * 2. a number with value -1
+ * 3. isTextScoreMeta
+ */
+ static bool isValidSortOrder(const BSONObj& sortObj);
+
+ /**
+ * Returns true if the query described by "query" should execute
+ * at an elevated level of isolation (i.e., $isolated was specified).
+ */
+ static bool isQueryIsolated(const BSONObj& query);
+
+ // Names of the maxTimeMS command and query option.
+ static const std::string cmdOptionMaxTimeMS;
+ static const std::string queryOptionMaxTimeMS;
+
+ // Names of the $meta projection values.
+ static const std::string metaTextScore;
+ static const std::string metaGeoNearDistance;
+ static const std::string metaGeoNearPoint;
+ static const std::string metaRecordId;
+ static const std::string metaIndexKey;
+
+ const std::string& ns() const {
+ return _ns;
+ }
+
+ const BSONObj& getFilter() const {
+ return _filter;
+ }
+ const BSONObj& getProj() const {
+ return _proj;
+ }
+ const BSONObj& getSort() const {
+ return _sort;
+ }
+ const BSONObj& getHint() const {
+ return _hint;
+ }
+
+ static const int kDefaultBatchSize;
+
+ int getSkip() const {
+ return _skip;
+ }
+ boost::optional<int> getLimit() const {
+ return _limit;
+ }
+ boost::optional<int> getBatchSize() const {
+ return _batchSize;
+ }
+ bool wantMore() const {
+ return _wantMore;
+ }
+
+ bool isFromFindCommand() const {
+ return _fromCommand;
+ }
+ bool isExplain() const {
+ return _explain;
+ }
+
+ const std::string& getComment() const {
+ return _comment;
+ }
+
+ int getMaxScan() const {
+ return _maxScan;
+ }
+ int getMaxTimeMS() const {
+ return _maxTimeMS;
+ }
+
+ const BSONObj& getMin() const {
+ return _min;
+ }
+ const BSONObj& getMax() const {
+ return _max;
+ }
+
+ bool returnKey() const {
+ return _returnKey;
+ }
+ bool showRecordId() const {
+ return _showRecordId;
+ }
+ bool isSnapshot() const {
+ return _snapshot;
+ }
+ bool hasReadPref() const {
+ return _hasReadPref;
+ }
+
+ bool isTailable() const {
+ return _tailable;
+ }
+ bool isSlaveOk() const {
+ return _slaveOk;
+ }
+ bool isOplogReplay() const {
+ return _oplogReplay;
+ }
+ bool isNoCursorTimeout() const {
+ return _noCursorTimeout;
+ }
+ bool isAwaitData() const {
+ return _awaitData;
+ }
+ bool isExhaust() const {
+ return _exhaust;
+ }
+ bool isPartial() const {
+ return _partial;
+ }
+
+ /**
+     * Returns the options as a bit vector.
+ */
+ int getOptions() const;
+
+ //
+ // Old parsing code: SOON TO BE DEPRECATED.
+ //
+
+ /**
+     * Parses the provided QueryMessage and returns a heap-constructed LiteParsedQuery
+     * representing it, or an error.
+ */
+ static StatusWith<std::unique_ptr<LiteParsedQuery>> fromLegacyQueryMessage(
+ const QueryMessage& qm);
+
+private:
+ LiteParsedQuery() = default;
+
+ /**
+ * Parsing code calls this after construction of the LPQ is complete. There are additional
+ * semantic properties that must be checked even if "lexically" the parse is OK.
+ */
+ Status validate() const;
+
+ Status init(const std::string& ns,
+ int ntoskip,
+ int ntoreturn,
+ int queryOptions,
+ const BSONObj& queryObj,
+ const BSONObj& proj,
+ bool fromQueryMessage);
+
+ Status initFullQuery(const BSONObj& top);
+
+ static StatusWith<int> parseMaxTimeMS(const BSONElement& maxTimeMSElt);
+
+ /**
+ * Updates the projection object with a $meta projection for the returnKey option.
+ */
+ void addReturnKeyMetaProj();
+
+ /**
+ * Updates the projection object with a $meta projection for the showRecordId option.
+ */
+ void addShowRecordIdMetaProj();
+
+ /**
+ * Initializes options based on the value of the 'options' bit vector.
+ *
+ * This contains flags such as tailable, exhaust, and noCursorTimeout.
+ */
+ void initFromInt(int options);
+
+ /**
+ * Add the meta projection to this object if needed.
+ */
+ void addMetaProjection();
+
+ /**
+ * Returns OK if this is valid in the find command context.
+ */
+ Status validateFindCmd();
+
+ std::string _ns;
+
+ BSONObj _filter;
+ BSONObj _proj;
+ BSONObj _sort;
+ // The hint provided, if any. If the hint was by index key pattern, the value of '_hint' is
+ // the key pattern hinted. If the hint was by index name, the value of '_hint' is
+ // {$hint: <String>}, where <String> is the index name hinted.
+ BSONObj _hint;
+
+ int _skip = 0;
+ bool _wantMore = true;
+
+ boost::optional<int> _limit;
+ boost::optional<int> _batchSize;
+
+ bool _fromCommand = false;
+ bool _explain = false;
+
+ std::string _comment;
+
+ int _maxScan = 0;
+ int _maxTimeMS = 0;
+
+ BSONObj _min;
+ BSONObj _max;
+
+ bool _returnKey = false;
+ bool _showRecordId = false;
+ bool _snapshot = false;
+ bool _hasReadPref = false;
+
+ // Options that can be specified in the OP_QUERY 'flags' header.
+ bool _tailable = false;
+ bool _slaveOk = false;
+ bool _oplogReplay = false;
+ bool _noCursorTimeout = false;
+ bool _awaitData = false;
+ bool _exhaust = false;
+ bool _partial = false;
+};
+
+} // namespace mongo
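
The maxTimeMS helpers declared in the header above treat a missing (EOO) element as 0, the special "run indefinitely" value. The sketch below illustrates that convention with a plain std::optional<double> standing in for a BSONElement; the function name and the range checks are assumptions made for the example, not the mongo implementation.

    // parse_max_time_sketch.cpp -- illustrative only; not the mongo parser.
    #include <cassert>
    #include <limits>
    #include <optional>
    #include <string>

    struct ParseResult {
        bool ok;
        int value;
        std::string error;
    };

    // A missing element maps to 0, the "allow to run indefinitely" value described
    // in the header comment; out-of-range values are rejected (an assumption here).
    ParseResult parseMaxTimeMSSketch(std::optional<double> elt) {
        if (!elt) {
            return {true, 0, ""};
        }
        const double v = *elt;
        if (v < 0 || v > std::numeric_limits<int>::max()) {
            return {false, 0, "maxTimeMS out of range"};
        }
        return {true, static_cast<int>(v), ""};
    }

    int main() {
        assert(parseMaxTimeMSSketch(std::nullopt).value == 0);  // absent: no time limit
        assert(parseMaxTimeMSSketch(2500.0).value == 2500);     // normal case
        assert(!parseMaxTimeMSSketch(-1.0).ok);                 // negative rejected
        return 0;
    }
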
diff --git a/src/mongo/db/query/lite_parsed_query_test.cpp b/src/mongo/db/query/lite_parsed_query_test.cpp
index eeae7786d6a..e395ca2c87a 100644
--- a/src/mongo/db/query/lite_parsed_query_test.cpp
+++ b/src/mongo/db/query/lite_parsed_query_test.cpp
@@ -39,956 +39,986 @@
namespace mongo {
namespace {
- using std::unique_ptr;
- using unittest::assertGet;
-
- TEST(LiteParsedQueryTest, InitSortOrder) {
- ASSERT_OK(LiteParsedQuery::makeAsOpQuery("testns",
+using std::unique_ptr;
+using unittest::assertGet;
+
+TEST(LiteParsedQueryTest, InitSortOrder) {
+ ASSERT_OK(LiteParsedQuery::makeAsOpQuery("testns",
+ 0,
+ 1,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ fromjson("{a: 1}"),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false) // explain
+ .getStatus());
+}
+
+TEST(LiteParsedQueryTest, InitSortOrderString) {
+ ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
0,
1,
0,
BSONObj(),
BSONObj(),
+ fromjson("{a: \"\"}"),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false) // explain
+ .getStatus());
+}
+
+TEST(LiteParsedQueryTest, GetFilter) {
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery("testns",
+ 5,
+ 6,
+ 9,
+ BSON("x" << 5),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ ASSERT_EQUALS(BSON("x" << 5), lpq->getFilter());
+}
+
+TEST(LiteParsedQueryTest, NumToReturn) {
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery("testns",
+ 5,
+ 6,
+ 9,
+ BSON("x" << 5),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ ASSERT_EQUALS(6, lpq->getBatchSize());
+ ASSERT(lpq->wantMore());
+}
+
+TEST(LiteParsedQueryTest, NumToReturnNegative) {
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery("testns",
+ 5,
+ -6,
+ 9,
+ BSON("x" << 5),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ ASSERT_EQUALS(6, lpq->getBatchSize());
+ ASSERT(!lpq->wantMore());
+}
+
+TEST(LiteParsedQueryTest, MinFieldsNotPrefixOfMax) {
+ ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
fromjson("{a: 1}"),
+ fromjson("{b: 1}"),
+ false, // snapshot
+ false) // explain
+ .getStatus());
+}
+
+TEST(LiteParsedQueryTest, MinFieldsMoreThanMax) {
+ ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
+ 0,
+ 0,
+ 0,
+ BSONObj(),
BSONObj(),
BSONObj(),
BSONObj(),
+ fromjson("{a: 1, b: 1}"),
+ fromjson("{a: 1}"),
false, // snapshot
false) // explain
- .getStatus());
- }
-
- TEST(LiteParsedQueryTest, InitSortOrderString) {
- ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
- 0,
- 1,
- 0,
- BSONObj(),
- BSONObj(),
- fromjson("{a: \"\"}"),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false) // explain
- .getStatus());
- }
-
- TEST(LiteParsedQueryTest, GetFilter) {
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery("testns",
- 5,
- 6,
- 9,
- BSON("x" << 5),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- ASSERT_EQUALS(BSON("x" << 5 ), lpq->getFilter());
- }
-
- TEST(LiteParsedQueryTest, NumToReturn) {
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery("testns",
- 5,
- 6,
- 9,
- BSON("x" << 5),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- ASSERT_EQUALS(6, lpq->getBatchSize());
- ASSERT(lpq->wantMore());
- }
-
- TEST(LiteParsedQueryTest, NumToReturnNegative) {
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery("testns",
- 5,
- -6,
- 9,
- BSON("x" << 5),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- ASSERT_EQUALS(6, lpq->getBatchSize());
- ASSERT(!lpq->wantMore());
- }
-
- TEST(LiteParsedQueryTest, MinFieldsNotPrefixOfMax) {
- ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- fromjson("{a: 1}"),
- fromjson("{b: 1}"),
- false, // snapshot
- false) // explain
- .getStatus());
- }
-
- TEST(LiteParsedQueryTest, MinFieldsMoreThanMax) {
- ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- fromjson("{a: 1, b: 1}"),
- fromjson("{a: 1}"),
- false, // snapshot
- false) // explain
- .getStatus());
- }
-
- TEST(LiteParsedQueryTest, MinFieldsLessThanMax) {
- ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- fromjson("{a: 1}"),
- fromjson("{a: 1, b: 1}"),
- false, // snapshot
- false) // explain
- .getStatus());
- }
-
- // Helper function which returns the Status of creating a LiteParsedQuery object with the given
- // parameters.
- void assertLiteParsedQuerySuccess(const BSONObj& query,
- const BSONObj& proj,
- const BSONObj& sort) {
-
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery("testns",
- 0,
- 0,
- 0,
- query,
- proj,
- sort,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
- }
-
- //
- // Test compatibility of various projection and sort objects.
- //
-
- TEST(LiteParsedQueryTest, ValidSortProj) {
- assertLiteParsedQuerySuccess(BSONObj(),
- fromjson("{a: 1}"),
- fromjson("{a: 1}"));
-
- assertLiteParsedQuerySuccess(BSONObj(),
- fromjson("{a: {$meta: \"textScore\"}}"),
- fromjson("{a: {$meta: \"textScore\"}}"));
- }
-
- TEST(LiteParsedQueryTest, ForbidNonMetaSortOnFieldWithMetaProject) {
- ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
- 0,
- 0,
- 0,
- BSONObj(),
- fromjson("{a: {$meta: \"textScore\"}}"),
- fromjson("{a: 1}"),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false) // explain
- .getStatus());
-
- assertLiteParsedQuerySuccess(BSONObj(),
- fromjson("{a: {$meta: \"textScore\"}}"),
- fromjson("{b: 1}"));
- }
-
- TEST(LiteParsedQueryTest, ForbidMetaSortOnFieldWithoutMetaProject) {
- ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
- 0,
- 0,
- 0,
- BSONObj(),
- fromjson("{a: 1}"),
- fromjson("{a: {$meta: \"textScore\"}}"),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false) // explain
- .getStatus());
-
- ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
- 0,
- 0,
- 0,
- BSONObj(),
- fromjson("{b: 1}"),
- fromjson("{a: {$meta: \"textScore\"}}"),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false) // explain
- .getStatus());
- }
-
- TEST(LiteParsedQueryTest, MakeFindCmd) {
- auto result = LiteParsedQuery::makeAsFindCmd(NamespaceString("test.ns"),
- BSON("x" << 1),
- 2);
- ASSERT_OK(result.getStatus());
-
- auto&& lpq = result.getValue();
- ASSERT_EQUALS("test.ns", lpq->ns());
- ASSERT_EQUALS(BSON("x" << 1), lpq->getFilter());
- ASSERT_EQUALS(2, lpq->getLimit());
-
- ASSERT_EQUALS(BSONObj(), lpq->getProj());
- ASSERT_EQUALS(BSONObj(), lpq->getSort());
- ASSERT_EQUALS(BSONObj(), lpq->getHint());
- ASSERT_EQUALS(BSONObj(), lpq->getMin());
- ASSERT_EQUALS(BSONObj(), lpq->getMax());
-
- ASSERT_EQUALS(0, lpq->getSkip());
- ASSERT_EQUALS(0, lpq->getMaxScan());
- ASSERT_EQUALS(0, lpq->getMaxTimeMS());
- ASSERT_EQUALS(0, lpq->getOptions());
-
- ASSERT_FALSE(lpq->getBatchSize());
-
- ASSERT_TRUE(lpq->isFromFindCommand());
- ASSERT_FALSE(lpq->isExplain());
- ASSERT_FALSE(lpq->returnKey());
- ASSERT_FALSE(lpq->showRecordId());
- ASSERT_FALSE(lpq->isSnapshot());
- ASSERT_FALSE(lpq->hasReadPref());
- ASSERT_FALSE(lpq->isTailable());
- ASSERT_FALSE(lpq->isSlaveOk());
- ASSERT_FALSE(lpq->isOplogReplay());
- ASSERT_FALSE(lpq->isNoCursorTimeout());
- ASSERT_FALSE(lpq->isAwaitData());
- ASSERT_FALSE(lpq->isExhaust());
- ASSERT_FALSE(lpq->isPartial());
- }
-
- TEST(LiteParsedQueryTest, MakeFindCmdNoLimit) {
- auto result = LiteParsedQuery::makeAsFindCmd(NamespaceString("test.ns"),
- BSON("x" << 1),
- boost::none);
- ASSERT_OK(result.getStatus());
-
- auto&& lpq = result.getValue();
- ASSERT_EQUALS("test.ns", lpq->ns());
- ASSERT_EQUALS(BSON("x" << 1), lpq->getFilter());
-
- ASSERT_EQUALS(BSONObj(), lpq->getProj());
- ASSERT_EQUALS(BSONObj(), lpq->getSort());
- ASSERT_EQUALS(BSONObj(), lpq->getHint());
- ASSERT_EQUALS(BSONObj(), lpq->getMin());
- ASSERT_EQUALS(BSONObj(), lpq->getMax());
-
- ASSERT_EQUALS(0, lpq->getSkip());
- ASSERT_EQUALS(0, lpq->getMaxScan());
- ASSERT_EQUALS(0, lpq->getMaxTimeMS());
- ASSERT_EQUALS(0, lpq->getOptions());
-
- ASSERT_FALSE(lpq->getBatchSize());
- ASSERT_FALSE(lpq->getLimit());
-
- ASSERT_TRUE(lpq->isFromFindCommand());
- ASSERT_FALSE(lpq->isExplain());
- ASSERT_FALSE(lpq->returnKey());
- ASSERT_FALSE(lpq->showRecordId());
- ASSERT_FALSE(lpq->isSnapshot());
- ASSERT_FALSE(lpq->hasReadPref());
- ASSERT_FALSE(lpq->isTailable());
- ASSERT_FALSE(lpq->isSlaveOk());
- ASSERT_FALSE(lpq->isOplogReplay());
- ASSERT_FALSE(lpq->isNoCursorTimeout());
- ASSERT_FALSE(lpq->isAwaitData());
- ASSERT_FALSE(lpq->isExhaust());
- ASSERT_FALSE(lpq->isPartial());
- }
-
- TEST(LiteParsedQueryTest, MakeFindCmdBadLimit) {
- auto status = LiteParsedQuery::makeAsFindCmd(NamespaceString("test.ns"),
- BSON("x" << 1),
- 0).getStatus();
- ASSERT_NOT_OK(status);
- ASSERT_EQUALS(ErrorCodes::BadValue, status.code());
- }
-
- //
- // Text meta BSON element validation
- //
-
- bool isFirstElementTextScoreMeta(const char* sortStr) {
- BSONObj sortObj = fromjson(sortStr);
- BSONElement elt = sortObj.firstElement();
- bool result = LiteParsedQuery::isTextScoreMeta(elt);
- return result;
- }
-
- // Check validation of $meta expressions
- TEST(LiteParsedQueryTest, IsTextScoreMeta) {
- // Valid textScore meta sort
- ASSERT(isFirstElementTextScoreMeta("{a: {$meta: \"textScore\"}}"));
-
- // Invalid textScore meta sorts
- ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: 1}}"));
- ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: \"image\"}}"));
- ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$world: \"textScore\"}}"));
- ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: \"textScore\", b: 1}}"));
- }
-
- //
- // Sort order validation
- // In a valid sort order, each element satisfies one of:
- // 1. a number with value 1
- // 2. a number with value -1
- // 3. isTextScoreMeta
- //
-
- TEST(LiteParsedQueryTest, ValidateSortOrder) {
- // Valid sorts
- ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{}")));
- ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{a: 1}")));
- ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{a: -1}")));
- ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: \"textScore\"}}")));
-
- // Invalid sorts
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: 100}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: 0}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: -100}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: Infinity}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: -Infinity}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: true}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: false}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: null}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {b: 1}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: []}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: [1, 2, 3]}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: \"\"}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: \"bb\"}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: 1}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: \"image\"}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$world: \"textScore\"}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: \"textScore\","
- " b: 1}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{'': 1}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{'': -1}")));
- }
-
- //
- // Tests for parsing a lite parsed query from a command BSON object.
- //
-
- TEST(LiteParsedQueryTest, ParseFromCommandBasic) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 3},"
- "sort: {a: 1},"
- "projection: {_id: 0, a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandWithOptions) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 3},"
- "sort: {a: 1},"
- "projection: {_id: 0, a: 1},"
- "showRecordId: true,"
- "maxScan: 1000}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- // Make sure the values from the command BSON are reflected in the LPQ.
- ASSERT(lpq->showRecordId());
- ASSERT_EQUALS(1000, lpq->getMaxScan());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandHintAsString) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "hint: 'foo_1'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- BSONObj hintObj = lpq->getHint();
- ASSERT_EQUALS(BSON("$hint" << "foo_1"), hintObj);
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandValidSortProj) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "projection: {a: 1},"
- "sort: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- ASSERT_OK(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandValidSortProjMeta) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "projection: {a: {$meta: 'textScore'}},"
- "sort: {a: {$meta: 'textScore'}}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- ASSERT_OK(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandAllFlagsTrue) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "tailable: true,"
- "slaveOk: true,"
- "oplogReplay: true,"
- "noCursorTimeout: true,"
- "awaitData: true,"
- "partial: true}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- // Test that all the flags got set to true.
- ASSERT(lpq->isTailable());
- ASSERT(lpq->isSlaveOk());
- ASSERT(lpq->isOplogReplay());
- ASSERT(lpq->isNoCursorTimeout());
- ASSERT(lpq->isAwaitData());
- ASSERT(lpq->isPartial());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandCommentWithValidMinMax) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "comment: 'the comment',"
- "min: {a: 1},"
- "max: {a: 2}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- ASSERT_EQUALS("the comment", lpq->getComment());
- BSONObj expectedMin = BSON("a" << 1);
- ASSERT_EQUALS(0, expectedMin.woCompare(lpq->getMin()));
- BSONObj expectedMax = BSON("a" << 2);
- ASSERT_EQUALS(0, expectedMax.woCompare(lpq->getMax()));
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandAllNonOptionFields) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "sort: {b: 1},"
- "projection: {c: 1},"
- "hint: {d: 1},"
- "limit: 3,"
- "skip: 5,"
- "batchSize: 90,"
- "singleBatch: false}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- // Check the values inside the LPQ.
- BSONObj expectedQuery = BSON("a" << 1);
- ASSERT_EQUALS(0, expectedQuery.woCompare(lpq->getFilter()));
- BSONObj expectedSort = BSON("b" << 1);
- ASSERT_EQUALS(0, expectedSort.woCompare(lpq->getSort()));
- BSONObj expectedProj = BSON("c" << 1);
- ASSERT_EQUALS(0, expectedProj.woCompare(lpq->getProj()));
- BSONObj expectedHint = BSON("d" << 1);
- ASSERT_EQUALS(0, expectedHint.woCompare(lpq->getHint()));
- ASSERT_EQUALS(3, lpq->getLimit());
- ASSERT_EQUALS(5, lpq->getSkip());
- ASSERT_EQUALS(90, lpq->getBatchSize());
- ASSERT(lpq->wantMore());
- }
-
- //
- // Parsing errors where a field has the wrong type.
- //
-
- TEST(LiteParsedQueryTest, ParseFromCommandQueryWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandSortWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "sort: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandProjWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "projection: 'foo'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandSkipWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "skip: '5',"
- "projection: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandLimitWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "limit: '5',"
- "projection: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandSingleBatchWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "singleBatch: 'false',"
- "projection: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandCommentWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "comment: 1}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandMaxScanWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "maxScan: true,"
- "comment: 'foo'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandMaxTimeMSWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "maxTimeMS: true}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandMaxWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "max: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandMinWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "min: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandReturnKeyWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "returnKey: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
-
- TEST(LiteParsedQueryTest, ParseFromCommandShowRecordIdWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "showRecordId: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandSnapshotWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "snapshot: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandTailableWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "tailable: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandSlaveOkWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "slaveOk: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandOplogReplayWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "oplogReplay: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandNoCursorTimeoutWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "noCursorTimeout: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandAwaitDataWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "tailable: true,"
- "awaitData: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandExhaustWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "exhaust: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandPartialWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "exhaust: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- //
- // Parsing errors where a field has the right type but a bad value.
- //
-
- TEST(LiteParsedQueryTest, ParseFromCommandNegativeSkipError) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "skip: -3,"
- "filter: {a: 3}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandNegativeLimitError) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "limit: -3,"
- "filter: {a: 3}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandNegativeBatchSizeError) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "batchSize: -10,"
- "filter: {a: 3}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandBatchSizeZero) {
- BSONObj cmdObj = fromjson("{find: 'testns', batchSize: 0}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- ASSERT(lpq->getBatchSize());
- ASSERT_EQ(0, lpq->getBatchSize());
-
- ASSERT(!lpq->getLimit());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandDefaultBatchSize) {
- BSONObj cmdObj = fromjson("{find: 'testns'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- ASSERT(!lpq->getBatchSize());
- ASSERT(!lpq->getLimit());
- }
-
- //
- // Errors checked in LiteParsedQuery::validate().
- //
-
- TEST(LiteParsedQueryTest, ParseFromCommandMinMaxDifferentFieldsError) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "min: {a: 3},"
- "max: {b: 4}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandSnapshotPlusSortError) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "sort: {a: 3},"
- "snapshot: true}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandSnapshotPlusHintError) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "snapshot: true,"
- "hint: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseCommandForbidNonMetaSortOnFieldWithMetaProject) {
- BSONObj cmdObj;
-
- cmdObj = fromjson("{find: 'testns',"
- "projection: {a: {$meta: 'textScore'}},"
- "sort: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-
- cmdObj = fromjson("{find: 'testns',"
- "projection: {a: {$meta: 'textScore'}},"
- "sort: {b: 1}}");
- ASSERT_OK(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseCommandForbidMetaSortOnFieldWithoutMetaProject) {
- BSONObj cmdObj;
-
- cmdObj = fromjson("{find: 'testns',"
- "projection: {a: 1},"
- "sort: {a: {$meta: 'textScore'}}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-
- cmdObj = fromjson("{find: 'testns',"
- "projection: {b: 1},"
- "sort: {a: {$meta: 'textScore'}}}");
- result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseCommandForbidExhaust) {
- BSONObj cmdObj = fromjson("{find: 'testns', exhaust: true}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseCommandIsFromFindCommand) {
- BSONObj cmdObj = fromjson("{find: 'testns'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- ASSERT(lpq->isFromFindCommand());
- }
-
- TEST(LiteParsedQueryTest, ParseCommandNotFromFindCommand) {
- std::unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery("testns",
- 5,
- 6,
- 9,
- BSON( "x" << 5 ),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
- ASSERT(!lpq->isFromFindCommand());
- }
-
- TEST(LiteParsedQueryTest, ParseCommandAwaitDataButNotTailable) {
- const NamespaceString nss("test.testns");
- BSONObj cmdObj = fromjson("{find: 'testns', awaitData: true}");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseCommandFirstFieldNotString) {
- BSONObj cmdObj = fromjson("{find: 1}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, DefaultQueryParametersCorrect) {
- BSONObj cmdObj = fromjson("{find: 'testns'}");
-
- const NamespaceString nss("test.testns");
- std::unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, false)));
-
- ASSERT_EQUALS(0, lpq->getSkip());
- ASSERT_EQUALS(true, lpq->wantMore());
- ASSERT_EQUALS(true, lpq->isFromFindCommand());
- ASSERT_EQUALS(false, lpq->isExplain());
- ASSERT_EQUALS(0, lpq->getMaxScan());
- ASSERT_EQUALS(0, lpq->getMaxTimeMS());
- ASSERT_EQUALS(false, lpq->returnKey());
- ASSERT_EQUALS(false, lpq->showRecordId());
- ASSERT_EQUALS(false, lpq->isSnapshot());
- ASSERT_EQUALS(false, lpq->hasReadPref());
- ASSERT_EQUALS(false, lpq->isTailable());
- ASSERT_EQUALS(false, lpq->isSlaveOk());
- ASSERT_EQUALS(false, lpq->isOplogReplay());
- ASSERT_EQUALS(false, lpq->isNoCursorTimeout());
- ASSERT_EQUALS(false, lpq->isAwaitData());
- ASSERT_EQUALS(false, lpq->isExhaust());
- ASSERT_EQUALS(false, lpq->isPartial());
- }
-
- //
- // Extra fields cause the parse to fail.
- //
-
- TEST(LiteParsedQueryTest, ParseFromCommandForbidExtraField) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "snapshot: true,"
- "foo: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandForbidExtraOption) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "snapshot: true,"
- "foo: true}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
-} // namespace mongo
-} // namespace
+ .getStatus());
+}
+
+TEST(LiteParsedQueryTest, MinFieldsLessThanMax) {
+ ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ fromjson("{a: 1}"),
+ fromjson("{a: 1, b: 1}"),
+ false, // snapshot
+ false) // explain
+ .getStatus());
+}
+
+// Helper function which asserts that a LiteParsedQuery object can be created successfully
+// with the given parameters.
+void assertLiteParsedQuerySuccess(const BSONObj& query, const BSONObj& proj, const BSONObj& sort) {
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery("testns",
+ 0,
+ 0,
+ 0,
+ query,
+ proj,
+ sort,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+}
+
+//
+// Test compatibility of various projection and sort objects.
+//
+
+TEST(LiteParsedQueryTest, ValidSortProj) {
+ assertLiteParsedQuerySuccess(BSONObj(), fromjson("{a: 1}"), fromjson("{a: 1}"));
+
+ assertLiteParsedQuerySuccess(BSONObj(),
+ fromjson("{a: {$meta: \"textScore\"}}"),
+ fromjson("{a: {$meta: \"textScore\"}}"));
+}
+
+TEST(LiteParsedQueryTest, ForbidNonMetaSortOnFieldWithMetaProject) {
+ ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{a: {$meta: \"textScore\"}}"),
+ fromjson("{a: 1}"),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false) // explain
+ .getStatus());
+
+ assertLiteParsedQuerySuccess(
+ BSONObj(), fromjson("{a: {$meta: \"textScore\"}}"), fromjson("{b: 1}"));
+}
+
+TEST(LiteParsedQueryTest, ForbidMetaSortOnFieldWithoutMetaProject) {
+ ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{a: 1}"),
+ fromjson("{a: {$meta: \"textScore\"}}"),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false) // explain
+ .getStatus());
+
+ ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{b: 1}"),
+ fromjson("{a: {$meta: \"textScore\"}}"),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false) // explain
+ .getStatus());
+}
+
+TEST(LiteParsedQueryTest, MakeFindCmd) {
+ auto result = LiteParsedQuery::makeAsFindCmd(NamespaceString("test.ns"), BSON("x" << 1), 2);
+ ASSERT_OK(result.getStatus());
+
+ auto&& lpq = result.getValue();
+ ASSERT_EQUALS("test.ns", lpq->ns());
+ ASSERT_EQUALS(BSON("x" << 1), lpq->getFilter());
+ ASSERT_EQUALS(2, lpq->getLimit());
+
+ ASSERT_EQUALS(BSONObj(), lpq->getProj());
+ ASSERT_EQUALS(BSONObj(), lpq->getSort());
+ ASSERT_EQUALS(BSONObj(), lpq->getHint());
+ ASSERT_EQUALS(BSONObj(), lpq->getMin());
+ ASSERT_EQUALS(BSONObj(), lpq->getMax());
+
+ ASSERT_EQUALS(0, lpq->getSkip());
+ ASSERT_EQUALS(0, lpq->getMaxScan());
+ ASSERT_EQUALS(0, lpq->getMaxTimeMS());
+ ASSERT_EQUALS(0, lpq->getOptions());
+
+ ASSERT_FALSE(lpq->getBatchSize());
+
+ ASSERT_TRUE(lpq->isFromFindCommand());
+ ASSERT_FALSE(lpq->isExplain());
+ ASSERT_FALSE(lpq->returnKey());
+ ASSERT_FALSE(lpq->showRecordId());
+ ASSERT_FALSE(lpq->isSnapshot());
+ ASSERT_FALSE(lpq->hasReadPref());
+ ASSERT_FALSE(lpq->isTailable());
+ ASSERT_FALSE(lpq->isSlaveOk());
+ ASSERT_FALSE(lpq->isOplogReplay());
+ ASSERT_FALSE(lpq->isNoCursorTimeout());
+ ASSERT_FALSE(lpq->isAwaitData());
+ ASSERT_FALSE(lpq->isExhaust());
+ ASSERT_FALSE(lpq->isPartial());
+}
+
+TEST(LiteParsedQueryTest, MakeFindCmdNoLimit) {
+ auto result =
+ LiteParsedQuery::makeAsFindCmd(NamespaceString("test.ns"), BSON("x" << 1), boost::none);
+ ASSERT_OK(result.getStatus());
+
+ auto&& lpq = result.getValue();
+ ASSERT_EQUALS("test.ns", lpq->ns());
+ ASSERT_EQUALS(BSON("x" << 1), lpq->getFilter());
+
+ ASSERT_EQUALS(BSONObj(), lpq->getProj());
+ ASSERT_EQUALS(BSONObj(), lpq->getSort());
+ ASSERT_EQUALS(BSONObj(), lpq->getHint());
+ ASSERT_EQUALS(BSONObj(), lpq->getMin());
+ ASSERT_EQUALS(BSONObj(), lpq->getMax());
+
+ ASSERT_EQUALS(0, lpq->getSkip());
+ ASSERT_EQUALS(0, lpq->getMaxScan());
+ ASSERT_EQUALS(0, lpq->getMaxTimeMS());
+ ASSERT_EQUALS(0, lpq->getOptions());
+
+ ASSERT_FALSE(lpq->getBatchSize());
+ ASSERT_FALSE(lpq->getLimit());
+
+ ASSERT_TRUE(lpq->isFromFindCommand());
+ ASSERT_FALSE(lpq->isExplain());
+ ASSERT_FALSE(lpq->returnKey());
+ ASSERT_FALSE(lpq->showRecordId());
+ ASSERT_FALSE(lpq->isSnapshot());
+ ASSERT_FALSE(lpq->hasReadPref());
+ ASSERT_FALSE(lpq->isTailable());
+ ASSERT_FALSE(lpq->isSlaveOk());
+ ASSERT_FALSE(lpq->isOplogReplay());
+ ASSERT_FALSE(lpq->isNoCursorTimeout());
+ ASSERT_FALSE(lpq->isAwaitData());
+ ASSERT_FALSE(lpq->isExhaust());
+ ASSERT_FALSE(lpq->isPartial());
+}
+
+TEST(LiteParsedQueryTest, MakeFindCmdBadLimit) {
+ auto status =
+ LiteParsedQuery::makeAsFindCmd(NamespaceString("test.ns"), BSON("x" << 1), 0).getStatus();
+ ASSERT_NOT_OK(status);
+ ASSERT_EQUALS(ErrorCodes::BadValue, status.code());
+}
+
+//
+// Text meta BSON element validation
+//
+
+bool isFirstElementTextScoreMeta(const char* sortStr) {
+ BSONObj sortObj = fromjson(sortStr);
+ BSONElement elt = sortObj.firstElement();
+ bool result = LiteParsedQuery::isTextScoreMeta(elt);
+ return result;
+}
+
+// Check validation of $meta expressions
+TEST(LiteParsedQueryTest, IsTextScoreMeta) {
+ // Valid textScore meta sort
+ ASSERT(isFirstElementTextScoreMeta("{a: {$meta: \"textScore\"}}"));
+
+ // Invalid textScore meta sorts
+ ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: 1}}"));
+ ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: \"image\"}}"));
+ ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$world: \"textScore\"}}"));
+ ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: \"textScore\", b: 1}}"));
+}
+
+//
+// Sort order validation
+// In a valid sort order, each element satisfies one of:
+// 1. a number with value 1
+// 2. a number with value -1
+// 3. isTextScoreMeta
+//
+
+TEST(LiteParsedQueryTest, ValidateSortOrder) {
+ // Valid sorts
+ ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{}")));
+ ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{a: 1}")));
+ ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{a: -1}")));
+ ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: \"textScore\"}}")));
+
+ // Invalid sorts
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: 100}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: 0}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: -100}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: Infinity}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: -Infinity}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: true}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: false}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: null}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {}}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {b: 1}}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: []}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: [1, 2, 3]}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: \"\"}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: \"bb\"}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: 1}}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: \"image\"}}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$world: \"textScore\"}}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson(
+ "{a: {$meta: \"textScore\","
+ " b: 1}}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{'': 1}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{'': -1}")));
+}
+
+//
+// Tests for parsing a lite parsed query from a command BSON object.
+//
+
+TEST(LiteParsedQueryTest, ParseFromCommandBasic) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 3},"
+ "sort: {a: 1},"
+ "projection: {_id: 0, a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandWithOptions) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 3},"
+ "sort: {a: 1},"
+ "projection: {_id: 0, a: 1},"
+ "showRecordId: true,"
+ "maxScan: 1000}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ // Make sure the values from the command BSON are reflected in the LPQ.
+ ASSERT(lpq->showRecordId());
+ ASSERT_EQUALS(1000, lpq->getMaxScan());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandHintAsString) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "hint: 'foo_1'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ BSONObj hintObj = lpq->getHint();
+ ASSERT_EQUALS(BSON("$hint"
+ << "foo_1"),
+ hintObj);
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandValidSortProj) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {a: 1},"
+ "sort: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ ASSERT_OK(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandValidSortProjMeta) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {a: {$meta: 'textScore'}},"
+ "sort: {a: {$meta: 'textScore'}}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ ASSERT_OK(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandAllFlagsTrue) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "tailable: true,"
+ "slaveOk: true,"
+ "oplogReplay: true,"
+ "noCursorTimeout: true,"
+ "awaitData: true,"
+ "partial: true}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ // Test that all the flags got set to true.
+ ASSERT(lpq->isTailable());
+ ASSERT(lpq->isSlaveOk());
+ ASSERT(lpq->isOplogReplay());
+ ASSERT(lpq->isNoCursorTimeout());
+ ASSERT(lpq->isAwaitData());
+ ASSERT(lpq->isPartial());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandCommentWithValidMinMax) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "comment: 'the comment',"
+ "min: {a: 1},"
+ "max: {a: 2}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT_EQUALS("the comment", lpq->getComment());
+ BSONObj expectedMin = BSON("a" << 1);
+ ASSERT_EQUALS(0, expectedMin.woCompare(lpq->getMin()));
+ BSONObj expectedMax = BSON("a" << 2);
+ ASSERT_EQUALS(0, expectedMax.woCompare(lpq->getMax()));
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandAllNonOptionFields) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "sort: {b: 1},"
+ "projection: {c: 1},"
+ "hint: {d: 1},"
+ "limit: 3,"
+ "skip: 5,"
+ "batchSize: 90,"
+ "singleBatch: false}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ // Check the values inside the LPQ.
+ BSONObj expectedQuery = BSON("a" << 1);
+ ASSERT_EQUALS(0, expectedQuery.woCompare(lpq->getFilter()));
+ BSONObj expectedSort = BSON("b" << 1);
+ ASSERT_EQUALS(0, expectedSort.woCompare(lpq->getSort()));
+ BSONObj expectedProj = BSON("c" << 1);
+ ASSERT_EQUALS(0, expectedProj.woCompare(lpq->getProj()));
+ BSONObj expectedHint = BSON("d" << 1);
+ ASSERT_EQUALS(0, expectedHint.woCompare(lpq->getHint()));
+ ASSERT_EQUALS(3, lpq->getLimit());
+ ASSERT_EQUALS(5, lpq->getSkip());
+ ASSERT_EQUALS(90, lpq->getBatchSize());
+ ASSERT(lpq->wantMore());
+}
+
+//
+// Parsing errors where a field has the wrong type.
+//
+
+TEST(LiteParsedQueryTest, ParseFromCommandQueryWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandSortWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "sort: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandProjWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "projection: 'foo'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandSkipWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "skip: '5',"
+ "projection: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandLimitWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "limit: '5',"
+ "projection: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandSingleBatchWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "singleBatch: 'false',"
+ "projection: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandCommentWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "comment: 1}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandMaxScanWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "maxScan: true,"
+ "comment: 'foo'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandMaxTimeMSWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "maxTimeMS: true}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandMaxWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "max: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandMinWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "min: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandReturnKeyWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "returnKey: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+
+TEST(LiteParsedQueryTest, ParseFromCommandShowRecordIdWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "showRecordId: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandSnapshotWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "snapshot: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandTailableWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "tailable: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandSlaveOkWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "slaveOk: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandOplogReplayWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "oplogReplay: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandNoCursorTimeoutWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "noCursorTimeout: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandAwaitDataWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "tailable: true,"
+ "awaitData: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandExhaustWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "exhaust: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandPartialWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "exhaust: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+//
+// Parsing errors where a field has the right type but a bad value.
+//
+
+TEST(LiteParsedQueryTest, ParseFromCommandNegativeSkipError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "skip: -3,"
+ "filter: {a: 3}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandNegativeLimitError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "limit: -3,"
+ "filter: {a: 3}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandNegativeBatchSizeError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "batchSize: -10,"
+ "filter: {a: 3}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandBatchSizeZero) {
+ BSONObj cmdObj = fromjson("{find: 'testns', batchSize: 0}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT(lpq->getBatchSize());
+ ASSERT_EQ(0, lpq->getBatchSize());
+
+ ASSERT(!lpq->getLimit());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandDefaultBatchSize) {
+ BSONObj cmdObj = fromjson("{find: 'testns'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT(!lpq->getBatchSize());
+ ASSERT(!lpq->getLimit());
+}
+
+//
+// Errors checked in LiteParsedQuery::validate().
+//
+
+TEST(LiteParsedQueryTest, ParseFromCommandMinMaxDifferentFieldsError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "min: {a: 3},"
+ "max: {b: 4}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandSnapshotPlusSortError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "sort: {a: 3},"
+ "snapshot: true}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandSnapshotPlusHintError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "snapshot: true,"
+ "hint: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseCommandForbidNonMetaSortOnFieldWithMetaProject) {
+ BSONObj cmdObj;
+
+ cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {a: {$meta: 'textScore'}},"
+ "sort: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+
+ cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {a: {$meta: 'textScore'}},"
+ "sort: {b: 1}}");
+ ASSERT_OK(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseCommandForbidMetaSortOnFieldWithoutMetaProject) {
+ BSONObj cmdObj;
+
+ cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {a: 1},"
+ "sort: {a: {$meta: 'textScore'}}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+
+ cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {b: 1},"
+ "sort: {a: {$meta: 'textScore'}}}");
+ result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseCommandForbidExhaust) {
+ BSONObj cmdObj = fromjson("{find: 'testns', exhaust: true}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseCommandIsFromFindCommand) {
+ BSONObj cmdObj = fromjson("{find: 'testns'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT(lpq->isFromFindCommand());
+}
+
+TEST(LiteParsedQueryTest, ParseCommandNotFromFindCommand) {
+ std::unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeAsOpQuery("testns",
+ 5,
+ 6,
+ 9,
+ BSON("x" << 5),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+ ASSERT(!lpq->isFromFindCommand());
+}
+
+TEST(LiteParsedQueryTest, ParseCommandAwaitDataButNotTailable) {
+ const NamespaceString nss("test.testns");
+ BSONObj cmdObj = fromjson("{find: 'testns', awaitData: true}");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseCommandFirstFieldNotString) {
+ BSONObj cmdObj = fromjson("{find: 1}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, DefaultQueryParametersCorrect) {
+ BSONObj cmdObj = fromjson("{find: 'testns'}");
+
+ const NamespaceString nss("test.testns");
+ std::unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, false)));
+
+ ASSERT_EQUALS(0, lpq->getSkip());
+ ASSERT_EQUALS(true, lpq->wantMore());
+ ASSERT_EQUALS(true, lpq->isFromFindCommand());
+ ASSERT_EQUALS(false, lpq->isExplain());
+ ASSERT_EQUALS(0, lpq->getMaxScan());
+ ASSERT_EQUALS(0, lpq->getMaxTimeMS());
+ ASSERT_EQUALS(false, lpq->returnKey());
+ ASSERT_EQUALS(false, lpq->showRecordId());
+ ASSERT_EQUALS(false, lpq->isSnapshot());
+ ASSERT_EQUALS(false, lpq->hasReadPref());
+ ASSERT_EQUALS(false, lpq->isTailable());
+ ASSERT_EQUALS(false, lpq->isSlaveOk());
+ ASSERT_EQUALS(false, lpq->isOplogReplay());
+ ASSERT_EQUALS(false, lpq->isNoCursorTimeout());
+ ASSERT_EQUALS(false, lpq->isAwaitData());
+ ASSERT_EQUALS(false, lpq->isExhaust());
+ ASSERT_EQUALS(false, lpq->isPartial());
+}
+
+//
+// Extra fields cause the parse to fail.
+//
+
+TEST(LiteParsedQueryTest, ParseFromCommandForbidExtraField) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "snapshot: true,"
+ "foo: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandForbidExtraOption) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "snapshot: true,"
+ "foo: true}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+} // namespace mongo
+} // namespace
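
For reference, a minimal sketch (not part of this diff) of driving the same parse entry point the tests above exercise. The include paths, the helper function name, and the error-handling pattern are illustrative assumptions; only makeFromFindCommand() and its getters come from the code under test.

#include <memory>
#include <utility>

#include "mongo/base/status.h"
#include "mongo/db/json.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/lite_parsed_query.h"

namespace {

// Hypothetical helper: parse a find command object and surface any parse or
// validation failure (wrong field type, negative limit, unknown field, ...)
// to the caller as a Status.
mongo::Status parseFindCommandSketch() {
    const mongo::NamespaceString nss("test.testns");
    mongo::BSONObj cmdObj = mongo::fromjson(
        "{find: 'testns', filter: {a: 1}, sort: {a: 1}, limit: 3}");

    const bool isExplain = false;
    auto result = mongo::LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
    if (!result.isOK()) {
        return result.getStatus();
    }

    // On success the parsed query is read through the same getters the tests use,
    // e.g. getFilter(), getSort(), getLimit().
    std::unique_ptr<mongo::LiteParsedQuery> lpq(std::move(result.getValue()));
    (void)lpq;
    return mongo::Status::OK();
}

}  // namespace
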
diff --git a/src/mongo/db/query/lru_key_value.h b/src/mongo/db/query/lru_key_value.h
index 8215c029375..7553ced9020 100644
--- a/src/mongo/db/query/lru_key_value.h
+++ b/src/mongo/db/query/lru_key_value.h
@@ -37,186 +37,192 @@
namespace mongo {
+/**
+ * A key-value store structure with a least recently used (LRU) replacement
+ * policy. The number of entries allowed in the kv-store is set as a constant
+ * upon construction.
+ *
+ * Caveat:
+ * This kv-store is NOT thread safe! The client to this utility is responsible
+ * for protecting concurrent access to the LRU store if used in a threaded
+ * context.
+ *
+ * Implemented as a doubly-linked list (std::list) with a hash map
+ * (boost::unordered_map) for quickly locating the kv-store entries. The
+ * add(), get(), and remove() operations are all O(1).
+ *
+ * The keys of generic type K map to values of type V*. The V*
+ * pointers are owned by the kv-store.
+ *
+ * TODO: We could move this into the util/ directory and do any cleanup necessary to make it
+ * fully general.
+ */
+template <class K, class V>
+class LRUKeyValue {
+public:
+ LRUKeyValue(size_t maxSize) : _maxSize(maxSize), _currentSize(0){};
+
+ ~LRUKeyValue() {
+ clear();
+ }
+
+ typedef std::pair<K, V*> KVListEntry;
+
+ typedef std::list<KVListEntry> KVList;
+ typedef typename KVList::iterator KVListIt;
+ typedef typename KVList::const_iterator KVListConstIt;
+
+ typedef boost::unordered_map<K, KVListIt> KVMap;
+ typedef typename KVMap::const_iterator KVMapConstIt;
+
/**
- * A key-value store structure with a least recently used (LRU) replacement
- * policy. The number of entries allowed in the kv-store is set as a constant
- * upon construction.
+ * Add a (K, V*) pair to the store, where 'key' can
+ * be used to retrieve value 'entry' from the store.
*
- * Caveat:
- * This kv-store is NOT thread safe! The client to this utility is responsible
- * for protecting concurrent access to the LRU store if used in a threaded
- * context.
+ * Takes ownership of 'entry'.
*
- * Implemented as a doubly-linked list (std::list) with a hash map
- * (boost::unordered_map) for quickly locating the kv-store entries. The
- * add(), get(), and remove() operations are all O(1).
+ * If 'key' already exists in the kv-store, 'entry' will
+ * simply replace what is already there.
*
- * The keys of generic type K map to values of type V*. The V*
- * pointers are owned by the kv-store.
+ * The least recently used entry is evicted if the
+ * kv-store is full prior to the add() operation.
*
- * TODO: We could move this into the util/ directory and do any cleanup necessary to make it
- * fully general.
+ * If an entry is evicted, it will be returned in
+ * a unique_ptr for the caller to use before disposing.
*/
- template<class K, class V>
- class LRUKeyValue {
- public:
- LRUKeyValue(size_t maxSize) : _maxSize(maxSize), _currentSize(0) { };
-
- ~LRUKeyValue() {
- clear();
+ std::unique_ptr<V> add(const K& key, V* entry) {
+ // If the key already exists, delete it first.
+ KVMapConstIt i = _kvMap.find(key);
+ if (i != _kvMap.end()) {
+ KVListIt found = i->second;
+ delete found->second;
+ _kvMap.erase(i);
+ _kvList.erase(found);
+ _currentSize--;
}
- typedef std::pair<K, V*> KVListEntry;
-
- typedef std::list<KVListEntry> KVList;
- typedef typename KVList::iterator KVListIt;
- typedef typename KVList::const_iterator KVListConstIt;
-
- typedef boost::unordered_map<K, KVListIt> KVMap;
- typedef typename KVMap::const_iterator KVMapConstIt;
-
- /**
- * Add an (K, V*) pair to the store, where 'key' can
- * be used to retrieve value 'entry' from the store.
- *
- * Takes ownership of 'entry'.
- *
- * If 'key' already exists in the kv-store, 'entry' will
- * simply replace what is already there.
- *
- * The least recently used entry is evicted if the
- * kv-store is full prior to the add() operation.
- *
- * If an entry is evicted, it will be returned in
- * an unique_ptr for the caller to use before disposing.
- */
- std::unique_ptr<V> add(const K& key, V* entry) {
- // If the key already exists, delete it first.
- KVMapConstIt i = _kvMap.find(key);
- if (i != _kvMap.end()) {
- KVListIt found = i->second;
- delete found->second;
- _kvMap.erase(i);
- _kvList.erase(found);
- _currentSize--;
- }
-
- _kvList.push_front(std::make_pair(key, entry));
- _kvMap[key] = _kvList.begin();
- _currentSize++;
-
- // If the store has grown beyond its allowed size,
- // evict the least recently used entry.
- if (_currentSize > _maxSize) {
- V* evictedEntry = _kvList.back().second;
- invariant(evictedEntry);
-
- _kvMap.erase(_kvList.back().first);
- _kvList.pop_back();
- _currentSize--;
- invariant(_currentSize == _maxSize);
-
- // Pass ownership of evicted entry to caller.
- // If caller chooses to ignore this unique_ptr,
- // the evicted entry will be deleted automatically.
- return std::unique_ptr<V>(evictedEntry);
- }
- return std::unique_ptr<V>();
- }
+ _kvList.push_front(std::make_pair(key, entry));
+ _kvMap[key] = _kvList.begin();
+ _currentSize++;
- /**
- * Retrieve the value associated with 'key' from
- * the kv-store. The value is returned through the
- * out-parameter 'entryOut'.
- *
- * The kv-store retains ownership of 'entryOut', so
- * it should not be deleted by the caller.
- *
- * As a side effect, the retrieved entry is promoted
- * to the most recently used.
- */
- Status get(const K& key, V** entryOut) const {
- KVMapConstIt i = _kvMap.find(key);
- if (i == _kvMap.end()) {
- return Status(ErrorCodes::NoSuchKey, "no such key in LRU key-value store");
- }
- KVListIt found = i->second;
- V* foundEntry = found->second;
+ // If the store has grown beyond its allowed size,
+ // evict the least recently used entry.
+ if (_currentSize > _maxSize) {
+ V* evictedEntry = _kvList.back().second;
+ invariant(evictedEntry);
- // Promote the kv-store entry to the front of the list.
- // It is now the most recently used.
- _kvMap.erase(i);
- _kvList.erase(found);
- _kvList.push_front(std::make_pair(key, foundEntry));
- _kvMap[key] = _kvList.begin();
+ _kvMap.erase(_kvList.back().first);
+ _kvList.pop_back();
+ _currentSize--;
+ invariant(_currentSize == _maxSize);
- *entryOut = foundEntry;
- return Status::OK();
+ // Pass ownership of evicted entry to caller.
+ // If caller chooses to ignore this unique_ptr,
+ // the evicted entry will be deleted automatically.
+ return std::unique_ptr<V>(evictedEntry);
}
+ return std::unique_ptr<V>();
+ }
- /**
- * Remove the kv-store entry keyed by 'key'.
- */
- Status remove(const K& key) {
- KVMapConstIt i = _kvMap.find(key);
- if (i == _kvMap.end()) {
- return Status(ErrorCodes::NoSuchKey, "no such key in LRU key-value store");
- }
- KVListIt found = i->second;
- delete found->second;
- _kvMap.erase(i);
- _kvList.erase(found);
- _currentSize--;
- return Status::OK();
+ /**
+ * Retrieve the value associated with 'key' from
+ * the kv-store. The value is returned through the
+ * out-parameter 'entryOut'.
+ *
+ * The kv-store retains ownership of 'entryOut', so
+ * it should not be deleted by the caller.
+ *
+ * As a side effect, the retrieved entry is promoted
+ * to the most recently used.
+ */
+ Status get(const K& key, V** entryOut) const {
+ KVMapConstIt i = _kvMap.find(key);
+ if (i == _kvMap.end()) {
+ return Status(ErrorCodes::NoSuchKey, "no such key in LRU key-value store");
}
+ KVListIt found = i->second;
+ V* foundEntry = found->second;
+
+ // Promote the kv-store entry to the front of the list.
+ // It is now the most recently used.
+ _kvMap.erase(i);
+ _kvList.erase(found);
+ _kvList.push_front(std::make_pair(key, foundEntry));
+ _kvMap[key] = _kvList.begin();
+
+ *entryOut = foundEntry;
+ return Status::OK();
+ }
- /**
- * Deletes all entries in the kv-store.
- */
- void clear() {
- for (KVListIt i = _kvList.begin(); i != _kvList.end(); i++) {
- delete i->second;
- }
- _kvList.clear();
- _kvMap.clear();
- _currentSize = 0;
+ /**
+ * Remove the kv-store entry keyed by 'key'.
+ */
+ Status remove(const K& key) {
+ KVMapConstIt i = _kvMap.find(key);
+ if (i == _kvMap.end()) {
+ return Status(ErrorCodes::NoSuchKey, "no such key in LRU key-value store");
}
+ KVListIt found = i->second;
+ delete found->second;
+ _kvMap.erase(i);
+ _kvList.erase(found);
+ _currentSize--;
+ return Status::OK();
+ }
- /**
- * Returns true if entry is found in the kv-store.
- */
- bool hasKey(const K& key) const {
- return _kvMap.find(key) != _kvMap.end();
+ /**
+ * Deletes all entries in the kv-store.
+ */
+ void clear() {
+ for (KVListIt i = _kvList.begin(); i != _kvList.end(); i++) {
+ delete i->second;
}
+ _kvList.clear();
+ _kvMap.clear();
+ _currentSize = 0;
+ }
- /**
- * Returns the number of entries currently in the kv-store.
- */
- size_t size() const { return _currentSize; }
+ /**
+ * Returns true if entry is found in the kv-store.
+ */
+ bool hasKey(const K& key) const {
+ return _kvMap.find(key) != _kvMap.end();
+ }
- /**
- * TODO: The kv-store should implement its own iterator. Calling through to the underlying
- * iterator exposes the internals, and forces the caller to make a horrible type
- * declaration.
- */
- KVListConstIt begin() const { return _kvList.begin(); }
+ /**
+ * Returns the number of entries currently in the kv-store.
+ */
+ size_t size() const {
+ return _currentSize;
+ }
+
+ /**
+ * TODO: The kv-store should implement its own iterator. Calling through to the underlying
+ * iterator exposes the internals, and forces the caller to make a horrible type
+ * declaration.
+ */
+ KVListConstIt begin() const {
+ return _kvList.begin();
+ }
- KVListConstIt end() const { return _kvList.end(); }
+ KVListConstIt end() const {
+ return _kvList.end();
+ }
- private:
- // The maximum allowable number of entries in the kv-store.
- const size_t _maxSize;
+private:
+ // The maximum allowable number of entries in the kv-store.
+ const size_t _maxSize;
- // The number of entries currently in the kv-store.
- size_t _currentSize;
+ // The number of entries currently in the kv-store.
+ size_t _currentSize;
- // (K, V*) pairs are stored in this std::list. They are sorted in order
- // of use, where the front is the most recently used and the back is the
- // least recently used.
- mutable KVList _kvList;
+ // (K, V*) pairs are stored in this std::list. They are sorted in order
+ // of use, where the front is the most recently used and the back is the
+ // least recently used.
+ mutable KVList _kvList;
- // Maps from a key to the corresponding std::list entry.
- mutable KVMap _kvMap;
- };
+ // Maps from a key to the corresponding std::list entry.
+ mutable KVMap _kvMap;
+};
} // namespace mongo
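
For reference, a minimal usage sketch (not part of this diff) matching the ownership and eviction rules documented in the header above: the store owns the V* values, add() hands an evicted value back through a unique_ptr, and get() returns a pointer the caller must not delete. The include paths are assumptions.

#include <memory>
#include <string>

#include "mongo/db/query/lru_key_value.h"
#include "mongo/util/assert_util.h"

void lruKeyValueSketch() {
    // Capacity is fixed at construction; the store is not thread safe, so a real
    // caller has to serialize access itself.
    mongo::LRUKeyValue<int, std::string> store(2);

    // The store takes ownership of the heap-allocated values.
    store.add(1, new std::string("one"));
    store.add(2, new std::string("two"));

    // A third add() overflows the capacity of 2 and evicts the least recently
    // used entry (key 1); ownership of the evicted value is returned to us.
    std::unique_ptr<std::string> evicted = store.add(3, new std::string("three"));
    invariant(evicted && *evicted == "one");

    // get() promotes the entry to most recently used; the store keeps ownership,
    // so 'value' must not be deleted here.
    std::string* value = NULL;
    mongo::Status s = store.get(2, &value);
    invariant(s.isOK() && *value == "two");
}
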
diff --git a/src/mongo/db/query/lru_key_value_test.cpp b/src/mongo/db/query/lru_key_value_test.cpp
index da9c9c73f34..e77fcec0bc9 100644
--- a/src/mongo/db/query/lru_key_value_test.cpp
+++ b/src/mongo/db/query/lru_key_value_test.cpp
@@ -35,163 +35,163 @@ using namespace mongo;
namespace {
- //
- // Convenience functions
- //
-
- void assertInKVStore(LRUKeyValue<int, int>& cache, int key, int value) {
- int* cachedValue = NULL;
- ASSERT_TRUE(cache.hasKey(key));
- Status s = cache.get(key, &cachedValue);
- ASSERT_OK(s);
- ASSERT_EQUALS(*cachedValue, value);
- }
+//
+// Convenience functions
+//
+
+void assertInKVStore(LRUKeyValue<int, int>& cache, int key, int value) {
+ int* cachedValue = NULL;
+ ASSERT_TRUE(cache.hasKey(key));
+ Status s = cache.get(key, &cachedValue);
+ ASSERT_OK(s);
+ ASSERT_EQUALS(*cachedValue, value);
+}
+
+void assertNotInKVStore(LRUKeyValue<int, int>& cache, int key) {
+ int* cachedValue = NULL;
+ ASSERT_FALSE(cache.hasKey(key));
+ Status s = cache.get(key, &cachedValue);
+ ASSERT_NOT_OK(s);
+}
- void assertNotInKVStore(LRUKeyValue<int, int>& cache, int key) {
- int* cachedValue = NULL;
- ASSERT_FALSE(cache.hasKey(key));
- Status s = cache.get(key, &cachedValue);
- ASSERT_NOT_OK(s);
- }
+/**
+ * Test that we can add an entry and get it back out.
+ */
+TEST(LRUKeyValueTest, BasicAddGet) {
+ LRUKeyValue<int, int> cache(100);
+ cache.add(1, new int(2));
+ assertInKVStore(cache, 1, 2);
+}
- /**
- * Test that we can add an entry and get it back out.
- */
- TEST(LRUKeyValueTest, BasicAddGet) {
- LRUKeyValue<int, int> cache(100);
- cache.add(1, new int(2));
- assertInKVStore(cache, 1, 2);
- }
+/**
+ * A kv-store with a max size of 0 isn't too useful, but test
+ * that at the very least we don't blow up.
+ */
+TEST(LRUKeyValueTest, SizeZeroCache) {
+ LRUKeyValue<int, int> cache(0);
+ cache.add(1, new int(2));
+ assertNotInKVStore(cache, 1);
+}
- /**
- * A kv-store with a max size of 0 isn't too useful, but test
- * that at the very least we don't blow up.
- */
- TEST(LRUKeyValueTest, SizeZeroCache) {
- LRUKeyValue<int, int> cache(0);
- cache.add(1, new int(2));
- assertNotInKVStore(cache, 1);
- }
+/**
+ * Make sure eviction and promotion work properly with
+ * a kv-store of size 1.
+ */
+TEST(LRUKeyValueTest, SizeOneCache) {
+ LRUKeyValue<int, int> cache(1);
+ cache.add(0, new int(0));
+ assertInKVStore(cache, 0, 0);
- /**
- * Make sure eviction and promotion work properly with
- * a kv-store of size 1.
- */
- TEST(LRUKeyValueTest, SizeOneCache) {
- LRUKeyValue<int, int> cache(1);
- cache.add(0, new int(0));
- assertInKVStore(cache, 0, 0);
-
- // Second entry should immediately evict the first.
- cache.add(1, new int(1));
- assertNotInKVStore(cache, 0);
- assertInKVStore(cache, 1, 1);
+ // Second entry should immediately evict the first.
+ cache.add(1, new int(1));
+ assertNotInKVStore(cache, 0);
+ assertInKVStore(cache, 1, 1);
+}
+
+/**
+ * Fill up a size 10 kv-store with 10 entries. Call get()
+ * on every entry except for one. Then call add() and
+ * make sure that the proper entry got evicted.
+ */
+TEST(LRUKeyValueTest, EvictionTest) {
+ int maxSize = 10;
+ LRUKeyValue<int, int> cache(maxSize);
+ for (int i = 0; i < maxSize; ++i) {
+ std::unique_ptr<int> evicted = cache.add(i, new int(i));
+ ASSERT(NULL == evicted.get());
}
+ ASSERT_EQUALS(cache.size(), (size_t)maxSize);
- /**
- * Fill up a size 10 kv-store with 10 entries. Call get()
- * on every entry except for one. Then call add() and
- * make sure that the proper entry got evicted.
- */
- TEST(LRUKeyValueTest, EvictionTest) {
- int maxSize = 10;
- LRUKeyValue<int, int> cache(maxSize);
- for (int i = 0; i < maxSize; ++i) {
- std::unique_ptr<int> evicted = cache.add(i, new int(i));
- ASSERT(NULL == evicted.get());
+ // Call get() on all but one key.
+ int evictKey = 5;
+ for (int i = 0; i < maxSize; ++i) {
+ if (i == evictKey) {
+ continue;
}
- ASSERT_EQUALS(cache.size(), (size_t)maxSize);
+ assertInKVStore(cache, i, i);
+ }
- // Call get() on all but one key.
- int evictKey = 5;
- for (int i = 0; i < maxSize; ++i) {
- if (i == evictKey) { continue; }
+ // Adding another entry causes an eviction.
+ std::unique_ptr<int> evicted = cache.add(maxSize + 1, new int(maxSize + 1));
+ ASSERT_EQUALS(cache.size(), (size_t)maxSize);
+ ASSERT(NULL != evicted.get());
+ ASSERT_EQUALS(*evicted, evictKey);
+
+ // Check that the least recently accessed has been evicted.
+ for (int i = 0; i < maxSize; ++i) {
+ if (i == evictKey) {
+ assertNotInKVStore(cache, evictKey);
+ } else {
assertInKVStore(cache, i, i);
}
-
- // Adding another entry causes an eviction.
- std::unique_ptr<int> evicted = cache.add(maxSize + 1, new int(maxSize + 1));
- ASSERT_EQUALS(cache.size(), (size_t)maxSize);
- ASSERT(NULL != evicted.get());
- ASSERT_EQUALS(*evicted, evictKey);
-
- // Check that the least recently accessed has been evicted.
- for (int i = 0; i < maxSize; ++i) {
- if (i == evictKey) {
- assertNotInKVStore(cache, evictKey);
- }
- else {
- assertInKVStore(cache, i, i);
- }
- }
}
+}
- /**
- * Fill up a size 10 kv-store with 10 entries. Call get()
- * on a single entry to promote it to most recently
- * accessed. Then cause 9 evictions and make sure only
- * the entry on which we called get() remains.
- */
- TEST(LRUKeyValueTest, PromotionTest) {
- int maxSize = 10;
- LRUKeyValue<int, int> cache(maxSize);
- for (int i = 0; i < maxSize; ++i) {
- std::unique_ptr<int> evicted = cache.add(i, new int(i));
- ASSERT(NULL == evicted.get());
- }
- ASSERT_EQUALS(cache.size(), (size_t)maxSize);
+/**
+ * Fill up a size 10 kv-store with 10 entries. Call get()
+ * on a single entry to promote it to most recently
+ * accessed. Then cause 9 evictions and make sure only
+ * the entry on which we called get() remains.
+ */
+TEST(LRUKeyValueTest, PromotionTest) {
+ int maxSize = 10;
+ LRUKeyValue<int, int> cache(maxSize);
+ for (int i = 0; i < maxSize; ++i) {
+ std::unique_ptr<int> evicted = cache.add(i, new int(i));
+ ASSERT(NULL == evicted.get());
+ }
+ ASSERT_EQUALS(cache.size(), (size_t)maxSize);
- // Call get() on a particular key.
- int promoteKey = 5;
- assertInKVStore(cache, promoteKey, promoteKey);
+ // Call get() on a particular key.
+ int promoteKey = 5;
+ assertInKVStore(cache, promoteKey, promoteKey);
- // Evict all but one of the original entries.
- for (int i = maxSize; i < (maxSize + maxSize - 1); ++i) {
- std::unique_ptr<int> evicted = cache.add(i, new int(i));
- ASSERT(NULL != evicted.get());
- }
- ASSERT_EQUALS(cache.size(), (size_t)maxSize);
-
- // Check that the promoteKey has not been evicted.
- for (int i = 0; i < maxSize; ++i) {
- if (i == promoteKey) {
- assertInKVStore(cache, promoteKey, promoteKey);
- }
- else {
- assertNotInKVStore(cache, i);
- }
+ // Evict all but one of the original entries.
+ for (int i = maxSize; i < (maxSize + maxSize - 1); ++i) {
+ std::unique_ptr<int> evicted = cache.add(i, new int(i));
+ ASSERT(NULL != evicted.get());
+ }
+ ASSERT_EQUALS(cache.size(), (size_t)maxSize);
+
+ // Check that the promoteKey has not been evicted.
+ for (int i = 0; i < maxSize; ++i) {
+ if (i == promoteKey) {
+ assertInKVStore(cache, promoteKey, promoteKey);
+ } else {
+ assertNotInKVStore(cache, i);
}
}
+}
- /**
- * Test that calling add() with a key that already exists
- * in the kv-store deletes the existing entry.
- */
- TEST(LRUKeyValueTest, ReplaceKeyTest) {
- LRUKeyValue<int, int> cache(10);
- cache.add(4, new int(4));
- assertInKVStore(cache, 4, 4);
- cache.add(4, new int(5));
- assertInKVStore(cache, 4, 5);
- }
+/**
+ * Test that calling add() with a key that already exists
+ * in the kv-store deletes the existing entry.
+ */
+TEST(LRUKeyValueTest, ReplaceKeyTest) {
+ LRUKeyValue<int, int> cache(10);
+ cache.add(4, new int(4));
+ assertInKVStore(cache, 4, 4);
+ cache.add(4, new int(5));
+ assertInKVStore(cache, 4, 5);
+}
- /**
- * Test iteration over the kv-store.
- */
- TEST(LRUKeyValueTest, IterationTest) {
- LRUKeyValue<int, int> cache(2);
- cache.add(1, new int(1));
- cache.add(2, new int(2));
-
- typedef std::list< std::pair<int, int*> >::const_iterator CacheIterator;
- CacheIterator i = cache.begin();
- ASSERT_EQUALS(i->first, 2);
- ASSERT_EQUALS(*i->second, 2);
- ++i;
- ASSERT_EQUALS(i->first, 1);
- ASSERT_EQUALS(*i->second, 1);
- ++i;
- ASSERT(i == cache.end());
- }
+/**
+ * Test iteration over the kv-store.
+ */
+TEST(LRUKeyValueTest, IterationTest) {
+ LRUKeyValue<int, int> cache(2);
+ cache.add(1, new int(1));
+ cache.add(2, new int(2));
+
+ typedef std::list<std::pair<int, int*>>::const_iterator CacheIterator;
+ CacheIterator i = cache.begin();
+ ASSERT_EQUALS(i->first, 2);
+ ASSERT_EQUALS(*i->second, 2);
+ ++i;
+ ASSERT_EQUALS(i->first, 1);
+ ASSERT_EQUALS(*i->second, 1);
+ ++i;
+ ASSERT(i == cache.end());
+}
} // namespace
diff --git a/src/mongo/db/query/parsed_projection.cpp b/src/mongo/db/query/parsed_projection.cpp
index 7552475d5f1..4ebcff2d1a3 100644
--- a/src/mongo/db/query/parsed_projection.cpp
+++ b/src/mongo/db/query/parsed_projection.cpp
@@ -32,296 +32,283 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
-
- /**
- * Parses the projection 'spec' and checks its validity with respect to the query 'query'.
- * Puts covering information into 'out'.
- *
- * Does not take ownership of 'query'.
- *
- * Returns Status::OK() if it's a valid spec.
- * Returns a Status indicating how it's invalid otherwise.
- */
- // static
- Status ParsedProjection::make(const BSONObj& spec,
- const MatchExpression* const query,
- ParsedProjection** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- // Are we including or excluding fields? Values:
- // -1 when we haven't initialized it.
- // 1 when we're including
- // 0 when we're excluding.
- int include_exclude = -1;
-
- // If any of these are 'true' the projection isn't covered.
- bool include = true;
- bool hasNonSimple = false;
- bool hasDottedField = false;
-
- bool includeID = true;
-
- bool hasIndexKeyProjection = false;
-
- bool wantGeoNearPoint = false;
- bool wantGeoNearDistance = false;
-
- // Until we see a positional or elemMatch operator we're normal.
- ArrayOpType arrayOpType = ARRAY_OP_NORMAL;
-
- BSONObjIterator it(spec);
- while (it.more()) {
- BSONElement e = it.next();
-
- if (!e.isNumber() && !e.isBoolean()) {
- hasNonSimple = true;
- }
-
- if (Object == e.type()) {
- BSONObj obj = e.embeddedObject();
- if (1 != obj.nFields()) {
- return Status(ErrorCodes::BadValue, ">1 field in obj: " + obj.toString());
- }
-
- BSONElement e2 = obj.firstElement();
- if (mongoutils::str::equals(e2.fieldName(), "$slice")) {
- if (e2.isNumber()) {
- // This is A-OK.
- }
- else if (e2.type() == Array) {
- BSONObj arr = e2.embeddedObject();
- if (2 != arr.nFields()) {
- return Status(ErrorCodes::BadValue, "$slice array wrong size");
- }
-
- BSONObjIterator it(arr);
- // Skip over 'skip'.
- it.next();
- int limit = it.next().numberInt();
- if (limit <= 0) {
- return Status(ErrorCodes::BadValue, "$slice limit must be positive");
- }
- }
- else {
- return Status(ErrorCodes::BadValue,
- "$slice only supports numbers and [skip, limit] arrays");
- }
- }
- else if (mongoutils::str::equals(e2.fieldName(), "$elemMatch")) {
- // Validate $elemMatch arguments and dependencies.
- if (Object != e2.type()) {
- return Status(ErrorCodes::BadValue,
- "elemMatch: Invalid argument, object required.");
- }
+using std::unique_ptr;
+using std::string;
- if (ARRAY_OP_POSITIONAL == arrayOpType) {
- return Status(ErrorCodes::BadValue,
- "Cannot specify positional operator and $elemMatch.");
- }
-
- if (mongoutils::str::contains(e.fieldName(), '.')) {
- return Status(ErrorCodes::BadValue,
- "Cannot use $elemMatch projection on a nested field.");
- }
-
- arrayOpType = ARRAY_OP_ELEM_MATCH;
-
- // Create a MatchExpression for the elemMatch.
- BSONObj elemMatchObj = e.wrap();
- verify(elemMatchObj.isOwned());
-
- // TODO: Is there a faster way of validating the elemMatchObj?
- StatusWithMatchExpression swme = MatchExpressionParser::parse(elemMatchObj,
- whereCallback);
- if (!swme.isOK()) {
- return swme.getStatus();
- }
- delete swme.getValue();
- }
- else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
- // Field for meta must be top level. We can relax this at some point.
- if (mongoutils::str::contains(e.fieldName(), '.')) {
- return Status(ErrorCodes::BadValue, "field for $meta cannot be nested");
- }
+/**
+ * Parses the projection 'spec' and checks its validity with respect to the query 'query'.
+ * Puts covering information into 'out'.
+ *
+ * Does not take ownership of 'query'.
+ *
+ * Returns Status::OK() if it's a valid spec.
+ * Returns a Status indicating how it's invalid otherwise.
+ */
+// static
+Status ParsedProjection::make(const BSONObj& spec,
+ const MatchExpression* const query,
+ ParsedProjection** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ // Are we including or excluding fields? Values:
+ // -1 when we haven't initialized it.
+ // 1 when we're including
+ // 0 when we're excluding.
+ int include_exclude = -1;
+
+ // If any of these are 'true' the projection isn't covered.
+ bool include = true;
+ bool hasNonSimple = false;
+ bool hasDottedField = false;
+
+ bool includeID = true;
+
+ bool hasIndexKeyProjection = false;
+
+ bool wantGeoNearPoint = false;
+ bool wantGeoNearDistance = false;
+
+ // Until we see a positional or elemMatch operator we're normal.
+ ArrayOpType arrayOpType = ARRAY_OP_NORMAL;
+
+ BSONObjIterator it(spec);
+ while (it.more()) {
+ BSONElement e = it.next();
+
+ if (!e.isNumber() && !e.isBoolean()) {
+ hasNonSimple = true;
+ }
- // Make sure the argument to $meta is something we recognize.
- // e.g. {x: {$meta: "textScore"}}
- if (String != e2.type()) {
- return Status(ErrorCodes::BadValue, "unexpected argument to $meta in proj");
- }
+ if (Object == e.type()) {
+ BSONObj obj = e.embeddedObject();
+ if (1 != obj.nFields()) {
+ return Status(ErrorCodes::BadValue, ">1 field in obj: " + obj.toString());
+ }
- if (e2.valuestr() != LiteParsedQuery::metaTextScore
- && e2.valuestr() != LiteParsedQuery::metaRecordId
- && e2.valuestr() != LiteParsedQuery::metaIndexKey
- && e2.valuestr() != LiteParsedQuery::metaGeoNearDistance
- && e2.valuestr() != LiteParsedQuery::metaGeoNearPoint) {
- return Status(ErrorCodes::BadValue,
- "unsupported $meta operator: " + e2.str());
+ BSONElement e2 = obj.firstElement();
+ if (mongoutils::str::equals(e2.fieldName(), "$slice")) {
+ if (e2.isNumber()) {
+ // This is A-OK.
+ } else if (e2.type() == Array) {
+ BSONObj arr = e2.embeddedObject();
+ if (2 != arr.nFields()) {
+ return Status(ErrorCodes::BadValue, "$slice array wrong size");
}
- // This clobbers everything else.
- if (e2.valuestr() == LiteParsedQuery::metaIndexKey) {
- hasIndexKeyProjection = true;
- }
- else if (e2.valuestr() == LiteParsedQuery::metaGeoNearDistance) {
- wantGeoNearDistance = true;
+ BSONObjIterator it(arr);
+ // Skip over 'skip'.
+ it.next();
+ int limit = it.next().numberInt();
+ if (limit <= 0) {
+ return Status(ErrorCodes::BadValue, "$slice limit must be positive");
}
- else if (e2.valuestr() == LiteParsedQuery::metaGeoNearPoint) {
- wantGeoNearPoint = true;
- }
- }
- else {
+ } else {
return Status(ErrorCodes::BadValue,
- string("Unsupported projection option: ") + e.toString());
+ "$slice only supports numbers and [skip, limit] arrays");
}
- }
- else if (mongoutils::str::equals(e.fieldName(), "_id") && !e.trueValue()) {
- includeID = false;
- }
- else {
- // Projections of dotted fields aren't covered.
- if (mongoutils::str::contains(e.fieldName(), '.')) {
- hasDottedField = true;
+ } else if (mongoutils::str::equals(e2.fieldName(), "$elemMatch")) {
+ // Validate $elemMatch arguments and dependencies.
+ if (Object != e2.type()) {
+ return Status(ErrorCodes::BadValue,
+ "elemMatch: Invalid argument, object required.");
}
- // Validate input.
- if (include_exclude == -1) {
- // If we haven't specified an include/exclude, initialize include_exclude.
- // We expect further include/excludes to match it.
- include_exclude = e.trueValue();
- include = !e.trueValue();
- }
- else if (static_cast<bool>(include_exclude) != e.trueValue()) {
- // Make sure that the incl./excl. matches the previous.
+ if (ARRAY_OP_POSITIONAL == arrayOpType) {
return Status(ErrorCodes::BadValue,
- "Projection cannot have a mix of inclusion and exclusion.");
+ "Cannot specify positional operator and $elemMatch.");
}
- }
-
- if (_isPositionalOperator(e.fieldName())) {
- // Validate the positional op.
- if (!e.trueValue()) {
+ if (mongoutils::str::contains(e.fieldName(), '.')) {
return Status(ErrorCodes::BadValue,
- "Cannot exclude array elements with the positional operator.");
+ "Cannot use $elemMatch projection on a nested field.");
}
- if (ARRAY_OP_POSITIONAL == arrayOpType) {
- return Status(ErrorCodes::BadValue,
- "Cannot specify more than one positional proj. per query.");
+ arrayOpType = ARRAY_OP_ELEM_MATCH;
+
+ // Create a MatchExpression for the elemMatch.
+ BSONObj elemMatchObj = e.wrap();
+ verify(elemMatchObj.isOwned());
+
+ // TODO: Is there a faster way of validating the elemMatchObj?
+ StatusWithMatchExpression swme =
+ MatchExpressionParser::parse(elemMatchObj, whereCallback);
+ if (!swme.isOK()) {
+ return swme.getStatus();
+ }
+ delete swme.getValue();
+ } else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
+ // Field for meta must be top level. We can relax this at some point.
+ if (mongoutils::str::contains(e.fieldName(), '.')) {
+ return Status(ErrorCodes::BadValue, "field for $meta cannot be nested");
}
- if (ARRAY_OP_ELEM_MATCH == arrayOpType) {
- return Status(ErrorCodes::BadValue,
- "Cannot specify positional operator and $elemMatch.");
+ // Make sure the argument to $meta is something we recognize.
+ // e.g. {x: {$meta: "textScore"}}
+ if (String != e2.type()) {
+ return Status(ErrorCodes::BadValue, "unexpected argument to $meta in proj");
}
- std::string after = mongoutils::str::after(e.fieldName(), ".$");
- if (mongoutils::str::contains(after, ".$")) {
- mongoutils::str::stream ss;
- ss << "Positional projection '" << e.fieldName() << "' contains "
- << "the positional operator more than once.";
- return Status(ErrorCodes::BadValue, ss);
+ if (e2.valuestr() != LiteParsedQuery::metaTextScore &&
+ e2.valuestr() != LiteParsedQuery::metaRecordId &&
+ e2.valuestr() != LiteParsedQuery::metaIndexKey &&
+ e2.valuestr() != LiteParsedQuery::metaGeoNearDistance &&
+ e2.valuestr() != LiteParsedQuery::metaGeoNearPoint) {
+ return Status(ErrorCodes::BadValue, "unsupported $meta operator: " + e2.str());
}
- std::string matchfield = mongoutils::str::before(e.fieldName(), '.');
- if (!_hasPositionalOperatorMatch(query, matchfield)) {
- mongoutils::str::stream ss;
- ss << "Positional projection '" << e.fieldName() << "' does not "
- << "match the query document.";
- return Status(ErrorCodes::BadValue, ss);
+ // This clobbers everything else.
+ if (e2.valuestr() == LiteParsedQuery::metaIndexKey) {
+ hasIndexKeyProjection = true;
+ } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearDistance) {
+ wantGeoNearDistance = true;
+ } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearPoint) {
+ wantGeoNearPoint = true;
}
+ } else {
+ return Status(ErrorCodes::BadValue,
+ string("Unsupported projection option: ") + e.toString());
+ }
+ } else if (mongoutils::str::equals(e.fieldName(), "_id") && !e.trueValue()) {
+ includeID = false;
+ } else {
+ // Projections of dotted fields aren't covered.
+ if (mongoutils::str::contains(e.fieldName(), '.')) {
+ hasDottedField = true;
+ }
- arrayOpType = ARRAY_OP_POSITIONAL;
+ // Validate input.
+ if (include_exclude == -1) {
+ // If we haven't specified an include/exclude, initialize include_exclude.
+ // We expect further include/excludes to match it.
+ include_exclude = e.trueValue();
+ include = !e.trueValue();
+ } else if (static_cast<bool>(include_exclude) != e.trueValue()) {
+ // Make sure that the incl./excl. matches the previous.
+ return Status(ErrorCodes::BadValue,
+ "Projection cannot have a mix of inclusion and exclusion.");
}
}
- // Fill out the returned obj.
- unique_ptr<ParsedProjection> pp(new ParsedProjection());
-
- // The positional operator uses the MatchDetails from the query
- // expression to know which array element was matched.
- pp->_requiresMatchDetails = arrayOpType == ARRAY_OP_POSITIONAL;
- // Save the raw spec. It should be owned by the LiteParsedQuery.
- verify(spec.isOwned());
- pp->_source = spec;
- pp->_returnKey = hasIndexKeyProjection;
+ if (_isPositionalOperator(e.fieldName())) {
+ // Validate the positional op.
+ if (!e.trueValue()) {
+ return Status(ErrorCodes::BadValue,
+ "Cannot exclude array elements with the positional operator.");
+ }
- // Dotted fields aren't covered, non-simple require match details, and as for include, "if
- // we default to including then we can't use an index because we don't know what we're
- // missing."
- pp->_requiresDocument = include || hasNonSimple || hasDottedField;
+ if (ARRAY_OP_POSITIONAL == arrayOpType) {
+ return Status(ErrorCodes::BadValue,
+ "Cannot specify more than one positional proj. per query.");
+ }
- // Add geoNear projections.
- pp->_wantGeoNearPoint = wantGeoNearPoint;
- pp->_wantGeoNearDistance = wantGeoNearDistance;
+ if (ARRAY_OP_ELEM_MATCH == arrayOpType) {
+ return Status(ErrorCodes::BadValue,
+ "Cannot specify positional operator and $elemMatch.");
+ }
- // If it's possible to compute the projection in a covered fashion, populate _requiredFields
- // so the planner can perform projection analysis.
- if (!pp->_requiresDocument) {
- if (includeID) {
- pp->_requiredFields.push_back("_id");
+ std::string after = mongoutils::str::after(e.fieldName(), ".$");
+ if (mongoutils::str::contains(after, ".$")) {
+ mongoutils::str::stream ss;
+ ss << "Positional projection '" << e.fieldName() << "' contains "
+ << "the positional operator more than once.";
+ return Status(ErrorCodes::BadValue, ss);
}
- // The only way we could be here is if spec is only simple non-dotted-field projections.
- // Therefore we can iterate over spec to get the fields required.
- BSONObjIterator srcIt(spec);
- while (srcIt.more()) {
- BSONElement elt = srcIt.next();
- // We've already handled the _id field before entering this loop.
- if (includeID && mongoutils::str::equals(elt.fieldName(), "_id")) {
- continue;
- }
- if (elt.trueValue()) {
- pp->_requiredFields.push_back(elt.fieldName());
- }
+ std::string matchfield = mongoutils::str::before(e.fieldName(), '.');
+ if (!_hasPositionalOperatorMatch(query, matchfield)) {
+ mongoutils::str::stream ss;
+ ss << "Positional projection '" << e.fieldName() << "' does not "
+ << "match the query document.";
+ return Status(ErrorCodes::BadValue, ss);
}
+
+ arrayOpType = ARRAY_OP_POSITIONAL;
}
+ }
+
+ // Fill out the returned obj.
+ unique_ptr<ParsedProjection> pp(new ParsedProjection());
+
+ // The positional operator uses the MatchDetails from the query
+ // expression to know which array element was matched.
+ pp->_requiresMatchDetails = arrayOpType == ARRAY_OP_POSITIONAL;
+
+ // Save the raw spec. It should be owned by the LiteParsedQuery.
+ verify(spec.isOwned());
+ pp->_source = spec;
+ pp->_returnKey = hasIndexKeyProjection;
- // returnKey clobbers everything.
- if (hasIndexKeyProjection) {
- pp->_requiresDocument = false;
+ // Dotted fields aren't covered, non-simple require match details, and as for include, "if
+ // we default to including then we can't use an index because we don't know what we're
+ // missing."
+ pp->_requiresDocument = include || hasNonSimple || hasDottedField;
+
+ // Add geoNear projections.
+ pp->_wantGeoNearPoint = wantGeoNearPoint;
+ pp->_wantGeoNearDistance = wantGeoNearDistance;
+
+ // If it's possible to compute the projection in a covered fashion, populate _requiredFields
+ // so the planner can perform projection analysis.
+ if (!pp->_requiresDocument) {
+ if (includeID) {
+ pp->_requiredFields.push_back("_id");
}
- *out = pp.release();
- return Status::OK();
+ // The only way we could be here is if spec is only simple non-dotted-field projections.
+ // Therefore we can iterate over spec to get the fields required.
+ BSONObjIterator srcIt(spec);
+ while (srcIt.more()) {
+ BSONElement elt = srcIt.next();
+ // We've already handled the _id field before entering this loop.
+ if (includeID && mongoutils::str::equals(elt.fieldName(), "_id")) {
+ continue;
+ }
+ if (elt.trueValue()) {
+ pp->_requiredFields.push_back(elt.fieldName());
+ }
+ }
}
- // static
- bool ParsedProjection::_isPositionalOperator(const char* fieldName) {
- return mongoutils::str::contains(fieldName, ".$") &&
- !mongoutils::str::contains(fieldName, ".$ref") &&
- !mongoutils::str::contains(fieldName, ".$id") &&
- !mongoutils::str::contains(fieldName, ".$db");
-
+ // returnKey clobbers everything.
+ if (hasIndexKeyProjection) {
+ pp->_requiresDocument = false;
}
- // static
- bool ParsedProjection::_hasPositionalOperatorMatch(const MatchExpression* const query,
- const std::string& matchfield) {
- if (query->isLogical()) {
- for (unsigned int i = 0; i < query->numChildren(); ++i) {
- if (_hasPositionalOperatorMatch(query->getChild(i), matchfield)) {
- return true;
- }
+ *out = pp.release();
+ return Status::OK();
+}
+
+// static
+bool ParsedProjection::_isPositionalOperator(const char* fieldName) {
+ return mongoutils::str::contains(fieldName, ".$") &&
+ !mongoutils::str::contains(fieldName, ".$ref") &&
+ !mongoutils::str::contains(fieldName, ".$id") &&
+ !mongoutils::str::contains(fieldName, ".$db");
+}
+
+// static
+bool ParsedProjection::_hasPositionalOperatorMatch(const MatchExpression* const query,
+ const std::string& matchfield) {
+ if (query->isLogical()) {
+ for (unsigned int i = 0; i < query->numChildren(); ++i) {
+ if (_hasPositionalOperatorMatch(query->getChild(i), matchfield)) {
+ return true;
}
}
- else {
- StringData queryPath = query->path();
- const char* pathRawData = queryPath.rawData();
- // We have to make a distinction between match expressions that are
- // initialized with an empty field/path name "" and match expressions
- // for which the path is not meaningful (eg. $where and the internal
- // expression type ALWAYS_FALSE).
- if (!pathRawData) {
- return false;
- }
- std::string pathPrefix = mongoutils::str::before(pathRawData, '.');
- return pathPrefix == matchfield;
+ } else {
+ StringData queryPath = query->path();
+ const char* pathRawData = queryPath.rawData();
+ // We have to make a distinction between match expressions that are
+ // initialized with an empty field/path name "" and match expressions
+ // for which the path is not meaningful (eg. $where and the internal
+ // expression type ALWAYS_FALSE).
+ if (!pathRawData) {
+ return false;
}
- return false;
+ std::string pathPrefix = mongoutils::str::before(pathRawData, '.');
+ return pathPrefix == matchfield;
}
+ return false;
+}
} // namespace mongo
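
For reference, a minimal sketch (not part of this diff) of the make()/covering workflow described above: parse the query, build the ParsedProjection, and inspect the covering information. The include paths are assumptions; the calls themselves mirror the test file further below.

#include <memory>
#include <string>
#include <vector>

#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/query/parsed_projection.h"

mongo::Status coveringInfoSketch() {
    mongo::BSONObj query = mongo::fromjson("{a: 1}");
    mongo::BSONObj proj = mongo::fromjson("{_id: 0, a: 1, b: 1}");

    mongo::StatusWithMatchExpression swme = mongo::MatchExpressionParser::parse(query);
    if (!swme.isOK()) {
        return swme.getStatus();
    }
    std::unique_ptr<mongo::MatchExpression> queryExpr(swme.getValue());

    // make() validates 'proj' against the query and fills in the out-parameter;
    // mixed inclusion/exclusion, a bad $slice, or a bad $meta all fail here.
    mongo::ParsedProjection* rawParsedProj = NULL;
    mongo::Status status = mongo::ParsedProjection::make(proj, queryExpr.get(), &rawParsedProj);
    if (!status.isOK()) {
        return status;
    }
    std::unique_ptr<mongo::ParsedProjection> parsedProj(rawParsedProj);

    // This spec is simple, non-dotted inclusions, so the full document is not
    // required and the planner may consider a covered plan over these fields.
    if (!parsedProj->requiresDocument()) {
        const std::vector<std::string>& fields = parsedProj->getRequiredFields();
        (void)fields;  // {"a", "b"} for the spec above; "_id" was excluded.
    }
    return mongo::Status::OK();
}
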
diff --git a/src/mongo/db/query/parsed_projection.h b/src/mongo/db/query/parsed_projection.h
index 3fa40fe2ca8..b135b5f47ed 100644
--- a/src/mongo/db/query/parsed_projection.h
+++ b/src/mongo/db/query/parsed_projection.h
@@ -32,112 +32,112 @@
namespace mongo {
- class ParsedProjection {
- public:
- // TODO: this is duplicated in here and in the proj exec code. When we have
- // ProjectionExpression we can remove dups.
- enum ArrayOpType {
- ARRAY_OP_NORMAL = 0,
- ARRAY_OP_ELEM_MATCH,
- ARRAY_OP_POSITIONAL
- };
-
- /**
- * Parses the projection 'spec' and checks its validity with respect to the query 'query'.
- * Puts covering information into 'out'.
- *
- * Returns Status::OK() if it's a valid spec.
- * Returns a Status indicating how it's invalid otherwise.
- */
- static Status make(const BSONObj& spec,
- const MatchExpression* const query,
- ParsedProjection** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- /**
- * Returns true if the projection requires match details from the query,
- * and false otherwise.
- */
- bool requiresMatchDetails() const { return _requiresMatchDetails; }
-
- /**
- * Is the full document required to compute this projection?
- */
- bool requiresDocument() const { return _requiresDocument; }
-
- /**
- * If requiresDocument() == false, what fields are required to compute
- * the projection?
- */
- const std::vector<std::string>& getRequiredFields() const {
- return _requiredFields;
- }
-
- /**
- * Get the raw BSONObj proj spec obj
- */
- const BSONObj& getProjObj() const {
- return _source;
- }
-
- /**
- * Does the projection want geoNear metadata? If so any geoNear stage should include them.
- */
- bool wantGeoNearDistance() const {
- return _wantGeoNearDistance;
- }
-
- bool wantGeoNearPoint() const {
- return _wantGeoNearPoint;
- }
-
- bool wantIndexKey() const {
- return _returnKey;
- }
-
- private:
- /**
- * Must go through ::make
- */
- ParsedProjection()
- : _requiresMatchDetails(false),
- _requiresDocument(true),
- _wantGeoNearDistance(false),
- _wantGeoNearPoint(false),
- _returnKey(false) { }
-
- /**
- * Returns true if field name refers to a positional projection.
- */
- static bool _isPositionalOperator(const char* fieldName);
-
- /**
- * Returns true if the MatchExpression 'query' queries against
- * the field named by 'matchfield'. This deeply traverses logical
- * nodes in the matchfield and returns true if any of the children
- * have the field (so if 'query' is {$and: [{a: 1}, {b: 1}]} and
- * 'matchfield' is "b", the return value is true).
- *
- * Does not take ownership of 'query'.
- */
- static bool _hasPositionalOperatorMatch(const MatchExpression* const query,
- const std::string& matchfield);
-
- // TODO: stringdata?
- std::vector<std::string> _requiredFields;
-
- bool _requiresMatchDetails;
-
- bool _requiresDocument;
-
- BSONObj _source;
-
- bool _wantGeoNearDistance;
-
- bool _wantGeoNearPoint;
-
- bool _returnKey;
- };
+class ParsedProjection {
+public:
+ // TODO: this is duplicated in here and in the proj exec code. When we have
+ // ProjectionExpression we can remove dups.
+ enum ArrayOpType { ARRAY_OP_NORMAL = 0, ARRAY_OP_ELEM_MATCH, ARRAY_OP_POSITIONAL };
+
+ /**
+ * Parses the projection 'spec' and checks its validity with respect to the query 'query'.
+ * Puts covering information into 'out'.
+ *
+ * Returns Status::OK() if it's a valid spec.
+ * Returns a Status indicating how it's invalid otherwise.
+ */
+ static Status make(const BSONObj& spec,
+ const MatchExpression* const query,
+ ParsedProjection** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ /**
+ * Returns true if the projection requires match details from the query,
+ * and false otherwise.
+ */
+ bool requiresMatchDetails() const {
+ return _requiresMatchDetails;
+ }
+
+ /**
+ * Is the full document required to compute this projection?
+ */
+ bool requiresDocument() const {
+ return _requiresDocument;
+ }
+
+ /**
+ * If requiresDocument() == false, what fields are required to compute
+ * the projection?
+ */
+ const std::vector<std::string>& getRequiredFields() const {
+ return _requiredFields;
+ }
+
+ /**
+     * Gets the raw BSONObj projection spec.
+ */
+ const BSONObj& getProjObj() const {
+ return _source;
+ }
+
+ /**
+     * Does the projection want geoNear metadata? If so, any geoNear stage should include them.
+ */
+ bool wantGeoNearDistance() const {
+ return _wantGeoNearDistance;
+ }
+
+ bool wantGeoNearPoint() const {
+ return _wantGeoNearPoint;
+ }
+
+ bool wantIndexKey() const {
+ return _returnKey;
+ }
+
+private:
+ /**
+ * Must go through ::make
+ */
+ ParsedProjection()
+ : _requiresMatchDetails(false),
+ _requiresDocument(true),
+ _wantGeoNearDistance(false),
+ _wantGeoNearPoint(false),
+ _returnKey(false) {}
+
+ /**
+ * Returns true if field name refers to a positional projection.
+ */
+ static bool _isPositionalOperator(const char* fieldName);
+
+ /**
+ * Returns true if the MatchExpression 'query' queries against
+ * the field named by 'matchfield'. This deeply traverses logical
+ * nodes in the matchfield and returns true if any of the children
+ * have the field (so if 'query' is {$and: [{a: 1}, {b: 1}]} and
+ * 'matchfield' is "b", the return value is true).
+ *
+ * Does not take ownership of 'query'.
+ */
+ static bool _hasPositionalOperatorMatch(const MatchExpression* const query,
+ const std::string& matchfield);
+
+ // TODO: stringdata?
+ std::vector<std::string> _requiredFields;
+
+ bool _requiresMatchDetails;
+
+ bool _requiresDocument;
+
+ BSONObj _source;
+
+ bool _wantGeoNearDistance;
+
+ bool _wantGeoNearPoint;
+
+ bool _returnKey;
+};
} // namespace mongo
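The reformatted header above leaves the ParsedProjection interface itself unchanged, so a short usage sketch may help readers skimming the diff. The helper below is hypothetical (it is not part of this change or of the tree): it assumes the usual mongo headers and namespace, and 'projSpec'/'queryExpr' stand in for a projection BSONObj and an already-parsed MatchExpression.

namespace mongo {

// Hypothetical helper showing the intended call pattern for ParsedProjection::make and
// the covering accessors; the name, arguments, and error handling are illustrative only.
Status checkProjectionCovering(const BSONObj& projSpec, const MatchExpression* queryExpr) {
    ParsedProjection* rawProj = NULL;
    Status status = ParsedProjection::make(projSpec, queryExpr, &rawProj);
    if (!status.isOK()) {
        return status;  // the spec is invalid with respect to the query
    }
    std::unique_ptr<ParsedProjection> proj(rawProj);

    if (!proj->requiresDocument()) {
        // Covered case: only these fields are needed to compute the projection,
        // so the planner may be able to answer from index keys alone.
        const std::vector<std::string>& fields = proj->getRequiredFields();
        (void)fields;  // hand these to the covering logic
    }
    return Status::OK();
}

}  // namespace mongo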
diff --git a/src/mongo/db/query/parsed_projection_test.cpp b/src/mongo/db/query/parsed_projection_test.cpp
index c669490f35a..9128575ea95 100644
--- a/src/mongo/db/query/parsed_projection_test.cpp
+++ b/src/mongo/db/query/parsed_projection_test.cpp
@@ -35,182 +35,180 @@
namespace {
- using std::unique_ptr;
- using std::string;
- using std::vector;
-
- using namespace mongo;
-
- //
- // creation function
- //
-
- unique_ptr<ParsedProjection> createParsedProjection(const BSONObj& query, const BSONObj& projObj) {
- StatusWithMatchExpression swme = MatchExpressionParser::parse(query);
- ASSERT(swme.isOK());
- std::unique_ptr<MatchExpression> queryMatchExpr(swme.getValue());
- ParsedProjection* out = NULL;
- Status status = ParsedProjection::make(projObj, queryMatchExpr.get(), &out);
- if (!status.isOK()) {
- FAIL(mongoutils::str::stream() << "failed to parse projection " << projObj
- << " (query: " << query << "): " << status.toString());
- }
- ASSERT(out);
- return unique_ptr<ParsedProjection>(out);
+using std::unique_ptr;
+using std::string;
+using std::vector;
+
+using namespace mongo;
+
+//
+// creation function
+//
+
+unique_ptr<ParsedProjection> createParsedProjection(const BSONObj& query, const BSONObj& projObj) {
+ StatusWithMatchExpression swme = MatchExpressionParser::parse(query);
+ ASSERT(swme.isOK());
+ std::unique_ptr<MatchExpression> queryMatchExpr(swme.getValue());
+ ParsedProjection* out = NULL;
+ Status status = ParsedProjection::make(projObj, queryMatchExpr.get(), &out);
+ if (!status.isOK()) {
+ FAIL(mongoutils::str::stream() << "failed to parse projection " << projObj
+ << " (query: " << query << "): " << status.toString());
}
-
- unique_ptr<ParsedProjection> createParsedProjection(const char* queryStr, const char* projStr) {
- BSONObj query = fromjson(queryStr);
- BSONObj projObj = fromjson(projStr);
- return createParsedProjection(query, projObj);
- }
-
- //
- // Failure to create a parsed projection is expected
- //
-
- void assertInvalidProjection(const char* queryStr, const char* projStr) {
- BSONObj query = fromjson(queryStr);
- BSONObj projObj = fromjson(projStr);
- StatusWithMatchExpression swme = MatchExpressionParser::parse(query);
- ASSERT(swme.isOK());
- std::unique_ptr<MatchExpression> queryMatchExpr(swme.getValue());
- ParsedProjection* out = NULL;
- Status status = ParsedProjection::make(projObj, queryMatchExpr.get(), &out);
- std::unique_ptr<ParsedProjection> destroy(out);
- ASSERT(!status.isOK());
- }
-
- // canonical_query.cpp will invoke ParsedProjection::make only when
- // the projection spec is non-empty. This test case is included for
- // completeness and do not reflect actual usage.
- TEST(ParsedProjectionTest, MakeId) {
- unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{}"));
- ASSERT(parsedProj->requiresDocument());
- }
-
- TEST(ParsedProjectionTest, MakeEmpty) {
- unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: 0}"));
- ASSERT(parsedProj->requiresDocument());
- }
-
- TEST(ParsedProjectionTest, MakeSingleField) {
- unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{a: 1}"));
- ASSERT(!parsedProj->requiresDocument());
- const vector<string>& fields = parsedProj->getRequiredFields();
- ASSERT_EQUALS(fields.size(), 2U);
- ASSERT_EQUALS(fields[0], "_id");
- ASSERT_EQUALS(fields[1], "a");
- }
-
- TEST(ParsedProjectionTest, MakeSingleFieldCovered) {
- unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: 0, a: 1}"));
- ASSERT(!parsedProj->requiresDocument());
- const vector<string>& fields = parsedProj->getRequiredFields();
- ASSERT_EQUALS(fields.size(), 1U);
- ASSERT_EQUALS(fields[0], "a");
- }
-
- TEST(ParsedProjectionTest, MakeSingleFieldIDCovered) {
- unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: 1}"));
- ASSERT(!parsedProj->requiresDocument());
- const vector<string>& fields = parsedProj->getRequiredFields();
- ASSERT_EQUALS(fields.size(), 1U);
- ASSERT_EQUALS(fields[0], "_id");
- }
-
- // boolean support is undocumented
- TEST(ParsedProjectionTest, MakeSingleFieldCoveredBoolean) {
- unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: 0, a: true}"));
- ASSERT(!parsedProj->requiresDocument());
- const vector<string>& fields = parsedProj->getRequiredFields();
- ASSERT_EQUALS(fields.size(), 1U);
- ASSERT_EQUALS(fields[0], "a");
- }
-
- // boolean support is undocumented
- TEST(ParsedProjectionTest, MakeSingleFieldCoveredIdBoolean) {
- unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: false, a: 1}"));
- ASSERT(!parsedProj->requiresDocument());
- const vector<string>& fields = parsedProj->getRequiredFields();
- ASSERT_EQUALS(fields.size(), 1U);
- ASSERT_EQUALS(fields[0], "a");
- }
-
- //
- // Positional operator validation
- //
-
- TEST(ParsedProjectionTest, InvalidPositionalOperatorProjections) {
- assertInvalidProjection("{}", "{'a.$': 1}");
- assertInvalidProjection("{a: 1}", "{'b.$': 1}");
- assertInvalidProjection("{a: 1}", "{'a.$': 0}");
- assertInvalidProjection("{a: 1}", "{'a.$.d.$': 1}");
- assertInvalidProjection("{a: 1}", "{'a.$.$': 1}");
- assertInvalidProjection("{a: 1}", "{'a.$.$': 1}");
- assertInvalidProjection("{a: 1, b: 1, c: 1}", "{'abc.$': 1}");
- assertInvalidProjection("{$or: [{a: 1}, {$or: [{b: 1}, {c: 1}]}]}", "{'d.$': 1}");
- assertInvalidProjection("{a: [1, 2, 3]}", "{'.$': 1}");
- }
-
- TEST(ParsedProjectionTest, ValidPositionalOperatorProjections) {
- createParsedProjection("{a: 1}", "{'a.$': 1}");
- createParsedProjection("{a: 1}", "{'a.foo.bar.$': 1}");
- createParsedProjection("{a: 1}", "{'a.foo.bar.$.x.y': 1}");
- createParsedProjection("{'a.b.c': 1}", "{'a.b.c.$': 1}");
- createParsedProjection("{'a.b.c': 1}", "{'a.e.f.$': 1}");
- createParsedProjection("{a: {b: 1}}", "{'a.$': 1}");
- createParsedProjection("{a: 1, b: 1}}", "{'a.$': 1}");
- createParsedProjection("{a: 1, b: 1}}", "{'b.$': 1}");
- createParsedProjection("{$and: [{a: 1}, {b: 1}]}", "{'a.$': 1}");
- createParsedProjection("{$and: [{a: 1}, {b: 1}]}", "{'b.$': 1}");
- createParsedProjection("{$or: [{a: 1}, {b: 1}]}", "{'a.$': 1}");
- createParsedProjection("{$or: [{a: 1}, {b: 1}]}", "{'b.$': 1}");
- createParsedProjection("{$and: [{$or: [{a: 1}, {$and: [{b: 1}, {c: 1}]}]}]}",
- "{'c.d.f.$': 1}");
- // Fields with empty name can be projected using the positional $ operator.
- createParsedProjection("{'': [1, 2, 3]}", "{'.$': 1}");
- }
-
- // Some match expressions (eg. $where) do not override MatchExpression::path()
- // In this test case, we use an internal match expression implementation ALWAYS_FALSE
- // to achieve the same effect.
- // Projection parser should handle this the same way as an empty path.
- TEST(ParsedProjectionTest, InvalidPositionalProjectionDefaultPathMatchExpression) {
- unique_ptr<MatchExpression> queryMatchExpr(new FalseMatchExpression());
- ASSERT(NULL == queryMatchExpr->path().rawData());
-
- ParsedProjection* out = NULL;
- BSONObj projObj = fromjson("{'a.$': 1}");
- Status status = ParsedProjection::make(projObj, queryMatchExpr.get(), &out);
- ASSERT(!status.isOK());
- std::unique_ptr<ParsedProjection> destroy(out);
-
- // Projecting onto empty field should fail.
- BSONObj emptyFieldProjObj = fromjson("{'.$': 1}");
- status = ParsedProjection::make(emptyFieldProjObj, queryMatchExpr.get(), &out);
- ASSERT(!status.isOK());
- }
-
- //
- // DBRef projections
- //
-
- TEST(ParsedProjectionTest, DBRefProjections) {
- // non-dotted
- createParsedProjection(BSONObj(), BSON( "$ref" << 1));
- createParsedProjection(BSONObj(), BSON( "$id" << 1));
- createParsedProjection(BSONObj(), BSON( "$ref" << 1));
- // dotted before
- createParsedProjection("{}", "{'a.$ref': 1}");
- createParsedProjection("{}", "{'a.$id': 1}");
- createParsedProjection("{}", "{'a.$db': 1}");
- // dotted after
- createParsedProjection("{}", "{'$id.a': 1}");
- // position operator on $id
- // $ref and $db hold the collection and database names respectively,
- // so these fields cannot be arrays.
- createParsedProjection("{'a.$id': {$elemMatch: {x: 1}}}", "{'a.$id.$': 1}");
-
- }
-} // unnamed namespace
+ ASSERT(out);
+ return unique_ptr<ParsedProjection>(out);
+}
+
+unique_ptr<ParsedProjection> createParsedProjection(const char* queryStr, const char* projStr) {
+ BSONObj query = fromjson(queryStr);
+ BSONObj projObj = fromjson(projStr);
+ return createParsedProjection(query, projObj);
+}
+
+//
+// Failure to create a parsed projection is expected
+//
+
+void assertInvalidProjection(const char* queryStr, const char* projStr) {
+ BSONObj query = fromjson(queryStr);
+ BSONObj projObj = fromjson(projStr);
+ StatusWithMatchExpression swme = MatchExpressionParser::parse(query);
+ ASSERT(swme.isOK());
+ std::unique_ptr<MatchExpression> queryMatchExpr(swme.getValue());
+ ParsedProjection* out = NULL;
+ Status status = ParsedProjection::make(projObj, queryMatchExpr.get(), &out);
+ std::unique_ptr<ParsedProjection> destroy(out);
+ ASSERT(!status.isOK());
+}
+
+// canonical_query.cpp will invoke ParsedProjection::make only when
+// the projection spec is non-empty. This test case is included for
+// completeness and does not reflect actual usage.
+TEST(ParsedProjectionTest, MakeId) {
+ unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{}"));
+ ASSERT(parsedProj->requiresDocument());
+}
+
+TEST(ParsedProjectionTest, MakeEmpty) {
+ unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: 0}"));
+ ASSERT(parsedProj->requiresDocument());
+}
+
+TEST(ParsedProjectionTest, MakeSingleField) {
+ unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{a: 1}"));
+ ASSERT(!parsedProj->requiresDocument());
+ const vector<string>& fields = parsedProj->getRequiredFields();
+ ASSERT_EQUALS(fields.size(), 2U);
+ ASSERT_EQUALS(fields[0], "_id");
+ ASSERT_EQUALS(fields[1], "a");
+}
+
+TEST(ParsedProjectionTest, MakeSingleFieldCovered) {
+ unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: 0, a: 1}"));
+ ASSERT(!parsedProj->requiresDocument());
+ const vector<string>& fields = parsedProj->getRequiredFields();
+ ASSERT_EQUALS(fields.size(), 1U);
+ ASSERT_EQUALS(fields[0], "a");
+}
+
+TEST(ParsedProjectionTest, MakeSingleFieldIDCovered) {
+ unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: 1}"));
+ ASSERT(!parsedProj->requiresDocument());
+ const vector<string>& fields = parsedProj->getRequiredFields();
+ ASSERT_EQUALS(fields.size(), 1U);
+ ASSERT_EQUALS(fields[0], "_id");
+}
+
+// boolean support is undocumented
+TEST(ParsedProjectionTest, MakeSingleFieldCoveredBoolean) {
+ unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: 0, a: true}"));
+ ASSERT(!parsedProj->requiresDocument());
+ const vector<string>& fields = parsedProj->getRequiredFields();
+ ASSERT_EQUALS(fields.size(), 1U);
+ ASSERT_EQUALS(fields[0], "a");
+}
+
+// boolean support is undocumented
+TEST(ParsedProjectionTest, MakeSingleFieldCoveredIdBoolean) {
+ unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: false, a: 1}"));
+ ASSERT(!parsedProj->requiresDocument());
+ const vector<string>& fields = parsedProj->getRequiredFields();
+ ASSERT_EQUALS(fields.size(), 1U);
+ ASSERT_EQUALS(fields[0], "a");
+}
+
+//
+// Positional operator validation
+//
+
+TEST(ParsedProjectionTest, InvalidPositionalOperatorProjections) {
+ assertInvalidProjection("{}", "{'a.$': 1}");
+ assertInvalidProjection("{a: 1}", "{'b.$': 1}");
+ assertInvalidProjection("{a: 1}", "{'a.$': 0}");
+ assertInvalidProjection("{a: 1}", "{'a.$.d.$': 1}");
+ assertInvalidProjection("{a: 1}", "{'a.$.$': 1}");
+ assertInvalidProjection("{a: 1}", "{'a.$.$': 1}");
+ assertInvalidProjection("{a: 1, b: 1, c: 1}", "{'abc.$': 1}");
+ assertInvalidProjection("{$or: [{a: 1}, {$or: [{b: 1}, {c: 1}]}]}", "{'d.$': 1}");
+ assertInvalidProjection("{a: [1, 2, 3]}", "{'.$': 1}");
+}
+
+TEST(ParsedProjectionTest, ValidPositionalOperatorProjections) {
+ createParsedProjection("{a: 1}", "{'a.$': 1}");
+ createParsedProjection("{a: 1}", "{'a.foo.bar.$': 1}");
+ createParsedProjection("{a: 1}", "{'a.foo.bar.$.x.y': 1}");
+ createParsedProjection("{'a.b.c': 1}", "{'a.b.c.$': 1}");
+ createParsedProjection("{'a.b.c': 1}", "{'a.e.f.$': 1}");
+ createParsedProjection("{a: {b: 1}}", "{'a.$': 1}");
+ createParsedProjection("{a: 1, b: 1}}", "{'a.$': 1}");
+ createParsedProjection("{a: 1, b: 1}}", "{'b.$': 1}");
+ createParsedProjection("{$and: [{a: 1}, {b: 1}]}", "{'a.$': 1}");
+ createParsedProjection("{$and: [{a: 1}, {b: 1}]}", "{'b.$': 1}");
+ createParsedProjection("{$or: [{a: 1}, {b: 1}]}", "{'a.$': 1}");
+ createParsedProjection("{$or: [{a: 1}, {b: 1}]}", "{'b.$': 1}");
+ createParsedProjection("{$and: [{$or: [{a: 1}, {$and: [{b: 1}, {c: 1}]}]}]}", "{'c.d.f.$': 1}");
+ // Fields with empty name can be projected using the positional $ operator.
+ createParsedProjection("{'': [1, 2, 3]}", "{'.$': 1}");
+}
+
+// Some match expressions (e.g. $where) do not override MatchExpression::path().
+// In this test case, we use an internal match expression implementation, ALWAYS_FALSE,
+// to achieve the same effect.
+// The projection parser should handle this the same way as an empty path.
+TEST(ParsedProjectionTest, InvalidPositionalProjectionDefaultPathMatchExpression) {
+ unique_ptr<MatchExpression> queryMatchExpr(new FalseMatchExpression());
+ ASSERT(NULL == queryMatchExpr->path().rawData());
+
+ ParsedProjection* out = NULL;
+ BSONObj projObj = fromjson("{'a.$': 1}");
+ Status status = ParsedProjection::make(projObj, queryMatchExpr.get(), &out);
+ ASSERT(!status.isOK());
+ std::unique_ptr<ParsedProjection> destroy(out);
+
+ // Projecting onto empty field should fail.
+ BSONObj emptyFieldProjObj = fromjson("{'.$': 1}");
+ status = ParsedProjection::make(emptyFieldProjObj, queryMatchExpr.get(), &out);
+ ASSERT(!status.isOK());
+}
+
+//
+// DBRef projections
+//
+
+TEST(ParsedProjectionTest, DBRefProjections) {
+ // non-dotted
+ createParsedProjection(BSONObj(), BSON("$ref" << 1));
+ createParsedProjection(BSONObj(), BSON("$id" << 1));
+ createParsedProjection(BSONObj(), BSON("$ref" << 1));
+ // dotted before
+ createParsedProjection("{}", "{'a.$ref': 1}");
+ createParsedProjection("{}", "{'a.$id': 1}");
+ createParsedProjection("{}", "{'a.$db': 1}");
+ // dotted after
+ createParsedProjection("{}", "{'$id.a': 1}");
+    // positional operator on $id
+ // $ref and $db hold the collection and database names respectively,
+ // so these fields cannot be arrays.
+ createParsedProjection("{'a.$id': {$elemMatch: {x: 1}}}", "{'a.$id.$': 1}");
+}
+} // unnamed namespace
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp
index b79c2e8f659..f97a81c0f01 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache.cpp
@@ -37,7 +37,7 @@
#include <memory>
#include "boost/thread/locks.hpp"
#include "mongo/base/owned_pointer_vector.h"
-#include "mongo/client/dbclientinterface.h" // For QueryOption_foobar
+#include "mongo/client/dbclientinterface.h" // For QueryOption_foobar
#include "mongo/db/matcher/expression_array.h"
#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/query/plan_ranker.h"
@@ -50,23 +50,23 @@
namespace mongo {
namespace {
- // Delimiters for cache key encoding.
- const char kEncodeDiscriminatorsBegin = '<';
- const char kEncodeDiscriminatorsEnd = '>';
- const char kEncodeChildrenBegin = '[';
- const char kEncodeChildrenEnd = ']';
- const char kEncodeChildrenSeparator = ',';
- const char kEncodeSortSection = '~';
- const char kEncodeProjectionSection = '|';
-
- /**
- * Encode user-provided string. Cache key delimiters seen in the
- * user string are escaped with a backslash.
- */
- void encodeUserString(StringData s, StringBuilder* keyBuilder) {
- for (size_t i = 0; i < s.size(); ++i) {
- char c = s[i];
- switch (c) {
+// Delimiters for cache key encoding.
+const char kEncodeDiscriminatorsBegin = '<';
+const char kEncodeDiscriminatorsEnd = '>';
+const char kEncodeChildrenBegin = '[';
+const char kEncodeChildrenEnd = ']';
+const char kEncodeChildrenSeparator = ',';
+const char kEncodeSortSection = '~';
+const char kEncodeProjectionSection = '|';
+
+/**
+ * Encodes a user-provided string. Cache key delimiters seen in the
+ * user string are escaped with a backslash.
+ */
+void encodeUserString(StringData s, StringBuilder* keyBuilder) {
+ for (size_t i = 0; i < s.size(); ++i) {
+ char c = s[i];
+ switch (c) {
case kEncodeDiscriminatorsBegin:
case kEncodeDiscriminatorsEnd:
case kEncodeChildrenBegin:
@@ -75,637 +75,680 @@ namespace {
case kEncodeSortSection:
case kEncodeProjectionSection:
case '\\':
- *keyBuilder << '\\';
- // Fall through to default case.
+ *keyBuilder << '\\';
+ // Fall through to default case.
default:
*keyBuilder << c;
- }
}
}
+}
- /**
- * 2-character encoding of MatchExpression::MatchType.
- */
- const char* encodeMatchType(MatchExpression::MatchType mt) {
- switch(mt) {
- case MatchExpression::AND: return "an"; break;
- case MatchExpression::OR: return "or"; break;
- case MatchExpression::NOR: return "nr"; break;
- case MatchExpression::NOT: return "nt"; break;
- case MatchExpression::ELEM_MATCH_OBJECT: return "eo"; break;
- case MatchExpression::ELEM_MATCH_VALUE: return "ev"; break;
- case MatchExpression::SIZE: return "sz"; break;
- case MatchExpression::LTE: return "le"; break;
- case MatchExpression::LT: return "lt"; break;
- case MatchExpression::EQ: return "eq"; break;
- case MatchExpression::GT: return "gt"; break;
- case MatchExpression::GTE: return "ge"; break;
- case MatchExpression::REGEX: return "re"; break;
- case MatchExpression::MOD: return "mo"; break;
- case MatchExpression::EXISTS: return "ex"; break;
- case MatchExpression::MATCH_IN: return "in"; break;
- case MatchExpression::TYPE_OPERATOR: return "ty"; break;
- case MatchExpression::GEO: return "go"; break;
- case MatchExpression::WHERE: return "wh"; break;
- case MatchExpression::ATOMIC: return "at"; break;
- case MatchExpression::ALWAYS_FALSE: return "af"; break;
- case MatchExpression::GEO_NEAR: return "gn"; break;
- case MatchExpression::TEXT: return "te"; break;
- default: verify(0); return "";
- }
+/**
+ * 2-character encoding of MatchExpression::MatchType.
+ */
+const char* encodeMatchType(MatchExpression::MatchType mt) {
+ switch (mt) {
+ case MatchExpression::AND:
+ return "an";
+ break;
+ case MatchExpression::OR:
+ return "or";
+ break;
+ case MatchExpression::NOR:
+ return "nr";
+ break;
+ case MatchExpression::NOT:
+ return "nt";
+ break;
+ case MatchExpression::ELEM_MATCH_OBJECT:
+ return "eo";
+ break;
+ case MatchExpression::ELEM_MATCH_VALUE:
+ return "ev";
+ break;
+ case MatchExpression::SIZE:
+ return "sz";
+ break;
+ case MatchExpression::LTE:
+ return "le";
+ break;
+ case MatchExpression::LT:
+ return "lt";
+ break;
+ case MatchExpression::EQ:
+ return "eq";
+ break;
+ case MatchExpression::GT:
+ return "gt";
+ break;
+ case MatchExpression::GTE:
+ return "ge";
+ break;
+ case MatchExpression::REGEX:
+ return "re";
+ break;
+ case MatchExpression::MOD:
+ return "mo";
+ break;
+ case MatchExpression::EXISTS:
+ return "ex";
+ break;
+ case MatchExpression::MATCH_IN:
+ return "in";
+ break;
+ case MatchExpression::TYPE_OPERATOR:
+ return "ty";
+ break;
+ case MatchExpression::GEO:
+ return "go";
+ break;
+ case MatchExpression::WHERE:
+ return "wh";
+ break;
+ case MatchExpression::ATOMIC:
+ return "at";
+ break;
+ case MatchExpression::ALWAYS_FALSE:
+ return "af";
+ break;
+ case MatchExpression::GEO_NEAR:
+ return "gn";
+ break;
+ case MatchExpression::TEXT:
+ return "te";
+ break;
+ default:
+ verify(0);
+ return "";
}
+}
- /**
- * Encodes GEO match expression.
- * Encoding includes:
- * - type of geo query (within/intersect/near)
- * - geometry type
- * - CRS (flat or spherical)
- */
- void encodeGeoMatchExpression(const GeoMatchExpression* tree, StringBuilder* keyBuilder) {
- const GeoExpression& geoQuery = tree->getGeoExpression();
-
- // Type of geo query.
- switch (geoQuery.getPred()) {
- case GeoExpression::WITHIN: *keyBuilder << "wi"; break;
- case GeoExpression::INTERSECT: *keyBuilder << "in"; break;
- case GeoExpression::INVALID: *keyBuilder << "id"; break;
- }
+/**
+ * Encodes GEO match expression.
+ * Encoding includes:
+ * - type of geo query (within/intersect/near)
+ * - geometry type
+ * - CRS (flat or spherical)
+ */
+void encodeGeoMatchExpression(const GeoMatchExpression* tree, StringBuilder* keyBuilder) {
+ const GeoExpression& geoQuery = tree->getGeoExpression();
+
+ // Type of geo query.
+ switch (geoQuery.getPred()) {
+ case GeoExpression::WITHIN:
+ *keyBuilder << "wi";
+ break;
+ case GeoExpression::INTERSECT:
+ *keyBuilder << "in";
+ break;
+ case GeoExpression::INVALID:
+ *keyBuilder << "id";
+ break;
+ }
- // Geometry type.
- // Only one of the shared_ptrs in GeoContainer may be non-NULL.
- *keyBuilder << geoQuery.getGeometry().getDebugType();
+ // Geometry type.
+ // Only one of the shared_ptrs in GeoContainer may be non-NULL.
+ *keyBuilder << geoQuery.getGeometry().getDebugType();
- // CRS (flat or spherical)
- if (FLAT == geoQuery.getGeometry().getNativeCRS()) {
+ // CRS (flat or spherical)
+ if (FLAT == geoQuery.getGeometry().getNativeCRS()) {
+ *keyBuilder << "fl";
+ } else if (SPHERE == geoQuery.getGeometry().getNativeCRS()) {
+ *keyBuilder << "sp";
+ } else if (STRICT_SPHERE == geoQuery.getGeometry().getNativeCRS()) {
+ *keyBuilder << "ss";
+ } else {
+ error() << "unknown CRS type " << (int)geoQuery.getGeometry().getNativeCRS()
+ << " in geometry of type " << geoQuery.getGeometry().getDebugType();
+ invariant(false);
+ }
+}
+
+/**
+ * Encodes GEO_NEAR match expression.
+ * Encode:
+ * - isNearSphere
+ * - CRS (flat or spherical)
+ */
+void encodeGeoNearMatchExpression(const GeoNearMatchExpression* tree, StringBuilder* keyBuilder) {
+ const GeoNearExpression& nearQuery = tree->getData();
+
+ // isNearSphere
+ *keyBuilder << (nearQuery.isNearSphere ? "ns" : "nr");
+
+ // CRS (flat or spherical or strict-winding spherical)
+ switch (nearQuery.centroid->crs) {
+ case FLAT:
*keyBuilder << "fl";
- }
- else if (SPHERE == geoQuery.getGeometry().getNativeCRS()) {
+ break;
+ case SPHERE:
*keyBuilder << "sp";
- }
- else if (STRICT_SPHERE == geoQuery.getGeometry().getNativeCRS()) {
+ break;
+ case STRICT_SPHERE:
*keyBuilder << "ss";
- }
- else {
- error() << "unknown CRS type " << (int)geoQuery.getGeometry().getNativeCRS()
- << " in geometry of type " << geoQuery.getGeometry().getDebugType();
- invariant(false);
- }
- }
-
- /**
- * Encodes GEO_NEAR match expression.
- * Encode:
- * - isNearSphere
- * - CRS (flat or spherical)
- */
- void encodeGeoNearMatchExpression(const GeoNearMatchExpression* tree,
- StringBuilder* keyBuilder) {
- const GeoNearExpression& nearQuery = tree->getData();
-
- // isNearSphere
- *keyBuilder << (nearQuery.isNearSphere ? "ns" : "nr");
-
- // CRS (flat or spherical or strict-winding spherical)
- switch (nearQuery.centroid->crs) {
- case FLAT: *keyBuilder << "fl"; break;
- case SPHERE: *keyBuilder << "sp"; break;
- case STRICT_SPHERE: *keyBuilder << "ss"; break;
+ break;
case UNSET:
error() << "unknown CRS type " << (int)nearQuery.centroid->crs
<< " in point geometry for near query";
invariant(false);
break;
- }
}
+}
} // namespace
- //
- // Cache-related functions for CanonicalQuery
- //
+//
+// Cache-related functions for CanonicalQuery
+//
- bool PlanCache::shouldCacheQuery(const CanonicalQuery& query) {
- const LiteParsedQuery& lpq = query.getParsed();
- const MatchExpression* expr = query.root();
+bool PlanCache::shouldCacheQuery(const CanonicalQuery& query) {
+ const LiteParsedQuery& lpq = query.getParsed();
+ const MatchExpression* expr = query.root();
- // Collection scan
- // No sort order requested
- if (lpq.getSort().isEmpty() &&
- expr->matchType() == MatchExpression::AND && expr->numChildren() == 0) {
- return false;
- }
-
- // Hint provided
- if (!lpq.getHint().isEmpty()) {
- return false;
- }
-
- // Min provided
- // Min queries are a special case of hinted queries.
- if (!lpq.getMin().isEmpty()) {
- return false;
- }
-
- // Max provided
- // Similar to min, max queries are a special case of hinted queries.
- if (!lpq.getMax().isEmpty()) {
- return false;
- }
+ // Collection scan
+ // No sort order requested
+ if (lpq.getSort().isEmpty() && expr->matchType() == MatchExpression::AND &&
+ expr->numChildren() == 0) {
+ return false;
+ }
- // Explain queries are not-cacheable. This is primarily because of
- // the need to generate current and accurate information in allPlans.
- // If the explain report is generated by the cached plan runner using
- // stale information from the cache for the losing plans, allPlans would
- // simply be wrong.
- if (lpq.isExplain()) {
- return false;
- }
+ // Hint provided
+ if (!lpq.getHint().isEmpty()) {
+ return false;
+ }
- // Tailable cursors won't get cached, just turn into collscans.
- if (query.getParsed().isTailable()) {
- return false;
- }
+ // Min provided
+ // Min queries are a special case of hinted queries.
+ if (!lpq.getMin().isEmpty()) {
+ return false;
+ }
- // Snapshot is really a hint.
- if (query.getParsed().isSnapshot()) {
- return false;
- }
+ // Max provided
+ // Similar to min, max queries are a special case of hinted queries.
+ if (!lpq.getMax().isEmpty()) {
+ return false;
+ }
- return true;
- }
-
- //
- // CachedSolution
- //
-
- CachedSolution::CachedSolution(const PlanCacheKey& key, const PlanCacheEntry& entry)
- : plannerData(entry.plannerData.size()),
- key(key),
- query(entry.query.getOwned()),
- sort(entry.sort.getOwned()),
- projection(entry.projection.getOwned()),
- decisionWorks(entry.decision->stats[0]->common.works) {
- // CachedSolution should not having any references into
- // cache entry. All relevant data should be cloned/copied.
- for (size_t i = 0; i < entry.plannerData.size(); ++i) {
- verify(entry.plannerData[i]);
- plannerData[i] = entry.plannerData[i]->clone();
- }
+    // Explain queries are not cacheable. This is primarily because of
+ // the need to generate current and accurate information in allPlans.
+ // If the explain report is generated by the cached plan runner using
+ // stale information from the cache for the losing plans, allPlans would
+ // simply be wrong.
+ if (lpq.isExplain()) {
+ return false;
}
- CachedSolution::~CachedSolution() {
- for (std::vector<SolutionCacheData*>::const_iterator i = plannerData.begin();
- i != plannerData.end(); ++i) {
- SolutionCacheData* scd = *i;
- delete scd;
- }
+ // Tailable cursors won't get cached, just turn into collscans.
+ if (query.getParsed().isTailable()) {
+ return false;
}
- //
- // PlanCacheEntry
- //
+ // Snapshot is really a hint.
+ if (query.getParsed().isSnapshot()) {
+ return false;
+ }
- PlanCacheEntry::PlanCacheEntry(const std::vector<QuerySolution*>& solutions,
- PlanRankingDecision* why)
- : plannerData(solutions.size()),
- decision(why) {
- invariant(why);
+ return true;
+}
- // The caller of this constructor is responsible for ensuring
- // that the QuerySolution 's' has valid cacheData. If there's no
- // data to cache you shouldn't be trying to construct a PlanCacheEntry.
+//
+// CachedSolution
+//
- // Copy the solution's cache data into the plan cache entry.
- for (size_t i = 0; i < solutions.size(); ++i) {
- invariant(solutions[i]->cacheData.get());
- plannerData[i] = solutions[i]->cacheData->clone();
- }
+CachedSolution::CachedSolution(const PlanCacheKey& key, const PlanCacheEntry& entry)
+ : plannerData(entry.plannerData.size()),
+ key(key),
+ query(entry.query.getOwned()),
+ sort(entry.sort.getOwned()),
+ projection(entry.projection.getOwned()),
+ decisionWorks(entry.decision->stats[0]->common.works) {
+    // CachedSolution should not have any references into
+    // the cache entry. All relevant data should be cloned/copied.
+ for (size_t i = 0; i < entry.plannerData.size(); ++i) {
+ verify(entry.plannerData[i]);
+ plannerData[i] = entry.plannerData[i]->clone();
}
+}
- PlanCacheEntry::~PlanCacheEntry() {
- for (size_t i = 0; i < feedback.size(); ++i) {
- delete feedback[i];
- }
- for (size_t i = 0; i < plannerData.size(); ++i) {
- delete plannerData[i];
- }
+CachedSolution::~CachedSolution() {
+ for (std::vector<SolutionCacheData*>::const_iterator i = plannerData.begin();
+ i != plannerData.end();
+ ++i) {
+ SolutionCacheData* scd = *i;
+ delete scd;
}
+}
- PlanCacheEntry* PlanCacheEntry::clone() const {
- OwnedPointerVector<QuerySolution> solutions;
- for (size_t i = 0; i < plannerData.size(); ++i) {
- QuerySolution* qs = new QuerySolution();
- qs->cacheData.reset(plannerData[i]->clone());
- solutions.mutableVector().push_back(qs);
- }
- PlanCacheEntry* entry = new PlanCacheEntry(solutions.vector(), decision->clone());
-
- // Copy query shape.
- entry->query = query.getOwned();
- entry->sort = sort.getOwned();
- entry->projection = projection.getOwned();
-
- // Copy performance stats.
- for (size_t i = 0; i < feedback.size(); ++i) {
- PlanCacheEntryFeedback* fb = new PlanCacheEntryFeedback();
- fb->stats.reset(feedback[i]->stats->clone());
- fb->score = feedback[i]->score;
- entry->feedback.push_back(fb);
- }
- return entry;
- }
+//
+// PlanCacheEntry
+//
- std::string PlanCacheEntry::toString() const {
- return str::stream()
- << "(query: " << query.toString()
- << ";sort: " << sort.toString()
- << ";projection: " << projection.toString()
- << ";solutions: " << plannerData.size()
- << ")";
- }
+PlanCacheEntry::PlanCacheEntry(const std::vector<QuerySolution*>& solutions,
+ PlanRankingDecision* why)
+ : plannerData(solutions.size()), decision(why) {
+ invariant(why);
- std::string CachedSolution::toString() const {
- return str::stream() << "key: " << key << '\n';
+    // The caller of this constructor is responsible for ensuring that each
+    // QuerySolution in 'solutions' has valid cacheData. If there's no
+    // data to cache, you shouldn't be trying to construct a PlanCacheEntry.
+
+ // Copy the solution's cache data into the plan cache entry.
+ for (size_t i = 0; i < solutions.size(); ++i) {
+ invariant(solutions[i]->cacheData.get());
+ plannerData[i] = solutions[i]->cacheData->clone();
}
+}
- //
- // PlanCacheIndexTree
- //
+PlanCacheEntry::~PlanCacheEntry() {
+ for (size_t i = 0; i < feedback.size(); ++i) {
+ delete feedback[i];
+ }
+ for (size_t i = 0; i < plannerData.size(); ++i) {
+ delete plannerData[i];
+ }
+}
- void PlanCacheIndexTree::setIndexEntry(const IndexEntry& ie) {
- entry.reset(new IndexEntry(ie));
+PlanCacheEntry* PlanCacheEntry::clone() const {
+ OwnedPointerVector<QuerySolution> solutions;
+ for (size_t i = 0; i < plannerData.size(); ++i) {
+ QuerySolution* qs = new QuerySolution();
+ qs->cacheData.reset(plannerData[i]->clone());
+ solutions.mutableVector().push_back(qs);
}
+ PlanCacheEntry* entry = new PlanCacheEntry(solutions.vector(), decision->clone());
- PlanCacheIndexTree* PlanCacheIndexTree::clone() const {
- PlanCacheIndexTree* root = new PlanCacheIndexTree();
- if (NULL != entry.get()) {
- root->index_pos = index_pos;
- root->setIndexEntry(*entry.get());
- }
+ // Copy query shape.
+ entry->query = query.getOwned();
+ entry->sort = sort.getOwned();
+ entry->projection = projection.getOwned();
+ // Copy performance stats.
+ for (size_t i = 0; i < feedback.size(); ++i) {
+ PlanCacheEntryFeedback* fb = new PlanCacheEntryFeedback();
+ fb->stats.reset(feedback[i]->stats->clone());
+ fb->score = feedback[i]->score;
+ entry->feedback.push_back(fb);
+ }
+ return entry;
+}
+
+std::string PlanCacheEntry::toString() const {
+ return str::stream() << "(query: " << query.toString() << ";sort: " << sort.toString()
+ << ";projection: " << projection.toString()
+ << ";solutions: " << plannerData.size() << ")";
+}
+
+std::string CachedSolution::toString() const {
+ return str::stream() << "key: " << key << '\n';
+}
+
+//
+// PlanCacheIndexTree
+//
+
+void PlanCacheIndexTree::setIndexEntry(const IndexEntry& ie) {
+ entry.reset(new IndexEntry(ie));
+}
+
+PlanCacheIndexTree* PlanCacheIndexTree::clone() const {
+ PlanCacheIndexTree* root = new PlanCacheIndexTree();
+ if (NULL != entry.get()) {
+ root->index_pos = index_pos;
+ root->setIndexEntry(*entry.get());
+ }
+
+ for (std::vector<PlanCacheIndexTree*>::const_iterator it = children.begin();
+ it != children.end();
+ ++it) {
+ PlanCacheIndexTree* clonedChild = (*it)->clone();
+ root->children.push_back(clonedChild);
+ }
+ return root;
+}
+
+std::string PlanCacheIndexTree::toString(int indents) const {
+ StringBuilder result;
+ if (!children.empty()) {
+ result << std::string(3 * indents, '-') << "Node\n";
+ int newIndent = indents + 1;
for (std::vector<PlanCacheIndexTree*>::const_iterator it = children.begin();
- it != children.end(); ++it) {
- PlanCacheIndexTree* clonedChild = (*it)->clone();
- root->children.push_back(clonedChild);
- }
- return root;
- }
-
- std::string PlanCacheIndexTree::toString(int indents) const {
- StringBuilder result;
- if (!children.empty()) {
- result << std::string(3 * indents, '-') << "Node\n";
- int newIndent = indents + 1;
- for (std::vector<PlanCacheIndexTree*>::const_iterator it = children.begin();
- it != children.end(); ++it) {
- result << (*it)->toString(newIndent);
- }
- return result.str();
- }
- else {
- result << std::string(3 * indents, '-') << "Leaf ";
- if (NULL != entry.get()) {
- result << entry->keyPattern.toString() << ", pos: " << index_pos;
- }
- result << '\n';
+ it != children.end();
+ ++it) {
+ result << (*it)->toString(newIndent);
}
return result.str();
+ } else {
+ result << std::string(3 * indents, '-') << "Leaf ";
+ if (NULL != entry.get()) {
+ result << entry->keyPattern.toString() << ", pos: " << index_pos;
+ }
+ result << '\n';
}
+ return result.str();
+}
- //
- // SolutionCacheData
- //
+//
+// SolutionCacheData
+//
- SolutionCacheData* SolutionCacheData::clone() const {
- SolutionCacheData* other = new SolutionCacheData();
- if (NULL != this->tree.get()) {
- // 'tree' could be NULL if the cached solution
- // is a collection scan.
- other->tree.reset(this->tree->clone());
- }
- other->solnType = this->solnType;
- other->wholeIXSolnDir = this->wholeIXSolnDir;
- other->indexFilterApplied = this->indexFilterApplied;
- return other;
+SolutionCacheData* SolutionCacheData::clone() const {
+ SolutionCacheData* other = new SolutionCacheData();
+ if (NULL != this->tree.get()) {
+ // 'tree' could be NULL if the cached solution
+ // is a collection scan.
+ other->tree.reset(this->tree->clone());
}
+ other->solnType = this->solnType;
+ other->wholeIXSolnDir = this->wholeIXSolnDir;
+ other->indexFilterApplied = this->indexFilterApplied;
+ return other;
+}
- std::string SolutionCacheData::toString() const {
- switch (this->solnType) {
+std::string SolutionCacheData::toString() const {
+ switch (this->solnType) {
case WHOLE_IXSCAN_SOLN:
verify(this->tree.get());
- return str::stream()
- << "(whole index scan solution: "
- << "dir=" << this->wholeIXSolnDir << "; "
- << "tree=" << this->tree->toString()
- << ")";
+ return str::stream() << "(whole index scan solution: "
+ << "dir=" << this->wholeIXSolnDir << "; "
+ << "tree=" << this->tree->toString() << ")";
case COLLSCAN_SOLN:
return "(collection scan)";
case USE_INDEX_TAGS_SOLN:
verify(this->tree.get());
- return str::stream()
- << "(index-tagged expression tree: "
- << "tree=" << this->tree->toString()
- << ")";
- }
- MONGO_UNREACHABLE;
+ return str::stream() << "(index-tagged expression tree: "
+ << "tree=" << this->tree->toString() << ")";
}
+ MONGO_UNREACHABLE;
+}
- //
- // PlanCache
- //
+//
+// PlanCache
+//
- PlanCache::PlanCache() : _cache(internalQueryCacheSize) { }
+PlanCache::PlanCache() : _cache(internalQueryCacheSize) {}
- PlanCache::PlanCache(const std::string& ns) : _cache(internalQueryCacheSize), _ns(ns) { }
+PlanCache::PlanCache(const std::string& ns) : _cache(internalQueryCacheSize), _ns(ns) {}
- PlanCache::~PlanCache() { }
+PlanCache::~PlanCache() {}
- /**
- * Traverses expression tree pre-order.
- * Appends an encoding of each node's match type and path name
- * to the output stream.
- */
- void PlanCache::encodeKeyForMatch(const MatchExpression* tree,
- StringBuilder* keyBuilder) const {
- // Encode match type and path.
- *keyBuilder << encodeMatchType(tree->matchType());
+/**
+ * Traverses expression tree pre-order.
+ * Appends an encoding of each node's match type and path name
+ * to the output stream.
+ */
+void PlanCache::encodeKeyForMatch(const MatchExpression* tree, StringBuilder* keyBuilder) const {
+ // Encode match type and path.
+ *keyBuilder << encodeMatchType(tree->matchType());
- encodeUserString(tree->path(), keyBuilder);
+ encodeUserString(tree->path(), keyBuilder);
- // GEO and GEO_NEAR require additional encoding.
- if (MatchExpression::GEO == tree->matchType()) {
- encodeGeoMatchExpression(static_cast<const GeoMatchExpression*>(tree), keyBuilder);
- }
- else if (MatchExpression::GEO_NEAR == tree->matchType()) {
- encodeGeoNearMatchExpression(static_cast<const GeoNearMatchExpression*>(tree),
- keyBuilder);
+ // GEO and GEO_NEAR require additional encoding.
+ if (MatchExpression::GEO == tree->matchType()) {
+ encodeGeoMatchExpression(static_cast<const GeoMatchExpression*>(tree), keyBuilder);
+ } else if (MatchExpression::GEO_NEAR == tree->matchType()) {
+ encodeGeoNearMatchExpression(static_cast<const GeoNearMatchExpression*>(tree), keyBuilder);
+ }
+
+ // Encode indexability.
+ const IndexabilityDiscriminators& discriminators =
+ _indexabilityState.getDiscriminators(tree->path());
+ if (!discriminators.empty()) {
+ *keyBuilder << kEncodeDiscriminatorsBegin;
+ // For each discriminator on this path, append the character '0' or '1'.
+ for (const IndexabilityDiscriminator& discriminator : discriminators) {
+ *keyBuilder << discriminator(tree);
}
+ *keyBuilder << kEncodeDiscriminatorsEnd;
+ }
- // Encode indexability.
- const IndexabilityDiscriminators& discriminators =
- _indexabilityState.getDiscriminators(tree->path());
- if (!discriminators.empty()) {
- *keyBuilder << kEncodeDiscriminatorsBegin;
- // For each discriminator on this path, append the character '0' or '1'.
- for (const IndexabilityDiscriminator& discriminator : discriminators) {
- *keyBuilder << discriminator(tree);
- }
- *keyBuilder << kEncodeDiscriminatorsEnd;
+ // Traverse child nodes.
+ // Enclose children in [].
+ if (tree->numChildren() > 0) {
+ *keyBuilder << kEncodeChildrenBegin;
+ }
+ // Use comma to separate children encoding.
+ for (size_t i = 0; i < tree->numChildren(); ++i) {
+ if (i > 0) {
+ *keyBuilder << kEncodeChildrenSeparator;
}
+ encodeKeyForMatch(tree->getChild(i), keyBuilder);
+ }
+ if (tree->numChildren() > 0) {
+ *keyBuilder << kEncodeChildrenEnd;
+ }
+}
+
+/**
+ * Encodes sort order into cache key.
+ * Sort order is normalized because it is provided by
+ * LiteParsedQuery.
+ */
+void PlanCache::encodeKeyForSort(const BSONObj& sortObj, StringBuilder* keyBuilder) const {
+ if (sortObj.isEmpty()) {
+ return;
+ }
+
+ *keyBuilder << kEncodeSortSection;
- // Traverse child nodes.
- // Enclose children in [].
- if (tree->numChildren() > 0) {
- *keyBuilder << kEncodeChildrenBegin;
+ BSONObjIterator it(sortObj);
+ while (it.more()) {
+ BSONElement elt = it.next();
+ // $meta text score
+ if (LiteParsedQuery::isTextScoreMeta(elt)) {
+ *keyBuilder << "t";
}
- // Use comma to separate children encoding.
- for (size_t i = 0; i < tree->numChildren(); ++i) {
- if (i > 0) {
- *keyBuilder << kEncodeChildrenSeparator;
- }
- encodeKeyForMatch(tree->getChild(i), keyBuilder);
+ // Ascending
+ else if (elt.numberInt() == 1) {
+ *keyBuilder << "a";
}
- if (tree->numChildren() > 0) {
- *keyBuilder << kEncodeChildrenEnd;
+ // Descending
+ else {
+ *keyBuilder << "d";
}
- }
+ encodeUserString(elt.fieldName(), keyBuilder);
- /**
- * Encodes sort order into cache key.
- * Sort order is normalized because it provided by
- * LiteParsedQuery.
- */
- void PlanCache::encodeKeyForSort(const BSONObj& sortObj, StringBuilder* keyBuilder) const {
- if (sortObj.isEmpty()) {
- return;
+ // Sort argument separator
+ if (it.more()) {
+ *keyBuilder << ",";
}
+ }
+}
- *keyBuilder << kEncodeSortSection;
-
- BSONObjIterator it(sortObj);
- while (it.more()) {
- BSONElement elt = it.next();
- // $meta text score
- if (LiteParsedQuery::isTextScoreMeta(elt)) {
- *keyBuilder << "t";
- }
- // Ascending
- else if (elt.numberInt() == 1) {
- *keyBuilder << "a";
- }
- // Descending
- else {
- *keyBuilder << "d";
- }
- encodeUserString(elt.fieldName(), keyBuilder);
-
- // Sort argument separator
- if (it.more()) {
- *keyBuilder << ",";
- }
- }
+/**
+ * Encodes parsed projection into cache key.
+ * Does a simple toString() on each projected field
+ * in the BSON object.
+ * Orders the encoded elements in the projection by field name.
+ * This handles all the special projection types ($meta, $elemMatch, etc.)
+ */
+void PlanCache::encodeKeyForProj(const BSONObj& projObj, StringBuilder* keyBuilder) const {
+ if (projObj.isEmpty()) {
+ return;
}
- /**
- * Encodes parsed projection into cache key.
- * Does a simple toString() on each projected field
- * in the BSON object.
- * Orders the encoded elements in the projection by field name.
- * This handles all the special projection types ($meta, $elemMatch, etc.)
- */
- void PlanCache::encodeKeyForProj(const BSONObj& projObj, StringBuilder* keyBuilder) const {
- if (projObj.isEmpty()) {
- return;
- }
+ *keyBuilder << kEncodeProjectionSection;
- *keyBuilder << kEncodeProjectionSection;
+ // Sorts the BSON elements by field name using a map.
+ std::map<StringData, BSONElement> elements;
- // Sorts the BSON elements by field name using a map.
- std::map<StringData, BSONElement> elements;
+ BSONObjIterator it(projObj);
+ while (it.more()) {
+ BSONElement elt = it.next();
+ StringData fieldName = elt.fieldNameStringData();
+ elements[fieldName] = elt;
+ }
- BSONObjIterator it(projObj);
- while (it.more()) {
- BSONElement elt = it.next();
- StringData fieldName = elt.fieldNameStringData();
- elements[fieldName] = elt;
- }
+ // Read elements in order of field name
+ for (std::map<StringData, BSONElement>::const_iterator i = elements.begin();
+ i != elements.end();
+ ++i) {
+ const BSONElement& elt = (*i).second;
- // Read elements in order of field name
- for (std::map<StringData, BSONElement>::const_iterator i = elements.begin();
- i != elements.end(); ++i) {
- const BSONElement& elt = (*i).second;
-
- if (elt.isSimpleType()) {
- // For inclusion/exclusion projections, we encode as "i" or "e".
- *keyBuilder << (elt.trueValue() ? "i" : "e");
- }
- else {
- // For projection operators, we use the verbatim string encoding of the element.
- encodeUserString(elt.toString(false, // includeFieldName
- false), // full
- keyBuilder);
- }
-
- encodeUserString(elt.fieldName(), keyBuilder);
+ if (elt.isSimpleType()) {
+ // For inclusion/exclusion projections, we encode as "i" or "e".
+ *keyBuilder << (elt.trueValue() ? "i" : "e");
+ } else {
+ // For projection operators, we use the verbatim string encoding of the element.
+ encodeUserString(elt.toString(false, // includeFieldName
+ false), // full
+ keyBuilder);
}
- }
- Status PlanCache::add(const CanonicalQuery& query,
- const std::vector<QuerySolution*>& solns,
- PlanRankingDecision* why) {
- invariant(why);
+ encodeUserString(elt.fieldName(), keyBuilder);
+ }
+}
- if (solns.empty()) {
- return Status(ErrorCodes::BadValue, "no solutions provided");
- }
+Status PlanCache::add(const CanonicalQuery& query,
+ const std::vector<QuerySolution*>& solns,
+ PlanRankingDecision* why) {
+ invariant(why);
- if (why->stats.size() != solns.size()) {
- return Status(ErrorCodes::BadValue,
- "number of stats in decision must match solutions");
- }
+ if (solns.empty()) {
+ return Status(ErrorCodes::BadValue, "no solutions provided");
+ }
- if (why->scores.size() != solns.size()) {
- return Status(ErrorCodes::BadValue,
- "number of scores in decision must match solutions");
- }
+ if (why->stats.size() != solns.size()) {
+ return Status(ErrorCodes::BadValue, "number of stats in decision must match solutions");
+ }
- if (why->candidateOrder.size() != solns.size()) {
- return Status(ErrorCodes::BadValue,
- "candidate ordering entries in decision must match solutions");
- }
+ if (why->scores.size() != solns.size()) {
+ return Status(ErrorCodes::BadValue, "number of scores in decision must match solutions");
+ }
- PlanCacheEntry* entry = new PlanCacheEntry(solns, why);
- const LiteParsedQuery& pq = query.getParsed();
- entry->query = pq.getFilter().getOwned();
- entry->sort = pq.getSort().getOwned();
- entry->projection = pq.getProj().getOwned();
+ if (why->candidateOrder.size() != solns.size()) {
+ return Status(ErrorCodes::BadValue,
+ "candidate ordering entries in decision must match solutions");
+ }
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- std::unique_ptr<PlanCacheEntry> evictedEntry = _cache.add(computeKey(query), entry);
+ PlanCacheEntry* entry = new PlanCacheEntry(solns, why);
+ const LiteParsedQuery& pq = query.getParsed();
+ entry->query = pq.getFilter().getOwned();
+ entry->sort = pq.getSort().getOwned();
+ entry->projection = pq.getProj().getOwned();
- if (NULL != evictedEntry.get()) {
- LOG(1) << _ns << ": plan cache maximum size exceeded - "
- << "removed least recently used entry "
- << evictedEntry->toString();
- }
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ std::unique_ptr<PlanCacheEntry> evictedEntry = _cache.add(computeKey(query), entry);
- return Status::OK();
+ if (NULL != evictedEntry.get()) {
+ LOG(1) << _ns << ": plan cache maximum size exceeded - "
+ << "removed least recently used entry " << evictedEntry->toString();
}
- Status PlanCache::get(const CanonicalQuery& query, CachedSolution** crOut) const {
- PlanCacheKey key = computeKey(query);
- verify(crOut);
+ return Status::OK();
+}
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- PlanCacheEntry* entry;
- Status cacheStatus = _cache.get(key, &entry);
- if (!cacheStatus.isOK()) {
- return cacheStatus;
- }
- invariant(entry);
+Status PlanCache::get(const CanonicalQuery& query, CachedSolution** crOut) const {
+ PlanCacheKey key = computeKey(query);
+ verify(crOut);
- *crOut = new CachedSolution(key, *entry);
-
- return Status::OK();
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ PlanCacheEntry* entry;
+ Status cacheStatus = _cache.get(key, &entry);
+ if (!cacheStatus.isOK()) {
+ return cacheStatus;
}
+ invariant(entry);
- Status PlanCache::feedback(const CanonicalQuery& cq, PlanCacheEntryFeedback* feedback) {
- if (NULL == feedback) {
- return Status(ErrorCodes::BadValue, "feedback is NULL");
- }
- std::unique_ptr<PlanCacheEntryFeedback> autoFeedback(feedback);
- PlanCacheKey ck = computeKey(cq);
-
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- PlanCacheEntry* entry;
- Status cacheStatus = _cache.get(ck, &entry);
- if (!cacheStatus.isOK()) {
- return cacheStatus;
- }
- invariant(entry);
+ *crOut = new CachedSolution(key, *entry);
- // We store up to a constant number of feedback entries.
- if (entry->feedback.size() < size_t(internalQueryCacheFeedbacksStored)) {
- entry->feedback.push_back(autoFeedback.release());
- }
+ return Status::OK();
+}
- return Status::OK();
+Status PlanCache::feedback(const CanonicalQuery& cq, PlanCacheEntryFeedback* feedback) {
+ if (NULL == feedback) {
+ return Status(ErrorCodes::BadValue, "feedback is NULL");
}
+ std::unique_ptr<PlanCacheEntryFeedback> autoFeedback(feedback);
+ PlanCacheKey ck = computeKey(cq);
- Status PlanCache::remove(const CanonicalQuery& canonicalQuery) {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- return _cache.remove(computeKey(canonicalQuery));
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ PlanCacheEntry* entry;
+ Status cacheStatus = _cache.get(ck, &entry);
+ if (!cacheStatus.isOK()) {
+ return cacheStatus;
}
+ invariant(entry);
- void PlanCache::clear() {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- _cache.clear();
- _writeOperations.store(0);
+ // We store up to a constant number of feedback entries.
+ if (entry->feedback.size() < size_t(internalQueryCacheFeedbacksStored)) {
+ entry->feedback.push_back(autoFeedback.release());
}
- PlanCacheKey PlanCache::computeKey(const CanonicalQuery& cq) const {
- StringBuilder keyBuilder;
- encodeKeyForMatch(cq.root(), &keyBuilder);
- encodeKeyForSort(cq.getParsed().getSort(), &keyBuilder);
- encodeKeyForProj(cq.getParsed().getProj(), &keyBuilder);
- return keyBuilder.str();
- }
+ return Status::OK();
+}
- Status PlanCache::getEntry(const CanonicalQuery& query, PlanCacheEntry** entryOut) const {
- PlanCacheKey key = computeKey(query);
- verify(entryOut);
+Status PlanCache::remove(const CanonicalQuery& canonicalQuery) {
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ return _cache.remove(computeKey(canonicalQuery));
+}
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- PlanCacheEntry* entry;
- Status cacheStatus = _cache.get(key, &entry);
- if (!cacheStatus.isOK()) {
- return cacheStatus;
- }
- invariant(entry);
+void PlanCache::clear() {
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ _cache.clear();
+ _writeOperations.store(0);
+}
- *entryOut = entry->clone();
+PlanCacheKey PlanCache::computeKey(const CanonicalQuery& cq) const {
+ StringBuilder keyBuilder;
+ encodeKeyForMatch(cq.root(), &keyBuilder);
+ encodeKeyForSort(cq.getParsed().getSort(), &keyBuilder);
+ encodeKeyForProj(cq.getParsed().getProj(), &keyBuilder);
+ return keyBuilder.str();
+}
- return Status::OK();
+Status PlanCache::getEntry(const CanonicalQuery& query, PlanCacheEntry** entryOut) const {
+ PlanCacheKey key = computeKey(query);
+ verify(entryOut);
+
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ PlanCacheEntry* entry;
+ Status cacheStatus = _cache.get(key, &entry);
+ if (!cacheStatus.isOK()) {
+ return cacheStatus;
}
+ invariant(entry);
- std::vector<PlanCacheEntry*> PlanCache::getAllEntries() const {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- std::vector<PlanCacheEntry*> entries;
- typedef std::list< std::pair<PlanCacheKey, PlanCacheEntry*> >::const_iterator ConstIterator;
- for (ConstIterator i = _cache.begin(); i != _cache.end(); i++) {
- PlanCacheEntry* entry = i->second;
- entries.push_back(entry->clone());
- }
+ *entryOut = entry->clone();
- return entries;
- }
+ return Status::OK();
+}
- bool PlanCache::contains(const CanonicalQuery& cq) const {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- return _cache.hasKey(computeKey(cq));
+std::vector<PlanCacheEntry*> PlanCache::getAllEntries() const {
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ std::vector<PlanCacheEntry*> entries;
+ typedef std::list<std::pair<PlanCacheKey, PlanCacheEntry*>>::const_iterator ConstIterator;
+ for (ConstIterator i = _cache.begin(); i != _cache.end(); i++) {
+ PlanCacheEntry* entry = i->second;
+ entries.push_back(entry->clone());
}
- size_t PlanCache::size() const {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- return _cache.size();
- }
+ return entries;
+}
- void PlanCache::notifyOfWriteOp() {
- // It's fine to clear the cache multiple times if multiple threads
- // increment the counter to kPlanCacheMaxWriteOperations or greater.
- if (_writeOperations.addAndFetch(1) < internalQueryCacheWriteOpsBetweenFlush) {
- return;
- }
+bool PlanCache::contains(const CanonicalQuery& cq) const {
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ return _cache.hasKey(computeKey(cq));
+}
- LOG(1) << _ns << ": clearing collection plan cache - "
- << internalQueryCacheWriteOpsBetweenFlush
- << " write operations detected since last refresh.";
- clear();
- }
+size_t PlanCache::size() const {
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ return _cache.size();
+}
- void PlanCache::notifyOfIndexEntries(const std::vector<IndexEntry>& indexEntries) {
- _indexabilityState.updateDiscriminators(indexEntries);
+void PlanCache::notifyOfWriteOp() {
+ // It's fine to clear the cache multiple times if multiple threads
+    // increment the counter to internalQueryCacheWriteOpsBetweenFlush or greater.
+ if (_writeOperations.addAndFetch(1) < internalQueryCacheWriteOpsBetweenFlush) {
+ return;
}
+ LOG(1) << _ns << ": clearing collection plan cache - " << internalQueryCacheWriteOpsBetweenFlush
+ << " write operations detected since last refresh.";
+ clear();
+}
+
+void PlanCache::notifyOfIndexEntries(const std::vector<IndexEntry>& indexEntries) {
+ _indexabilityState.updateDiscriminators(indexEntries);
+}
+
} // namespace mongo
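For the key-encoding helpers above: computeKey concatenates, for each match node, a two-letter match-type code and the escaped path, then appends the '~' sort section and the '|' projection section. The standalone snippet below only restates the escaping rule from encodeUserString so it can be compiled and run in isolation; it is an illustration, not code from this change, and the delimiter set is copied from the constants above.

#include <iostream>
#include <string>

// Restatement (for illustration) of the escaping rule used by encodeUserString:
// any cache-key delimiter or backslash in user-provided text gets a leading backslash.
std::string escapeForCacheKey(const std::string& s) {
    const std::string delimiters = "<>[],~|\\";
    std::string out;
    for (char c : s) {
        if (delimiters.find(c) != std::string::npos) {
            out += '\\';
        }
        out += c;
    }
    return out;
}

int main() {
    // A field name containing a delimiter cannot be confused with the key's
    // structural characters once escaped.
    std::cout << escapeForCacheKey("price|usd") << std::endl;  // prints: price\|usd
    return 0;
}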
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index 3bc1e474365..974d827e31f 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -42,379 +42,381 @@
namespace mongo {
- // A PlanCacheKey is a string-ified version of a query's predicate/projection/sort.
- typedef std::string PlanCacheKey;
+// A PlanCacheKey is a string-ified version of a query's predicate/projection/sort.
+typedef std::string PlanCacheKey;
- struct PlanRankingDecision;
- struct QuerySolution;
- struct QuerySolutionNode;
+struct PlanRankingDecision;
+struct QuerySolution;
+struct QuerySolutionNode;
+
+/**
+ * When the CachedPlanStage runs a cached query, it can provide feedback to the cache. This
+ * feedback is available to anyone who retrieves that query in the future.
+ */
+struct PlanCacheEntryFeedback {
+ // How well did the cached plan perform?
+ std::unique_ptr<PlanStageStats> stats;
+
+ // The "goodness" score produced by the plan ranker
+ // corresponding to 'stats'.
+ double score;
+};
+
+// TODO: Replace with opaque type.
+typedef std::string PlanID;
+
+/**
+ * A PlanCacheIndexTree is the meaty component of the data
+ * stored in SolutionCacheData. It is a tree structure with
+ * index tags that indicates to the access planner which indices
+ * it should try to use.
+ *
+ * How a PlanCacheIndexTree is created:
+ * The query planner tags a match expression with indices. It
+ * then uses the tagged tree to create a PlanCacheIndexTree,
+ * using QueryPlanner::cacheDataFromTaggedTree. The PlanCacheIndexTree
+ * is isomorphic to the tagged match expression, and has matching
+ * index tags.
+ *
+ * How a PlanCacheIndexTree is used:
+ * When the query planner is planning from the cache, it uses
+ * the PlanCacheIndexTree retrieved from the cache in order to
+ * recreate index assignments. Specifically, a raw MatchExpression
+ * is tagged according to the index tags in the PlanCacheIndexTree.
+ * This is done by QueryPlanner::tagAccordingToCache.
+ */
+struct PlanCacheIndexTree {
+ PlanCacheIndexTree() : entry(nullptr), index_pos(0) {}
+
+ ~PlanCacheIndexTree() {
+ for (std::vector<PlanCacheIndexTree*>::const_iterator it = children.begin();
+ it != children.end();
+ ++it) {
+ delete *it;
+ }
+ }
/**
- * When the CachedPlanStage runs a cached query, it can provide feedback to the cache. This
- * feedback is available to anyone who retrieves that query in the future.
+ * Clone 'ie' and set 'this->entry' to be the clone.
*/
- struct PlanCacheEntryFeedback {
- // How well did the cached plan perform?
- std::unique_ptr<PlanStageStats> stats;
-
- // The "goodness" score produced by the plan ranker
- // corresponding to 'stats'.
- double score;
- };
+ void setIndexEntry(const IndexEntry& ie);
- // TODO: Replace with opaque type.
- typedef std::string PlanID;
+ /**
+ * Make a deep copy.
+ */
+ PlanCacheIndexTree* clone() const;
/**
- * A PlanCacheIndexTree is the meaty component of the data
- * stored in SolutionCacheData. It is a tree structure with
- * index tags that indicates to the access planner which indices
- * it should try to use.
- *
- * How a PlanCacheIndexTree is created:
- * The query planner tags a match expression with indices. It
- * then uses the tagged tree to create a PlanCacheIndexTree,
- * using QueryPlanner::cacheDataFromTaggedTree. The PlanCacheIndexTree
- * is isomorphic to the tagged match expression, and has matching
- * index tags.
- *
- * How a PlanCacheIndexTree is used:
- * When the query planner is planning from the cache, it uses
- * the PlanCacheIndexTree retrieved from the cache in order to
- * recreate index assignments. Specifically, a raw MatchExpression
- * is tagged according to the index tags in the PlanCacheIndexTree.
- * This is done by QueryPlanner::tagAccordingToCache.
+ * For debugging.
*/
- struct PlanCacheIndexTree {
- PlanCacheIndexTree() : entry(nullptr), index_pos(0) { }
-
- ~PlanCacheIndexTree() {
- for (std::vector<PlanCacheIndexTree*>::const_iterator it = children.begin();
- it != children.end(); ++it) {
- delete *it;
- }
- }
+ std::string toString(int indents = 0) const;
+
+ // Children owned here.
+ std::vector<PlanCacheIndexTree*> children;
+
+ // Owned here.
+ std::unique_ptr<IndexEntry> entry;
+
+ size_t index_pos;
+};
- /**
- * Clone 'ie' and set 'this->entry' to be the clone.
- */
- void setIndexEntry(const IndexEntry& ie);
+/**
+ * Data stored inside a QuerySolution which can subsequently be
+ * used to create a cache entry. When this data is retrieved
+ * from the cache, it is sufficient to reconstruct the original
+ * QuerySolution.
+ */
+struct SolutionCacheData {
+ SolutionCacheData()
+ : tree(nullptr),
+ solnType(USE_INDEX_TAGS_SOLN),
+ wholeIXSolnDir(1),
+ indexFilterApplied(false) {}
+
+ // Make a deep copy.
+ SolutionCacheData* clone() const;
+
+ // For debugging.
+ std::string toString() const;
+
+ // Owned here. If 'solnType' is USE_INDEX_TAGS_SOLN, then 'tree'
+ // can be used to tag an isomorphic match expression. If 'solnType'
+ // is WHOLE_IXSCAN_SOLN, then 'tree' is used to store the relevant IndexEntry.
+ // If 'solnType' is COLLSCAN_SOLN, then 'tree' should be NULL.
+ std::unique_ptr<PlanCacheIndexTree> tree;
+
+ enum SolutionType {
+ // Indicates that the plan should use
+ // the index as a proxy for a collection
+ // scan (e.g. using index to provide sort).
+ WHOLE_IXSCAN_SOLN,
+
+ // The cached plan is a collection scan.
+ COLLSCAN_SOLN,
+
+ // Build the solution by using 'tree'
+ // to tag the match expression.
+ USE_INDEX_TAGS_SOLN
+ } solnType;
+
+ // The direction of the index scan used as
+ // a proxy for a collection scan. Used only
+ // for WHOLE_IXSCAN_SOLN.
+ int wholeIXSolnDir;
+
+ // True if index filter was applied.
+ bool indexFilterApplied;
+};
+
+class PlanCacheEntry;
+
+/**
+ * Information returned from a get(...) query.
+ */
+class CachedSolution {
+private:
+ MONGO_DISALLOW_COPYING(CachedSolution);
- /**
- * Make a deep copy.
- */
- PlanCacheIndexTree* clone() const;
+public:
+ CachedSolution(const PlanCacheKey& key, const PlanCacheEntry& entry);
+ ~CachedSolution();
- /**
- * For debugging.
- */
- std::string toString(int indents = 0) const;
+ // Owned here.
+ std::vector<SolutionCacheData*> plannerData;
- // Children owned here.
- std::vector<PlanCacheIndexTree*> children;
+ // Key used to provide feedback on the entry.
+ PlanCacheKey key;
- // Owned here.
- std::unique_ptr<IndexEntry> entry;
+ // For debugging.
+ std::string toString() const;
- size_t index_pos;
- };
+ // We are extracting just enough information from the canonical
+ // query. We could clone the canonical query but the following
+ // items are all that is displayed to the user.
+ BSONObj query;
+ BSONObj sort;
+ BSONObj projection;
+ // The number of work cycles taken to decide on a winning plan when the plan was first
+ // cached.
+ size_t decisionWorks;
+};
+
+/**
+ * Used by the cache to track entries and their performance over time.
+ * Also used by the plan cache commands to display plan cache state.
+ */
+class PlanCacheEntry {
+private:
+ MONGO_DISALLOW_COPYING(PlanCacheEntry);
+
+public:
/**
- * Data stored inside a QuerySolution which can subsequently be
- * used to create a cache entry. When this data is retrieved
- * from the cache, it is sufficient to reconstruct the original
- * QuerySolution.
+ * Create a new PlanCacheEntry.
+ * Grabs any planner-specific data required from the solutions.
+ * Takes ownership of the PlanRankingDecision that placed the plan in the cache.
*/
- struct SolutionCacheData {
- SolutionCacheData() :
- tree(nullptr),
- solnType(USE_INDEX_TAGS_SOLN),
- wholeIXSolnDir(1),
- indexFilterApplied(false) {
- }
+ PlanCacheEntry(const std::vector<QuerySolution*>& solutions, PlanRankingDecision* why);
+
+ ~PlanCacheEntry();
+
+ /**
+ * Make a deep copy.
+ */
+ PlanCacheEntry* clone() const;
+
+ // For debugging.
+ std::string toString() const;
+
+ //
+ // Planner data
+ //
+
+ // Data provided to the planner to allow it to recreate the solutions this entry
+ // represents. Each SolutionCacheData is fully owned here, so in order to return
+ // it from the cache a deep copy is made and returned inside CachedSolution.
+ std::vector<SolutionCacheData*> plannerData;
+
+ // TODO: Do we really want to just hold a copy of the CanonicalQuery? For now we just
+ // extract the data we need.
+ //
+ // Used by the plan cache commands to display an example query
+ // of the appropriate shape.
+ BSONObj query;
+ BSONObj sort;
+ BSONObj projection;
+
+ //
+ // Performance stats
+ //
+
+ // Information that went into picking the winning plan and also why
+ // the other plans lost.
+ std::unique_ptr<PlanRankingDecision> decision;
+
+ // Annotations from cached runs. The CachedPlanStage provides these stats about its
+ // runs when they complete.
+ std::vector<PlanCacheEntryFeedback*> feedback;
+};
+
+/**
+ * Caches the best solution to a query. Aside from the (CanonicalQuery -> QuerySolution)
+ * mapping, the cache contains information on why that mapping was made and statistics on the
+ * cache entry's actual performance on subsequent runs.
+ *
+ */
+class PlanCache {
+private:
+ MONGO_DISALLOW_COPYING(PlanCache);
+
+public:
+ /**
+ * We don't want to cache every possible query. This function
+ * encapsulates the criteria for what makes a canonical query
+ * suitable for lookup/inclusion in the cache.
+ */
+ static bool shouldCacheQuery(const CanonicalQuery& query);
+
+ /**
+ * If the namespace is omitted, it is set to the empty string.
+ */
+ PlanCache();
+
+ PlanCache(const std::string& ns);
- // Make a deep copy.
- SolutionCacheData* clone() const;
+ ~PlanCache();
+
+ /**
+ * Record solutions for query. Best plan is first element in list.
+ * Each query in the cache will have more than 1 plan because we only
+ * add queries which are considered by the multi plan runner (which happens
+ * only when the query planner generates multiple candidate plans).
+ *
+ * Takes ownership of 'why'.
+ *
+ * If the mapping was added successfully, returns Status::OK().
+ * If the mapping already existed or some other error occurred, returns another Status.
+ */
+ Status add(const CanonicalQuery& query,
+ const std::vector<QuerySolution*>& solns,
+ PlanRankingDecision* why);
- // For debugging.
- std::string toString() const;
+ /**
+ * Look up the cached data access for the provided 'query'. Used by the query planner
+ * to shortcut planning.
+ *
+ * If there is no entry in the cache for the 'query', returns an error Status.
+ *
+ * If there is an entry in the cache, populates 'crOut' and returns Status::OK(). Caller
+ * owns '*crOut'.
+ */
+ Status get(const CanonicalQuery& query, CachedSolution** crOut) const;
- // Owned here. If 'wholeIXSoln' is false, then 'tree'
- // can be used to tag an isomorphic match expression. If 'wholeIXSoln'
- // is true, then 'tree' is used to store the relevant IndexEntry.
- // If 'collscanSoln' is true, then 'tree' should be NULL.
- std::unique_ptr<PlanCacheIndexTree> tree;
+ /**
+ * When the CachedPlanStage runs a plan out of the cache, we want to record data about the
+ * plan's performance. The CachedPlanStage calls feedback(...) after executing the cached
+ * plan for a trial period in order to do this.
+ *
+ * Cache takes ownership of 'feedback'.
+ *
+ * If the entry corresponding to 'cq' isn't in the cache anymore, the feedback is ignored
+ * and an error Status is returned.
+ *
+ * If the entry corresponding to 'cq' still exists, 'feedback' is added to the run
+ * statistics about the plan. Status::OK() is returned.
+ */
+ Status feedback(const CanonicalQuery& cq, PlanCacheEntryFeedback* feedback);
- enum SolutionType {
- // Indicates that the plan should use
- // the index as a proxy for a collection
- // scan (e.g. using index to provide sort).
- WHOLE_IXSCAN_SOLN,
+ /**
+ * Remove the entry corresponding to 'canonicalQuery' from the cache. Returns Status::OK() if the plan
+ * was present and removed and an error status otherwise.
+ */
+ Status remove(const CanonicalQuery& canonicalQuery);
- // The cached plan is a collection scan.
- COLLSCAN_SOLN,
+ /**
+ * Remove *all* cached plans. Does not clear index information.
+ */
+ void clear();
- // Build the solution by using 'tree'
- // to tag the match expression.
- USE_INDEX_TAGS_SOLN
- } solnType;
+ /**
+ * Get the cache key corresponding to the given canonical query. The query need not already
+ * be cached.
+ *
+ * This is provided in the public API simply as a convenience for consumers who need some
+ * description of query shape (e.g. index filters).
+ *
+ * Callers must hold the collection lock when calling this method.
+ */
+ PlanCacheKey computeKey(const CanonicalQuery&) const;
- // The direction of the index scan used as
- // a proxy for a collection scan. Used only
- // for WHOLE_IXSCAN_SOLN.
- int wholeIXSolnDir;
+ /**
+ * Returns a copy of a cache entry.
+ * Used by planCacheListPlans to display plan details.
+ *
+ * If there is no entry in the cache for the 'query', returns an error Status.
+ *
+ * If there is an entry in the cache, populates 'entryOut' and returns Status::OK(). Caller
+ * owns '*entryOut'.
+ */
+ Status getEntry(const CanonicalQuery& cq, PlanCacheEntry** entryOut) const;
- // True if index filter was applied.
- bool indexFilterApplied;
- };
+ /**
+ * Returns a vector of all cache entries.
+ * Caller owns the result vector and is responsible for cleaning up
+ * the cache entry copies.
+ * Used by planCacheListQueryShapes and index_filter_commands_test.cpp.
+ */
+ std::vector<PlanCacheEntry*> getAllEntries() const;
- class PlanCacheEntry;
+ /**
+ * Returns true if there is an entry in the cache for the 'query'.
+ * Internally calls hasKey() on the LRU cache.
+ */
+ bool contains(const CanonicalQuery& cq) const;
/**
- * Information returned from a get(...) query.
+ * Returns number of entries in cache.
+ * Used for testing.
*/
- class CachedSolution {
- private:
- MONGO_DISALLOW_COPYING(CachedSolution);
- public:
- CachedSolution(const PlanCacheKey& key, const PlanCacheEntry& entry);
- ~CachedSolution();
-
- // Owned here.
- std::vector<SolutionCacheData*> plannerData;
-
- // Key used to provide feedback on the entry.
- PlanCacheKey key;
-
- // For debugging.
- std::string toString() const;
-
- // We are extracting just enough information from the canonical
- // query. We could clone the canonical query but the following
- // items are all that is displayed to the user.
- BSONObj query;
- BSONObj sort;
- BSONObj projection;
-
- // The number of work cycles taken to decide on a winning plan when the plan was first
- // cached.
- size_t decisionWorks;
- };
+ size_t size() const;
/**
- * Used by the cache to track entries and their performance over time.
- * Also used by the plan cache commands to display plan cache state.
+ * Callers must notify the cache of write operations, since writes change which plans are useful.
+ * The cache is flushed after every internalQueryCacheWriteOpsBetweenFlush (default 1000) notifications.
*/
- class PlanCacheEntry {
- private:
- MONGO_DISALLOW_COPYING(PlanCacheEntry);
- public:
- /**
- * Create a new PlanCacheEntry.
- * Grabs any planner-specific data required from the solutions.
- * Takes ownership of the PlanRankingDecision that placed the plan in the cache.
- */
- PlanCacheEntry(const std::vector<QuerySolution*>& solutions,
- PlanRankingDecision* why);
-
- ~PlanCacheEntry();
-
- /**
- * Make a deep copy.
- */
- PlanCacheEntry* clone() const;
-
- // For debugging.
- std::string toString() const;
-
- //
- // Planner data
- //
-
- // Data provided to the planner to allow it to recreate the solutions this entry
- // represents. Each SolutionCacheData is fully owned here, so in order to return
- // it from the cache a deep copy is made and returned inside CachedSolution.
- std::vector<SolutionCacheData*> plannerData;
-
- // TODO: Do we really want to just hold a copy of the CanonicalQuery? For now we just
- // extract the data we need.
- //
- // Used by the plan cache commands to display an example query
- // of the appropriate shape.
- BSONObj query;
- BSONObj sort;
- BSONObj projection;
-
- //
- // Performance stats
- //
-
- // Information that went into picking the winning plan and also why
- // the other plans lost.
- std::unique_ptr<PlanRankingDecision> decision;
-
- // Annotations from cached runs. The CachedPlanStage provides these stats about its
- // runs when they complete.
- std::vector<PlanCacheEntryFeedback*> feedback;
- };
+ void notifyOfWriteOp();
/**
- * Caches the best solution to a query. Aside from the (CanonicalQuery -> QuerySolution)
- * mapping, the cache contains information on why that mapping was made and statistics on the
- * cache entry's actual performance on subsequent runs.
+ * Updates internal state kept about the collection's indexes. Must be called when the set
+ * of indexes on the associated collection has changed.
*
+ * Callers must hold the collection lock in exclusive mode when calling this method.
*/
- class PlanCache {
- private:
- MONGO_DISALLOW_COPYING(PlanCache);
- public:
- /**
- * We don't want to cache every possible query. This function
- * encapsulates the criteria for what makes a canonical query
- * suitable for lookup/inclusion in the cache.
- */
- static bool shouldCacheQuery(const CanonicalQuery& query);
-
- /**
- * If omitted, namespace set to empty string.
- */
- PlanCache();
-
- PlanCache(const std::string& ns);
-
- ~PlanCache();
-
- /**
- * Record solutions for query. Best plan is first element in list.
- * Each query in the cache will have more than 1 plan because we only
- * add queries which are considered by the multi plan runner (which happens
- * only when the query planner generates multiple candidate plans).
- *
- * Takes ownership of 'why'.
- *
- * If the mapping was added successfully, returns Status::OK().
- * If the mapping already existed or some other error occurred, returns another Status.
- */
- Status add(const CanonicalQuery& query,
- const std::vector<QuerySolution*>& solns,
- PlanRankingDecision* why);
-
- /**
- * Look up the cached data access for the provided 'query'. Used by the query planner
- * to shortcut planning.
- *
- * If there is no entry in the cache for the 'query', returns an error Status.
- *
- * If there is an entry in the cache, populates 'crOut' and returns Status::OK(). Caller
- * owns '*crOut'.
- */
- Status get(const CanonicalQuery& query, CachedSolution** crOut) const;
-
- /**
- * When the CachedPlanStage runs a plan out of the cache, we want to record data about the
- * plan's performance. The CachedPlanStage calls feedback(...) after executing the cached
- * plan for a trial period in order to do this.
- *
- * Cache takes ownership of 'feedback'.
- *
- * If the entry corresponding to 'cq' isn't in the cache anymore, the feedback is ignored
- * and an error Status is returned.
- *
- * If the entry corresponding to 'cq' still exists, 'feedback' is added to the run
- * statistics about the plan. Status::OK() is returned.
- */
- Status feedback(const CanonicalQuery& cq, PlanCacheEntryFeedback* feedback);
-
- /**
- * Remove the entry corresponding to 'ck' from the cache. Returns Status::OK() if the plan
- * was present and removed and an error status otherwise.
- */
- Status remove(const CanonicalQuery& canonicalQuery);
-
- /**
- * Remove *all* cached plans. Does not clear index information.
- */
- void clear();
-
- /**
- * Get the cache key corresponding to the given canonical query. The query need not already
- * be cached.
- *
- * This is provided in the public API simply as a convenience for consumers who need some
- * description of query shape (e.g. index filters).
- *
- * Callers must hold the collection lock when calling this method.
- */
- PlanCacheKey computeKey(const CanonicalQuery&) const;
-
- /**
- * Returns a copy of a cache entry.
- * Used by planCacheListPlans to display plan details.
- *
- * If there is no entry in the cache for the 'query', returns an error Status.
- *
- * If there is an entry in the cache, populates 'entryOut' and returns Status::OK(). Caller
- * owns '*entryOut'.
- */
- Status getEntry(const CanonicalQuery& cq, PlanCacheEntry** entryOut) const;
-
- /**
- * Returns a vector of all cache entries.
- * Caller owns the result vector and is responsible for cleaning up
- * the cache entry copies.
- * Used by planCacheListQueryShapes and index_filter_commands_test.cpp.
- */
- std::vector<PlanCacheEntry*> getAllEntries() const;
-
- /**
- * Returns true if there is an entry in the cache for the 'query'.
- * Internally calls hasKey() on the LRU cache.
- */
- bool contains(const CanonicalQuery& cq) const;
-
- /**
- * Returns number of entries in cache.
- * Used for testing.
- */
- size_t size() const;
-
- /**
- * You must notify the cache if you are doing writes, as query plan utility will change.
- * Cache is flushed after every 1000 notifications.
- */
- void notifyOfWriteOp();
-
- /**
- * Updates internal state kept about the collection's indexes. Must be called when the set
- * of indexes on the associated collection have changed.
- *
- * Callers must hold the collection lock in exclusive mode when calling this method.
- */
- void notifyOfIndexEntries(const std::vector<IndexEntry>& indexEntries);
-
- private:
- void encodeKeyForMatch(const MatchExpression* tree, StringBuilder* keyBuilder) const;
- void encodeKeyForSort(const BSONObj& sortObj, StringBuilder* keyBuilder) const;
- void encodeKeyForProj(const BSONObj& projObj, StringBuilder* keyBuilder) const;
-
- LRUKeyValue<PlanCacheKey, PlanCacheEntry> _cache;
-
- // Protects _cache.
- mutable stdx::mutex _cacheMutex;
-
- // Counter for write notifications since initialization or last clear() invocation. Starts
- // at 0.
- AtomicInt32 _writeOperations;
-
- // Full namespace of collection.
- std::string _ns;
-
- // Holds computed information about the collection's indexes. Used for generating plan
- // cache keys.
- //
- // Concurrent access is synchronized by the collection lock. Multiple concurrent readers
- // are allowed.
- PlanCacheIndexabilityState _indexabilityState;
- };
+ void notifyOfIndexEntries(const std::vector<IndexEntry>& indexEntries);
+
+private:
+ void encodeKeyForMatch(const MatchExpression* tree, StringBuilder* keyBuilder) const;
+ void encodeKeyForSort(const BSONObj& sortObj, StringBuilder* keyBuilder) const;
+ void encodeKeyForProj(const BSONObj& projObj, StringBuilder* keyBuilder) const;
+
+ LRUKeyValue<PlanCacheKey, PlanCacheEntry> _cache;
+
+ // Protects _cache.
+ mutable stdx::mutex _cacheMutex;
+
+ // Counter for write notifications since initialization or last clear() invocation. Starts
+ // at 0.
+ AtomicInt32 _writeOperations;
+
+ // Full namespace of collection.
+ std::string _ns;
+
+ // Holds computed information about the collection's indexes. Used for generating plan
+ // cache keys.
+ //
+ // Concurrent access is synchronized by the collection lock. Multiple concurrent readers
+ // are allowed.
+ PlanCacheIndexabilityState _indexabilityState;
+};
} // namespace mongo
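
[Editorial note, not part of the patch] A hedged caller-side sketch of the ownership contract spelled out in the header comments above: add() takes ownership of 'why', get() hands back a caller-owned copy, and feedback() takes ownership of its argument. The function name cacheAndReuse and its parameters are hypothetical stand-ins for objects the query planner and CachedPlanStage normally supply:

    // Illustrative sketch only; assumes the declarations from plan_cache.h.
    void cacheAndReuse(PlanCache* planCache,
                       const CanonicalQuery& cq,
                       const std::vector<QuerySolution*>& solutions,
                       PlanRankingDecision* why) {
        if (!PlanCache::shouldCacheQuery(cq)) {
            return;  // e.g. hinted, min/max and explain queries are never cached
        }

        // add() takes ownership of 'why'; an empty 'solutions' vector is rejected.
        if (!planCache->add(cq, solutions, why).isOK()) {
            return;
        }

        // get() populates a copy of the cached planner data that the caller owns.
        CachedSolution* rawCachedSolution;
        if (planCache->get(cq, &rawCachedSolution).isOK()) {
            std::unique_ptr<CachedSolution> cachedSolution(rawCachedSolution);
            // ... rebuild a QuerySolution from cachedSolution->plannerData ...
        }

        // After a trial run, CachedPlanStage would call planCache->feedback(cq, fb),
        // transferring ownership of the PlanCacheEntryFeedback to the cache.
    }
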
diff --git a/src/mongo/db/query/plan_cache_indexability.cpp b/src/mongo/db/query/plan_cache_indexability.cpp
index 24e60bbd7b7..a6e0fa32e3f 100644
--- a/src/mongo/db/query/plan_cache_indexability.cpp
+++ b/src/mongo/db/query/plan_cache_indexability.cpp
@@ -41,67 +41,61 @@
namespace mongo {
- void PlanCacheIndexabilityState::processSparseIndex(const BSONObj& keyPattern) {
- for (BSONElement elem : keyPattern) {
- _pathDiscriminatorsMap[elem.fieldNameStringData()].push_back(
- [] (const MatchExpression* queryExpr) {
- if (queryExpr->matchType() == MatchExpression::EQ) {
- const auto* queryExprEquality =
- static_cast<const EqualityMatchExpression*>(queryExpr);
- return !queryExprEquality->getData().isNull();
- }
- else if (queryExpr->matchType() == MatchExpression::MATCH_IN) {
- const auto* queryExprIn =
- static_cast<const InMatchExpression*>(queryExpr);
- return !queryExprIn->getData().hasNull();
- }
- else {
- return true;
- }
+void PlanCacheIndexabilityState::processSparseIndex(const BSONObj& keyPattern) {
+ for (BSONElement elem : keyPattern) {
+ _pathDiscriminatorsMap[elem.fieldNameStringData()].push_back(
+ [](const MatchExpression* queryExpr) {
+ if (queryExpr->matchType() == MatchExpression::EQ) {
+ const auto* queryExprEquality =
+ static_cast<const EqualityMatchExpression*>(queryExpr);
+ return !queryExprEquality->getData().isNull();
+ } else if (queryExpr->matchType() == MatchExpression::MATCH_IN) {
+ const auto* queryExprIn = static_cast<const InMatchExpression*>(queryExpr);
+ return !queryExprIn->getData().hasNull();
+ } else {
+ return true;
}
- );
- }
+ });
}
+}
- void PlanCacheIndexabilityState::processPartialIndex(const MatchExpression* filterExpr) {
- invariant(filterExpr);
- for (size_t i = 0; i < filterExpr->numChildren(); ++i) {
- processPartialIndex(filterExpr->getChild(i));
- }
- if (!filterExpr->isLogical()) {
- _pathDiscriminatorsMap[filterExpr->path()].push_back(
- [filterExpr] (const MatchExpression* queryExpr) {
- return expression::isSubsetOf(queryExpr, filterExpr);
- }
- );
- }
+void PlanCacheIndexabilityState::processPartialIndex(const MatchExpression* filterExpr) {
+ invariant(filterExpr);
+ for (size_t i = 0; i < filterExpr->numChildren(); ++i) {
+ processPartialIndex(filterExpr->getChild(i));
}
+ if (!filterExpr->isLogical()) {
+ _pathDiscriminatorsMap[filterExpr->path()].push_back(
+ [filterExpr](const MatchExpression* queryExpr) {
+ return expression::isSubsetOf(queryExpr, filterExpr);
+ });
+ }
+}
namespace {
- const IndexabilityDiscriminators emptyDiscriminators;
+const IndexabilityDiscriminators emptyDiscriminators;
} // namespace
- const IndexabilityDiscriminators& PlanCacheIndexabilityState::getDiscriminators(
- StringData path) const {
- PathDiscriminatorsMap::const_iterator it = _pathDiscriminatorsMap.find(path);
- if (it == _pathDiscriminatorsMap.end()) {
- return emptyDiscriminators;
- }
- return it->second;
+const IndexabilityDiscriminators& PlanCacheIndexabilityState::getDiscriminators(
+ StringData path) const {
+ PathDiscriminatorsMap::const_iterator it = _pathDiscriminatorsMap.find(path);
+ if (it == _pathDiscriminatorsMap.end()) {
+ return emptyDiscriminators;
}
+ return it->second;
+}
- void PlanCacheIndexabilityState::updateDiscriminators(
- const std::vector<IndexEntry>& indexEntries) {
- _pathDiscriminatorsMap = PathDiscriminatorsMap();
+void PlanCacheIndexabilityState::updateDiscriminators(const std::vector<IndexEntry>& indexEntries) {
+ _pathDiscriminatorsMap = PathDiscriminatorsMap();
- for (const IndexEntry& idx : indexEntries) {
- if (idx.sparse) {
- processSparseIndex(idx.keyPattern);
- }
- if (idx.filterExpr) {
- processPartialIndex(idx.filterExpr);
- }
+ for (const IndexEntry& idx : indexEntries) {
+ if (idx.sparse) {
+ processSparseIndex(idx.keyPattern);
+ }
+ if (idx.filterExpr) {
+ processPartialIndex(idx.filterExpr);
}
}
+}
} // namespace mongo
diff --git a/src/mongo/db/query/plan_cache_indexability.h b/src/mongo/db/query/plan_cache_indexability.h
index 6d4d4b30012..03278b06929 100644
--- a/src/mongo/db/query/plan_cache_indexability.h
+++ b/src/mongo/db/query/plan_cache_indexability.h
@@ -36,64 +36,65 @@
namespace mongo {
- class BSONObj;
- class MatchExpression;
- struct IndexEntry;
+class BSONObj;
+class MatchExpression;
+struct IndexEntry;
- using IndexabilityDiscriminator = stdx::function<bool(const MatchExpression* me)>;
- using IndexabilityDiscriminators = std::vector<IndexabilityDiscriminator>;
+using IndexabilityDiscriminator = stdx::function<bool(const MatchExpression* me)>;
+using IndexabilityDiscriminators = std::vector<IndexabilityDiscriminator>;
+
+/**
+ * PlanCacheIndexabilityState holds a set of "indexability discriminators" for certain paths.
+ * An indexability discriminator is a binary predicate function, used to classify match
+ * expressions based on the data values in the expression.
+ */
+class PlanCacheIndexabilityState {
+ MONGO_DISALLOW_COPYING(PlanCacheIndexabilityState);
+
+public:
+ PlanCacheIndexabilityState() = default;
/**
- * PlanCacheIndexabilityState holds a set of "indexability discriminators" for certain paths.
- * An indexability discriminator is a binary predicate function, used to classify match
- * expressions based on the data values in the expression.
+ * Gets the set of discriminators associated with 'path'. Returns an empty set if no
+ * discriminators are registered for 'path'.
+ *
+ * The object returned by reference is valid until the next call to updateDiscriminators()
+ * or until destruction of 'this', whichever is first.
*/
- class PlanCacheIndexabilityState {
- MONGO_DISALLOW_COPYING(PlanCacheIndexabilityState);
- public:
- PlanCacheIndexabilityState() = default;
-
- /**
- * Gets the set of discriminators associated with 'path'. Returns an empty set if no
- * discriminators are registered for 'path'.
- *
- * The object returned by reference is valid until the next call to updateDiscriminators()
- * or until destruction of 'this', whichever is first.
- */
- const IndexabilityDiscriminators& getDiscriminators(StringData path) const;
+ const IndexabilityDiscriminators& getDiscriminators(StringData path) const;
- /**
- * Clears discriminators for all paths, and regenerate them from 'indexEntries'.
- */
- void updateDiscriminators(const std::vector<IndexEntry>& indexEntries);
+ /**
+ * Clears discriminators for all paths and regenerates them from 'indexEntries'.
+ */
+ void updateDiscriminators(const std::vector<IndexEntry>& indexEntries);
- private:
- /**
- * Adds sparse index discriminators for the sparse index with the given key pattern to
- * '_pathDiscriminatorsMap'.
- *
- * A sparse index discriminator distinguishes equality matches to null from other expression
- * types. For example, this allows the predicate {a: 1} to be considered of a different
- * shape from the predicate {a: null}, if there is a sparse index defined with "a" as an
- * element of the key pattern. The former predicate is compatibile with this index, but the
- * latter is not compatible.
- */
- void processSparseIndex(const BSONObj& keyPattern);
+private:
+ /**
+ * Adds sparse index discriminators for the sparse index with the given key pattern to
+ * '_pathDiscriminatorsMap'.
+ *
+ * A sparse index discriminator distinguishes equality matches to null from other expression
+ * types. For example, this allows the predicate {a: 1} to be considered of a different
+ * shape from the predicate {a: null}, if there is a sparse index defined with "a" as an
+ * element of the key pattern. The former predicate is compatible with this index, but the
+ * latter is not compatible.
+ */
+ void processSparseIndex(const BSONObj& keyPattern);
- /**
- * Adds partial index discriminators for the partial index with the given filter expression
- * to '_pathDiscriminatorsMap'.
- *
- * A partial index discriminator distinguishes expressions that match a given partial index
- * predicate from expressions that don't match the partial index predicate. For example,
- * this allows the predicate {a: {$gt: 5}} to be considered a different shape than the
- * predicate {a: {$gt: -5}}, if there is a partial index defined with document filter {a:
- * {$gt: 0}}. The former is compatible with this index, but the latter is not compatible.
- */
- void processPartialIndex(const MatchExpression* filterExpr);
+ /**
+ * Adds partial index discriminators for the partial index with the given filter expression
+ * to '_pathDiscriminatorsMap'.
+ *
+ * A partial index discriminator distinguishes expressions that match a given partial index
+ * predicate from expressions that don't match the partial index predicate. For example,
+ * this allows the predicate {a: {$gt: 5}} to be considered a different shape than the
+ * predicate {a: {$gt: -5}}, if there is a partial index defined with document filter {a:
+ * {$gt: 0}}. The former is compatible with this index, but the latter is not compatible.
+ */
+ void processPartialIndex(const MatchExpression* filterExpr);
- using PathDiscriminatorsMap = StringMap<IndexabilityDiscriminators>;
- PathDiscriminatorsMap _pathDiscriminatorsMap;
- };
+ using PathDiscriminatorsMap = StringMap<IndexabilityDiscriminators>;
+ PathDiscriminatorsMap _pathDiscriminatorsMap;
+};
} // namespace mongo
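
[Editorial note, not part of the patch] To make the prose above concrete: an indexability discriminator is nothing more than a predicate over a MatchExpression. The toy lambda below mirrors the equality branch of processSparseIndex() in plan_cache_indexability.cpp; the tests in the next hunk exercise the real discriminators end to end.

    // Toy discriminator for a sparse index on "a": equality to a non-null value
    // is compatible with the index, equality to null is not, so the two
    // predicates should map to different plan cache shapes.
    IndexabilityDiscriminator sparseEqualityDiscriminator =
        [](const MatchExpression* queryExpr) {
            if (queryExpr->matchType() == MatchExpression::EQ) {
                const auto* eq = static_cast<const EqualityMatchExpression*>(queryExpr);
                return !eq->getData().isNull();  // {a: null} is classified differently
            }
            return true;  // other predicate types are left alone by this discriminator
        };
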
diff --git a/src/mongo/db/query/plan_cache_indexability_test.cpp b/src/mongo/db/query/plan_cache_indexability_test.cpp
index 01abda9e525..a5a0ca38e89 100644
--- a/src/mongo/db/query/plan_cache_indexability_test.cpp
+++ b/src/mongo/db/query/plan_cache_indexability_test.cpp
@@ -34,181 +34,179 @@
namespace mongo {
namespace {
- std::unique_ptr<MatchExpression> parseMatchExpression(const BSONObj& obj) {
- StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
- if (!status.isOK()) {
- FAIL(str::stream() << "failed to parse query: " << obj.toString()
- << ". Reason: " << status.getStatus().toString());
- }
- return std::unique_ptr<MatchExpression>(status.getValue());
+std::unique_ptr<MatchExpression> parseMatchExpression(const BSONObj& obj) {
+ StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
+ if (!status.isOK()) {
+ FAIL(str::stream() << "failed to parse query: " << obj.toString()
+ << ". Reason: " << status.getStatus().toString());
}
-
- // Test sparse index discriminators for a simple sparse index.
- TEST(PlanCacheIndexabilityTest, SparseIndexSimple) {
- PlanCacheIndexabilityState state;
- state.updateDiscriminators({IndexEntry(BSON("a" << 1),
- false, // multikey
- true, // sparse
- false, // unique
- "", // name
- nullptr, // filterExpr
- BSONObj())});
-
+ return std::unique_ptr<MatchExpression>(status.getValue());
+}
+
+// Test sparse index discriminators for a simple sparse index.
+TEST(PlanCacheIndexabilityTest, SparseIndexSimple) {
+ PlanCacheIndexabilityState state;
+ state.updateDiscriminators({IndexEntry(BSON("a" << 1),
+ false, // multikey
+ true, // sparse
+ false, // unique
+ "", // name
+ nullptr, // filterExpr
+ BSONObj())});
+
+ const IndexabilityDiscriminators& discriminators = state.getDiscriminators("a");
+ ASSERT_EQ(1U, discriminators.size());
+
+ const IndexabilityDiscriminator& disc = discriminators[0];
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("a" << 1)).get()));
+ ASSERT_EQ(false, disc(parseMatchExpression(BSON("a" << BSONNULL)).get()));
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("a" << BSON("$in" << BSON_ARRAY(1)))).get()));
+ ASSERT_EQ(false,
+ disc(parseMatchExpression(BSON("a" << BSON("$in" << BSON_ARRAY(BSONNULL)))).get()));
+}
+
+// Test sparse index discriminators for a compound sparse index.
+TEST(PlanCacheIndexabilityTest, SparseIndexCompound) {
+ PlanCacheIndexabilityState state;
+ state.updateDiscriminators({IndexEntry(BSON("a" << 1 << "b" << 1),
+ false, // multikey
+ true, // sparse
+ false, // unique
+ "", // name
+ nullptr, // filterExpr
+ BSONObj())});
+
+ {
const IndexabilityDiscriminators& discriminators = state.getDiscriminators("a");
ASSERT_EQ(1U, discriminators.size());
const IndexabilityDiscriminator& disc = discriminators[0];
ASSERT_EQ(true, disc(parseMatchExpression(BSON("a" << 1)).get()));
ASSERT_EQ(false, disc(parseMatchExpression(BSON("a" << BSONNULL)).get()));
- ASSERT_EQ(true,
- disc(parseMatchExpression(BSON("a" << BSON("$in" << BSON_ARRAY(1)))).get()));
- ASSERT_EQ(false,
- disc(parseMatchExpression(BSON("a" <<
- BSON("$in" << BSON_ARRAY(BSONNULL)))).get()));
}
- // Test sparse index discriminators for a compound sparse index.
- TEST(PlanCacheIndexabilityTest, SparseIndexCompound) {
- PlanCacheIndexabilityState state;
- state.updateDiscriminators({IndexEntry(BSON("a" << 1 << "b" << 1),
- false, // multikey
- true, // sparse
- false, // unique
- "", // name
- nullptr, // filterExpr
- BSONObj())});
-
- {
- const IndexabilityDiscriminators& discriminators = state.getDiscriminators("a");
- ASSERT_EQ(1U, discriminators.size());
-
- const IndexabilityDiscriminator& disc = discriminators[0];
- ASSERT_EQ(true, disc(parseMatchExpression(BSON("a" << 1)).get()));
- ASSERT_EQ(false, disc(parseMatchExpression(BSON("a" << BSONNULL)).get()));
- }
-
- {
- const IndexabilityDiscriminators& discriminators = state.getDiscriminators("b");
- ASSERT_EQ(1U, discriminators.size());
-
- const IndexabilityDiscriminator& disc = discriminators[0];
- ASSERT_EQ(true, disc(parseMatchExpression(BSON("b" << 1)).get()));
- ASSERT_EQ(false, disc(parseMatchExpression(BSON("b" << BSONNULL)).get()));
- }
- }
-
- // Test partial index discriminators for an index with a simple filter.
- TEST(PlanCacheIndexabilityTest, PartialIndexSimple) {
- BSONObj filterObj = BSON("f" << BSON("$gt" << 0));
- std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
- PlanCacheIndexabilityState state;
- state.updateDiscriminators({IndexEntry(BSON("a" << 1),
- false, // multikey
- false, // sparse
- false, // unique
- "", // name
- filterExpr.get(),
- BSONObj())});
-
- const IndexabilityDiscriminators& discriminators = state.getDiscriminators("f");
+ {
+ const IndexabilityDiscriminators& discriminators = state.getDiscriminators("b");
ASSERT_EQ(1U, discriminators.size());
const IndexabilityDiscriminator& disc = discriminators[0];
- ASSERT_EQ(false, disc(parseMatchExpression(BSON("f" << BSON("$gt" << -5))).get()));
- ASSERT_EQ(true, disc(parseMatchExpression(BSON("f" << BSON("$gt" << 5))).get()));
-
- ASSERT(state.getDiscriminators("a").empty());
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("b" << 1)).get()));
+ ASSERT_EQ(false, disc(parseMatchExpression(BSON("b" << BSONNULL)).get()));
}
-
- // Test partial index discriminators for an index where the filter expression is an AND.
- TEST(PlanCacheIndexabilityTest, PartialIndexAnd) {
- BSONObj filterObj = BSON("f" << 1 << "g" << 1);
- std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
- PlanCacheIndexabilityState state;
- state.updateDiscriminators({IndexEntry(BSON("a" << 1),
- false, // multikey
- false, // sparse
- false, // unique
- "", // name
- filterExpr.get(),
- BSONObj())});
-
- {
- const IndexabilityDiscriminators& discriminators = state.getDiscriminators("f");
- ASSERT_EQ(1U, discriminators.size());
-
- const IndexabilityDiscriminator& disc = discriminators[0];
- ASSERT_EQ(false, disc(parseMatchExpression(BSON("f" << 0)).get()));
- ASSERT_EQ(true, disc(parseMatchExpression(BSON("f" << 1)).get()));
- }
-
- {
- const IndexabilityDiscriminators& discriminators = state.getDiscriminators("g");
- ASSERT_EQ(1U, discriminators.size());
-
- const IndexabilityDiscriminator& disc = discriminators[0];
- ASSERT_EQ(false, disc(parseMatchExpression(BSON("g" << 0)).get()));
- ASSERT_EQ(true, disc(parseMatchExpression(BSON("g" << 1)).get()));
- }
-
- ASSERT(state.getDiscriminators("a").empty());
- }
-
- // Test partial index discriminators where there are multiple partial indexes.
- TEST(PlanCacheIndexabilityTest, MultiplePartialIndexes) {
- BSONObj filterObj1 = BSON("f" << 1);
- std::unique_ptr<MatchExpression> filterExpr1(parseMatchExpression(filterObj1));
-
- BSONObj filterObj2 = BSON("f" << 2);
- std::unique_ptr<MatchExpression> filterExpr2(parseMatchExpression(filterObj2));
-
- PlanCacheIndexabilityState state;
- state.updateDiscriminators({IndexEntry(BSON("a" << 1),
- false, // multikey
- false, // sparse
- false, // unique
- "", // name
- filterExpr1.get(),
- BSONObj()),
- IndexEntry(BSON("b" << 1),
- false, // multikey
- false, // sparse
- false, // unique
- "", // name
- filterExpr2.get(),
- BSONObj())});
-
+}
+
+// Test partial index discriminators for an index with a simple filter.
+TEST(PlanCacheIndexabilityTest, PartialIndexSimple) {
+ BSONObj filterObj = BSON("f" << BSON("$gt" << 0));
+ std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
+ PlanCacheIndexabilityState state;
+ state.updateDiscriminators({IndexEntry(BSON("a" << 1),
+ false, // multikey
+ false, // sparse
+ false, // unique
+ "", // name
+ filterExpr.get(),
+ BSONObj())});
+
+ const IndexabilityDiscriminators& discriminators = state.getDiscriminators("f");
+ ASSERT_EQ(1U, discriminators.size());
+
+ const IndexabilityDiscriminator& disc = discriminators[0];
+ ASSERT_EQ(false, disc(parseMatchExpression(BSON("f" << BSON("$gt" << -5))).get()));
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("f" << BSON("$gt" << 5))).get()));
+
+ ASSERT(state.getDiscriminators("a").empty());
+}
+
+// Test partial index discriminators for an index where the filter expression is an AND.
+TEST(PlanCacheIndexabilityTest, PartialIndexAnd) {
+ BSONObj filterObj = BSON("f" << 1 << "g" << 1);
+ std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
+ PlanCacheIndexabilityState state;
+ state.updateDiscriminators({IndexEntry(BSON("a" << 1),
+ false, // multikey
+ false, // sparse
+ false, // unique
+ "", // name
+ filterExpr.get(),
+ BSONObj())});
+
+ {
const IndexabilityDiscriminators& discriminators = state.getDiscriminators("f");
- ASSERT_EQ(2U, discriminators.size());
-
- const IndexabilityDiscriminator& disc1 = discriminators[0];
- const IndexabilityDiscriminator& disc2 = discriminators[1];
-
- ASSERT_EQ(false, disc1(parseMatchExpression(BSON("f" << 0)).get()));
- ASSERT_EQ(false, disc1(parseMatchExpression(BSON("f" << 0)).get()));
+ ASSERT_EQ(1U, discriminators.size());
- ASSERT_NOT_EQUALS(disc1(parseMatchExpression(BSON("f" << 1)).get()),
- disc2(parseMatchExpression(BSON("f" << 1)).get()));
+ const IndexabilityDiscriminator& disc = discriminators[0];
+ ASSERT_EQ(false, disc(parseMatchExpression(BSON("f" << 0)).get()));
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("f" << 1)).get()));
+ }
- ASSERT_NOT_EQUALS(disc1(parseMatchExpression(BSON("f" << 2)).get()),
- disc2(parseMatchExpression(BSON("f" << 2)).get()));
+ {
+ const IndexabilityDiscriminators& discriminators = state.getDiscriminators("g");
+ ASSERT_EQ(1U, discriminators.size());
- ASSERT(state.getDiscriminators("a").empty());
- ASSERT(state.getDiscriminators("b").empty());
+ const IndexabilityDiscriminator& disc = discriminators[0];
+ ASSERT_EQ(false, disc(parseMatchExpression(BSON("g" << 0)).get()));
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("g" << 1)).get()));
}
- // Test that no discriminators are generated for a regular index.
- TEST(PlanCacheIndexabilityTest, IndexNeitherSparseNorPartial) {
- PlanCacheIndexabilityState state;
- state.updateDiscriminators({IndexEntry(BSON("a" << 1),
- false, // multikey
- false, // sparse
- false, // unique
- "", // name
- nullptr,
- BSONObj())});
- ASSERT(state.getDiscriminators("a").empty());
- }
+ ASSERT(state.getDiscriminators("a").empty());
+}
+
+// Test partial index discriminators where there are multiple partial indexes.
+TEST(PlanCacheIndexabilityTest, MultiplePartialIndexes) {
+ BSONObj filterObj1 = BSON("f" << 1);
+ std::unique_ptr<MatchExpression> filterExpr1(parseMatchExpression(filterObj1));
+
+ BSONObj filterObj2 = BSON("f" << 2);
+ std::unique_ptr<MatchExpression> filterExpr2(parseMatchExpression(filterObj2));
+
+ PlanCacheIndexabilityState state;
+ state.updateDiscriminators({IndexEntry(BSON("a" << 1),
+ false, // multikey
+ false, // sparse
+ false, // unique
+ "", // name
+ filterExpr1.get(),
+ BSONObj()),
+ IndexEntry(BSON("b" << 1),
+ false, // multikey
+ false, // sparse
+ false, // unique
+ "", // name
+ filterExpr2.get(),
+ BSONObj())});
+
+ const IndexabilityDiscriminators& discriminators = state.getDiscriminators("f");
+ ASSERT_EQ(2U, discriminators.size());
+
+ const IndexabilityDiscriminator& disc1 = discriminators[0];
+ const IndexabilityDiscriminator& disc2 = discriminators[1];
+
+ ASSERT_EQ(false, disc1(parseMatchExpression(BSON("f" << 0)).get()));
+ ASSERT_EQ(false, disc1(parseMatchExpression(BSON("f" << 0)).get()));
+
+ ASSERT_NOT_EQUALS(disc1(parseMatchExpression(BSON("f" << 1)).get()),
+ disc2(parseMatchExpression(BSON("f" << 1)).get()));
+
+ ASSERT_NOT_EQUALS(disc1(parseMatchExpression(BSON("f" << 2)).get()),
+ disc2(parseMatchExpression(BSON("f" << 2)).get()));
+
+ ASSERT(state.getDiscriminators("a").empty());
+ ASSERT(state.getDiscriminators("b").empty());
+}
+
+// Test that no discriminators are generated for a regular index.
+TEST(PlanCacheIndexabilityTest, IndexNeitherSparseNorPartial) {
+ PlanCacheIndexabilityState state;
+ state.updateDiscriminators({IndexEntry(BSON("a" << 1),
+ false, // multikey
+ false, // sparse
+ false, // unique
+ "", // name
+ nullptr,
+ BSONObj())});
+ ASSERT(state.getDiscriminators("a").empty());
+}
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 78b9bdf959c..da15528d243 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -50,1169 +50,1218 @@ using namespace mongo;
namespace {
- using std::string;
- using std::unique_ptr;
- using std::vector;
+using std::string;
+using std::unique_ptr;
+using std::vector;
- static const char* ns = "somebogusns";
+static const char* ns = "somebogusns";
- /**
- * Utility functions to create a CanonicalQuery
- */
- CanonicalQuery* canonicalize(const BSONObj& queryObj) {
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, &cq);
- ASSERT_OK(result);
- return cq;
- }
-
- CanonicalQuery* canonicalize(const char* queryStr) {
- BSONObj queryObj = fromjson(queryStr);
- return canonicalize(queryObj);
- }
-
- CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr,
- const char* projStr) {
- BSONObj queryObj = fromjson(queryStr);
- BSONObj sortObj = fromjson(sortStr);
- BSONObj projObj = fromjson(projStr);
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj,
- projObj,
- &cq);
- ASSERT_OK(result);
- return cq;
- }
-
- CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr,
- const char* projStr,
- long long skip, long long limit,
- const char* hintStr,
- const char* minStr, const char* maxStr) {
- BSONObj queryObj = fromjson(queryStr);
- BSONObj sortObj = fromjson(sortStr);
- BSONObj projObj = fromjson(projStr);
- BSONObj hintObj = fromjson(hintStr);
- BSONObj minObj = fromjson(minStr);
- BSONObj maxObj = fromjson(maxStr);
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj,
- projObj,
- skip, limit,
- hintObj,
- minObj, maxObj,
- false, // snapshot
- false, // explain
- &cq);
- ASSERT_OK(result);
- return cq;
- }
-
- CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr,
- const char* projStr,
- long long skip, long long limit,
- const char* hintStr,
- const char* minStr, const char* maxStr,
- bool snapshot,
- bool explain) {
- BSONObj queryObj = fromjson(queryStr);
- BSONObj sortObj = fromjson(sortStr);
- BSONObj projObj = fromjson(projStr);
- BSONObj hintObj = fromjson(hintStr);
- BSONObj minObj = fromjson(minStr);
- BSONObj maxObj = fromjson(maxStr);
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj,
- projObj,
- skip, limit,
- hintObj,
- minObj, maxObj,
- snapshot,
- explain,
- &cq);
- ASSERT_OK(result);
- return cq;
- }
-
- /**
- * Utility function to create MatchExpression
- */
- MatchExpression* parseMatchExpression(const BSONObj& obj) {
- StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
- if (!status.isOK()) {
- str::stream ss;
- ss << "failed to parse query: " << obj.toString()
- << ". Reason: " << status.getStatus().toString();
- FAIL(ss);
- }
- MatchExpression* expr(status.getValue());
- return expr;
- }
+/**
+ * Utility functions to create a CanonicalQuery
+ */
+CanonicalQuery* canonicalize(const BSONObj& queryObj) {
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns, queryObj, &cq);
+ ASSERT_OK(result);
+ return cq;
+}
+
+CanonicalQuery* canonicalize(const char* queryStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ return canonicalize(queryObj);
+}
+
+CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr, const char* projStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ BSONObj sortObj = fromjson(sortStr);
+ BSONObj projObj = fromjson(projStr);
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj, projObj, &cq);
+ ASSERT_OK(result);
+ return cq;
+}
+
+CanonicalQuery* canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ long long skip,
+ long long limit,
+ const char* hintStr,
+ const char* minStr,
+ const char* maxStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ BSONObj sortObj = fromjson(sortStr);
+ BSONObj projObj = fromjson(projStr);
+ BSONObj hintObj = fromjson(hintStr);
+ BSONObj minObj = fromjson(minStr);
+ BSONObj maxObj = fromjson(maxStr);
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns,
+ queryObj,
+ sortObj,
+ projObj,
+ skip,
+ limit,
+ hintObj,
+ minObj,
+ maxObj,
+ false, // snapshot
+ false, // explain
+ &cq);
+ ASSERT_OK(result);
+ return cq;
+}
+
+CanonicalQuery* canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ long long skip,
+ long long limit,
+ const char* hintStr,
+ const char* minStr,
+ const char* maxStr,
+ bool snapshot,
+ bool explain) {
+ BSONObj queryObj = fromjson(queryStr);
+ BSONObj sortObj = fromjson(sortStr);
+ BSONObj projObj = fromjson(projStr);
+ BSONObj hintObj = fromjson(hintStr);
+ BSONObj minObj = fromjson(minStr);
+ BSONObj maxObj = fromjson(maxStr);
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns,
+ queryObj,
+ sortObj,
+ projObj,
+ skip,
+ limit,
+ hintObj,
+ minObj,
+ maxObj,
+ snapshot,
+ explain,
+ &cq);
+ ASSERT_OK(result);
+ return cq;
+}
- void assertEquivalent(const char* queryStr, const MatchExpression* expected, const MatchExpression* actual) {
- if (actual->equivalent(expected)) {
- return;
- }
+/**
+ * Utility function to create MatchExpression
+ */
+MatchExpression* parseMatchExpression(const BSONObj& obj) {
+ StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
+ if (!status.isOK()) {
str::stream ss;
- ss << "Match expressions are not equivalent."
- << "\nOriginal query: " << queryStr
- << "\nExpected: " << expected->toString()
- << "\nActual: " << actual->toString();
+ ss << "failed to parse query: " << obj.toString()
+ << ". Reason: " << status.getStatus().toString();
FAIL(ss);
}
-
- //
- // Tests for CachedSolution
- //
-
- /**
- * Generator for vector of QuerySolution shared pointers.
- */
- struct GenerateQuerySolution {
- QuerySolution* operator()() const {
- unique_ptr<QuerySolution> qs(new QuerySolution());
- qs->cacheData.reset(new SolutionCacheData());
- qs->cacheData->solnType = SolutionCacheData::COLLSCAN_SOLN;
- qs->cacheData->tree.reset(new PlanCacheIndexTree());
- return qs.release();
- }
- };
-
- /**
- * Utility function to create a PlanRankingDecision
- */
- PlanRankingDecision* createDecision(size_t numPlans) {
- unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
- for (size_t i = 0; i < numPlans; ++i) {
- CommonStats common("COLLSCAN");
- unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
- stats->specific.reset(new CollectionScanStats());
- why->stats.mutableVector().push_back(stats.release());
- why->scores.push_back(0U);
- why->candidateOrder.push_back(i);
- }
- return why.release();
+ MatchExpression* expr(status.getValue());
+ return expr;
+}
+
+void assertEquivalent(const char* queryStr,
+ const MatchExpression* expected,
+ const MatchExpression* actual) {
+ if (actual->equivalent(expected)) {
+ return;
}
+ str::stream ss;
+ ss << "Match expressions are not equivalent."
+ << "\nOriginal query: " << queryStr << "\nExpected: " << expected->toString()
+ << "\nActual: " << actual->toString();
+ FAIL(ss);
+}
- /**
- * Test functions for shouldCacheQuery
- * Use these functions to assert which categories
- * of canonicalized queries are suitable for inclusion
- * in the planner cache.
- */
- void assertShouldCacheQuery(const CanonicalQuery& query) {
- if (PlanCache::shouldCacheQuery(query)) {
- return;
- }
- str::stream ss;
- ss << "Canonical query should be cacheable: " << query.toString();
- FAIL(ss);
- }
+//
+// Tests for CachedSolution
+//
- void assertShouldNotCacheQuery(const CanonicalQuery& query) {
- if (!PlanCache::shouldCacheQuery(query)) {
- return;
- }
- str::stream ss;
- ss << "Canonical query should not be cacheable: " << query.toString();
- FAIL(ss);
- }
-
- void assertShouldNotCacheQuery(const BSONObj& query) {
- unique_ptr<CanonicalQuery> cq(canonicalize(query));
- assertShouldNotCacheQuery(*cq);
+/**
+ * Generator for vector of QuerySolution shared pointers.
+ */
+struct GenerateQuerySolution {
+ QuerySolution* operator()() const {
+ unique_ptr<QuerySolution> qs(new QuerySolution());
+ qs->cacheData.reset(new SolutionCacheData());
+ qs->cacheData->solnType = SolutionCacheData::COLLSCAN_SOLN;
+ qs->cacheData->tree.reset(new PlanCacheIndexTree());
+ return qs.release();
}
+};
- void assertShouldNotCacheQuery(const char* queryStr) {
- unique_ptr<CanonicalQuery> cq(canonicalize(queryStr));
- assertShouldNotCacheQuery(*cq);
+/**
+ * Utility function to create a PlanRankingDecision
+ */
+PlanRankingDecision* createDecision(size_t numPlans) {
+ unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
+ for (size_t i = 0; i < numPlans; ++i) {
+ CommonStats common("COLLSCAN");
+ unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
+ stats->specific.reset(new CollectionScanStats());
+ why->stats.mutableVector().push_back(stats.release());
+ why->scores.push_back(0U);
+ why->candidateOrder.push_back(i);
}
+ return why.release();
+}
- /**
- * Cacheable queries
- * These queries will be added to the cache with run-time statistics
- * and can be managed with the cache DB commands.
- */
-
- TEST(PlanCacheTest, ShouldCacheQueryBasic) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
- assertShouldCacheQuery(*cq);
+/**
+ * Test functions for shouldCacheQuery
+ * Use these functions to assert which categories
+ * of canonicalized queries are suitable for inclusion
+ * in the planner cache.
+ */
+void assertShouldCacheQuery(const CanonicalQuery& query) {
+ if (PlanCache::shouldCacheQuery(query)) {
+ return;
}
-
- TEST(PlanCacheTest, ShouldCacheQuerySort) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{}", "{a: -1}", "{_id: 0, a: 1}"));
- assertShouldCacheQuery(*cq);
+ str::stream ss;
+ ss << "Canonical query should be cacheable: " << query.toString();
+ FAIL(ss);
+}
+
+void assertShouldNotCacheQuery(const CanonicalQuery& query) {
+ if (!PlanCache::shouldCacheQuery(query)) {
+ return;
}
+ str::stream ss;
+ ss << "Canonical query should not be cacheable: " << query.toString();
+ FAIL(ss);
+}
- /*
- * Non-cacheable queries.
- * These queries will be sent through the planning process everytime.
- */
-
- /**
- * Collection scan
- * This should normally be handled by the IDHack runner.
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryCollectionScan) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{}"));
- assertShouldNotCacheQuery(*cq);
- }
+void assertShouldNotCacheQuery(const BSONObj& query) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(query));
+ assertShouldNotCacheQuery(*cq);
+}
- /**
- * Hint
- * A hinted query implies strong user preference for a particular index.
- * Therefore, not much point in caching.
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryWithHint) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{a: 1, b: 1}",
- "{}", "{}"));
- assertShouldNotCacheQuery(*cq);
- }
+void assertShouldNotCacheQuery(const char* queryStr) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(queryStr));
+ assertShouldNotCacheQuery(*cq);
+}
- /**
- * Min queries are a specialized case of hinted queries
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryWithMin) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{}",
- "{a: 100}", "{}"));
- assertShouldNotCacheQuery(*cq);
- }
+/**
+ * Cacheable queries
+ * These queries will be added to the cache with run-time statistics
+ * and can be managed with the cache DB commands.
+ */
- /**
- * Max queries are non-cacheable for the same reasons as min queries.
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryWithMax) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{}",
- "{}", "{a: 100}"));
- assertShouldNotCacheQuery(*cq);
- }
+TEST(PlanCacheTest, ShouldCacheQueryBasic) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ assertShouldCacheQuery(*cq);
+}
- /**
- * $geoWithin queries with legacy coordinates are cacheable as long as
- * the planner is able to come up with a cacheable solution.
- */
- TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyCoordinates) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: {$geoWithin: "
- "{$box: [[-180, -90], [180, 90]]}}}"));
- assertShouldCacheQuery(*cq);
- }
+TEST(PlanCacheTest, ShouldCacheQuerySort) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{}", "{a: -1}", "{_id: 0, a: 1}"));
+ assertShouldCacheQuery(*cq);
+}
- /**
- * $geoWithin queries with GeoJSON coordinates are supported by the index bounds builder.
- */
- TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinJSONCoordinates) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: {$geoWithin: "
- "{$geometry: {type: 'Polygon', coordinates: "
- "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
- assertShouldCacheQuery(*cq);
- }
+/*
+ * Non-cacheable queries.
+ * These queries will be sent through the planning process every time.
+ */
- /**
- * $geoWithin queries with both legacy and GeoJSON coordinates are cacheable.
- */
- TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyAndJSONCoordinates) {
- unique_ptr<CanonicalQuery> cq(canonicalize(
- "{$or: [{a: {$geoWithin: {$geometry: {type: 'Polygon', "
- "coordinates: [[[0, 0], [0, 90], "
- "[90, 0], [0, 0]]]}}}},"
- "{a: {$geoWithin: {$box: [[-180, -90], [180, 90]]}}}]}"));
- assertShouldCacheQuery(*cq);
- }
+/**
+ * Collection scan
+ * This should normally be handled by the IDHack runner.
+ */
+TEST(PlanCacheTest, ShouldNotCacheQueryCollectionScan) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{}"));
+ assertShouldNotCacheQuery(*cq);
+}
- /**
- * $geoIntersects queries are always cacheable because they support GeoJSON coordinates only.
- */
- TEST(PlanCacheTest, ShouldCacheQueryWithGeoIntersects) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: {$geoIntersects: "
- "{$geometry: {type: 'Point', coordinates: "
- "[10.0, 10.0]}}}}"));
- assertShouldCacheQuery(*cq);
- }
+/**
+ * Hint
+ * A hinted query implies strong user preference for a particular index.
+ * Therefore, not much point in caching.
+ */
+TEST(PlanCacheTest, ShouldNotCacheQueryWithHint) {
+ unique_ptr<CanonicalQuery> cq(
+ canonicalize("{a: 1}", "{}", "{}", 0, 0, "{a: 1, b: 1}", "{}", "{}"));
+ assertShouldNotCacheQuery(*cq);
+}
- /**
- * $geoNear queries are cacheable because we are able to distinguish
- * between flat and spherical queries.
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryWithGeoNear) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: {$geoNear: {$geometry: {type: 'Point',"
- "coordinates: [0,0]}, $maxDistance:100}}}"));
- assertShouldCacheQuery(*cq);
- }
+/**
+ * Min queries are a specialized case of hinted queries
+ */
+TEST(PlanCacheTest, ShouldNotCacheQueryWithMin) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{}", "{a: 100}", "{}"));
+ assertShouldNotCacheQuery(*cq);
+}
- /**
- * Explain queries are not-cacheable because of allPlans cannot
- * be accurately generated from stale cached stats in the plan cache for
- * non-winning plans.
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryExplain) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{}",
- "{}", "{}", // min, max
- false, // snapshot
- true // explain
- ));
- const LiteParsedQuery& pq = cq->getParsed();
- ASSERT_TRUE(pq.isExplain());
- assertShouldNotCacheQuery(*cq);
- }
+/**
+ * Max queries are non-cacheable for the same reasons as min queries.
+ */
+TEST(PlanCacheTest, ShouldNotCacheQueryWithMax) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{}", "{}", "{a: 100}"));
+ assertShouldNotCacheQuery(*cq);
+}
- // Adding an empty vector of query solutions should fail.
- TEST(PlanCacheTest, AddEmptySolutions) {
- PlanCache planCache;
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
- std::vector<QuerySolution*> solns;
- std::unique_ptr<PlanRankingDecision> decision(createDecision(1U));
- ASSERT_NOT_OK(planCache.add(*cq, solns, decision.get()));
- }
+/**
+ * $geoWithin queries with legacy coordinates are cacheable as long as
+ * the planner is able to come up with a cacheable solution.
+ */
+TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyCoordinates) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(
+ "{a: {$geoWithin: "
+ "{$box: [[-180, -90], [180, 90]]}}}"));
+ assertShouldCacheQuery(*cq);
+}
- TEST(PlanCacheTest, AddValidSolution) {
- PlanCache planCache;
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
- QuerySolution qs;
- qs.cacheData.reset(new SolutionCacheData());
- qs.cacheData->tree.reset(new PlanCacheIndexTree());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
+/**
+ * $geoWithin queries with GeoJSON coordinates are supported by the index bounds builder.
+ */
+TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinJSONCoordinates) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(
+ "{a: {$geoWithin: "
+ "{$geometry: {type: 'Polygon', coordinates: "
+ "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
+ assertShouldCacheQuery(*cq);
+}
- // Check if key is in cache before and after add().
- ASSERT_FALSE(planCache.contains(*cq));
- ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
+/**
+ * $geoWithin queries with both legacy and GeoJSON coordinates are cacheable.
+ */
+TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyAndJSONCoordinates) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(
+ "{$or: [{a: {$geoWithin: {$geometry: {type: 'Polygon', "
+ "coordinates: [[[0, 0], [0, 90], "
+ "[90, 0], [0, 0]]]}}}},"
+ "{a: {$geoWithin: {$box: [[-180, -90], [180, 90]]}}}]}"));
+ assertShouldCacheQuery(*cq);
+}
- ASSERT_TRUE(planCache.contains(*cq));
- ASSERT_EQUALS(planCache.size(), 1U);
- }
+/**
+ * $geoIntersects queries are always cacheable because they support GeoJSON coordinates only.
+ */
+TEST(PlanCacheTest, ShouldCacheQueryWithGeoIntersects) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(
+ "{a: {$geoIntersects: "
+ "{$geometry: {type: 'Point', coordinates: "
+ "[10.0, 10.0]}}}}"));
+ assertShouldCacheQuery(*cq);
+}
- TEST(PlanCacheTest, NotifyOfWriteOp) {
- PlanCache planCache;
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
- QuerySolution qs;
- qs.cacheData.reset(new SolutionCacheData());
- qs.cacheData->tree.reset(new PlanCacheIndexTree());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
- ASSERT_EQUALS(planCache.size(), 1U);
-
- // First (N - 1) write ops should have no effect on cache contents.
- for (int i = 0; i < (internalQueryCacheWriteOpsBetweenFlush - 1); ++i) {
- planCache.notifyOfWriteOp();
- }
- ASSERT_EQUALS(planCache.size(), 1U);
+/**
+ * $geoNear queries are cacheable because we are able to distinguish
+ * between flat and spherical queries.
+ */
+TEST(PlanCacheTest, ShouldNotCacheQueryWithGeoNear) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(
+ "{a: {$geoNear: {$geometry: {type: 'Point',"
+ "coordinates: [0,0]}, $maxDistance:100}}}"));
+ assertShouldCacheQuery(*cq);
+}
- // N-th notification will cause cache to be cleared.
- planCache.notifyOfWriteOp();
- ASSERT_EQUALS(planCache.size(), 0U);
-
- // Clearing the cache should reset the internal write
- // operation counter.
- // Repopulate cache. Write (N - 1) times.
- // Clear cache.
- // Add cache entry again.
- // After clearing and adding a new entry, the next write operation should not
- // clear the cache.
- ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
- for (int i = 0; i < (internalQueryCacheWriteOpsBetweenFlush - 1); ++i) {
- planCache.notifyOfWriteOp();
- }
- ASSERT_EQUALS(planCache.size(), 1U);
- planCache.clear();
- ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
- // Notification after clearing will not flush cache.
+/**
+ * Explain queries are not cacheable because allPlans cannot
+ * be accurately generated from stale cached stats in the plan cache for
+ * non-winning plans.
+ */
+TEST(PlanCacheTest, ShouldNotCacheQueryExplain) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}",
+ "{}",
+ "{}",
+ 0,
+ 0,
+ "{}",
+ "{}",
+ "{}", // min, max
+ false, // snapshot
+ true // explain
+ ));
+ const LiteParsedQuery& pq = cq->getParsed();
+ ASSERT_TRUE(pq.isExplain());
+ assertShouldNotCacheQuery(*cq);
+}
+
+// Adding an empty vector of query solutions should fail.
+TEST(PlanCacheTest, AddEmptySolutions) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ std::vector<QuerySolution*> solns;
+ std::unique_ptr<PlanRankingDecision> decision(createDecision(1U));
+ ASSERT_NOT_OK(planCache.add(*cq, solns, decision.get()));
+}
+
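+// Adding a valid solution should succeed and make the corresponding cache entry retrievable.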
+TEST(PlanCacheTest, AddValidSolution) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ QuerySolution qs;
+ qs.cacheData.reset(new SolutionCacheData());
+ qs.cacheData->tree.reset(new PlanCacheIndexTree());
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+
+ // Check if key is in cache before and after add().
+ ASSERT_FALSE(planCache.contains(*cq));
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
+
+ ASSERT_TRUE(planCache.contains(*cq));
+ ASSERT_EQUALS(planCache.size(), 1U);
+}
+
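+// The cache is flushed after 'internalQueryCacheWriteOpsBetweenFlush' write notifications;
+// clearing the cache resets that counter.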
+TEST(PlanCacheTest, NotifyOfWriteOp) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ QuerySolution qs;
+ qs.cacheData.reset(new SolutionCacheData());
+ qs.cacheData->tree.reset(new PlanCacheIndexTree());
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
+ ASSERT_EQUALS(planCache.size(), 1U);
+
+ // First (N - 1) write ops should have no effect on cache contents.
+ for (int i = 0; i < (internalQueryCacheWriteOpsBetweenFlush - 1); ++i) {
planCache.notifyOfWriteOp();
- ASSERT_EQUALS(planCache.size(), 1U);
}
-
- /**
- * Each test in the CachePlanSelectionTest suite goes through
- * the following flow:
- *
- * 1) Run QueryPlanner::plan on the query, with specified indices
- * available. This simulates the case in which we failed to plan from
- * the plan cache, and fell back on selecting a plan ourselves. The
- * enumerator will run, and cache data will be stashed into each solution
- * that it generates.
- *
- * 2) Use firstMatchingSolution to select one of the solutions generated
- * by QueryPlanner::plan. This simulates the multi plan runner picking
- * the "best solution".
- *
- * 3) The cache data stashed inside the "best solution" is used to
- * make a CachedSolution which looks exactly like the data structure that
- * would be returned from the cache. This simulates a plan cache hit.
- *
- * 4) Call QueryPlanner::planFromCache, passing it the CachedSolution.
- * This exercises the code which is able to map from a CachedSolution to
- * a full-blown QuerySolution. Finally, assert that the query solution
- * recovered from the cache is identical to the original "best solution".
- */
- class CachePlanSelectionTest : public mongo::unittest::Test {
- protected:
- void setUp() {
- cq = NULL;
- params.options = QueryPlannerParams::INCLUDE_COLLSCAN;
- addIndex(BSON("_id" << 1));
- }
-
- void tearDown() {
- delete cq;
-
- for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
- delete *it;
- }
- }
-
- void addIndex(BSONObj keyPattern, bool multikey = false) {
- // The first false means not multikey.
- // The second false means not sparse.
- // The third arg is the index name and I am egotistical.
- // The NULL means no filter expression.
- params.indices.push_back(IndexEntry(keyPattern,
- multikey,
- false,
- false,
- "hari_king_of_the_stove",
- NULL,
- BSONObj()));
- }
-
- void addIndex(BSONObj keyPattern, bool multikey, bool sparse) {
- params.indices.push_back(IndexEntry(keyPattern,
- multikey,
- sparse,
- false,
- "note_to_self_dont_break_build",
- NULL,
- BSONObj()));
- }
-
- //
- // Execute planner.
- //
-
- void runQuery(BSONObj query) {
- runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), 0, 0);
- }
-
- void runQuerySortProj(const BSONObj& query, const BSONObj& sort, const BSONObj& proj) {
- runQuerySortProjSkipLimit(query, sort, proj, 0, 0);
- }
-
- void runQuerySkipLimit(const BSONObj& query, long long skip, long long limit) {
- runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), skip, limit);
- }
-
- void runQueryHint(const BSONObj& query, const BSONObj& hint) {
- runQuerySortProjSkipLimitHint(query, BSONObj(), BSONObj(), 0, 0, hint);
- }
-
- void runQuerySortProjSkipLimit(const BSONObj& query,
- const BSONObj& sort, const BSONObj& proj,
- long long skip, long long limit) {
- runQuerySortProjSkipLimitHint(query, sort, proj, skip, limit, BSONObj());
- }
-
- void runQuerySortHint(const BSONObj& query, const BSONObj& sort, const BSONObj& hint) {
- runQuerySortProjSkipLimitHint(query, sort, BSONObj(), 0, 0, hint);
- }
-
- void runQueryHintMinMax(const BSONObj& query, const BSONObj& hint,
- const BSONObj& minObj, const BSONObj& maxObj) {
-
- runQueryFull(query, BSONObj(), BSONObj(), 0, 0, hint, minObj, maxObj, false);
- }
-
- void runQuerySortProjSkipLimitHint(const BSONObj& query,
- const BSONObj& sort, const BSONObj& proj,
- long long skip, long long limit,
- const BSONObj& hint) {
- runQueryFull(query, sort, proj, skip, limit, hint, BSONObj(), BSONObj(), false);
- }
-
- void runQuerySnapshot(const BSONObj& query) {
- runQueryFull(query, BSONObj(), BSONObj(), 0, 0, BSONObj(), BSONObj(),
- BSONObj(), true);
- }
-
- void runQueryFull(const BSONObj& query,
- const BSONObj& sort, const BSONObj& proj,
- long long skip, long long limit,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot) {
-
- // Clean up any previous state from a call to runQueryFull
- delete cq;
- cq = NULL;
-
- for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
- delete *it;
- }
-
- solns.clear();
-
-
- Status s = CanonicalQuery::canonicalize(ns, query, sort, proj, skip, limit, hint,
- minObj, maxObj, snapshot,
- false, // explain
- &cq);
- if (!s.isOK()) { cq = NULL; }
- ASSERT_OK(s);
- s = QueryPlanner::plan(*cq, params, &solns);
- ASSERT_OK(s);
- }
-
- //
- // Solution introspection.
- //
-
- void dumpSolutions(str::stream& ost) const {
- for (vector<QuerySolution*>::const_iterator it = solns.begin();
- it != solns.end();
- ++it) {
- ost << (*it)->toString() << '\n';
- }
- }
-
- /**
- * Returns number of generated solutions matching JSON.
- */
- size_t numSolutionMatches(const string& solnJson) const {
- BSONObj testSoln = fromjson(solnJson);
- size_t matches = 0;
- for (vector<QuerySolution*>::const_iterator it = solns.begin();
- it != solns.end();
- ++it) {
- QuerySolutionNode* root = (*it)->root.get();
- if (QueryPlannerTestLib::solutionMatches(testSoln, root)) {
- ++matches;
- }
- }
- return matches;
- }
-
- /**
- * Verifies that the solution tree represented in json by 'solnJson' is
- * one of the solutions generated by QueryPlanner.
- *
- * The number of expected matches, 'numMatches', could be greater than
- * 1 if solutions differ only by the pattern of index tags on a filter.
- */
- void assertSolutionExists(const string& solnJson, size_t numMatches = 1) const {
- size_t matches = numSolutionMatches(solnJson);
- if (numMatches == matches) {
- return;
- }
- str::stream ss;
- ss << "expected " << numMatches << " matches for solution " << solnJson
- << " but got " << matches
- << " instead. all solutions generated: " << '\n';
- dumpSolutions(ss);
- FAIL(ss);
- }
-
- /**
- * Plan 'query' from the cache. A mock cache entry is created using
- * the cacheData stored inside the QuerySolution 'soln'.
- *
- * Does not take ownership of 'soln'.
- */
- QuerySolution* planQueryFromCache(const BSONObj& query, const QuerySolution& soln) const {
- return planQueryFromCache(query, BSONObj(), BSONObj(), soln);
- }
-
- /**
- * Plan 'query' from the cache with sort order 'sort' and
- * projection 'proj'. A mock cache entry is created using
- * the cacheData stored inside the QuerySolution 'soln'.
- *
- * Does not take ownership of 'soln'.
- */
- QuerySolution* planQueryFromCache(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- const QuerySolution& soln) const {
- CanonicalQuery* cq;
- Status s = CanonicalQuery::canonicalize(ns, query, sort, proj, &cq);
- ASSERT_OK(s);
- unique_ptr<CanonicalQuery> scopedCq(cq);
- cq = NULL;
-
- // Create a CachedSolution the long way..
- // QuerySolution -> PlanCacheEntry -> CachedSolution
- QuerySolution qs;
- qs.cacheData.reset(soln.cacheData->clone());
- std::vector<QuerySolution*> solutions;
- solutions.push_back(&qs);
- PlanCacheEntry entry(solutions, createDecision(1U));
- CachedSolution cachedSoln(ck, entry);
-
- QuerySolution *out;
- s = QueryPlanner::planFromCache(*scopedCq.get(), params, cachedSoln, &out);
- ASSERT_OK(s);
-
- return out;
- }
-
- /**
- * @param solnJson -- a json representation of a query solution.
- *
- * Returns the first solution matching 'solnJson', or fails if
- * no match is found.
- */
- QuerySolution* firstMatchingSolution(const string& solnJson) const {
- BSONObj testSoln = fromjson(solnJson);
- for (vector<QuerySolution*>::const_iterator it = solns.begin();
- it != solns.end();
- ++it) {
- QuerySolutionNode* root = (*it)->root.get();
- if (QueryPlannerTestLib::solutionMatches(testSoln, root)) {
- return *it;
- }
- }
-
- str::stream ss;
- ss << "Could not find a match for solution " << solnJson
- << " All solutions generated: " << '\n';
- dumpSolutions(ss);
- FAIL(ss);
-
- return NULL;
- }
-
- /**
- * Assert that the QuerySolution 'trueSoln' matches the JSON-based representation
- * of the solution in 'solnJson'.
- *
- * Relies on solutionMatches() -- see query_planner_test_lib.h
- */
- void assertSolutionMatches(QuerySolution* trueSoln, const string& solnJson) const {
- BSONObj testSoln = fromjson(solnJson);
- if (!QueryPlannerTestLib::solutionMatches(testSoln, trueSoln->root.get())) {
- str::stream ss;
- ss << "Expected solution " << solnJson << " did not match true solution: "
- << trueSoln->toString() << '\n';
- FAIL(ss);
- }
- }
-
- /**
- * Overloaded so that it is not necessary to specificy sort and project.
- */
- void assertPlanCacheRecoversSolution(const BSONObj& query, const string& solnJson) {
- assertPlanCacheRecoversSolution(query, BSONObj(), BSONObj(), solnJson);
- }
-
- /**
- * First, the solution matching 'solnJson' is retrieved from the vector
- * of solutions generated by QueryPlanner::plan. This solution is
- * then passed into planQueryFromCache(). Asserts that the solution
- * generated by QueryPlanner::planFromCache matches 'solnJson'.
- *
- * Must be called after calling one of the runQuery* methods.
- *
- * Together, 'query', 'sort', and 'proj' should specify the query which
- * was previously run using one of the runQuery* methods.
- */
- void assertPlanCacheRecoversSolution(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- const string& solnJson) {
- QuerySolution* bestSoln = firstMatchingSolution(solnJson);
- QuerySolution* planSoln = planQueryFromCache(query, sort, proj, *bestSoln);
- assertSolutionMatches(planSoln, solnJson);
- delete planSoln;
- }
-
- /**
- * Check that the solution will not be cached. The planner will store
- * cache data inside non-cachable solutions, but will not do so for
- * non-cachable solutions. Therefore, we just have to check that
- * cache data is NULL.
- */
- void assertNotCached(const string& solnJson) {
- QuerySolution* bestSoln = firstMatchingSolution(solnJson);
- ASSERT(NULL != bestSoln);
- ASSERT(NULL == bestSoln->cacheData.get());
- }
-
- static const PlanCacheKey ck;
-
- BSONObj queryObj;
- CanonicalQuery* cq;
- QueryPlannerParams params;
- vector<QuerySolution*> solns;
- };
-
- const PlanCacheKey CachePlanSelectionTest::ck = "mock_cache_key";
-
- //
- // Equality
- //
-
- TEST_F(CachePlanSelectionTest, EqualityIndexScan) {
- addIndex(BSON("x" << 1));
- runQuery(BSON("x" << 5));
-
- assertPlanCacheRecoversSolution(BSON("x" << 5),
- "{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(CachePlanSelectionTest, EqualityIndexScanWithTrailingFields) {
- addIndex(BSON("x" << 1 << "y" << 1));
- runQuery(BSON("x" << 5));
-
- assertPlanCacheRecoversSolution(BSON("x" << 5),
- "{fetch: {filter: null, node: {ixscan: {pattern: {x: 1, y: 1}}}}}");
+ ASSERT_EQUALS(planCache.size(), 1U);
+
+ // N-th notification will cause cache to be cleared.
+ planCache.notifyOfWriteOp();
+ ASSERT_EQUALS(planCache.size(), 0U);
+
+ // Clearing the cache should reset the internal write
+ // operation counter.
+ // Repopulate cache. Write (N - 1) times.
+ // Clear cache.
+ // Add cache entry again.
+ // After clearing and adding a new entry, the next write operation should not
+ // clear the cache.
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
+ for (int i = 0; i < (internalQueryCacheWriteOpsBetweenFlush - 1); ++i) {
+ planCache.notifyOfWriteOp();
}
+ ASSERT_EQUALS(planCache.size(), 1U);
+ planCache.clear();
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
+ // Notification after clearing will not flush cache.
+ planCache.notifyOfWriteOp();
+ ASSERT_EQUALS(planCache.size(), 1U);
+}
- //
- // Geo
- //
-
- TEST_F(CachePlanSelectionTest, Basic2DSphereNonNear) {
- addIndex(BSON("a" << "2dsphere"));
- BSONObj query;
-
- query = fromjson("{a: {$geoIntersects: {$geometry: {type: 'Point',"
- "coordinates: [10.0, 10.0]}}}}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
-
- query = fromjson("{a : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+/**
+ * Each test in the CachePlanSelectionTest suite goes through
+ * the following flow:
+ *
+ * 1) Run QueryPlanner::plan on the query, with specified indices
+ * available. This simulates the case in which we failed to plan from
+ * the plan cache, and fell back on selecting a plan ourselves. The
+ * enumerator will run, and cache data will be stashed into each solution
+ * that it generates.
+ *
+ * 2) Use firstMatchingSolution to select one of the solutions generated
+ * by QueryPlanner::plan. This simulates the multi plan runner picking
+ * the "best solution".
+ *
+ * 3) The cache data stashed inside the "best solution" is used to
+ * make a CachedSolution which looks exactly like the data structure that
+ * would be returned from the cache. This simulates a plan cache hit.
+ *
+ * 4) Call QueryPlanner::planFromCache, passing it the CachedSolution.
+ * This exercises the code which is able to map from a CachedSolution to
+ * a full-blown QuerySolution. Finally, assert that the query solution
+ * recovered from the cache is identical to the original "best solution".
+ */
+class CachePlanSelectionTest : public mongo::unittest::Test {
+protected:
+ void setUp() {
+ cq = NULL;
+ params.options = QueryPlannerParams::INCLUDE_COLLSCAN;
+ addIndex(BSON("_id" << 1));
}
- TEST_F(CachePlanSelectionTest, Basic2DSphereGeoNear) {
- addIndex(BSON("a" << "2dsphere"));
- BSONObj query;
+ void tearDown() {
+ delete cq;
- query = fromjson("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {a: '2dsphere'}}");
-
- query = fromjson("{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
- "$maxDistance:100}}}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {a: '2dsphere'}}");
- }
-
- TEST_F(CachePlanSelectionTest, Basic2DSphereGeoNearReverseCompound) {
- addIndex(BSON("x" << 1));
- addIndex(BSON("x" << 1 << "a" << "2dsphere"));
- BSONObj query = fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {x: 1, a: '2dsphere'}}");
+ for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
+ delete *it;
+ }
}
- TEST_F(CachePlanSelectionTest, TwoDSphereNoGeoPred) {
- addIndex(BSON("x" << 1 << "a" << "2dsphere"));
- runQuery(BSON("x" << 1));
- assertPlanCacheRecoversSolution(BSON("x" << 1),
- "{fetch: {node: {ixscan: {pattern: {x: 1, a: '2dsphere'}}}}}");
+ void addIndex(BSONObj keyPattern, bool multikey = false) {
+ // 'multikey' is passed through from the caller.
+ // The first false means not sparse.
+ // The second false means not unique.
+ // The string arg is the index name and I am egotistical.
+ // The NULL means no filter expression.
+ params.indices.push_back(IndexEntry(
+ keyPattern, multikey, false, false, "hari_king_of_the_stove", NULL, BSONObj()));
}
- TEST_F(CachePlanSelectionTest, Or2DSphereNonNear) {
- addIndex(BSON("a" << "2dsphere"));
- addIndex(BSON("b" << "2dsphere"));
- BSONObj query = fromjson("{$or: [ {a: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [10.0, 10.0]}}}},"
- " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}");
-
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}},"
- "{fetch: {node: {ixscan: {pattern: {b: '2dsphere'}}}}}]}}");
+ void addIndex(BSONObj keyPattern, bool multikey, bool sparse) {
+ params.indices.push_back(IndexEntry(
+ keyPattern, multikey, sparse, false, "note_to_self_dont_break_build", NULL, BSONObj()));
}
//
- // tree operations
+ // Execute planner.
//
- TEST_F(CachePlanSelectionTest, TwoPredicatesAnding) {
- addIndex(BSON("x" << 1));
- BSONObj query = fromjson("{$and: [ {x: {$gt: 1}}, {x: {$lt: 3}} ] }");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {x: 1}}}}}");
+ void runQuery(BSONObj query) {
+ runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), 0, 0);
}
- TEST_F(CachePlanSelectionTest, SimpleOr) {
- addIndex(BSON("a" << 1));
- BSONObj query = fromjson("{$or: [{a: 20}, {a: 21}]}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {a:1}}}}}");
+ void runQuerySortProj(const BSONObj& query, const BSONObj& sort, const BSONObj& proj) {
+ runQuerySortProjSkipLimit(query, sort, proj, 0, 0);
}
- TEST_F(CachePlanSelectionTest, OrWithAndChild) {
- addIndex(BSON("a" << 1));
- BSONObj query = fromjson("{$or: [{a: 20}, {$and: [{a:1}, {b:7}]}]}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a: 1}}}, "
- "{fetch: {filter: {b: 7}, node: {ixscan: "
- "{filter: null, pattern: {a: 1}}}}}]}}}}");
+ void runQuerySkipLimit(const BSONObj& query, long long skip, long long limit) {
+ runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), skip, limit);
}
- TEST_F(CachePlanSelectionTest, AndWithUnindexedOrChild) {
- addIndex(BSON("a" << 1));
- BSONObj query = fromjson("{a:20, $or: [{b:1}, {c:7}]}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+ void runQueryHint(const BSONObj& query, const BSONObj& hint) {
+ runQuerySortProjSkipLimitHint(query, BSONObj(), BSONObj(), 0, 0, hint);
}
-
- TEST_F(CachePlanSelectionTest, AndWithOrWithOneIndex) {
- addIndex(BSON("b" << 1));
- addIndex(BSON("a" << 1));
- BSONObj query = fromjson("{$or: [{b:1}, {c:7}], a:20}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+ void runQuerySortProjSkipLimit(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit) {
+ runQuerySortProjSkipLimitHint(query, sort, proj, skip, limit, BSONObj());
}
- //
- // Sort orders
- //
-
- // SERVER-1205.
- TEST_F(CachePlanSelectionTest, MergeSort) {
- addIndex(BSON("a" << 1 << "c" << 1));
- addIndex(BSON("b" << 1 << "c" << 1));
-
- BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
- BSONObj sort = BSON("c" << 1);
- runQuerySortProj(query, sort, BSONObj());
-
- assertPlanCacheRecoversSolution(query, sort, BSONObj(),
- "{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a: 1, c: 1}}}, {ixscan: {pattern: {b: 1, c: 1}}}]}}}}");
+ void runQuerySortHint(const BSONObj& query, const BSONObj& sort, const BSONObj& hint) {
+ runQuerySortProjSkipLimitHint(query, sort, BSONObj(), 0, 0, hint);
}
- // SERVER-1205 as well.
- TEST_F(CachePlanSelectionTest, NoMergeSortIfNoSortWanted) {
- addIndex(BSON("a" << 1 << "c" << 1));
- addIndex(BSON("b" << 1 << "c" << 1));
-
- BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
- runQuerySortProj(query, BSONObj(), BSONObj());
-
- assertPlanCacheRecoversSolution(query, BSONObj(), BSONObj(),
- "{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a: 1, c: 1}}}, "
- "{ixscan: {filter: null, pattern: {b: 1, c: 1}}}]}}}}");
- }
-
- // Disabled: SERVER-10801.
- /*
- TEST_F(CachePlanSelectionTest, SortOnGeoQuery) {
- addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"));
- BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", "
- "coordinates: [[[1, 1], [1, 90], [180, 90], "
- "[180, 1], [1, 1]]]}}}}");
- BSONObj sort = fromjson("{timestamp: -1}");
- runQuerySortProj(query, sort, BSONObj());
-
- assertPlanCacheRecoversSolution(query, sort, BSONObj(),
- "{fetch: {node: {ixscan: {pattern: {timestamp: -1, position: '2dsphere'}}}}}");
- }
- */
-
- // SERVER-9257
- TEST_F(CachePlanSelectionTest, CompoundGeoNoGeoPredicate) {
- addIndex(BSON("creationDate" << 1 << "foo.bar" << "2dsphere"));
- BSONObj query = fromjson("{creationDate: {$gt: 7}}");
- BSONObj sort = fromjson("{creationDate: 1}");
- runQuerySortProj(query, sort, BSONObj());
-
- assertPlanCacheRecoversSolution(query, sort, BSONObj(),
- "{fetch: {node: {ixscan: {pattern: {creationDate: 1, 'foo.bar': '2dsphere'}}}}}");
- }
-
- TEST_F(CachePlanSelectionTest, ReverseScanForSort) {
- addIndex(BSON("_id" << 1));
- runQuerySortProj(BSONObj(), fromjson("{_id: -1}"), BSONObj());
- assertPlanCacheRecoversSolution(BSONObj(), fromjson("{_id: -1}"), BSONObj(),
- "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {_id: 1}}}}}");
+ void runQueryHintMinMax(const BSONObj& query,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj) {
+ runQueryFull(query, BSONObj(), BSONObj(), 0, 0, hint, minObj, maxObj, false);
}
- //
- // Caching collection scans.
- //
-
- TEST_F(CachePlanSelectionTest, CollscanNoUsefulIndices) {
- addIndex(BSON("a" << 1 << "b" << 1));
- addIndex(BSON("c" << 1));
- runQuery(BSON("b" << 4));
- assertPlanCacheRecoversSolution(BSON("b" << 4),
- "{cscan: {filter: {b: 4}, dir: 1}}");
+ void runQuerySortProjSkipLimitHint(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint) {
+ runQueryFull(query, sort, proj, skip, limit, hint, BSONObj(), BSONObj(), false);
}
- TEST_F(CachePlanSelectionTest, CollscanOrWithoutEnoughIndices) {
- addIndex(BSON("a" << 1));
- BSONObj query =fromjson("{$or: [{a: 20}, {b: 21}]}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{cscan: {filter: {$or:[{a:20},{b:21}]}, dir: 1}}");
+ void runQuerySnapshot(const BSONObj& query) {
+ runQueryFull(query, BSONObj(), BSONObj(), 0, 0, BSONObj(), BSONObj(), BSONObj(), true);
}
- TEST_F(CachePlanSelectionTest, CollscanMergeSort) {
- addIndex(BSON("a" << 1 << "c" << 1));
- addIndex(BSON("b" << 1 << "c" << 1));
-
- BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
- BSONObj sort = BSON("c" << 1);
- runQuerySortProj(query, sort, BSONObj());
+ void runQueryFull(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot) {
+ // Clean up any previous state from a call to runQueryFull
+ delete cq;
+ cq = NULL;
+
+ for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
+ delete *it;
+ }
- assertPlanCacheRecoversSolution(query, sort, BSONObj(),
- "{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ solns.clear();
+
+
+ Status s = CanonicalQuery::canonicalize(ns,
+ query,
+ sort,
+ proj,
+ skip,
+ limit,
+ hint,
+ minObj,
+ maxObj,
+ snapshot,
+ false, // explain
+ &cq);
+ if (!s.isOK()) {
+ cq = NULL;
+ }
+ ASSERT_OK(s);
+ s = QueryPlanner::plan(*cq, params, &solns);
+ ASSERT_OK(s);
}
//
- // Check queries that, at least for now, are not cached.
+ // Solution introspection.
//
- TEST_F(CachePlanSelectionTest, GeoNear2DNotCached) {
- addIndex(BSON("a" << "2d"));
- runQuery(fromjson("{a: {$near: [0,0], $maxDistance:0.3 }}"));
- assertNotCached("{geoNear2d: {a: '2d'}}");
- }
-
- TEST_F(CachePlanSelectionTest, MinNotCached) {
- addIndex(BSON("a" << 1));
- runQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), BSONObj());
- assertNotCached("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+ void dumpSolutions(str::stream& ost) const {
+ for (vector<QuerySolution*>::const_iterator it = solns.begin(); it != solns.end(); ++it) {
+ ost << (*it)->toString() << '\n';
+ }
}
- TEST_F(CachePlanSelectionTest, MaxNotCached) {
- addIndex(BSON("a" << 1));
- runQueryHintMinMax(BSONObj(), BSONObj(), BSONObj(), fromjson("{a: 1}"));
- assertNotCached("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+ /**
+ * Returns number of generated solutions matching JSON.
+ */
+ size_t numSolutionMatches(const string& solnJson) const {
+ BSONObj testSoln = fromjson(solnJson);
+ size_t matches = 0;
+ for (vector<QuerySolution*>::const_iterator it = solns.begin(); it != solns.end(); ++it) {
+ QuerySolutionNode* root = (*it)->root.get();
+ if (QueryPlannerTestLib::solutionMatches(testSoln, root)) {
+ ++matches;
+ }
+ }
+ return matches;
}
- TEST_F(CachePlanSelectionTest, NaturalHintNotCached) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuerySortHint(BSON("a" << 1), BSON("b" << 1), BSON("$natural" << 1));
- assertNotCached("{sort: {pattern: {b: 1}, limit: 0, node: "
- "{cscan: {filter: {a: 1}, dir: 1}}}}");
+ /**
+ * Verifies that the solution tree represented in json by 'solnJson' is
+ * one of the solutions generated by QueryPlanner.
+ *
+ * The number of expected matches, 'numMatches', could be greater than
+ * 1 if solutions differ only by the pattern of index tags on a filter.
+ */
+ void assertSolutionExists(const string& solnJson, size_t numMatches = 1) const {
+ size_t matches = numSolutionMatches(solnJson);
+ if (numMatches == matches) {
+ return;
+ }
+ str::stream ss;
+ ss << "expected " << numMatches << " matches for solution " << solnJson << " but got "
+ << matches << " instead. all solutions generated: " << '\n';
+ dumpSolutions(ss);
+ FAIL(ss);
}
- TEST_F(CachePlanSelectionTest, HintValidNotCached) {
- addIndex(BSON("a" << 1));
- runQueryHint(BSONObj(), fromjson("{a: 1}"));
- assertNotCached("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+ /**
+ * Plan 'query' from the cache. A mock cache entry is created using
+ * the cacheData stored inside the QuerySolution 'soln'.
+ *
+ * Does not take ownership of 'soln'.
+ */
+ QuerySolution* planQueryFromCache(const BSONObj& query, const QuerySolution& soln) const {
+ return planQueryFromCache(query, BSONObj(), BSONObj(), soln);
}
- //
- // Queries using '2d' indices are not cached.
- //
+ /**
+ * Plan 'query' from the cache with sort order 'sort' and
+ * projection 'proj'. A mock cache entry is created using
+ * the cacheData stored inside the QuerySolution 'soln'.
+ *
+ * Does not take ownership of 'soln'.
+ */
+ QuerySolution* planQueryFromCache(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ const QuerySolution& soln) const {
+ CanonicalQuery* cq;
+ Status s = CanonicalQuery::canonicalize(ns, query, sort, proj, &cq);
+ ASSERT_OK(s);
+ unique_ptr<CanonicalQuery> scopedCq(cq);
+ cq = NULL;
- TEST_F(CachePlanSelectionTest, Basic2DNonNearNotCached) {
- addIndex(BSON("a" << "2d"));
- BSONObj query;
-
- // Polygon
- query = fromjson("{a : { $within: { $polygon : [[0,0], [2,0], [4,0]] } }}");
- runQuery(query);
- assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
-
- // Center
- query = fromjson("{a : { $within : { $center : [[ 5, 5 ], 7 ] } }}");
- runQuery(query);
- assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
-
- // Centersphere
- query = fromjson("{a : { $within : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}");
- runQuery(query);
- assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
-
- // Within box.
- query = fromjson("{a : {$within: {$box : [[0,0],[9,9]]}}}");
- runQuery(query);
- assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
- }
+ // Create a CachedSolution the long way:
+ // QuerySolution -> PlanCacheEntry -> CachedSolution
+ QuerySolution qs;
+ qs.cacheData.reset(soln.cacheData->clone());
+ std::vector<QuerySolution*> solutions;
+ solutions.push_back(&qs);
+ PlanCacheEntry entry(solutions, createDecision(1U));
+ CachedSolution cachedSoln(ck, entry);
- TEST_F(CachePlanSelectionTest, Or2DNonNearNotCached) {
- addIndex(BSON("a" << "2d"));
- addIndex(BSON("b" << "2d"));
- BSONObj query = fromjson("{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {b : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}");
+ QuerySolution* out;
+ s = QueryPlanner::planFromCache(*scopedCq.get(), params, cachedSoln, &out);
+ ASSERT_OK(s);
- runQuery(query);
- assertNotCached("{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}},"
- "{fetch: {node: {ixscan: {pattern: {b: '2d'}}}}}]}}");
+ return out;
}
/**
- * Test functions for computeKey. Cache keys are intentionally obfuscated and are
- * meaningful only within the current lifetime of the server process. Users should treat plan
- * cache keys as opaque.
+ * @param solnJson -- a json representation of a query solution.
+ *
+ * Returns the first solution matching 'solnJson', or fails if
+ * no match is found.
*/
- void testComputeKey(const char* queryStr,
- const char* sortStr,
- const char* projStr,
- const char *expectedStr) {
- PlanCache planCache;
- unique_ptr<CanonicalQuery> cq(canonicalize(queryStr, sortStr, projStr));
- PlanCacheKey key = planCache.computeKey(*cq);
- PlanCacheKey expectedKey(expectedStr);
- if (key == expectedKey) {
- return;
+ QuerySolution* firstMatchingSolution(const string& solnJson) const {
+ BSONObj testSoln = fromjson(solnJson);
+ for (vector<QuerySolution*>::const_iterator it = solns.begin(); it != solns.end(); ++it) {
+ QuerySolutionNode* root = (*it)->root.get();
+ if (QueryPlannerTestLib::solutionMatches(testSoln, root)) {
+ return *it;
+ }
}
+
str::stream ss;
- ss << "Unexpected plan cache key. Expected: " << expectedKey << ". Actual: " << key
- << ". Query: " << cq->toString();
+ ss << "Could not find a match for solution " << solnJson
+ << " All solutions generated: " << '\n';
+ dumpSolutions(ss);
FAIL(ss);
- }
- TEST(PlanCacheTest, ComputeKey) {
- // Generated cache keys should be treated as opaque to the user.
-
- // No sorts
- testComputeKey("{}", "{}", "{}", "an");
- testComputeKey("{$or: [{a: 1}, {b: 2}]}", "{}", "{}", "or[eqa,eqb]");
- testComputeKey("{$or: [{a: 1}, {b: 1}, {c: 1}], d: 1}", "{}", "{}",
- "an[or[eqa,eqb,eqc],eqd]");
- testComputeKey("{$or: [{a: 1}, {b: 1}], c: 1, d: 1}", "{}", "{}",
- "an[or[eqa,eqb],eqc,eqd]");
- testComputeKey("{a: 1, b: 1, c: 1}", "{}", "{}", "an[eqa,eqb,eqc]");
- testComputeKey("{a: 1, beqc: 1}", "{}", "{}", "an[eqa,eqbeqc]");
- testComputeKey("{ap1a: 1}", "{}", "{}", "eqap1a");
- testComputeKey("{aab: 1}", "{}", "{}", "eqaab");
-
- // With sort
- testComputeKey("{}", "{a: 1}", "{}", "an~aa");
- testComputeKey("{}", "{a: -1}", "{}", "an~da");
- testComputeKey("{}", "{a: {$meta: 'textScore'}}", "{a: {$meta: 'textScore'}}",
- "an~ta|{ $meta: \"textScore\" }a");
- testComputeKey("{a: 1}", "{b: 1}", "{}", "eqa~ab");
-
- // With projection
- testComputeKey("{}", "{}", "{a: 1}", "an|ia");
- testComputeKey("{}", "{}", "{a: -1}", "an|ia");
- testComputeKey("{}", "{}", "{a: -1.0}", "an|ia");
- testComputeKey("{}", "{}", "{a: true}", "an|ia");
- testComputeKey("{}", "{}", "{a: 0}", "an|ea");
- testComputeKey("{}", "{}", "{a: false}", "an|ea");
- testComputeKey("{}", "{}", "{a: 99}", "an|ia");
- testComputeKey("{}", "{}", "{a: 'foo'}", "an|ia");
- testComputeKey("{}", "{}", "{a: {$slice: [3, 5]}}", "an|{ $slice: \\[ 3\\, 5 \\] }a");
- testComputeKey("{}", "{}", "{a: {$elemMatch: {x: 2}}}",
- "an|{ $elemMatch: { x: 2 } }a");
- testComputeKey("{}", "{}", "{a: ObjectId('507f191e810c19729de860ea')}",
- "an|ia");
- testComputeKey("{a: 1}", "{}", "{'a.$': 1}", "eqa|ia.$");
- testComputeKey("{a: 1}", "{}", "{a: 1}", "eqa|ia");
-
- // Projection should be order-insensitive
- testComputeKey("{}", "{}", "{a: 1, b: 1}", "an|iaib");
- testComputeKey("{}", "{}", "{b: 1, a: 1}", "an|iaib");
-
- // With or-elimination and projection
- testComputeKey("{$or: [{a: 1}]}", "{}", "{_id: 0, a: 1}", "eqa|e_idia");
- testComputeKey("{$or: [{a: 1}]}", "{}", "{'a.$': 1}", "eqa|ia.$");
+ return NULL;
}
- // Delimiters found in user field names or non-standard projection field values
- // must be escaped.
- TEST(PlanCacheTest, ComputeKeyEscaped) {
- // Field name in query.
- testComputeKey("{'a,[]~|<>': 1}", "{}", "{}", "eqa\\,\\[\\]\\~\\|\\<\\>");
-
- // Field name in sort.
- testComputeKey("{}", "{'a,[]~|<>': 1}", "{}", "an~aa\\,\\[\\]\\~\\|\\<\\>");
-
- // Field name in projection.
- testComputeKey("{}", "{}", "{'a,[]~|<>': 1}", "an|ia\\,\\[\\]\\~\\|\\<\\>");
-
- // Value in projection.
- testComputeKey("{}", "{}", "{a: 'foo,[]~|<>'}", "an|ia");
+ /**
+ * Assert that the QuerySolution 'trueSoln' matches the JSON-based representation
+ * of the solution in 'solnJson'.
+ *
+ * Relies on solutionMatches() -- see query_planner_test_lib.h
+ */
+ void assertSolutionMatches(QuerySolution* trueSoln, const string& solnJson) const {
+ BSONObj testSoln = fromjson(solnJson);
+ if (!QueryPlannerTestLib::solutionMatches(testSoln, trueSoln->root.get())) {
+ str::stream ss;
+ ss << "Expected solution " << solnJson
+ << " did not match true solution: " << trueSoln->toString() << '\n';
+ FAIL(ss);
+ }
}
- // Cache keys for $geoWithin queries with legacy and GeoJSON coordinates should
- // not be the same.
- TEST(PlanCacheTest, ComputeKeyGeoWithin) {
- PlanCache planCache;
-
- // Legacy coordinates.
- unique_ptr<CanonicalQuery> cqLegacy(canonicalize("{a: {$geoWithin: "
- "{$box: [[-180, -90], [180, 90]]}}}"));
- // GeoJSON coordinates.
- unique_ptr<CanonicalQuery> cqNew(canonicalize("{a: {$geoWithin: "
- "{$geometry: {type: 'Polygon', coordinates: "
- "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
- ASSERT_NOT_EQUALS(planCache.computeKey(*cqLegacy),
- planCache.computeKey(*cqNew));
+ /**
+ * Overloaded so that it is not necessary to specify sort and projection.
+ */
+ void assertPlanCacheRecoversSolution(const BSONObj& query, const string& solnJson) {
+ assertPlanCacheRecoversSolution(query, BSONObj(), BSONObj(), solnJson);
}
- // GEO_NEAR cache keys should include information on geometry and CRS in addition
- // to the match type and field name.
- TEST(PlanCacheTest, ComputeKeyGeoNear) {
- testComputeKey("{a: {$near: [0,0], $maxDistance:0.3 }}", "{}", "{}", "gnanrfl");
- testComputeKey("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}", "{}", "{}", "gnanssp");
- testComputeKey("{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
- "$maxDistance:100}}}", "{}", "{}", "gnanrsp");
+ /**
+ * First, the solution matching 'solnJson' is retrieved from the vector
+ * of solutions generated by QueryPlanner::plan. This solution is
+ * then passed into planQueryFromCache(). Asserts that the solution
+ * generated by QueryPlanner::planFromCache matches 'solnJson'.
+ *
+ * Must be called after calling one of the runQuery* methods.
+ *
+ * Together, 'query', 'sort', and 'proj' should specify the query which
+ * was previously run using one of the runQuery* methods.
+ */
+ void assertPlanCacheRecoversSolution(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ const string& solnJson) {
+ QuerySolution* bestSoln = firstMatchingSolution(solnJson);
+ QuerySolution* planSoln = planQueryFromCache(query, sort, proj, *bestSoln);
+ assertSolutionMatches(planSoln, solnJson);
+ delete planSoln;
}
- // When a sparse index is present, computeKey() should generate different keys depending on
- // whether or not the predicates in the given query can use the index.
- TEST(PlanCacheTest, ComputeKeySparseIndex) {
- PlanCache planCache;
- planCache.notifyOfIndexEntries({IndexEntry(BSON("a" << 1),
- false, // multikey
- true, // sparse
- false, // unique
- "", // name
- nullptr, // filterExpr
- BSONObj())});
-
- unique_ptr<CanonicalQuery> cqEqNumber(canonicalize("{a: 0}}"));
- unique_ptr<CanonicalQuery> cqEqString(canonicalize("{a: 'x'}}"));
- unique_ptr<CanonicalQuery> cqEqNull(canonicalize("{a: null}}"));
-
- // 'cqEqNumber' and 'cqEqString' get the same key, since both are compatible with this
- // index.
- ASSERT_EQ(planCache.computeKey(*cqEqNumber), planCache.computeKey(*cqEqString));
-
- // 'cqEqNull' gets a different key, since it is not compatible with this index.
- ASSERT_NOT_EQUALS(planCache.computeKey(*cqEqNull), planCache.computeKey(*cqEqNumber));
+ /**
+ * Check that the solution will not be cached. The planner will store
+ * cache data inside cacheable solutions, but will not do so for
+ * non-cacheable solutions. Therefore, we just have to check that
+ * cache data is NULL.
+ */
+ void assertNotCached(const string& solnJson) {
+ QuerySolution* bestSoln = firstMatchingSolution(solnJson);
+ ASSERT(NULL != bestSoln);
+ ASSERT(NULL == bestSoln->cacheData.get());
}
- // When a partial index is present, computeKey() should generate different keys depending on
- // whether or not the predicates in the given query "match" the predicates in the partial index
- // filter.
- TEST(PlanCacheTest, ComputeKeyPartialIndex) {
- BSONObj filterObj = BSON("f" << BSON("$gt" << 0));
- unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
-
- PlanCache planCache;
- planCache.notifyOfIndexEntries({IndexEntry(BSON("a" << 1),
- false, // multikey
- false, // sparse
- false, // unique
- "", // name
- filterExpr.get(),
- BSONObj())});
-
- unique_ptr<CanonicalQuery> cqGtNegativeFive(canonicalize("{f: {$gt: -5}}"));
- unique_ptr<CanonicalQuery> cqGtZero(canonicalize("{f: {$gt: 0}}"));
- unique_ptr<CanonicalQuery> cqGtFive(canonicalize("{f: {$gt: 5}}"));
-
- // 'cqGtZero' and 'cqGtFive' get the same key, since both are compatible with this index.
- ASSERT_EQ(planCache.computeKey(*cqGtZero), planCache.computeKey(*cqGtFive));
-
- // 'cqGtNegativeFive' gets a different key, since it is not compatible with this index.
- ASSERT_NOT_EQUALS(planCache.computeKey(*cqGtNegativeFive), planCache.computeKey(*cqGtZero));
+ static const PlanCacheKey ck;
+
+ BSONObj queryObj;
+ CanonicalQuery* cq;
+ QueryPlannerParams params;
+ vector<QuerySolution*> solns;
+};
+
+const PlanCacheKey CachePlanSelectionTest::ck = "mock_cache_key";
+
+//
+// Equality
+//
+
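+// A single equality predicate on an indexed field should be recovered from the cache
+// as a fetch over an index scan.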
+TEST_F(CachePlanSelectionTest, EqualityIndexScan) {
+ addIndex(BSON("x" << 1));
+ runQuery(BSON("x" << 5));
+
+ assertPlanCacheRecoversSolution(BSON("x" << 5),
+ "{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, EqualityIndexScanWithTrailingFields) {
+ addIndex(BSON("x" << 1 << "y" << 1));
+ runQuery(BSON("x" << 5));
+
+ assertPlanCacheRecoversSolution(
+ BSON("x" << 5), "{fetch: {filter: null, node: {ixscan: {pattern: {x: 1, y: 1}}}}}");
+}
+
+//
+// Geo
+//
+
+TEST_F(CachePlanSelectionTest, Basic2DSphereNonNear) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ BSONObj query;
+
+ query = fromjson(
+ "{a: {$geoIntersects: {$geometry: {type: 'Point',"
+ "coordinates: [10.0, 10.0]}}}}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+
+ query = fromjson("{a : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+}
+
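+// $nearSphere and GeoJSON $geoNear queries against a 2dsphere index should recover a
+// geoNear2dsphere plan from the cache.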
+TEST_F(CachePlanSelectionTest, Basic2DSphereGeoNear) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ BSONObj query;
+
+ query = fromjson("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {a: '2dsphere'}}");
+
+ query = fromjson(
+ "{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
+ "$maxDistance:100}}}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {a: '2dsphere'}}");
+}
+
+TEST_F(CachePlanSelectionTest, Basic2DSphereGeoNearReverseCompound) {
+ addIndex(BSON("x" << 1));
+ addIndex(BSON("x" << 1 << "a"
+ << "2dsphere"));
+ BSONObj query = fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {x: 1, a: '2dsphere'}}");
+}
+
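+// A compound index with a trailing 2dsphere component can be used, and cached, for a
+// query on only the leading non-geo field.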
+TEST_F(CachePlanSelectionTest, TwoDSphereNoGeoPred) {
+ addIndex(BSON("x" << 1 << "a"
+ << "2dsphere"));
+ runQuery(BSON("x" << 1));
+ assertPlanCacheRecoversSolution(BSON("x" << 1),
+ "{fetch: {node: {ixscan: {pattern: {x: 1, a: '2dsphere'}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, Or2DSphereNonNear) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ addIndex(BSON("b"
+ << "2dsphere"));
+ BSONObj query = fromjson(
+ "{$or: [ {a: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [10.0, 10.0]}}}},"
+ " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}");
+
+ runQuery(query);
+ assertPlanCacheRecoversSolution(
+ query,
+ "{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {b: '2dsphere'}}}}}]}}");
+}
+
+//
+// Tree operations
+//
+
+TEST_F(CachePlanSelectionTest, TwoPredicatesAnding) {
+ addIndex(BSON("x" << 1));
+ BSONObj query = fromjson("{$and: [ {x: {$gt: 1}}, {x: {$lt: 3}} ] }");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(
+ query, "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {x: 1}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, SimpleOr) {
+ addIndex(BSON("a" << 1));
+ BSONObj query = fromjson("{$or: [{a: 20}, {a: 21}]}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(
+ query, "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {a:1}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, OrWithAndChild) {
+ addIndex(BSON("a" << 1));
+ BSONObj query = fromjson("{$or: [{a: 20}, {$and: [{a:1}, {b:7}]}]}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query,
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a: 1}}}, "
+ "{fetch: {filter: {b: 7}, node: {ixscan: "
+ "{filter: null, pattern: {a: 1}}}}}]}}}}");
+}
+
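+// When the $or cannot use an index, it is applied as a residual filter over the indexed
+// 'a' predicate.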
+TEST_F(CachePlanSelectionTest, AndWithUnindexedOrChild) {
+ addIndex(BSON("a" << 1));
+ BSONObj query = fromjson("{a:20, $or: [{b:1}, {c:7}]}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query,
+ "{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+
+TEST_F(CachePlanSelectionTest, AndWithOrWithOneIndex) {
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("a" << 1));
+ BSONObj query = fromjson("{$or: [{b:1}, {c:7}], a:20}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query,
+ "{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+//
+// Sort orders
+//
+
+// SERVER-1205.
+TEST_F(CachePlanSelectionTest, MergeSort) {
+ addIndex(BSON("a" << 1 << "c" << 1));
+ addIndex(BSON("b" << 1 << "c" << 1));
+
+ BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
+ BSONObj sort = BSON("c" << 1);
+ runQuerySortProj(query, sort, BSONObj());
+
+ assertPlanCacheRecoversSolution(
+ query,
+ sort,
+ BSONObj(),
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a: 1, c: 1}}}, {ixscan: {pattern: {b: 1, c: 1}}}]}}}}");
+}
+
+// SERVER-1205 as well.
+TEST_F(CachePlanSelectionTest, NoMergeSortIfNoSortWanted) {
+ addIndex(BSON("a" << 1 << "c" << 1));
+ addIndex(BSON("b" << 1 << "c" << 1));
+
+ BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
+ runQuerySortProj(query, BSONObj(), BSONObj());
+
+ assertPlanCacheRecoversSolution(query,
+ BSONObj(),
+ BSONObj(),
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a: 1, c: 1}}}, "
+ "{ixscan: {filter: null, pattern: {b: 1, c: 1}}}]}}}}");
+}
+
+// Disabled: SERVER-10801.
+/*
+TEST_F(CachePlanSelectionTest, SortOnGeoQuery) {
+ addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"));
+ BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", "
+ "coordinates: [[[1, 1], [1, 90], [180, 90], "
+ "[180, 1], [1, 1]]]}}}}");
+ BSONObj sort = fromjson("{timestamp: -1}");
+ runQuerySortProj(query, sort, BSONObj());
+
+ assertPlanCacheRecoversSolution(query, sort, BSONObj(),
+ "{fetch: {node: {ixscan: {pattern: {timestamp: -1, position: '2dsphere'}}}}}");
+}
+*/
+
+// SERVER-9257
+TEST_F(CachePlanSelectionTest, CompoundGeoNoGeoPredicate) {
+ addIndex(BSON("creationDate" << 1 << "foo.bar"
+ << "2dsphere"));
+ BSONObj query = fromjson("{creationDate: {$gt: 7}}");
+ BSONObj sort = fromjson("{creationDate: 1}");
+ runQuerySortProj(query, sort, BSONObj());
+
+ assertPlanCacheRecoversSolution(
+ query,
+ sort,
+ BSONObj(),
+ "{fetch: {node: {ixscan: {pattern: {creationDate: 1, 'foo.bar': '2dsphere'}}}}}");
+}
+
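+// A sort on {_id: -1} is satisfied by scanning the {_id: 1} index in reverse; that plan
+// should be recoverable from the cache.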
+TEST_F(CachePlanSelectionTest, ReverseScanForSort) {
+ addIndex(BSON("_id" << 1));
+ runQuerySortProj(BSONObj(), fromjson("{_id: -1}"), BSONObj());
+ assertPlanCacheRecoversSolution(
+ BSONObj(),
+ fromjson("{_id: -1}"),
+ BSONObj(),
+ "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {_id: 1}}}}}");
+}
+
+//
+// Caching collection scans.
+//
+
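+// A collection scan chosen because no index is useful for the predicate is still cacheable.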
+TEST_F(CachePlanSelectionTest, CollscanNoUsefulIndices) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ addIndex(BSON("c" << 1));
+ runQuery(BSON("b" << 4));
+ assertPlanCacheRecoversSolution(BSON("b" << 4), "{cscan: {filter: {b: 4}, dir: 1}}");
+}
+
+TEST_F(CachePlanSelectionTest, CollscanOrWithoutEnoughIndices) {
+ addIndex(BSON("a" << 1));
+ BSONObj query = fromjson("{$or: [{a: 20}, {b: 21}]}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{cscan: {filter: {$or:[{a:20},{b:21}]}, dir: 1}}");
+}
+
+TEST_F(CachePlanSelectionTest, CollscanMergeSort) {
+ addIndex(BSON("a" << 1 << "c" << 1));
+ addIndex(BSON("b" << 1 << "c" << 1));
+
+ BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
+ BSONObj sort = BSON("c" << 1);
+ runQuerySortProj(query, sort, BSONObj());
+
+ assertPlanCacheRecoversSolution(
+ query, sort, BSONObj(), "{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+}
+
+//
+// Check queries that, at least for now, are not cached.
+//
+
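+// $near against a '2d' index produces a geoNear2d solution, which is not yet cacheable.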
+TEST_F(CachePlanSelectionTest, GeoNear2DNotCached) {
+ addIndex(BSON("a"
+ << "2d"));
+ runQuery(fromjson("{a: {$near: [0,0], $maxDistance:0.3 }}"));
+ assertNotCached("{geoNear2d: {a: '2d'}}");
+}
+
+TEST_F(CachePlanSelectionTest, MinNotCached) {
+ addIndex(BSON("a" << 1));
+ runQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), BSONObj());
+ assertNotCached(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, MaxNotCached) {
+ addIndex(BSON("a" << 1));
+ runQueryHintMinMax(BSONObj(), BSONObj(), BSONObj(), fromjson("{a: 1}"));
+ assertNotCached(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, NaturalHintNotCached) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuerySortHint(BSON("a" << 1), BSON("b" << 1), BSON("$natural" << 1));
+ assertNotCached(
+ "{sort: {pattern: {b: 1}, limit: 0, node: "
+ "{cscan: {filter: {a: 1}, dir: 1}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, HintValidNotCached) {
+ addIndex(BSON("a" << 1));
+ runQueryHint(BSONObj(), fromjson("{a: 1}"));
+ assertNotCached(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+//
+// Queries using '2d' indices are not cached.
+//
+
+TEST_F(CachePlanSelectionTest, Basic2DNonNearNotCached) {
+ addIndex(BSON("a"
+ << "2d"));
+ BSONObj query;
+
+ // Polygon
+ query = fromjson("{a : { $within: { $polygon : [[0,0], [2,0], [4,0]] } }}");
+ runQuery(query);
+ assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+
+ // Center
+ query = fromjson("{a : { $within : { $center : [[ 5, 5 ], 7 ] } }}");
+ runQuery(query);
+ assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+
+ // Centersphere
+ query = fromjson("{a : { $within : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}");
+ runQuery(query);
+ assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+
+ // Within box.
+ query = fromjson("{a : {$within: {$box : [[0,0],[9,9]]}}}");
+ runQuery(query);
+ assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, Or2DNonNearNotCached) {
+ addIndex(BSON("a"
+ << "2d"));
+ addIndex(BSON("b"
+ << "2d"));
+ BSONObj query = fromjson(
+ "{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {b : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}");
+
+ runQuery(query);
+ assertNotCached(
+ "{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {b: '2d'}}}}}]}}");
+}
+
+/**
+ * Test functions for computeKey. Cache keys are intentionally obfuscated and are
+ * meaningful only within the current lifetime of the server process. Users should treat plan
+ * cache keys as opaque.
+ */
+void testComputeKey(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ const char* expectedStr) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize(queryStr, sortStr, projStr));
+ PlanCacheKey key = planCache.computeKey(*cq);
+ PlanCacheKey expectedKey(expectedStr);
+ if (key == expectedKey) {
+ return;
}
+ str::stream ss;
+ ss << "Unexpected plan cache key. Expected: " << expectedKey << ". Actual: " << key
+ << ". Query: " << cq->toString();
+ FAIL(ss);
+}
+
+TEST(PlanCacheTest, ComputeKey) {
+ // Generated cache keys should be treated as opaque to the user.
+
+ // No sorts
+ testComputeKey("{}", "{}", "{}", "an");
+ testComputeKey("{$or: [{a: 1}, {b: 2}]}", "{}", "{}", "or[eqa,eqb]");
+ testComputeKey("{$or: [{a: 1}, {b: 1}, {c: 1}], d: 1}", "{}", "{}", "an[or[eqa,eqb,eqc],eqd]");
+ testComputeKey("{$or: [{a: 1}, {b: 1}], c: 1, d: 1}", "{}", "{}", "an[or[eqa,eqb],eqc,eqd]");
+ testComputeKey("{a: 1, b: 1, c: 1}", "{}", "{}", "an[eqa,eqb,eqc]");
+ testComputeKey("{a: 1, beqc: 1}", "{}", "{}", "an[eqa,eqbeqc]");
+ testComputeKey("{ap1a: 1}", "{}", "{}", "eqap1a");
+ testComputeKey("{aab: 1}", "{}", "{}", "eqaab");
+
+ // With sort
+ testComputeKey("{}", "{a: 1}", "{}", "an~aa");
+ testComputeKey("{}", "{a: -1}", "{}", "an~da");
+ testComputeKey("{}",
+ "{a: {$meta: 'textScore'}}",
+ "{a: {$meta: 'textScore'}}",
+ "an~ta|{ $meta: \"textScore\" }a");
+ testComputeKey("{a: 1}", "{b: 1}", "{}", "eqa~ab");
+
+ // With projection
+ testComputeKey("{}", "{}", "{a: 1}", "an|ia");
+ testComputeKey("{}", "{}", "{a: -1}", "an|ia");
+ testComputeKey("{}", "{}", "{a: -1.0}", "an|ia");
+ testComputeKey("{}", "{}", "{a: true}", "an|ia");
+ testComputeKey("{}", "{}", "{a: 0}", "an|ea");
+ testComputeKey("{}", "{}", "{a: false}", "an|ea");
+ testComputeKey("{}", "{}", "{a: 99}", "an|ia");
+ testComputeKey("{}", "{}", "{a: 'foo'}", "an|ia");
+ testComputeKey("{}", "{}", "{a: {$slice: [3, 5]}}", "an|{ $slice: \\[ 3\\, 5 \\] }a");
+ testComputeKey("{}", "{}", "{a: {$elemMatch: {x: 2}}}", "an|{ $elemMatch: { x: 2 } }a");
+ testComputeKey("{}", "{}", "{a: ObjectId('507f191e810c19729de860ea')}", "an|ia");
+ testComputeKey("{a: 1}", "{}", "{'a.$': 1}", "eqa|ia.$");
+ testComputeKey("{a: 1}", "{}", "{a: 1}", "eqa|ia");
+
+ // Projection should be order-insensitive
+ testComputeKey("{}", "{}", "{a: 1, b: 1}", "an|iaib");
+ testComputeKey("{}", "{}", "{b: 1, a: 1}", "an|iaib");
+
+ // With or-elimination and projection
+ testComputeKey("{$or: [{a: 1}]}", "{}", "{_id: 0, a: 1}", "eqa|e_idia");
+ testComputeKey("{$or: [{a: 1}]}", "{}", "{'a.$': 1}", "eqa|ia.$");
+}
+
+// Delimiters found in user field names or non-standard projection field values
+// must be escaped.
+TEST(PlanCacheTest, ComputeKeyEscaped) {
+ // Field name in query.
+ testComputeKey("{'a,[]~|<>': 1}", "{}", "{}", "eqa\\,\\[\\]\\~\\|\\<\\>");
+
+ // Field name in sort.
+ testComputeKey("{}", "{'a,[]~|<>': 1}", "{}", "an~aa\\,\\[\\]\\~\\|\\<\\>");
+
+ // Field name in projection.
+ testComputeKey("{}", "{}", "{'a,[]~|<>': 1}", "an|ia\\,\\[\\]\\~\\|\\<\\>");
+
+ // Value in projection.
+ testComputeKey("{}", "{}", "{a: 'foo,[]~|<>'}", "an|ia");
+}
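As a side note on the escaping these expected strings encode, here is a minimal standalone sketch of the idea: delimiter characters that carry meaning in the key encoding get a backslash prefix when they occur inside a user field name. The helper name and the exact character set are assumptions inferred from the expected keys above, not the server's actual encoder.

    #include <iostream>
    #include <string>

    std::string escapeKeyComponent(const std::string& fieldName) {
        std::string out;
        for (char c : fieldName) {
            // ',', '[', ']', '~', '|', '<', '>' act as delimiters in the keys above,
            // so they are escaped when they appear inside a field name.
            if (c == ',' || c == '[' || c == ']' || c == '~' || c == '|' || c == '<' || c == '>') {
                out.push_back('\\');
            }
            out.push_back(c);
        }
        return out;
    }

    int main() {
        // "a,[]~|<>" -> "a\,\[\]\~\|\<\>", matching the suffix of the expected keys above.
        std::cout << escapeKeyComponent("a,[]~|<>") << std::endl;
        return 0;
    }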
+
+// Cache keys for $geoWithin queries with legacy and GeoJSON coordinates should
+// not be the same.
+TEST(PlanCacheTest, ComputeKeyGeoWithin) {
+ PlanCache planCache;
+
+ // Legacy coordinates.
+ unique_ptr<CanonicalQuery> cqLegacy(canonicalize(
+ "{a: {$geoWithin: "
+ "{$box: [[-180, -90], [180, 90]]}}}"));
+ // GeoJSON coordinates.
+ unique_ptr<CanonicalQuery> cqNew(canonicalize(
+ "{a: {$geoWithin: "
+ "{$geometry: {type: 'Polygon', coordinates: "
+ "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
+ ASSERT_NOT_EQUALS(planCache.computeKey(*cqLegacy), planCache.computeKey(*cqNew));
+}
+
+// GEO_NEAR cache keys should include information on geometry and CRS in addition
+// to the match type and field name.
+TEST(PlanCacheTest, ComputeKeyGeoNear) {
+ testComputeKey("{a: {$near: [0,0], $maxDistance:0.3 }}", "{}", "{}", "gnanrfl");
+ testComputeKey("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}", "{}", "{}", "gnanssp");
+ testComputeKey(
+ "{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
+ "$maxDistance:100}}}",
+ "{}",
+ "{}",
+ "gnanrsp");
+}
+
+// When a sparse index is present, computeKey() should generate different keys depending on
+// whether or not the predicates in the given query can use the index.
+TEST(PlanCacheTest, ComputeKeySparseIndex) {
+ PlanCache planCache;
+ planCache.notifyOfIndexEntries({IndexEntry(BSON("a" << 1),
+ false, // multikey
+ true, // sparse
+ false, // unique
+ "", // name
+ nullptr, // filterExpr
+ BSONObj())});
+
+ unique_ptr<CanonicalQuery> cqEqNumber(canonicalize("{a: 0}}"));
+ unique_ptr<CanonicalQuery> cqEqString(canonicalize("{a: 'x'}}"));
+ unique_ptr<CanonicalQuery> cqEqNull(canonicalize("{a: null}}"));
+
+ // 'cqEqNumber' and 'cqEqString' get the same key, since both are compatible with this
+ // index.
+ ASSERT_EQ(planCache.computeKey(*cqEqNumber), planCache.computeKey(*cqEqString));
+
+ // 'cqEqNull' gets a different key, since it is not compatible with this index.
+ ASSERT_NOT_EQUALS(planCache.computeKey(*cqEqNull), planCache.computeKey(*cqEqNumber));
+}
+
+// When a partial index is present, computeKey() should generate different keys depending on
+// whether or not the predicates in the given query "match" the predicates in the partial index
+// filter.
+TEST(PlanCacheTest, ComputeKeyPartialIndex) {
+ BSONObj filterObj = BSON("f" << BSON("$gt" << 0));
+ unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
+
+ PlanCache planCache;
+ planCache.notifyOfIndexEntries({IndexEntry(BSON("a" << 1),
+ false, // multikey
+ false, // sparse
+ false, // unique
+ "", // name
+ filterExpr.get(),
+ BSONObj())});
+
+ unique_ptr<CanonicalQuery> cqGtNegativeFive(canonicalize("{f: {$gt: -5}}"));
+ unique_ptr<CanonicalQuery> cqGtZero(canonicalize("{f: {$gt: 0}}"));
+ unique_ptr<CanonicalQuery> cqGtFive(canonicalize("{f: {$gt: 5}}"));
+
+ // 'cqGtZero' and 'cqGtFive' get the same key, since both are compatible with this index.
+ ASSERT_EQ(planCache.computeKey(*cqGtZero), planCache.computeKey(*cqGtFive));
+
+ // 'cqGtNegativeFive' gets a different key, since it is not compatible with this index.
+ ASSERT_NOT_EQUALS(planCache.computeKey(*cqGtNegativeFive), planCache.computeKey(*cqGtZero));
+}
} // namespace
diff --git a/src/mongo/db/query/plan_enumerator.cpp b/src/mongo/db/query/plan_enumerator.cpp
index ef1f23997d0..20049790c0b 100644
--- a/src/mongo/db/query/plan_enumerator.cpp
+++ b/src/mongo/db/query/plan_enumerator.cpp
@@ -38,1229 +38,1206 @@
namespace {
- using namespace mongo;
- using std::unique_ptr;
- using std::endl;
- using std::set;
- using std::string;
- using std::vector;
-
- std::string getPathPrefix(std::string path) {
- if (mongoutils::str::contains(path, '.')) {
- return mongoutils::str::before(path, '.');
- }
- else {
- return path;
- }
+using namespace mongo;
+using std::unique_ptr;
+using std::endl;
+using std::set;
+using std::string;
+using std::vector;
+
+std::string getPathPrefix(std::string path) {
+ if (mongoutils::str::contains(path, '.')) {
+ return mongoutils::str::before(path, '.');
+ } else {
+ return path;
}
+}
- /**
- * Returns true if either 'node' or a descendent of 'node'
- * is a predicate that is required to use an index.
- */
- bool expressionRequiresIndex(const MatchExpression* node) {
- return CanonicalQuery::countNodes(node, MatchExpression::GEO_NEAR) > 0
- || CanonicalQuery::countNodes(node, MatchExpression::TEXT) > 0;
- }
+/**
+ * Returns true if either 'node' or a descendant of 'node'
+ * is a predicate that is required to use an index.
+ */
+bool expressionRequiresIndex(const MatchExpression* node) {
+ return CanonicalQuery::countNodes(node, MatchExpression::GEO_NEAR) > 0 ||
+ CanonicalQuery::countNodes(node, MatchExpression::TEXT) > 0;
+}
-} // namespace
+} // namespace
namespace mongo {
- PlanEnumerator::PlanEnumerator(const PlanEnumeratorParams& params)
- : _root(params.root),
- _indices(params.indices),
- _ixisect(params.intersect),
- _orLimit(params.maxSolutionsPerOr),
- _intersectLimit(params.maxIntersectPerAnd) { }
-
- PlanEnumerator::~PlanEnumerator() {
- typedef unordered_map<MemoID, NodeAssignment*> MemoMap;
- for (MemoMap::iterator it = _memo.begin(); it != _memo.end(); ++it) {
- delete it->second;
- }
+PlanEnumerator::PlanEnumerator(const PlanEnumeratorParams& params)
+ : _root(params.root),
+ _indices(params.indices),
+ _ixisect(params.intersect),
+ _orLimit(params.maxSolutionsPerOr),
+ _intersectLimit(params.maxIntersectPerAnd) {}
+
+PlanEnumerator::~PlanEnumerator() {
+ typedef unordered_map<MemoID, NodeAssignment*> MemoMap;
+ for (MemoMap::iterator it = _memo.begin(); it != _memo.end(); ++it) {
+ delete it->second;
}
+}
+
+Status PlanEnumerator::init() {
+ // Fill out our memo structure from the tagged _root.
+ _done = !prepMemo(_root, PrepMemoContext());
+
+ // Dump the tags. We replace them with IndexTag instances.
+ _root->resetTag();
- Status PlanEnumerator::init() {
- // Fill out our memo structure from the tagged _root.
- _done = !prepMemo(_root, PrepMemoContext());
+ return Status::OK();
+}
- // Dump the tags. We replace them with IndexTag instances.
- _root->resetTag();
+std::string PlanEnumerator::dumpMemo() {
+ mongoutils::str::stream ss;
- return Status::OK();
+ // Note that this needs to be kept in sync with allocateAssignment which assigns memo IDs.
+ for (size_t i = 1; i < _memo.size(); ++i) {
}
+ return ss;
+}
- std::string PlanEnumerator::dumpMemo() {
+string PlanEnumerator::NodeAssignment::toString() const {
+ if (NULL != pred) {
mongoutils::str::stream ss;
-
- // Note that this needs to be kept in sync with allocateAssignment which assigns memo IDs.
- for (size_t i = 1; i < _memo.size(); ++i) {
- ss << "[Node #" << i << "]: " << _memo[i]->toString() << "\n";
+ ss << "predicate\n";
+ ss << "\tfirst indices: [";
+ for (size_t i = 0; i < pred->first.size(); ++i) {
+ ss << pred->first[i];
+ if (i < pred->first.size() - 1)
+ ss << ", ";
}
+ ss << "]\n";
+ ss << "\tpred: " << pred->expr->toString();
+ ss << "\tindexToAssign: " << pred->indexToAssign;
return ss;
- }
-
- string PlanEnumerator::NodeAssignment::toString() const {
- if (NULL != pred) {
- mongoutils::str::stream ss;
- ss << "predicate\n";
- ss << "\tfirst indices: [";
- for (size_t i = 0; i < pred->first.size(); ++i) {
- ss << pred->first[i];
- if (i < pred->first.size() - 1)
- ss << ", ";
+ } else if (NULL != andAssignment) {
+ mongoutils::str::stream ss;
+ ss << "AND enumstate counter " << andAssignment->counter;
+ for (size_t i = 0; i < andAssignment->choices.size(); ++i) {
+ ss << "\n\tchoice " << i << ":\n";
+ const AndEnumerableState& state = andAssignment->choices[i];
+ ss << "\t\tsubnodes: ";
+ for (size_t j = 0; j < state.subnodesToIndex.size(); ++j) {
+ ss << state.subnodesToIndex[j] << " ";
}
- ss << "]\n";
- ss << "\tpred: " << pred->expr->toString();
- ss << "\tindexToAssign: " << pred->indexToAssign;
- return ss;
- }
- else if (NULL != andAssignment) {
- mongoutils::str::stream ss;
- ss << "AND enumstate counter " << andAssignment->counter;
- for (size_t i = 0; i < andAssignment->choices.size(); ++i) {
- ss << "\n\tchoice " << i << ":\n";
- const AndEnumerableState& state = andAssignment->choices[i];
- ss << "\t\tsubnodes: ";
- for (size_t j = 0; j < state.subnodesToIndex.size(); ++j) {
- ss << state.subnodesToIndex[j] << " ";
- }
- ss << '\n';
- for (size_t j = 0; j < state.assignments.size(); ++j) {
- const OneIndexAssignment& oie = state.assignments[j];
- ss << "\t\tidx[" << oie.index << "]\n";
-
- for (size_t k = 0; k < oie.preds.size(); ++k) {
- ss << "\t\t\tpos " << oie.positions[k]
- << " pred " << oie.preds[k]->toString();
- }
+ ss << '\n';
+ for (size_t j = 0; j < state.assignments.size(); ++j) {
+ const OneIndexAssignment& oie = state.assignments[j];
+ ss << "\t\tidx[" << oie.index << "]\n";
+
+ for (size_t k = 0; k < oie.preds.size(); ++k) {
+ ss << "\t\t\tpos " << oie.positions[k] << " pred " << oie.preds[k]->toString();
}
}
- return ss;
}
- else if (NULL != arrayAssignment) {
- mongoutils::str::stream ss;
- ss << "ARRAY SUBNODES enumstate " << arrayAssignment->counter << "/ ONE OF: [ ";
- for (size_t i = 0; i < arrayAssignment->subnodes.size(); ++i) {
- ss << arrayAssignment->subnodes[i] << " ";
- }
- ss << "]";
- return ss;
+ return ss;
+ } else if (NULL != arrayAssignment) {
+ mongoutils::str::stream ss;
+ ss << "ARRAY SUBNODES enumstate " << arrayAssignment->counter << "/ ONE OF: [ ";
+ for (size_t i = 0; i < arrayAssignment->subnodes.size(); ++i) {
+ ss << arrayAssignment->subnodes[i] << " ";
}
- else {
- verify(NULL != orAssignment);
- mongoutils::str::stream ss;
- ss << "ALL OF: [ ";
- for (size_t i = 0; i < orAssignment->subnodes.size(); ++i) {
- ss << orAssignment->subnodes[i] << " ";
- }
- ss << "]";
- return ss;
+ ss << "]";
+ return ss;
+ } else {
+ verify(NULL != orAssignment);
+ mongoutils::str::stream ss;
+ ss << "ALL OF: [ ";
+ for (size_t i = 0; i < orAssignment->subnodes.size(); ++i) {
+ ss << orAssignment->subnodes[i] << " ";
}
+ ss << "]";
+ return ss;
}
+}
- PlanEnumerator::MemoID PlanEnumerator::memoIDForNode(MatchExpression* node) {
- unordered_map<MatchExpression*, MemoID>::iterator it = _nodeToId.find(node);
+PlanEnumerator::MemoID PlanEnumerator::memoIDForNode(MatchExpression* node) {
+ unordered_map<MatchExpression*, MemoID>::iterator it = _nodeToId.find(node);
- if (_nodeToId.end() == it) {
- error() << "Trying to look up memo entry for node, none found.";
- invariant(0);
- }
-
- return it->second;
+ if (_nodeToId.end() == it) {
+ error() << "Trying to look up memo entry for node, none found.";
+ invariant(0);
}
- bool PlanEnumerator::getNext(MatchExpression** tree) {
- if (_done) { return false; }
-
- // Tag with our first solution.
- tagMemo(memoIDForNode(_root));
+ return it->second;
+}
- *tree = _root->shallowClone();
- tagForSort(*tree);
- sortUsingTags(*tree);
-
- _root->resetTag();
- LOG(5) << "Enumerator: memo just before moving:" << endl << dumpMemo();
- _done = nextMemo(memoIDForNode(_root));
- return true;
+bool PlanEnumerator::getNext(MatchExpression** tree) {
+ if (_done) {
+ return false;
}
- //
- // Structure creation
- //
+ // Tag with our first solution.
+ tagMemo(memoIDForNode(_root));
+
+ *tree = _root->shallowClone();
+ tagForSort(*tree);
+ sortUsingTags(*tree);
+
+ _root->resetTag();
+ LOG(5) << "Enumerator: memo just before moving:" << endl
+ << dumpMemo();
+ _done = nextMemo(memoIDForNode(_root));
+ return true;
+}
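As context for getNext() above, a rough sketch of the loop a caller would use to drive the enumerator. It assumes a 'params' object already populated with the tagged root and candidate indices, and it is an illustration of the interface shown in this diff rather than a verbatim excerpt of the planner.

    PlanEnumerator enumerator(params);  // 'params' assumed to be filled in by the caller.
    Status initStatus = enumerator.init();
    if (!initStatus.isOK()) {
        // Enumeration cannot proceed; surface the error to the caller.
        return initStatus;
    }

    MatchExpression* tagged;
    while (enumerator.getNext(&tagged)) {
        // Each iteration yields a clone of the root tagged with one index
        // assignment; the caller takes ownership and builds a candidate plan
        // from it before asking for the next enumeration state.
        std::unique_ptr<MatchExpression> ownedTagged(tagged);
        // ... turn 'ownedTagged' into a candidate solution ...
    }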
+
+//
+// Structure creation
+//
+
+void PlanEnumerator::allocateAssignment(MatchExpression* expr,
+ NodeAssignment** assign,
+ MemoID* id) {
+ // We start at 1 so that the lookup of any entries not explicitly allocated
+ // will refer to an invalid memo slot.
+ size_t newID = _memo.size() + 1;
+
+ // Shouldn't be anything there already.
+ verify(_nodeToId.end() == _nodeToId.find(expr));
+ _nodeToId[expr] = newID;
+ verify(_memo.end() == _memo.find(newID));
+ NodeAssignment* newAssignment = new NodeAssignment();
+ _memo[newID] = newAssignment;
+ *assign = newAssignment;
+ *id = newID;
+}
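A small aside on the comment above about starting IDs at 1: with value-initialized map lookups, an entry that was never allocated reads back as 0, which can then never collide with a real memo ID. The following self-contained snippet illustrates that convention with simplified stand-in types; it is not the enumerator's own code.

    #include <cassert>
    #include <cstddef>
    #include <unordered_map>

    int main() {
        using MemoID = std::size_t;
        std::unordered_map<int, MemoID> nodeToId;

        // Allocate IDs starting at 1, mirroring the convention above.
        MemoID nextId = 1;
        nodeToId[42] = nextId++;

        // operator[] value-initializes missing entries, so an unallocated
        // node reads back as 0 -- an ID that is never handed out.
        assert(nodeToId[42] == 1);
        assert(nodeToId[7] == 0);  // Never allocated: the "invalid memo slot".
        return 0;
    }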
+
+bool PlanEnumerator::prepMemo(MatchExpression* node, PrepMemoContext context) {
+ PrepMemoContext childContext;
+ childContext.elemMatchExpr = context.elemMatchExpr;
+ if (Indexability::nodeCanUseIndexOnOwnField(node)) {
+ // We only get here if our parent is an OR, an array operator, or we're the root.
+
+ // If we have no index tag there are no indices we can use.
+ if (NULL == node->getTag()) {
+ return false;
+ }
- void PlanEnumerator::allocateAssignment(MatchExpression* expr,
- NodeAssignment** assign,
- MemoID* id) {
- // We start at 1 so that the lookup of any entries not explicitly allocated
- // will refer to an invalid memo slot.
- size_t newID = _memo.size() + 1;
-
- // Shouldn't be anything there already.
- verify(_nodeToId.end() == _nodeToId.find(expr));
- _nodeToId[expr] = newID;
- verify(_memo.end() == _memo.find(newID));
- NodeAssignment* newAssignment = new NodeAssignment();
- _memo[newID] = newAssignment;
- *assign = newAssignment;
- *id = newID;
- }
+ RelevantTag* rt = static_cast<RelevantTag*>(node->getTag());
+ // In order to definitely use an index it must be prefixed with our field.
+ // We don't consider notFirst indices here because we must be AND-related to a node
+ // that uses the first spot in that index, and we currently do not know that
+ // unless we're in an AND node.
+ if (0 == rt->first.size()) {
+ return false;
+ }
- bool PlanEnumerator::prepMemo(MatchExpression* node, PrepMemoContext context) {
- PrepMemoContext childContext;
- childContext.elemMatchExpr = context.elemMatchExpr;
- if (Indexability::nodeCanUseIndexOnOwnField(node)) {
- // We only get here if our parent is an OR, an array operator, or we're the root.
-
- // If we have no index tag there are no indices we can use.
- if (NULL == node->getTag()) { return false; }
-
- RelevantTag* rt = static_cast<RelevantTag*>(node->getTag());
- // In order to definitely use an index it must be prefixed with our field.
- // We don't consider notFirst indices here because we must be AND-related to a node
- // that uses the first spot in that index, and we currently do not know that
- // unless we're in an AND node.
- if (0 == rt->first.size()) { return false; }
-
- // We know we can use an index, so grab a memo spot.
- size_t myMemoID;
- NodeAssignment* assign;
- allocateAssignment(node, &assign, &myMemoID);
-
- assign->pred.reset(new PredicateAssignment());
- assign->pred->expr = node;
- assign->pred->first.swap(rt->first);
- return true;
+ // We know we can use an index, so grab a memo spot.
+ size_t myMemoID;
+ NodeAssignment* assign;
+ allocateAssignment(node, &assign, &myMemoID);
+
+ assign->pred.reset(new PredicateAssignment());
+ assign->pred->expr = node;
+ assign->pred->first.swap(rt->first);
+ return true;
+ } else if (Indexability::isBoundsGeneratingNot(node)) {
+ bool childIndexable = prepMemo(node->getChild(0), childContext);
+ // If the child isn't indexable then bail out now.
+ if (!childIndexable) {
+ return false;
}
- else if (Indexability::isBoundsGeneratingNot(node)) {
- bool childIndexable = prepMemo(node->getChild(0), childContext);
- // If the child isn't indexable then bail out now.
- if (!childIndexable) {
+
+ // Our parent node, if any exists, will expect a memo entry keyed on 'node'. As such we
+ // have the node ID for 'node' just point to the memo created for the child that
+ // actually generates the bounds.
+ size_t myMemoID;
+ NodeAssignment* assign;
+ allocateAssignment(node, &assign, &myMemoID);
+ OrAssignment* orAssignment = new OrAssignment();
+ orAssignment->subnodes.push_back(memoIDForNode(node->getChild(0)));
+ assign->orAssignment.reset(orAssignment);
+ return true;
+ } else if (MatchExpression::OR == node->matchType()) {
+ // For an OR to be indexed, all its children must be indexed.
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ if (!prepMemo(node->getChild(i), childContext)) {
return false;
}
-
- // Our parent node, if any exists, will expect a memo entry keyed on 'node'. As such we
- // have the node ID for 'node' just point to the memo created for the child that
- // actually generates the bounds.
- size_t myMemoID;
- NodeAssignment* assign;
- allocateAssignment(node, &assign, &myMemoID);
- OrAssignment* orAssignment = new OrAssignment();
- orAssignment->subnodes.push_back(memoIDForNode(node->getChild(0)));
- assign->orAssignment.reset(orAssignment);
- return true;
}
- else if (MatchExpression::OR == node->matchType()) {
- // For an OR to be indexed, all its children must be indexed.
- for (size_t i = 0; i < node->numChildren(); ++i) {
- if (!prepMemo(node->getChild(i), childContext)) {
- return false;
- }
- }
- // If we're here we're fully indexed and can be in the memo.
- size_t myMemoID;
- NodeAssignment* assign;
- allocateAssignment(node, &assign, &myMemoID);
+ // If we're here we're fully indexed and can be in the memo.
+ size_t myMemoID;
+ NodeAssignment* assign;
+ allocateAssignment(node, &assign, &myMemoID);
- OrAssignment* orAssignment = new OrAssignment();
- for (size_t i = 0; i < node->numChildren(); ++i) {
- orAssignment->subnodes.push_back(memoIDForNode(node->getChild(i)));
- }
- assign->orAssignment.reset(orAssignment);
- return true;
+ OrAssignment* orAssignment = new OrAssignment();
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ orAssignment->subnodes.push_back(memoIDForNode(node->getChild(i)));
}
- else if (Indexability::arrayUsesIndexOnChildren(node)) {
- // Add each of our children as a subnode. We enumerate through each subnode one at a
- // time until it's exhausted then we move on.
- unique_ptr<ArrayAssignment> aa(new ArrayAssignment());
+ assign->orAssignment.reset(orAssignment);
+ return true;
+ } else if (Indexability::arrayUsesIndexOnChildren(node)) {
+ // Add each of our children as a subnode. We enumerate through each subnode one at a
+ // time until it's exhausted then we move on.
+ unique_ptr<ArrayAssignment> aa(new ArrayAssignment());
- if (MatchExpression::ELEM_MATCH_OBJECT == node->matchType()) {
- childContext.elemMatchExpr = node;
- }
+ if (MatchExpression::ELEM_MATCH_OBJECT == node->matchType()) {
+ childContext.elemMatchExpr = node;
+ }
- // For an OR to be indexed, all its children must be indexed.
- for (size_t i = 0; i < node->numChildren(); ++i) {
- if (prepMemo(node->getChild(i), childContext)) {
- aa->subnodes.push_back(memoIDForNode(node->getChild(i)));
- }
+ // For an OR to be indexed, all its children must be indexed.
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ if (prepMemo(node->getChild(i), childContext)) {
+ aa->subnodes.push_back(memoIDForNode(node->getChild(i)));
}
+ }
- if (0 == aa->subnodes.size()) { return false; }
-
- size_t myMemoID;
- NodeAssignment* assign;
- allocateAssignment(node, &assign, &myMemoID);
-
- assign->arrayAssignment.reset(aa.release());
- return true;
+ if (0 == aa->subnodes.size()) {
+ return false;
}
- else if (MatchExpression::AND == node->matchType()) {
- // Map from idx id to children that have a pred over it.
-
- // TODO: The index intersection logic could be simplified if we could iterate over these
- // maps in a known order. Currently when iterating over these maps we have to impose an
- // ordering on each individual pair of indices in order to make sure that the
- // enumeration results are order-independent. See SERVER-12196.
- IndexToPredMap idxToFirst;
- IndexToPredMap idxToNotFirst;
-
- // Children that aren't predicates, and which do not necessarily need
- // to use an index.
- vector<MemoID> subnodes;
-
- // Children that aren't predicates, but which *must* use an index.
- // (e.g. an OR which contains a TEXT child).
- vector<MemoID> mandatorySubnodes;
-
- // A list of predicates contained in the subtree rooted at 'node'
- // obtained by traversing deeply through $and and $elemMatch children.
- vector<MatchExpression*> indexedPreds;
-
- // Partition the childen into the children that aren't predicates which may or may
- // not be indexed ('subnodes'), children that aren't predicates which must use the
- // index ('mandatorySubnodes'). and children that are predicates ('indexedPreds').
- //
- // We have to get the subnodes with mandatory assignments rather than adding the
- // mandatory preds to 'indexedPreds'. Adding the mandatory preds directly to
- // 'indexedPreds' would lead to problems such as pulling a predicate beneath an OR
- // into a set joined by an AND.
- if (!partitionPreds(node, childContext, &indexedPreds,
- &subnodes, &mandatorySubnodes)) {
- return false;
- }
- if (mandatorySubnodes.size() > 1) {
- return false;
- }
+ size_t myMemoID;
+ NodeAssignment* assign;
+ allocateAssignment(node, &assign, &myMemoID);
- // There can only be one mandatory predicate (at most one $text, at most one
- // $geoNear, can't combine $text/$geoNear).
- MatchExpression* mandatoryPred = NULL;
+ assign->arrayAssignment.reset(aa.release());
+ return true;
+ } else if (MatchExpression::AND == node->matchType()) {
+ // Map from idx id to children that have a pred over it.
+
+ // TODO: The index intersection logic could be simplified if we could iterate over these
+ // maps in a known order. Currently when iterating over these maps we have to impose an
+ // ordering on each individual pair of indices in order to make sure that the
+ // enumeration results are order-independent. See SERVER-12196.
+ IndexToPredMap idxToFirst;
+ IndexToPredMap idxToNotFirst;
+
+ // Children that aren't predicates, and which do not necessarily need
+ // to use an index.
+ vector<MemoID> subnodes;
+
+ // Children that aren't predicates, but which *must* use an index.
+ // (e.g. an OR which contains a TEXT child).
+ vector<MemoID> mandatorySubnodes;
+
+ // A list of predicates contained in the subtree rooted at 'node'
+ // obtained by traversing deeply through $and and $elemMatch children.
+ vector<MatchExpression*> indexedPreds;
+
+ // Partition the children into the children that aren't predicates which may or may
+ // not be indexed ('subnodes'), children that aren't predicates which must use the
+ // index ('mandatorySubnodes'), and children that are predicates ('indexedPreds').
+ //
+ // We have to get the subnodes with mandatory assignments rather than adding the
+ // mandatory preds to 'indexedPreds'. Adding the mandatory preds directly to
+ // 'indexedPreds' would lead to problems such as pulling a predicate beneath an OR
+ // into a set joined by an AND.
+ if (!partitionPreds(node, childContext, &indexedPreds, &subnodes, &mandatorySubnodes)) {
+ return false;
+ }
- // There could be multiple indices which we could use to satisfy the mandatory
- // predicate. Keep the set of such indices. Currently only one text index is
- // allowed per collection, but there could be multiple 2d or 2dsphere indices
- // available to answer a $geoNear predicate.
- set<IndexID> mandatoryIndices;
+ if (mandatorySubnodes.size() > 1) {
+ return false;
+ }
- // Go through 'indexedPreds' and add the predicates to the
- // 'idxToFirst' and 'idxToNotFirst' maps.
- for (size_t i = 0; i < indexedPreds.size(); ++i) {
- MatchExpression* child = indexedPreds[i];
+ // There can only be one mandatory predicate (at most one $text, at most one
+ // $geoNear, can't combine $text/$geoNear).
+ MatchExpression* mandatoryPred = NULL;
- invariant(Indexability::nodeCanUseIndexOnOwnField(child));
+ // There could be multiple indices which we could use to satisfy the mandatory
+ // predicate. Keep the set of such indices. Currently only one text index is
+ // allowed per collection, but there could be multiple 2d or 2dsphere indices
+ // available to answer a $geoNear predicate.
+ set<IndexID> mandatoryIndices;
- RelevantTag* rt = static_cast<RelevantTag*>(child->getTag());
+ // Go through 'indexedPreds' and add the predicates to the
+ // 'idxToFirst' and 'idxToNotFirst' maps.
+ for (size_t i = 0; i < indexedPreds.size(); ++i) {
+ MatchExpression* child = indexedPreds[i];
- if (expressionRequiresIndex(child)) {
- // 'child' is a predicate which *must* be tagged with an index.
- // This should include only TEXT and GEO_NEAR preds.
+ invariant(Indexability::nodeCanUseIndexOnOwnField(child));
- // We expect either 0 or 1 mandatory predicates.
- invariant(NULL == mandatoryPred);
+ RelevantTag* rt = static_cast<RelevantTag*>(child->getTag());
- // Mandatory predicates are TEXT or GEO_NEAR.
- invariant(MatchExpression::TEXT == child->matchType() ||
- MatchExpression::GEO_NEAR == child->matchType());
+ if (expressionRequiresIndex(child)) {
+ // 'child' is a predicate which *must* be tagged with an index.
+ // This should include only TEXT and GEO_NEAR preds.
- // The mandatory predicate must have a corresponding "mandatory index".
- invariant(rt->first.size() != 0 || rt->notFirst.size() != 0);
+ // We expect either 0 or 1 mandatory predicates.
+ invariant(NULL == mandatoryPred);
- mandatoryPred = child;
+ // Mandatory predicates are TEXT or GEO_NEAR.
+ invariant(MatchExpression::TEXT == child->matchType() ||
+ MatchExpression::GEO_NEAR == child->matchType());
- // Find all of the indices that could be used to satisfy the pred,
- // and add them to the 'mandatoryIndices' set.
- mandatoryIndices.insert(rt->first.begin(), rt->first.end());
- mandatoryIndices.insert(rt->notFirst.begin(), rt->notFirst.end());
- }
+ // The mandatory predicate must have a corresponding "mandatory index".
+ invariant(rt->first.size() != 0 || rt->notFirst.size() != 0);
- for (size_t j = 0; j < rt->first.size(); ++j) {
- idxToFirst[rt->first[j]].push_back(child);
- }
+ mandatoryPred = child;
- for (size_t j = 0 ; j< rt->notFirst.size(); ++j) {
- idxToNotFirst[rt->notFirst[j]].push_back(child);
- }
+ // Find all of the indices that could be used to satisfy the pred,
+ // and add them to the 'mandatoryIndices' set.
+ mandatoryIndices.insert(rt->first.begin(), rt->first.end());
+ mandatoryIndices.insert(rt->notFirst.begin(), rt->notFirst.end());
}
- // If none of our children can use indices, bail out.
- if (idxToFirst.empty()
- && (subnodes.size() == 0)
- && (mandatorySubnodes.size() == 0)) {
- return false;
+ for (size_t j = 0; j < rt->first.size(); ++j) {
+ idxToFirst[rt->first[j]].push_back(child);
}
- // At least one child can use an index, so we can create a memo entry.
- AndAssignment* andAssignment = new AndAssignment();
-
- size_t myMemoID;
- NodeAssignment* nodeAssignment;
- allocateAssignment(node, &nodeAssignment, &myMemoID);
- // Takes ownership.
- nodeAssignment->andAssignment.reset(andAssignment);
-
- // Predicates which must use an index might be buried inside
- // a subnode. Handle that case here.
- if (1 == mandatorySubnodes.size()) {
- AndEnumerableState aes;
- aes.subnodesToIndex.push_back(mandatorySubnodes[0]);
- andAssignment->choices.push_back(aes);
- return true;
+ for (size_t j = 0; j < rt->notFirst.size(); ++j) {
+ idxToNotFirst[rt->notFirst[j]].push_back(child);
}
+ }
- if (NULL != mandatoryPred) {
- // We must have at least one index which can be used to answer 'mandatoryPred'.
- invariant(!mandatoryIndices.empty());
- return enumerateMandatoryIndex(idxToFirst, idxToNotFirst, mandatoryPred,
- mandatoryIndices, andAssignment);
- }
+ // If none of our children can use indices, bail out.
+ if (idxToFirst.empty() && (subnodes.size() == 0) && (mandatorySubnodes.size() == 0)) {
+ return false;
+ }
- enumerateOneIndex(idxToFirst, idxToNotFirst, subnodes, andAssignment);
+ // At least one child can use an index, so we can create a memo entry.
+ AndAssignment* andAssignment = new AndAssignment();
- if (_ixisect) {
- enumerateAndIntersect(idxToFirst, idxToNotFirst, subnodes, andAssignment);
- }
+ size_t myMemoID;
+ NodeAssignment* nodeAssignment;
+ allocateAssignment(node, &nodeAssignment, &myMemoID);
+ // Takes ownership.
+ nodeAssignment->andAssignment.reset(andAssignment);
+ // Predicates which must use an index might be buried inside
+ // a subnode. Handle that case here.
+ if (1 == mandatorySubnodes.size()) {
+ AndEnumerableState aes;
+ aes.subnodesToIndex.push_back(mandatorySubnodes[0]);
+ andAssignment->choices.push_back(aes);
return true;
}
- // Don't know what the node is at this point.
- return false;
+ if (NULL != mandatoryPred) {
+ // We must have at least one index which can be used to answer 'mandatoryPred'.
+ invariant(!mandatoryIndices.empty());
+ return enumerateMandatoryIndex(
+ idxToFirst, idxToNotFirst, mandatoryPred, mandatoryIndices, andAssignment);
+ }
+
+ enumerateOneIndex(idxToFirst, idxToNotFirst, subnodes, andAssignment);
+
+ if (_ixisect) {
+ enumerateAndIntersect(idxToFirst, idxToNotFirst, subnodes, andAssignment);
+ }
+
+ return true;
}
- bool PlanEnumerator::enumerateMandatoryIndex(const IndexToPredMap& idxToFirst,
- const IndexToPredMap& idxToNotFirst,
- MatchExpression* mandatoryPred,
- const set<IndexID>& mandatoryIndices,
- AndAssignment* andAssignment) {
- // Generate index assignments for each index in 'mandatoryIndices'. We
- // must assign 'mandatoryPred' to one of these indices, but we try all
- // possibilities in 'mandatoryIndices' because some might be better than
- // others for this query.
- for (set<IndexID>::const_iterator indexIt = mandatoryIndices.begin();
- indexIt != mandatoryIndices.end();
- ++indexIt) {
-
- // We have a predicate which *must* be tagged to use an index.
- // Get the index entry for the index it should use.
- const IndexEntry& thisIndex = (*_indices)[*indexIt];
-
- // Only text, 2d, and 2dsphere index types should be able to satisfy
- // mandatory predicates.
- invariant(INDEX_TEXT == thisIndex.type ||
- INDEX_2D == thisIndex.type ||
- INDEX_2DSPHERE == thisIndex.type);
-
- OneIndexAssignment indexAssign;
- indexAssign.index = *indexIt;
-
- IndexToPredMap::const_iterator it = idxToFirst.find(*indexIt);
- if (idxToFirst.end() == it) {
- // We don't have any predicate to assign to the leading field of this index.
- // This means that we cannot generate a solution using this index, so we
- // just move on to the next index.
- continue;
- }
+ // Don't know what the node is at this point.
+ return false;
+}
+
+bool PlanEnumerator::enumerateMandatoryIndex(const IndexToPredMap& idxToFirst,
+ const IndexToPredMap& idxToNotFirst,
+ MatchExpression* mandatoryPred,
+ const set<IndexID>& mandatoryIndices,
+ AndAssignment* andAssignment) {
+ // Generate index assignments for each index in 'mandatoryIndices'. We
+ // must assign 'mandatoryPred' to one of these indices, but we try all
+ // possibilities in 'mandatoryIndices' because some might be better than
+ // others for this query.
+ for (set<IndexID>::const_iterator indexIt = mandatoryIndices.begin();
+ indexIt != mandatoryIndices.end();
+ ++indexIt) {
+ // We have a predicate which *must* be tagged to use an index.
+ // Get the index entry for the index it should use.
+ const IndexEntry& thisIndex = (*_indices)[*indexIt];
+
+ // Only text, 2d, and 2dsphere index types should be able to satisfy
+ // mandatory predicates.
+ invariant(INDEX_TEXT == thisIndex.type || INDEX_2D == thisIndex.type ||
+ INDEX_2DSPHERE == thisIndex.type);
+
+ OneIndexAssignment indexAssign;
+ indexAssign.index = *indexIt;
+
+ IndexToPredMap::const_iterator it = idxToFirst.find(*indexIt);
+ if (idxToFirst.end() == it) {
+ // We don't have any predicate to assign to the leading field of this index.
+ // This means that we cannot generate a solution using this index, so we
+ // just move on to the next index.
+ continue;
+ }
- const vector<MatchExpression*>& predsOverLeadingField = it->second;
-
- if (thisIndex.multikey) {
- // Special handling for multikey mandatory indices.
- if (predsOverLeadingField.end() != std::find(predsOverLeadingField.begin(),
- predsOverLeadingField.end(),
- mandatoryPred)) {
- // The mandatory predicate is over the first field of the index. Assign
- // it now.
- indexAssign.preds.push_back(mandatoryPred);
- indexAssign.positions.push_back(0);
- }
- else {
- // The mandatory pred is notFirst. Assign an arbitrary predicate
- // over the first position.
- invariant(!predsOverLeadingField.empty());
- indexAssign.preds.push_back(predsOverLeadingField[0]);
- indexAssign.positions.push_back(0);
-
- // Assign the mandatory predicate at the matching position in the compound
- // index. We do this in order to ensure that the mandatory predicate (and not
- // some other predicate over the same position in the compound index) gets
- // assigned.
- //
- // The bad thing that could happen otherwise: A non-mandatory predicate gets
- // chosen by getMultikeyCompoundablePreds(...) instead of 'mandatoryPred'.
- // We would then fail to assign the mandatory predicate, and hence generate
- // a bad data access plan.
- //
- // The mandatory predicate is assigned by calling compound(...) because
- // compound(...) has logic for matching up a predicate with the proper
- // position in the compound index.
- vector<MatchExpression*> mandatoryToCompound;
- mandatoryToCompound.push_back(mandatoryPred);
- compound(mandatoryToCompound, thisIndex, &indexAssign);
-
- // At this point we have assigned a predicate over the leading field and
- // we have assigned the mandatory predicate to a trailing field.
- //
- // Ex:
- // Say we have index {a: 1, b: 1, c: "2dsphere", d: 1}. Also suppose that
- // there is a $near predicate over "c", with additional predicates over
- // "a", "b", "c", and "d". We will have assigned the $near predicate at
- // position 2 and a predicate with path "a" at position 0.
- }
+ const vector<MatchExpression*>& predsOverLeadingField = it->second;
- // Compound remaining predicates in a multikey-safe way.
- IndexToPredMap::const_iterator compIt = idxToNotFirst.find(indexAssign.index);
- if (compIt != idxToNotFirst.end()) {
- const vector<MatchExpression*>& couldCompound = compIt->second;
- vector<MatchExpression*> tryCompound;
+ if (thisIndex.multikey) {
+ // Special handling for multikey mandatory indices.
+ if (predsOverLeadingField.end() != std::find(predsOverLeadingField.begin(),
+ predsOverLeadingField.end(),
+ mandatoryPred)) {
+ // The mandatory predicate is over the first field of the index. Assign
+ // it now.
+ indexAssign.preds.push_back(mandatoryPred);
+ indexAssign.positions.push_back(0);
+ } else {
+ // The mandatory pred is notFirst. Assign an arbitrary predicate
+ // over the first position.
+ invariant(!predsOverLeadingField.empty());
+ indexAssign.preds.push_back(predsOverLeadingField[0]);
+ indexAssign.positions.push_back(0);
- getMultikeyCompoundablePreds(indexAssign.preds, couldCompound, &tryCompound);
- if (tryCompound.size()) {
- compound(tryCompound, thisIndex, &indexAssign);
- }
- }
+ // Assign the mandatory predicate at the matching position in the compound
+ // index. We do this in order to ensure that the mandatory predicate (and not
+ // some other predicate over the same position in the compound index) gets
+ // assigned.
+ //
+ // The bad thing that could happen otherwise: A non-mandatory predicate gets
+ // chosen by getMultikeyCompoundablePreds(...) instead of 'mandatoryPred'.
+ // We would then fail to assign the mandatory predicate, and hence generate
+ // a bad data access plan.
+ //
+ // The mandatory predicate is assigned by calling compound(...) because
+ // compound(...) has logic for matching up a predicate with the proper
+ // position in the compound index.
+ vector<MatchExpression*> mandatoryToCompound;
+ mandatoryToCompound.push_back(mandatoryPred);
+ compound(mandatoryToCompound, thisIndex, &indexAssign);
+
+ // At this point we have assigned a predicate over the leading field and
+ // we have assigned the mandatory predicate to a trailing field.
+ //
+ // Ex:
+ // Say we have index {a: 1, b: 1, c: "2dsphere", d: 1}. Also suppose that
+ // there is a $near predicate over "c", with additional predicates over
+ // "a", "b", "c", and "d". We will have assigned the $near predicate at
+ // position 2 and a predicate with path "a" at position 0.
}
- else {
- // For non-multikey, we don't have to do anything too special.
- // Just assign all "first" predicates and try to compound like usual.
- indexAssign.preds = it->second;
-
- // Since everything in assign.preds prefixes the index, they all go
- // at position '0' in the index, the first position.
- indexAssign.positions.resize(indexAssign.preds.size(), 0);
-
- // And now we begin compound analysis.
-
- // Find everything that could use assign.index but isn't a pred over
- // the first field of that index.
- IndexToPredMap::const_iterator compIt = idxToNotFirst.find(indexAssign.index);
- if (compIt != idxToNotFirst.end()) {
- compound(compIt->second, thisIndex, &indexAssign);
+
+ // Compound remaining predicates in a multikey-safe way.
+ IndexToPredMap::const_iterator compIt = idxToNotFirst.find(indexAssign.index);
+ if (compIt != idxToNotFirst.end()) {
+ const vector<MatchExpression*>& couldCompound = compIt->second;
+ vector<MatchExpression*> tryCompound;
+
+ getMultikeyCompoundablePreds(indexAssign.preds, couldCompound, &tryCompound);
+ if (tryCompound.size()) {
+ compound(tryCompound, thisIndex, &indexAssign);
}
}
+ } else {
+ // For non-multikey, we don't have to do anything too special.
+ // Just assign all "first" predicates and try to compound like usual.
+ indexAssign.preds = it->second;
- // The mandatory predicate must be assigned.
- invariant(indexAssign.preds.end() != std::find(indexAssign.preds.begin(),
- indexAssign.preds.end(),
- mandatoryPred));
+ // Since everything in assign.preds prefixes the index, they all go
+ // at position '0' in the index, the first position.
+ indexAssign.positions.resize(indexAssign.preds.size(), 0);
- // Output the assignments for this index.
- AndEnumerableState state;
- state.assignments.push_back(indexAssign);
- andAssignment->choices.push_back(state);
+ // And now we begin compound analysis.
+
+ // Find everything that could use assign.index but isn't a pred over
+ // the first field of that index.
+ IndexToPredMap::const_iterator compIt = idxToNotFirst.find(indexAssign.index);
+ if (compIt != idxToNotFirst.end()) {
+ compound(compIt->second, thisIndex, &indexAssign);
+ }
}
- return andAssignment->choices.size() > 0;
- }
+ // The mandatory predicate must be assigned.
+ invariant(indexAssign.preds.end() !=
+ std::find(indexAssign.preds.begin(), indexAssign.preds.end(), mandatoryPred));
- void PlanEnumerator::enumerateOneIndex(const IndexToPredMap& idxToFirst,
- const IndexToPredMap& idxToNotFirst,
- const vector<MemoID>& subnodes,
- AndAssignment* andAssignment) {
- // In the simplest case, an AndAssignment picks indices like a PredicateAssignment. To
- // be indexed we must only pick one index
- //
- // Complications:
- //
- // Some of our child predicates cannot be answered without an index. As such, the
- // indices that those predicates require must always be outputted. We store these
- // mandatory index assignments in 'mandatoryIndices'.
- //
- // Some of our children may not be predicates. We may have ORs (or array operators) as
- // children. If one of these subtrees provides an index, the AND is indexed. We store
- // these subtree choices in 'subnodes'.
- //
- // With the above two cases out of the way, we can focus on the remaining case: what to
- // do with our children that are leaf predicates.
- //
- // Guiding principles for index assignment to leaf predicates:
- //
- // 1. If we assign an index to {x:{$gt: 5}} we should assign the same index to
- // {x:{$lt: 50}}. That is, an index assignment should include all predicates
- // over its leading field.
- //
- // 2. If we have the index {a:1, b:1} and we assign it to {a: 5} we should assign it
- // to {b:7}, since with a predicate over the first field of the compound index,
- // the second field can be bounded as well. We may only assign indices to predicates
- // if all fields to the left of the index field are constrained.
+ // Output the assignments for this index.
+ AndEnumerableState state;
+ state.assignments.push_back(indexAssign);
+ andAssignment->choices.push_back(state);
+ }
- // First, add the state of using each subnode.
- for (size_t i = 0; i < subnodes.size(); ++i) {
- AndEnumerableState aes;
- aes.subnodesToIndex.push_back(subnodes[i]);
- andAssignment->choices.push_back(aes);
- }
+ return andAssignment->choices.size() > 0;
+}
- // For each FIRST, we assign nodes to it.
- for (IndexToPredMap::const_iterator it = idxToFirst.begin(); it != idxToFirst.end(); ++it) {
- // The assignment we're filling out.
- OneIndexAssignment indexAssign;
-
- // This is the index we assign to.
- indexAssign.index = it->first;
-
- const IndexEntry& thisIndex = (*_indices)[it->first];
-
- // If the index is multikey, we only assign one pred to it. We also skip
- // compounding. TODO: is this also true for 2d and 2dsphere indices? can they be
- // multikey but still compoundable?
- if (thisIndex.multikey) {
- // TODO: could pick better pred than first but not too worried since we should
- // really be isecting indices here. Just take the first pred. We don't assign
- // any other preds to this index. The planner will intersect the preds and this
- // enumeration strategy is just one index at a time.
- indexAssign.preds.push_back(it->second[0]);
- indexAssign.positions.push_back(0);
+void PlanEnumerator::enumerateOneIndex(const IndexToPredMap& idxToFirst,
+ const IndexToPredMap& idxToNotFirst,
+ const vector<MemoID>& subnodes,
+ AndAssignment* andAssignment) {
+ // In the simplest case, an AndAssignment picks indices like a PredicateAssignment. To
+ // be indexed we must only pick one index
+ //
+ // Complications:
+ //
+ // Some of our child predicates cannot be answered without an index. As such, the
+ // indices that those predicates require must always be outputted. We store these
+ // mandatory index assignments in 'mandatoryIndices'.
+ //
+ // Some of our children may not be predicates. We may have ORs (or array operators) as
+ // children. If one of these subtrees provides an index, the AND is indexed. We store
+ // these subtree choices in 'subnodes'.
+ //
+ // With the above two cases out of the way, we can focus on the remaining case: what to
+ // do with our children that are leaf predicates.
+ //
+ // Guiding principles for index assignment to leaf predicates:
+ //
+ // 1. If we assign an index to {x:{$gt: 5}} we should assign the same index to
+ // {x:{$lt: 50}}. That is, an index assignment should include all predicates
+ // over its leading field.
+ //
+ // 2. If we have the index {a:1, b:1} and we assign it to {a: 5} we should assign it
+ // to {b:7}, since with a predicate over the first field of the compound index,
+ // the second field can be bounded as well. We may only assign indices to predicates
+ // if all fields to the left of the index field are constrained.
+
+ // First, add the state of using each subnode.
+ for (size_t i = 0; i < subnodes.size(); ++i) {
+ AndEnumerableState aes;
+ aes.subnodesToIndex.push_back(subnodes[i]);
+ andAssignment->choices.push_back(aes);
+ }
- // If there are any preds that could possibly be compounded with this
- // index...
- IndexToPredMap::const_iterator compIt = idxToNotFirst.find(indexAssign.index);
- if (compIt != idxToNotFirst.end()) {
- const vector<MatchExpression*>& couldCompound = compIt->second;
- vector<MatchExpression*> tryCompound;
-
- // ...select the predicates that are safe to compound and try to
- // compound them.
- getMultikeyCompoundablePreds(indexAssign.preds, couldCompound, &tryCompound);
- if (tryCompound.size()) {
- compound(tryCompound, thisIndex, &indexAssign);
- }
+ // For each FIRST, we assign nodes to it.
+ for (IndexToPredMap::const_iterator it = idxToFirst.begin(); it != idxToFirst.end(); ++it) {
+ // The assignment we're filling out.
+ OneIndexAssignment indexAssign;
+
+ // This is the index we assign to.
+ indexAssign.index = it->first;
+
+ const IndexEntry& thisIndex = (*_indices)[it->first];
+
+ // If the index is multikey, we only assign one pred to it. We also skip
+ // compounding. TODO: is this also true for 2d and 2dsphere indices? can they be
+ // multikey but still compoundable?
+ if (thisIndex.multikey) {
+ // TODO: could pick better pred than first but not too worried since we should
+ // really be isecting indices here. Just take the first pred. We don't assign
+ // any other preds to this index. The planner will intersect the preds and this
+ // enumeration strategy is just one index at a time.
+ indexAssign.preds.push_back(it->second[0]);
+ indexAssign.positions.push_back(0);
+
+ // If there are any preds that could possibly be compounded with this
+ // index...
+ IndexToPredMap::const_iterator compIt = idxToNotFirst.find(indexAssign.index);
+ if (compIt != idxToNotFirst.end()) {
+ const vector<MatchExpression*>& couldCompound = compIt->second;
+ vector<MatchExpression*> tryCompound;
+
+ // ...select the predicates that are safe to compound and try to
+ // compound them.
+ getMultikeyCompoundablePreds(indexAssign.preds, couldCompound, &tryCompound);
+ if (tryCompound.size()) {
+ compound(tryCompound, thisIndex, &indexAssign);
}
}
- else {
- // The index isn't multikey. Assign all preds to it. The planner will
- // intersect the bounds.
- indexAssign.preds = it->second;
-
- // Since everything in assign.preds prefixes the index, they all go
- // at position '0' in the index, the first position.
- indexAssign.positions.resize(indexAssign.preds.size(), 0);
-
- // Find everything that could use assign.index but isn't a pred over
- // the first field of that index.
- IndexToPredMap::const_iterator compIt = idxToNotFirst.find(indexAssign.index);
- if (compIt != idxToNotFirst.end()) {
- compound(compIt->second, thisIndex, &indexAssign);
- }
+ } else {
+ // The index isn't multikey. Assign all preds to it. The planner will
+ // intersect the bounds.
+ indexAssign.preds = it->second;
+
+ // Since everything in assign.preds prefixes the index, they all go
+ // at position '0' in the index, the first position.
+ indexAssign.positions.resize(indexAssign.preds.size(), 0);
+
+ // Find everything that could use assign.index but isn't a pred over
+ // the first field of that index.
+ IndexToPredMap::const_iterator compIt = idxToNotFirst.find(indexAssign.index);
+ if (compIt != idxToNotFirst.end()) {
+ compound(compIt->second, thisIndex, &indexAssign);
}
+ }
+ AndEnumerableState state;
+ state.assignments.push_back(indexAssign);
+ andAssignment->choices.push_back(state);
+ }
+}
+
+void PlanEnumerator::enumerateAndIntersect(const IndexToPredMap& idxToFirst,
+ const IndexToPredMap& idxToNotFirst,
+ const vector<MemoID>& subnodes,
+ AndAssignment* andAssignment) {
+ // Hardcoded "look at all members of the power set of size 2" search,
+ // a.k.a. "consider all pairs of indices".
+ //
+ // For each unordered pair of indices do the following:
+ // 0. Impose an ordering (idx1, idx2) using the key patterns.
+ // (*See note below.)
+ // 1. Assign predicates which prefix idx1 to idx1.
+ // 2. Add assigned predicates to a set of predicates---the "already
+ // assigned set".
+ // 3. Assign predicates which prefix idx2 to idx2, as long as they have not
+ // been assigned to idx1 already. Add newly assigned predicates to
+ // the "already assigned set".
+ // 4. Try to assign predicates to idx1 by compounding.
+ // 5. Add any predicates assigned to idx1 by compounding to the
+ // "already assigned set",
+ // 6. Try to assign predicates to idx2 by compounding.
+ // 7. Determine if we have already assigned all predicates in
+ // the "already assigned set" to a single index. If so, then
+ // don't generate an ixisect solution, as compounding will
+ // be better. Otherwise, output the ixisect assignments.
+ //
+ // *NOTE on ordering. Suppose we have two indices A and B, and a
+ // predicate P1 which is over the prefix of both indices A and B.
+ // If we order the indices (A, B) then P1 will get assigned to A,
+ // but if we order the indices (B, A) then P1 will get assigned to
+ // B. In order to make sure that we get the same result for the unordered
+ // pair {A, B} we have to begin by imposing an ordering. As a more concrete
+ // example, if we have indices {x: 1, y: 1} and {x: 1, z: 1} with predicate
+ // {x: 3}, we want to make sure that {x: 3} gets assigned to the same index
+ // irrespective of ordering.
+
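A minimal, standalone sketch of the Step #0 idea above: impose a canonical order on the unordered pair before assigning, so the outcome does not depend on which index the outer loop happened to reach first. Plain strings stand in for key patterns here; the real code below compares BSON key patterns.

    #include <cassert>
    #include <string>
    #include <utility>

    // Order the pair by comparing the (stand-in) key patterns, so that
    // {A, B} and {B, A} always yield the same (idx1, idx2) assignment order.
    std::pair<std::string, std::string> canonicalPair(const std::string& a, const std::string& b) {
        return a.compare(b) > 0 ? std::make_pair(a, b) : std::make_pair(b, a);
    }

    int main() {
        auto p1 = canonicalPair("{x: 1, y: 1}", "{x: 1, z: 1}");
        auto p2 = canonicalPair("{x: 1, z: 1}", "{x: 1, y: 1}");
        // Same ordered pair either way, so a predicate over the shared prefix
        // {x: 3} is assigned to the same member of the pair regardless of
        // enumeration order.
        assert(p1 == p2);
        return 0;
    }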
+ size_t sizeBefore = andAssignment->choices.size();
+
+ for (IndexToPredMap::const_iterator firstIt = idxToFirst.begin(); firstIt != idxToFirst.end();
+ ++firstIt) {
+ const IndexEntry& oneIndex = (*_indices)[firstIt->first];
+
+ // 'oneAssign' is used to assign indices and subnodes or to
+ // make assignments for the first index when it's multikey.
+ // It is NOT used in the inner loop that considers pairs of
+ // indices.
+ OneIndexAssignment oneAssign;
+ oneAssign.index = firstIt->first;
+ oneAssign.preds = firstIt->second;
+ // Since everything in assign.preds prefixes the index, they all go
+ // at position '0' in the index, the first position.
+ oneAssign.positions.resize(oneAssign.preds.size(), 0);
+
+ // We create a scan per predicate so if we have >1 predicate we'll already
+ // have at least 2 scans (one predicate per scan as the planner can't
+ // intersect bounds when the index is multikey), so we stop here.
+ if (oneIndex.multikey && oneAssign.preds.size() > 1) {
+ // One could imagine an enormous auto-generated $all query with too many clauses to
+ // have an ixscan per clause.
+ static const size_t kMaxSelfIntersections = 10;
+ if (oneAssign.preds.size() > kMaxSelfIntersections) {
+ // Only take the first kMaxSelfIntersections preds.
+ oneAssign.preds.resize(kMaxSelfIntersections);
+ oneAssign.positions.resize(kMaxSelfIntersections);
+ }
AndEnumerableState state;
- state.assignments.push_back(indexAssign);
+ state.assignments.push_back(oneAssign);
andAssignment->choices.push_back(state);
+ continue;
}
- }
- void PlanEnumerator::enumerateAndIntersect(const IndexToPredMap& idxToFirst,
- const IndexToPredMap& idxToNotFirst,
- const vector<MemoID>& subnodes,
- AndAssignment* andAssignment) {
- // Hardcoded "look at all members of the power set of size 2" search,
- // a.k.a. "consider all pairs of indices".
- //
- // For each unordered pair of indices do the following:
- // 0. Impose an ordering (idx1, idx2) using the key patterns.
- // (*See note below.)
- // 1. Assign predicates which prefix idx1 to idx1.
- // 2. Add assigned predicates to a set of predicates---the "already
- // assigned set".
- // 3. Assign predicates which prefix idx2 to idx2, as long as they
- // been assigned to idx1 already. Add newly assigned predicates to
- // the "already assigned set".
- // 4. Try to assign predicates to idx1 by compounding.
- // 5. Add any predicates assigned to idx1 by compounding to the
- // "already assigned set",
- // 6. Try to assign predicates to idx2 by compounding.
- // 7. Determine if we have already assigned all predicates in
- // the "already assigned set" to a single index. If so, then
- // don't generate an ixisect solution, as compounding will
- // be better. Otherwise, output the ixisect assignments.
- //
- // *NOTE on ordering. Suppose we have two indices A and B, and a
- // predicate P1 which is over the prefix of both indices A and B.
- // If we order the indices (A, B) then P1 will get assigned to A,
- // but if we order the indices (B, A) then P1 will get assigned to
- // B. In order to make sure that we get the same result for the unordered
- // pair {A, B} we have to begin by imposing an ordering. As a more concrete
- // example, if we have indices {x: 1, y: 1} and {x: 1, z: 1} with predicate
- // {x: 3}, we want to make sure that {x: 3} gets assigned to the same index
- // irrespective of ordering.
-
- size_t sizeBefore = andAssignment->choices.size();
-
- for (IndexToPredMap::const_iterator firstIt = idxToFirst.begin();
- firstIt != idxToFirst.end(); ++firstIt) {
-
- const IndexEntry& oneIndex = (*_indices)[firstIt->first];
-
- // 'oneAssign' is used to assign indices and subnodes or to
- // make assignments for the first index when it's multikey.
- // It is NOT used in the inner loop that considers pairs of
- // indices.
- OneIndexAssignment oneAssign;
- oneAssign.index = firstIt->first;
- oneAssign.preds = firstIt->second;
- // Since everything in assign.preds prefixes the index, they all go
- // at position '0' in the index, the first position.
- oneAssign.positions.resize(oneAssign.preds.size(), 0);
-
- // We create a scan per predicate so if we have >1 predicate we'll already
- // have at least 2 scans (one predicate per scan as the planner can't
- // intersect bounds when the index is multikey), so we stop here.
- if (oneIndex.multikey && oneAssign.preds.size() > 1) {
- // One could imagine an enormous auto-generated $all query with too many clauses to
- // have an ixscan per clause.
- static const size_t kMaxSelfIntersections = 10;
- if (oneAssign.preds.size() > kMaxSelfIntersections) {
- // Only take the first kMaxSelfIntersections preds.
- oneAssign.preds.resize(kMaxSelfIntersections);
- oneAssign.positions.resize(kMaxSelfIntersections);
- }
- AndEnumerableState state;
- state.assignments.push_back(oneAssign);
- andAssignment->choices.push_back(state);
- continue;
+ // Output (subnode, firstAssign) pairs.
+ for (size_t i = 0; i < subnodes.size(); ++i) {
+ AndEnumerableState indexAndSubnode;
+ indexAndSubnode.assignments.push_back(oneAssign);
+ indexAndSubnode.subnodesToIndex.push_back(subnodes[i]);
+ andAssignment->choices.push_back(indexAndSubnode);
+ // Limit n^2.
+ if (andAssignment->choices.size() - sizeBefore > _intersectLimit) {
+ return;
}
+ }
- // Output (subnode, firstAssign) pairs.
- for (size_t i = 0; i < subnodes.size(); ++i) {
- AndEnumerableState indexAndSubnode;
- indexAndSubnode.assignments.push_back(oneAssign);
- indexAndSubnode.subnodesToIndex.push_back(subnodes[i]);
- andAssignment->choices.push_back(indexAndSubnode);
- // Limit n^2.
- if (andAssignment->choices.size() - sizeBefore > _intersectLimit) {
- return;
- }
+ // Start looking at all other indices to find one that we want to bundle
+ // with firstAssign.
+ IndexToPredMap::const_iterator secondIt = firstIt;
+ secondIt++;
+ for (; secondIt != idxToFirst.end(); secondIt++) {
+ const IndexEntry& firstIndex = (*_indices)[firstIt->first];
+ const IndexEntry& secondIndex = (*_indices)[secondIt->first];
+
+ // Limit n^2.
+ if (andAssignment->choices.size() - sizeBefore > _intersectLimit) {
+ return;
}
- // Start looking at all other indices to find one that we want to bundle
- // with firstAssign.
- IndexToPredMap::const_iterator secondIt = firstIt;
- secondIt++;
- for (; secondIt != idxToFirst.end(); secondIt++) {
- const IndexEntry& firstIndex = (*_indices)[secondIt->first];
- const IndexEntry& secondIndex = (*_indices)[secondIt->first];
-
- // Limit n^2.
- if (andAssignment->choices.size() - sizeBefore > _intersectLimit) {
- return;
- }
+ // If the other index we're considering is multikey with >1 pred, we don't
+ // want to have it as an additional assignment. Eventually, it1 will be
+ // equal to the current value of secondIt and we'll assign every pred for
+ // this mapping to the index.
+ if (secondIndex.multikey && secondIt->second.size() > 1) {
+ continue;
+ }
- // If the other index we're considering is multikey with >1 pred, we don't
- // want to have it as an additional assignment. Eventually, it1 will be
- // equal to the current value of secondIt and we'll assign every pred for
- // this mapping to the index.
- if (secondIndex.multikey && secondIt->second.size() > 1) {
- continue;
- }
+ //
+ // Step #0:
+ // Impose an ordering (idx1, idx2) using the key patterns.
+ //
+ IndexToPredMap::const_iterator it1, it2;
+ int ordering = firstIndex.keyPattern.woCompare(secondIndex.keyPattern);
+ it1 = (ordering > 0) ? firstIt : secondIt;
+ it2 = (ordering > 0) ? secondIt : firstIt;
+ const IndexEntry& ie1 = (*_indices)[it1->first];
+ const IndexEntry& ie2 = (*_indices)[it2->first];
- //
- // Step #0:
- // Impose an ordering (idx1, idx2) using the key patterns.
- //
- IndexToPredMap::const_iterator it1, it2;
- int ordering = firstIndex.keyPattern.woCompare(secondIndex.keyPattern);
- it1 = (ordering > 0) ? firstIt : secondIt;
- it2 = (ordering > 0) ? secondIt : firstIt;
- const IndexEntry& ie1 = (*_indices)[it1->first];
- const IndexEntry& ie2 = (*_indices)[it2->first];
+ //
+ // Step #1:
+ // Assign predicates which prefix firstIndex to firstAssign.
+ //
+ OneIndexAssignment firstAssign;
+ firstAssign.index = it1->first;
+ firstAssign.preds = it1->second;
+ // Since everything in assign.preds prefixes the index, they all go
+ // at position '0' in the index, the first position.
+ firstAssign.positions.resize(firstAssign.preds.size(), 0);
- //
- // Step #1:
- // Assign predicates which prefix firstIndex to firstAssign.
- //
- OneIndexAssignment firstAssign;
- firstAssign.index = it1->first;
- firstAssign.preds = it1->second;
- // Since everything in assign.preds prefixes the index, they all go
- // at position '0' in the index, the first position.
- firstAssign.positions.resize(firstAssign.preds.size(), 0);
-
- // We keep track of what preds are assigned to indices either because they
- // prefix the index or have been assigned through compounding. We make sure
- // that these predicates DO NOT become additional index assignments.
- // Example: what if firstAssign is the index (x, y) and we're trying to
- // compound? We want to make sure not to compound if the predicate is
- // already assigned to index y.
- set<MatchExpression*> predsAssigned;
+ // We keep track of what preds are assigned to indices either because they
+ // prefix the index or have been assigned through compounding. We make sure
+ // that these predicates DO NOT become additional index assignments.
+ // Example: what if firstAssign is the index (x, y) and we're trying to
+ // compound? We want to make sure not to compound if the predicate is
+ // already assigned to index y.
+ set<MatchExpression*> predsAssigned;
- //
- // Step #2:
- // Add indices assigned in 'firstAssign' to 'predsAssigned'.
- //
- for (size_t i = 0; i < firstAssign.preds.size(); ++i) {
- predsAssigned.insert(firstAssign.preds[i]);
- }
+ //
+ // Step #2:
+ // Add indices assigned in 'firstAssign' to 'predsAssigned'.
+ //
+ for (size_t i = 0; i < firstAssign.preds.size(); ++i) {
+ predsAssigned.insert(firstAssign.preds[i]);
+ }
- //
- // Step #3:
- // Assign predicates which prefix secondIndex to secondAssign and
- // have not already been assigned to firstAssign. Any newly
- // assigned predicates are added to 'predsAssigned'.
- //
- OneIndexAssignment secondAssign;
- secondAssign.index = it2->first;
- const vector<MatchExpression*>& preds = it2->second;
- for (size_t i = 0; i < preds.size(); ++i) {
- if (predsAssigned.end() == predsAssigned.find(preds[i])) {
- secondAssign.preds.push_back(preds[i]);
- secondAssign.positions.push_back(0);
- predsAssigned.insert(preds[i]);
- }
+ //
+ // Step #3:
+ // Assign predicates which prefix secondIndex to secondAssign and
+ // have not already been assigned to firstAssign. Any newly
+ // assigned predicates are added to 'predsAssigned'.
+ //
+ OneIndexAssignment secondAssign;
+ secondAssign.index = it2->first;
+ const vector<MatchExpression*>& preds = it2->second;
+ for (size_t i = 0; i < preds.size(); ++i) {
+ if (predsAssigned.end() == predsAssigned.find(preds[i])) {
+ secondAssign.preds.push_back(preds[i]);
+ secondAssign.positions.push_back(0);
+ predsAssigned.insert(preds[i]);
}
+ }
- // Every predicate that would use this index is already assigned in
- // firstAssign.
- if (0 == secondAssign.preds.size()) { continue; }
+ // Every predicate that would use this index is already assigned in
+ // firstAssign.
+ if (0 == secondAssign.preds.size()) {
+ continue;
+ }
- //
- // Step #4:
- // Compound on firstAssign, if applicable.
- //
- IndexToPredMap::const_iterator firstIndexCompound =
- idxToNotFirst.find(firstAssign.index);
-
- // Can't compound with multikey indices.
- if (!ie1.multikey && firstIndexCompound != idxToNotFirst.end()) {
- // We must remove any elements of 'predsAssigned' from consideration.
- vector<MatchExpression*> tryCompound;
- const vector<MatchExpression*>& couldCompound
- = firstIndexCompound->second;
- for (size_t i = 0; i < couldCompound.size(); ++i) {
- if (predsAssigned.end() == predsAssigned.find(couldCompound[i])) {
- tryCompound.push_back(couldCompound[i]);
- }
- }
- if (tryCompound.size()) {
- compound(tryCompound, ie1, &firstAssign);
+ //
+ // Step #4:
+ // Compound on firstAssign, if applicable.
+ //
+ IndexToPredMap::const_iterator firstIndexCompound =
+ idxToNotFirst.find(firstAssign.index);
+
+ // Can't compound with multikey indices.
+ if (!ie1.multikey && firstIndexCompound != idxToNotFirst.end()) {
+ // We must remove any elements of 'predsAssigned' from consideration.
+ vector<MatchExpression*> tryCompound;
+ const vector<MatchExpression*>& couldCompound = firstIndexCompound->second;
+ for (size_t i = 0; i < couldCompound.size(); ++i) {
+ if (predsAssigned.end() == predsAssigned.find(couldCompound[i])) {
+ tryCompound.push_back(couldCompound[i]);
}
}
-
- //
- // Step #5:
- // Make sure predicates assigned by compounding in step #4 do not get
- // assigned again.
- //
- for (size_t i = 0; i < firstAssign.preds.size(); ++i) {
- if (predsAssigned.end() == predsAssigned.find(firstAssign.preds[i])) {
- predsAssigned.insert(firstAssign.preds[i]);
- }
+ if (tryCompound.size()) {
+ compound(tryCompound, ie1, &firstAssign);
}
+ }
- //
- // Step #6:
- // Compound on firstAssign, if applicable.
- //
- IndexToPredMap::const_iterator secondIndexCompound =
- idxToNotFirst.find(secondAssign.index);
-
- if (!ie2.multikey && secondIndexCompound != idxToNotFirst.end()) {
- // We must remove any elements of 'predsAssigned' from consideration.
- vector<MatchExpression*> tryCompound;
- const vector<MatchExpression*>& couldCompound
- = secondIndexCompound->second;
- for (size_t i = 0; i < couldCompound.size(); ++i) {
- if (predsAssigned.end() == predsAssigned.find(couldCompound[i])) {
- tryCompound.push_back(couldCompound[i]);
- }
- }
- if (tryCompound.size()) {
- compound(tryCompound, ie2, &secondAssign);
- }
+ //
+ // Step #5:
+ // Make sure predicates assigned by compounding in step #4 do not get
+ // assigned again.
+ //
+ for (size_t i = 0; i < firstAssign.preds.size(); ++i) {
+ if (predsAssigned.end() == predsAssigned.find(firstAssign.preds[i])) {
+ predsAssigned.insert(firstAssign.preds[i]);
}
+ }
- // Add predicates in 'secondAssign' to the set of all assigned predicates.
- for (size_t i = 0; i < secondAssign.preds.size(); ++i) {
- if (predsAssigned.end() == predsAssigned.find(secondAssign.preds[i])) {
- predsAssigned.insert(secondAssign.preds[i]);
+ //
+ // Step #6:
+ // Compound on firstAssign, if applicable.
+ //
+ IndexToPredMap::const_iterator secondIndexCompound =
+ idxToNotFirst.find(secondAssign.index);
+
+ if (!ie2.multikey && secondIndexCompound != idxToNotFirst.end()) {
+ // We must remove any elements of 'predsAssigned' from consideration.
+ vector<MatchExpression*> tryCompound;
+ const vector<MatchExpression*>& couldCompound = secondIndexCompound->second;
+ for (size_t i = 0; i < couldCompound.size(); ++i) {
+ if (predsAssigned.end() == predsAssigned.find(couldCompound[i])) {
+ tryCompound.push_back(couldCompound[i]);
}
}
-
- //
- // Step #7:
- // Make sure we haven't already assigned this set of predicates by compounding.
- // If we have, then bail out for this pair of indices.
- //
- if (alreadyCompounded(predsAssigned, andAssignment)) {
- // There is no need to add either 'firstAssign' or 'secondAssign'
- // to 'andAssignment' in this case because we have already performed
- // assignments to single indices in enumerateOneIndex(...).
- continue;
+ if (tryCompound.size()) {
+ compound(tryCompound, ie2, &secondAssign);
}
+ }
- // We're done with this particular pair of indices; output
- // the resulting assignments.
- AndEnumerableState state;
- state.assignments.push_back(firstAssign);
- state.assignments.push_back(secondAssign);
- andAssignment->choices.push_back(state);
+ // Add predicates in 'secondAssign' to the set of all assigned predicates.
+ for (size_t i = 0; i < secondAssign.preds.size(); ++i) {
+ if (predsAssigned.end() == predsAssigned.find(secondAssign.preds[i])) {
+ predsAssigned.insert(secondAssign.preds[i]);
+ }
}
- }
- // TODO: Do we just want one subnode at a time? We can use far more than 2 indices at once
- // doing this very easily. If we want to restrict the # of indices the children use, when
- // we memoize the subtree above we can restrict it to 1 index at a time. This can get
- // tricky if we want both an intersection and a 1-index memo entry, since our state change
- // is simple and we don't traverse the memo in any targeted way. Should also verify that
- // having a one-to-many mapping of MatchExpression to MemoID doesn't break anything. This
- // approach errors on the side of "too much indexing."
- for (size_t i = 0; i < subnodes.size(); ++i) {
- for (size_t j = i + 1; j < subnodes.size(); ++j) {
- AndEnumerableState state;
- state.subnodesToIndex.push_back(subnodes[i]);
- state.subnodesToIndex.push_back(subnodes[j]);
- andAssignment->choices.push_back(state);
+ //
+ // Step #7:
+ // Make sure we haven't already assigned this set of predicates by compounding.
+ // If we have, then bail out for this pair of indices.
+ //
+ if (alreadyCompounded(predsAssigned, andAssignment)) {
+ // There is no need to add either 'firstAssign' or 'secondAssign'
+ // to 'andAssignment' in this case because we have already performed
+ // assignments to single indices in enumerateOneIndex(...).
+ continue;
}
+
+ // We're done with this particular pair of indices; output
+ // the resulting assignments.
+ AndEnumerableState state;
+ state.assignments.push_back(firstAssign);
+ state.assignments.push_back(secondAssign);
+ andAssignment->choices.push_back(state);
}
}
- bool PlanEnumerator::partitionPreds(MatchExpression* node,
- PrepMemoContext context,
- vector<MatchExpression*>* indexOut,
- vector<MemoID>* subnodesOut,
- vector<MemoID>* mandatorySubnodes) {
- for (size_t i = 0; i < node->numChildren(); ++i) {
- MatchExpression* child = node->getChild(i);
- if (Indexability::nodeCanUseIndexOnOwnField(child)) {
- RelevantTag* rt = static_cast<RelevantTag*>(child->getTag());
- if (NULL != context.elemMatchExpr) {
- // If we're in an $elemMatch context, store the
- // innermost parent $elemMatch, as well as the
- // inner path prefix.
- rt->elemMatchExpr = context.elemMatchExpr;
- rt->pathPrefix = getPathPrefix(child->path().toString());
- }
- else {
- // We're not an $elemMatch context, so we should store
- // the prefix of the full path.
- rt->pathPrefix = getPathPrefix(rt->path);
- }
-
- // Output this as a pred that can use the index.
- indexOut->push_back(child);
- }
- else if (Indexability::isBoundsGeneratingNot(child)) {
- partitionPreds(child, context, indexOut, subnodesOut, mandatorySubnodes);
- }
- else if (MatchExpression::ELEM_MATCH_OBJECT == child->matchType()) {
- PrepMemoContext childContext;
- childContext.elemMatchExpr = child;
- partitionPreds(child, childContext, indexOut, subnodesOut, mandatorySubnodes);
- }
- else if (MatchExpression::AND == child->matchType()) {
- partitionPreds(child, context, indexOut, subnodesOut, mandatorySubnodes);
+ // TODO: Do we just want one subnode at a time? We can use far more than 2 indices at once
+ // doing this very easily. If we want to restrict the # of indices the children use, when
+ // we memoize the subtree above we can restrict it to 1 index at a time. This can get
+ // tricky if we want both an intersection and a 1-index memo entry, since our state change
+ // is simple and we don't traverse the memo in any targeted way. Should also verify that
+ // having a one-to-many mapping of MatchExpression to MemoID doesn't break anything. This
+    // approach errs on the side of "too much indexing."
+ for (size_t i = 0; i < subnodes.size(); ++i) {
+ for (size_t j = i + 1; j < subnodes.size(); ++j) {
+ AndEnumerableState state;
+ state.subnodesToIndex.push_back(subnodes[i]);
+ state.subnodesToIndex.push_back(subnodes[j]);
+ andAssignment->choices.push_back(state);
+ }
+ }
+}
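
An aside on the "Limit n^2" checks above: the pairwise walk over (firstIt, secondIt) would otherwise generate a quadratic number of choices, so the function bails out once it has added more than _intersectLimit new entries. A minimal standalone sketch of that shape (hypothetical names, not part of this change):

    #include <cstddef>
    #include <vector>

    struct Choice {};  // stand-in for AndEnumerableState; only the count matters here

    // Enumerate index pairs (i, j) with i < j, but stop once more than 'limit'
    // choices have been added beyond 'sizeBefore'.
    void enumeratePairs(size_t numIndices, size_t limit, std::vector<Choice>* choices) {
        const size_t sizeBefore = choices->size();
        for (size_t i = 0; i < numIndices; ++i) {
            for (size_t j = i + 1; j < numIndices; ++j) {
                choices->push_back(Choice{});
                if (choices->size() - sizeBefore > limit) {
                    return;
                }
            }
        }
    }
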
+
+bool PlanEnumerator::partitionPreds(MatchExpression* node,
+ PrepMemoContext context,
+ vector<MatchExpression*>* indexOut,
+ vector<MemoID>* subnodesOut,
+ vector<MemoID>* mandatorySubnodes) {
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ MatchExpression* child = node->getChild(i);
+ if (Indexability::nodeCanUseIndexOnOwnField(child)) {
+ RelevantTag* rt = static_cast<RelevantTag*>(child->getTag());
+ if (NULL != context.elemMatchExpr) {
+ // If we're in an $elemMatch context, store the
+ // innermost parent $elemMatch, as well as the
+ // inner path prefix.
+ rt->elemMatchExpr = context.elemMatchExpr;
+ rt->pathPrefix = getPathPrefix(child->path().toString());
+ } else {
+                // We're not in an $elemMatch context, so we should store
+ // the prefix of the full path.
+ rt->pathPrefix = getPathPrefix(rt->path);
}
- else {
- bool mandatory = expressionRequiresIndex(child);
- // Recursively prepMemo for the subnode. We fall through
- // to this case for logical nodes other than AND (e.g. OR).
- if (prepMemo(child, context)) {
- size_t childID = memoIDForNode(child);
-
- // Output the subnode.
- if (mandatory) {
- mandatorySubnodes->push_back(childID);
- }
- else {
- subnodesOut->push_back(childID);
- }
- }
- else if (mandatory) {
- // The subnode is mandatory but cannot be indexed. This means
- // that the entire AND cannot be indexed either.
- return false;
+ // Output this as a pred that can use the index.
+ indexOut->push_back(child);
+ } else if (Indexability::isBoundsGeneratingNot(child)) {
+ partitionPreds(child, context, indexOut, subnodesOut, mandatorySubnodes);
+ } else if (MatchExpression::ELEM_MATCH_OBJECT == child->matchType()) {
+ PrepMemoContext childContext;
+ childContext.elemMatchExpr = child;
+ partitionPreds(child, childContext, indexOut, subnodesOut, mandatorySubnodes);
+ } else if (MatchExpression::AND == child->matchType()) {
+ partitionPreds(child, context, indexOut, subnodesOut, mandatorySubnodes);
+ } else {
+ bool mandatory = expressionRequiresIndex(child);
+
+ // Recursively prepMemo for the subnode. We fall through
+ // to this case for logical nodes other than AND (e.g. OR).
+ if (prepMemo(child, context)) {
+ size_t childID = memoIDForNode(child);
+
+ // Output the subnode.
+ if (mandatory) {
+ mandatorySubnodes->push_back(childID);
+ } else {
+ subnodesOut->push_back(childID);
}
+ } else if (mandatory) {
+ // The subnode is mandatory but cannot be indexed. This means
+ // that the entire AND cannot be indexed either.
+ return false;
}
}
-
- return true;
}
- void PlanEnumerator::getMultikeyCompoundablePreds(const vector<MatchExpression*>& assigned,
- const vector<MatchExpression*>& couldCompound,
- vector<MatchExpression*>* out) {
- // Map from a particular $elemMatch expression to the set of prefixes
- // used so far by the predicates inside the $elemMatch. For example,
- // {a: {$elemMatch: {b: 1, c: 2}}} would map to the set {'b', 'c'} at
- // the end of this function's execution.
- //
- // NULL maps to the set of prefixes used so far outside of an $elemMatch
- // context.
- //
- // As we iterate over the available indexed predicates, we keep track
- // of the used prefixes both inside and outside of an $elemMatch context.
- unordered_map<MatchExpression*, set<string> > used;
-
- // Initialize 'used' with the starting predicates in 'assigned'. Begin by
- // initializing the top-level scope with the prefix of the full path.
- for (size_t i = 0; i < assigned.size(); i++) {
- const MatchExpression* assignedPred = assigned[i];
- invariant(NULL != assignedPred->getTag());
- RelevantTag* usedRt = static_cast<RelevantTag*>(assignedPred->getTag());
- set<string> usedPrefixes;
- usedPrefixes.insert(getPathPrefix(usedRt->path));
- used[NULL] = usedPrefixes;
-
- // If 'assigned' is a predicate inside an $elemMatch, we have to
- // add the prefix not only to the top-level context, but also to the
- // the $elemMatch context. For example, if 'assigned' is {a: {$elemMatch: {b: 1}}},
- // then we will have already added "a" to the set for NULL. We now
- // also need to add "b" to the set for the $elemMatch.
- if (NULL != usedRt->elemMatchExpr) {
- set<string> elemMatchUsed;
- // Whereas getPathPrefix(usedRt->path) is the prefix of the full path,
- // usedRt->pathPrefix contains the prefix of the portion of the
- // path that is inside the $elemMatch. These two prefixes are the same
- // in the top-level context, but here must be different because 'usedRt'
- // is in an $elemMatch context.
- elemMatchUsed.insert(usedRt->pathPrefix);
- used[usedRt->elemMatchExpr] = elemMatchUsed;
- }
- }
+ return true;
+}
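
The traversal above sorts each child of the AND into one of three buckets: directly indexable predicates, optional subnodes, and mandatory subnodes; a mandatory child that cannot use an index fails the whole AND. A shape-only sketch with placeholder types (not the real MatchExpression machinery):

    #include <cstddef>
    #include <vector>

    enum class Kind { kIndexablePred, kSubnode, kMandatorySubnode, kUnindexableMandatory };

    // Mirrors the control flow of partitionPreds(): each child lands in exactly one
    // bucket, and an unindexable mandatory child makes the AND unindexable.
    bool partition(const std::vector<Kind>& children,
                   std::vector<size_t>* preds,
                   std::vector<size_t>* subnodes,
                   std::vector<size_t>* mandatory) {
        for (size_t i = 0; i < children.size(); ++i) {
            switch (children[i]) {
                case Kind::kIndexablePred:
                    preds->push_back(i);
                    break;
                case Kind::kSubnode:
                    subnodes->push_back(i);
                    break;
                case Kind::kMandatorySubnode:
                    mandatory->push_back(i);
                    break;
                case Kind::kUnindexableMandatory:
                    return false;  // the entire AND cannot be indexed
            }
        }
        return true;
    }
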
- for (size_t i = 0; i < couldCompound.size(); ++i) {
- invariant(Indexability::nodeCanUseIndexOnOwnField(couldCompound[i]));
- RelevantTag* rt = static_cast<RelevantTag*>(couldCompound[i]->getTag());
-
- if (used.end() == used.find(rt->elemMatchExpr)) {
- // This is a new $elemMatch that we haven't seen before.
- invariant(used.end() != used.find(NULL));
- set<string>& topLevelUsed = used.find(NULL)->second;
-
- // If the top-level path prefix of the $elemMatch hasn't been
- // used yet, couldCompound[i] is safe to compound.
- if (topLevelUsed.end() == topLevelUsed.find(getPathPrefix(rt->path))) {
- topLevelUsed.insert(getPathPrefix(rt->path));
- set<string> usedPrefixes;
- usedPrefixes.insert(rt->pathPrefix);
- used[rt->elemMatchExpr] = usedPrefixes;
-
- // Output the predicate.
- out->push_back(couldCompound[i]);
- }
+void PlanEnumerator::getMultikeyCompoundablePreds(const vector<MatchExpression*>& assigned,
+ const vector<MatchExpression*>& couldCompound,
+ vector<MatchExpression*>* out) {
+ // Map from a particular $elemMatch expression to the set of prefixes
+ // used so far by the predicates inside the $elemMatch. For example,
+ // {a: {$elemMatch: {b: 1, c: 2}}} would map to the set {'b', 'c'} at
+ // the end of this function's execution.
+ //
+ // NULL maps to the set of prefixes used so far outside of an $elemMatch
+ // context.
+ //
+ // As we iterate over the available indexed predicates, we keep track
+ // of the used prefixes both inside and outside of an $elemMatch context.
+ unordered_map<MatchExpression*, set<string>> used;
+
+ // Initialize 'used' with the starting predicates in 'assigned'. Begin by
+ // initializing the top-level scope with the prefix of the full path.
+ for (size_t i = 0; i < assigned.size(); i++) {
+ const MatchExpression* assignedPred = assigned[i];
+ invariant(NULL != assignedPred->getTag());
+ RelevantTag* usedRt = static_cast<RelevantTag*>(assignedPred->getTag());
+ set<string> usedPrefixes;
+ usedPrefixes.insert(getPathPrefix(usedRt->path));
+ used[NULL] = usedPrefixes;
+
+ // If 'assigned' is a predicate inside an $elemMatch, we have to
+        // add the prefix not only to the top-level context, but also to
+        // the $elemMatch context. For example, if 'assigned' is {a: {$elemMatch: {b: 1}}},
+ // then we will have already added "a" to the set for NULL. We now
+ // also need to add "b" to the set for the $elemMatch.
+ if (NULL != usedRt->elemMatchExpr) {
+ set<string> elemMatchUsed;
+ // Whereas getPathPrefix(usedRt->path) is the prefix of the full path,
+ // usedRt->pathPrefix contains the prefix of the portion of the
+ // path that is inside the $elemMatch. These two prefixes are the same
+ // in the top-level context, but here must be different because 'usedRt'
+ // is in an $elemMatch context.
+ elemMatchUsed.insert(usedRt->pathPrefix);
+ used[usedRt->elemMatchExpr] = elemMatchUsed;
+ }
+ }
+ for (size_t i = 0; i < couldCompound.size(); ++i) {
+ invariant(Indexability::nodeCanUseIndexOnOwnField(couldCompound[i]));
+ RelevantTag* rt = static_cast<RelevantTag*>(couldCompound[i]->getTag());
+
+ if (used.end() == used.find(rt->elemMatchExpr)) {
+ // This is a new $elemMatch that we haven't seen before.
+ invariant(used.end() != used.find(NULL));
+ set<string>& topLevelUsed = used.find(NULL)->second;
+
+ // If the top-level path prefix of the $elemMatch hasn't been
+ // used yet, couldCompound[i] is safe to compound.
+ if (topLevelUsed.end() == topLevelUsed.find(getPathPrefix(rt->path))) {
+ topLevelUsed.insert(getPathPrefix(rt->path));
+ set<string> usedPrefixes;
+ usedPrefixes.insert(rt->pathPrefix);
+ used[rt->elemMatchExpr] = usedPrefixes;
+
+ // Output the predicate.
+ out->push_back(couldCompound[i]);
}
- else {
- // We've seen this $elemMatch before, or the predicate is
- // top-level (not in an $elemMatch context). If the prefix stored
- // in the tag has not been used yet, then couldCompound[i] is
- // safe to compound.
- set<string>& usedPrefixes = used.find(rt->elemMatchExpr)->second;
- if (usedPrefixes.end() == usedPrefixes.find(rt->pathPrefix)) {
- usedPrefixes.insert(rt->pathPrefix);
-
- // Output the predicate.
- out->push_back(couldCompound[i]);
- }
+
+ } else {
+ // We've seen this $elemMatch before, or the predicate is
+ // top-level (not in an $elemMatch context). If the prefix stored
+ // in the tag has not been used yet, then couldCompound[i] is
+ // safe to compound.
+ set<string>& usedPrefixes = used.find(rt->elemMatchExpr)->second;
+ if (usedPrefixes.end() == usedPrefixes.find(rt->pathPrefix)) {
+ usedPrefixes.insert(rt->pathPrefix);
+
+ // Output the predicate.
+ out->push_back(couldCompound[i]);
}
}
}
+}
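
Ignoring $elemMatch, the rule enforced by the 'used' map above is that no two compounded predicates may share a path prefix. A standalone illustration of just that rule, with a simplified pathPrefix helper (not the actual getPathPrefix/RelevantTag code):

    #include <set>
    #include <string>
    #include <vector>

    // Prefix of a dotted path: everything before the first '.'.
    std::string pathPrefix(const std::string& path) {
        return path.substr(0, path.find('.'));
    }

    // A set of paths is compoundable on a multikey index only if no two share a prefix.
    bool compoundable(const std::vector<std::string>& paths) {
        std::set<std::string> seen;
        for (const std::string& p : paths) {
            if (!seen.insert(pathPrefix(p)).second) {
                return false;  // e.g. "a.b" and "a.c" both reduce to "a"
            }
        }
        return true;
    }

    // compoundable({"a.b.c", "d", "e.b.c"}) == true   -- no shared prefix
    // compoundable({"a.b", "a.c"})          == false  -- both share "a"
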
- bool PlanEnumerator::alreadyCompounded(const set<MatchExpression*>& ixisectAssigned,
- const AndAssignment* andAssignment) {
- for (size_t i = 0; i < andAssignment->choices.size(); ++i) {
- const AndEnumerableState& state = andAssignment->choices[i];
+bool PlanEnumerator::alreadyCompounded(const set<MatchExpression*>& ixisectAssigned,
+ const AndAssignment* andAssignment) {
+ for (size_t i = 0; i < andAssignment->choices.size(); ++i) {
+ const AndEnumerableState& state = andAssignment->choices[i];
- // We cannot have assigned this set of predicates already by
- // compounding unless this is an assignment to a single index.
- if (state.assignments.size() != 1) {
- continue;
- }
+ // We cannot have assigned this set of predicates already by
+ // compounding unless this is an assignment to a single index.
+ if (state.assignments.size() != 1) {
+ continue;
+ }
- // If the set of preds in 'ixisectAssigned' is a subset of 'oneAssign.preds',
- // then all the preds can be used by compounding on a single index.
- const OneIndexAssignment& oneAssign = state.assignments[0];
+ // If the set of preds in 'ixisectAssigned' is a subset of 'oneAssign.preds',
+ // then all the preds can be used by compounding on a single index.
+ const OneIndexAssignment& oneAssign = state.assignments[0];
- // If 'ixisectAssigned' is larger than 'oneAssign.preds', then
- // it can't be a subset.
- if (ixisectAssigned.size() > oneAssign.preds.size()) {
- continue;
- }
-
- // Check for subset by counting the number of elements in 'oneAssign.preds'
- // that are contained in 'ixisectAssigned'. The elements of both 'oneAssign.preds'
- // and 'ixisectAssigned' are unique (no repeated elements).
- size_t count = 0;
- for (size_t j = 0; j < oneAssign.preds.size(); ++j) {
- if (ixisectAssigned.end() != ixisectAssigned.find(oneAssign.preds[j])) {
- ++count;
- }
- }
+ // If 'ixisectAssigned' is larger than 'oneAssign.preds', then
+ // it can't be a subset.
+ if (ixisectAssigned.size() > oneAssign.preds.size()) {
+ continue;
+ }
- if (ixisectAssigned.size() == count) {
- return true;
+ // Check for subset by counting the number of elements in 'oneAssign.preds'
+ // that are contained in 'ixisectAssigned'. The elements of both 'oneAssign.preds'
+ // and 'ixisectAssigned' are unique (no repeated elements).
+ size_t count = 0;
+ for (size_t j = 0; j < oneAssign.preds.size(); ++j) {
+ if (ixisectAssigned.end() != ixisectAssigned.find(oneAssign.preds[j])) {
+ ++count;
}
+ }
- // We cannot assign the preds by compounding on 'oneAssign'.
- // Move on to the next index.
+ if (ixisectAssigned.size() == count) {
+ return true;
}
- return false;
+ // We cannot assign the preds by compounding on 'oneAssign'.
+ // Move on to the next index.
}
- void PlanEnumerator::compound(const vector<MatchExpression*>& tryCompound,
- const IndexEntry& thisIndex,
- OneIndexAssignment* assign) {
- // Let's try to match up the expressions in 'compExprs' with the
- // fields in the index key pattern.
- BSONObjIterator kpIt(thisIndex.keyPattern);
-
- // Skip the first elt as it's already assigned.
- kpIt.next();
-
- // When we compound we store the field number that the predicate
- // goes over in order to avoid having to iterate again and compare
- // field names.
- size_t posInIdx = 0;
-
- while (kpIt.more()) {
- BSONElement keyElt = kpIt.next();
- ++posInIdx;
-
- // Go through 'tryCompound' to see if there is a compoundable
- // predicate for 'keyElt'. If there is nothing to compound, then
- // simply move on to the next field in the compound index. We
- // do not enforce that fields are assigned contiguously from
- // right to left, i.e. for compound index {a: 1, b: 1, c: 1}
- // it is okay to compound predicates over "a" and "c", skipping "b".
- for (size_t j = 0; j < tryCompound.size(); ++j) {
- MatchExpression* maybe = tryCompound[j];
- // Sigh we grab the full path from the relevant tag.
- RelevantTag* rt = static_cast<RelevantTag*>(maybe->getTag());
- if (keyElt.fieldName() == rt->path) {
- // preds and positions are parallel arrays.
- assign->preds.push_back(maybe);
- assign->positions.push_back(posInIdx);
- }
+ return false;
+}
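
The membership count above is simply a subset test: 'ixisectAssigned' must be wholly contained in 'oneAssign.preds', and counting works because neither container holds duplicates. The same check in isolation (hypothetical helper, not MongoDB code):

    #include <cstddef>
    #include <set>
    #include <vector>

    // Returns true if every element of 'candidate' also appears in 'assigned'.
    template <typename T>
    bool isSubset(const std::set<T>& candidate, const std::vector<T>& assigned) {
        if (candidate.size() > assigned.size()) {
            return false;  // cannot be a subset if it is larger
        }
        size_t count = 0;
        for (size_t i = 0; i < assigned.size(); ++i) {
            if (candidate.count(assigned[i]) > 0) {
                ++count;
            }
        }
        return candidate.size() == count;
    }
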
+
+void PlanEnumerator::compound(const vector<MatchExpression*>& tryCompound,
+ const IndexEntry& thisIndex,
+ OneIndexAssignment* assign) {
+ // Let's try to match up the expressions in 'compExprs' with the
+ // fields in the index key pattern.
+ BSONObjIterator kpIt(thisIndex.keyPattern);
+
+ // Skip the first elt as it's already assigned.
+ kpIt.next();
+
+ // When we compound we store the field number that the predicate
+ // goes over in order to avoid having to iterate again and compare
+ // field names.
+ size_t posInIdx = 0;
+
+ while (kpIt.more()) {
+ BSONElement keyElt = kpIt.next();
+ ++posInIdx;
+
+ // Go through 'tryCompound' to see if there is a compoundable
+ // predicate for 'keyElt'. If there is nothing to compound, then
+ // simply move on to the next field in the compound index. We
+ // do not enforce that fields are assigned contiguously from
+ // right to left, i.e. for compound index {a: 1, b: 1, c: 1}
+ // it is okay to compound predicates over "a" and "c", skipping "b".
+ for (size_t j = 0; j < tryCompound.size(); ++j) {
+ MatchExpression* maybe = tryCompound[j];
+ // Sigh we grab the full path from the relevant tag.
+ RelevantTag* rt = static_cast<RelevantTag*>(maybe->getTag());
+ if (keyElt.fieldName() == rt->path) {
+ // preds and positions are parallel arrays.
+ assign->preds.push_back(maybe);
+ assign->positions.push_back(posInIdx);
}
}
}
+}
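
To make the position bookkeeping above concrete: for a compound index whose key pattern fields are ("a", "b", "c"), the leading field is already assigned at position 0, and a compounded predicate over "c" is recorded at position 2 even though "b" is skipped. A string-based sketch of that mapping (hypothetical names, no BSON types):

    #include <cstddef>
    #include <string>
    #include <vector>

    // For each trailing key-pattern field, record the position of any predicate path
    // that matches it. Fields need not be assigned contiguously.
    void assignPositions(const std::vector<std::string>& keyFields,
                         const std::vector<std::string>& predPaths,
                         std::vector<size_t>* positions) {
        for (size_t pos = 1; pos < keyFields.size(); ++pos) {  // position 0 is already taken
            for (size_t j = 0; j < predPaths.size(); ++j) {
                if (keyFields[pos] == predPaths[j]) {
                    positions->push_back(pos);
                }
            }
        }
    }
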
+
+//
+// Structure navigation
+//
+
+void PlanEnumerator::tagMemo(size_t id) {
+ LOG(5) << "Tagging memoID " << id << endl;
+ NodeAssignment* assign = _memo[id];
+ verify(NULL != assign);
+
+ if (NULL != assign->pred) {
+ PredicateAssignment* pa = assign->pred.get();
+ verify(NULL == pa->expr->getTag());
+ verify(pa->indexToAssign < pa->first.size());
+ pa->expr->setTag(new IndexTag(pa->first[pa->indexToAssign]));
+ } else if (NULL != assign->orAssignment) {
+ OrAssignment* oa = assign->orAssignment.get();
+ for (size_t i = 0; i < oa->subnodes.size(); ++i) {
+ tagMemo(oa->subnodes[i]);
+ }
+ } else if (NULL != assign->arrayAssignment) {
+ ArrayAssignment* aa = assign->arrayAssignment.get();
+ tagMemo(aa->subnodes[aa->counter]);
+ } else if (NULL != assign->andAssignment) {
+ AndAssignment* aa = assign->andAssignment.get();
+ verify(aa->counter < aa->choices.size());
- //
- // Structure navigation
- //
-
- void PlanEnumerator::tagMemo(size_t id) {
- LOG(5) << "Tagging memoID " << id << endl;
- NodeAssignment* assign = _memo[id];
- verify(NULL != assign);
+ const AndEnumerableState& aes = aa->choices[aa->counter];
- if (NULL != assign->pred) {
- PredicateAssignment* pa = assign->pred.get();
- verify(NULL == pa->expr->getTag());
- verify(pa->indexToAssign < pa->first.size());
- pa->expr->setTag(new IndexTag(pa->first[pa->indexToAssign]));
- }
- else if (NULL != assign->orAssignment) {
- OrAssignment* oa = assign->orAssignment.get();
- for (size_t i = 0; i < oa->subnodes.size(); ++i) {
- tagMemo(oa->subnodes[i]);
- }
- }
- else if (NULL != assign->arrayAssignment) {
- ArrayAssignment* aa = assign->arrayAssignment.get();
- tagMemo(aa->subnodes[aa->counter]);
+ for (size_t j = 0; j < aes.subnodesToIndex.size(); ++j) {
+ tagMemo(aes.subnodesToIndex[j]);
}
- else if (NULL != assign->andAssignment) {
- AndAssignment* aa = assign->andAssignment.get();
- verify(aa->counter < aa->choices.size());
- const AndEnumerableState& aes = aa->choices[aa->counter];
+ for (size_t i = 0; i < aes.assignments.size(); ++i) {
+ const OneIndexAssignment& assign = aes.assignments[i];
- for (size_t j = 0; j < aes.subnodesToIndex.size(); ++j) {
- tagMemo(aes.subnodesToIndex[j]);
+ for (size_t j = 0; j < assign.preds.size(); ++j) {
+ MatchExpression* pred = assign.preds[j];
+ verify(NULL == pred->getTag());
+ pred->setTag(new IndexTag(assign.index, assign.positions[j]));
}
-
- for (size_t i = 0; i < aes.assignments.size(); ++i) {
- const OneIndexAssignment& assign = aes.assignments[i];
-
- for (size_t j = 0; j < assign.preds.size(); ++j) {
- MatchExpression* pred = assign.preds[j];
- verify(NULL == pred->getTag());
- pred->setTag(new IndexTag(assign.index, assign.positions[j]));
- }
- }
- }
- else {
- verify(0);
}
+ } else {
+ verify(0);
}
+}
- bool PlanEnumerator::nextMemo(size_t id) {
- NodeAssignment* assign = _memo[id];
- verify(NULL != assign);
+bool PlanEnumerator::nextMemo(size_t id) {
+ NodeAssignment* assign = _memo[id];
+ verify(NULL != assign);
- if (NULL != assign->pred) {
- PredicateAssignment* pa = assign->pred.get();
- pa->indexToAssign++;
- if (pa->indexToAssign >= pa->first.size()) {
- pa->indexToAssign = 0;
- return true;
- }
- return false;
+ if (NULL != assign->pred) {
+ PredicateAssignment* pa = assign->pred.get();
+ pa->indexToAssign++;
+ if (pa->indexToAssign >= pa->first.size()) {
+ pa->indexToAssign = 0;
+ return true;
}
- else if (NULL != assign->orAssignment) {
- OrAssignment* oa = assign->orAssignment.get();
-
- // Limit the number of OR enumerations
- oa->counter++;
- if (oa->counter >= _orLimit) {
- return true;
- }
+ return false;
+ } else if (NULL != assign->orAssignment) {
+ OrAssignment* oa = assign->orAssignment.get();
- // OR just walks through telling its children to
- // move forward.
- for (size_t i = 0; i < oa->subnodes.size(); ++i) {
- // If there's no carry, we just stop. If there's a carry, we move the next child
- // forward.
- if (!nextMemo(oa->subnodes[i])) {
- return false;
- }
- }
- // If we're here, the last subnode had a carry, therefore the OR has a carry.
+ // Limit the number of OR enumerations
+ oa->counter++;
+ if (oa->counter >= _orLimit) {
return true;
}
- else if (NULL != assign->arrayAssignment) {
- ArrayAssignment* aa = assign->arrayAssignment.get();
- // moving to next on current subnode is OK
- if (!nextMemo(aa->subnodes[aa->counter])) { return false; }
- // Move to next subnode.
- ++aa->counter;
- if (aa->counter < aa->subnodes.size()) {
+
+ // OR just walks through telling its children to
+ // move forward.
+ for (size_t i = 0; i < oa->subnodes.size(); ++i) {
+ // If there's no carry, we just stop. If there's a carry, we move the next child
+ // forward.
+ if (!nextMemo(oa->subnodes[i])) {
return false;
}
- aa->counter = 0;
- return true;
}
- else if (NULL != assign->andAssignment) {
- AndAssignment* aa = assign->andAssignment.get();
-
- // One of our subnodes might have to move on to its next enumeration state.
- const AndEnumerableState& aes = aa->choices[aa->counter];
- for (size_t i = 0; i < aes.subnodesToIndex.size(); ++i) {
- if (!nextMemo(aes.subnodesToIndex[i])) {
- return false;
- }
- }
+ // If we're here, the last subnode had a carry, therefore the OR has a carry.
+ return true;
+ } else if (NULL != assign->arrayAssignment) {
+ ArrayAssignment* aa = assign->arrayAssignment.get();
+ // moving to next on current subnode is OK
+ if (!nextMemo(aa->subnodes[aa->counter])) {
+ return false;
+ }
+ // Move to next subnode.
+ ++aa->counter;
+ if (aa->counter < aa->subnodes.size()) {
+ return false;
+ }
+ aa->counter = 0;
+ return true;
+ } else if (NULL != assign->andAssignment) {
+ AndAssignment* aa = assign->andAssignment.get();
- // None of the subnodes had another enumeration state, so we move on to the
- // next top-level choice.
- ++aa->counter;
- if (aa->counter < aa->choices.size()) {
+ // One of our subnodes might have to move on to its next enumeration state.
+ const AndEnumerableState& aes = aa->choices[aa->counter];
+ for (size_t i = 0; i < aes.subnodesToIndex.size(); ++i) {
+ if (!nextMemo(aes.subnodesToIndex[i])) {
return false;
}
- aa->counter = 0;
- return true;
}
- // This shouldn't happen.
- verify(0);
- return false;
+ // None of the subnodes had another enumeration state, so we move on to the
+ // next top-level choice.
+ ++aa->counter;
+ if (aa->counter < aa->choices.size()) {
+ return false;
+ }
+ aa->counter = 0;
+ return true;
}
-} // namespace mongo
+ // This shouldn't happen.
+ verify(0);
+ return false;
+}
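
The boolean returned by nextMemo() behaves like the carry in mixed-radix addition: a subtree that has exhausted its states wraps back to its initial state and tells its parent to advance. The same convention on a plain vector of counters (an illustrative odometer, not the memo structure itself):

    #include <cstddef>
    #include <vector>

    // Advance to the next state. Returns true ("carry out") when every counter has
    // wrapped back to zero, i.e. enumeration is exhausted; false otherwise.
    bool nextState(std::vector<size_t>* counters, const std::vector<size_t>& limits) {
        for (size_t i = 0; i < counters->size(); ++i) {
            if (++(*counters)[i] < limits[i]) {
                return false;  // no carry: a fresh state was produced
            }
            (*counters)[i] = 0;  // wrap this digit and carry into the next one
        }
        return true;  // carried out of the last digit: back at the starting state
    }
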
+
+} // namespace mongo
diff --git a/src/mongo/db/query/plan_enumerator.h b/src/mongo/db/query/plan_enumerator.h
index 39f0d697145..f12dde08897 100644
--- a/src/mongo/db/query/plan_enumerator.h
+++ b/src/mongo/db/query/plan_enumerator.h
@@ -39,409 +39,409 @@
namespace mongo {
- struct PlanEnumeratorParams {
+struct PlanEnumeratorParams {
+ PlanEnumeratorParams()
+ : intersect(false),
+ maxSolutionsPerOr(internalQueryEnumerationMaxOrSolutions),
+ maxIntersectPerAnd(internalQueryEnumerationMaxIntersectPerAnd) {}
- PlanEnumeratorParams() : intersect(false),
- maxSolutionsPerOr(internalQueryEnumerationMaxOrSolutions),
- maxIntersectPerAnd(internalQueryEnumerationMaxIntersectPerAnd) { }
+ // Do we provide solutions that use more indices than the minimum required to provide
+ // an indexed solution?
+ bool intersect;
- // Do we provide solutions that use more indices than the minimum required to provide
- // an indexed solution?
- bool intersect;
+ // Not owned here.
+ MatchExpression* root;
- // Not owned here.
- MatchExpression* root;
+ // Not owned here.
+ const std::vector<IndexEntry>* indices;
+
+    // How many plans are we willing to output from an OR? We currently consider
+    // all possible OR plans, which means the product of the number of possibilities
+ // for each clause of the OR. This could grow disastrously large.
+ size_t maxSolutionsPerOr;
+
+ // How many intersect plans are we willing to output from an AND? Given that we pursue an
+ // all-pairs approach, we could wind up creating a lot of enumeration possibilities for
+ // certain inputs.
+ size_t maxIntersectPerAnd;
+};
+
+/**
+ * Provides elements from the power set of possible indices to use. Uses the available
+ * predicate information to make better decisions about what indices are best.
+ */
+class PlanEnumerator {
+ MONGO_DISALLOW_COPYING(PlanEnumerator);
+
+public:
+ /**
+ * Constructs an enumerator for the query specified in 'root' which is tagged with
+ * RelevantTag(s). The index patterns mentioned in the tags are described by 'indices'.
+ *
+ * Does not take ownership of any arguments. They must outlive any calls to getNext(...).
+ */
+ PlanEnumerator(const PlanEnumeratorParams& params);
+
+ ~PlanEnumerator();
+
+ /**
+ * Returns OK and performs a sanity check on the input parameters and prepares the
+ * internal state so that getNext() can be called. Returns an error status with a
+ * description if the sanity check failed.
+ */
+ Status init();
+
+ /**
+ * Outputs a possible plan. Leaves in the plan are tagged with an index to use.
+ * Returns true if a plan was outputted, false if no more plans will be outputted.
+ *
+ * 'tree' is set to point to the query tree. A QueryAssignment is built from this tree.
+ * Caller owns the pointer. Note that 'tree' itself points into data owned by the
+ * provided CanonicalQuery.
+ *
+ * Nodes in 'tree' are tagged with indices that should be used to answer the tagged nodes.
+ * Only nodes that have a field name (isLogical() == false) will be tagged.
+ */
+ bool getNext(MatchExpression** tree);
+
+private:
+ //
+ // Memoization strategy
+ //
+
+
+ // Everything is really a size_t but it's far more readable to impose a type via typedef.
+
+ // An ID we use to index into _memo. An entry in _memo is a NodeAssignment.
+ typedef size_t MemoID;
+
+ // An index in _indices.
+ typedef size_t IndexID;
+
+ // The position of a field in a possibly compound index.
+ typedef size_t IndexPosition;
+
+ struct PrepMemoContext {
+ PrepMemoContext() : elemMatchExpr(NULL) {}
+ MatchExpression* elemMatchExpr;
+ };
+
+ /**
+ * Traverses the match expression and generates the memo structure from it.
+ * Returns true if the provided node uses an index, false otherwise.
+ */
+ bool prepMemo(MatchExpression* node, PrepMemoContext context);
+
+ /**
+ * Traverses the memo structure and annotates the tree with IndexTags for the chosen
+ * indices.
+ */
+ void tagMemo(MemoID id);
+
+ /**
+ * Move to the next enumeration state. Each assignment stores its own enumeration state.
+ * See the various ____Assignment classes below for details on enumeration state.
+ *
+ * Returns true if the memo subtree with root 'node' has no further enumeration states. In
+ * this case, that subtree restarts its enumeration at the beginning state. This implies
+ * that the parent of node should move to the next state. If 'node' is the root of the
+ * tree, we are done with enumeration.
+ *
+ * The return of this function can be thought of like a 'carry' in addition.
+ *
+ * Returns false if the memo subtree has moved to the next state.
+ */
+ bool nextMemo(MemoID id);
+ /**
+ * A short word on the memo structure.
+ *
+ * The PlanEnumerator is interested in matching predicates and indices. Predicates
+ * are leaf nodes in the parse tree. {x:5}, {x: {$geoWithin:...}} are both predicates.
+ *
+ * When we have simple predicates, like {x:5}, the task is easy: any indices prefixed
+ * with 'x' can be used to answer the predicate. This is where the PredicateAssignment
+ * is used.
+ *
+ * With logical operators, things are more complicated. Let's start with OR, the simplest.
+ * Since the output of an OR is the union of its results, each of its children must be
+ * indexed for the entire OR to be indexed. If each subtree of an OR is indexable, the
+ * OR is as well.
+ *
+ * For an AND to be indexed, only one of its children must be indexed. AND is an
+ * intersection of its children, so each of its children describes a superset of the
+ * produced results.
+ */
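
The indexability rule described in the comment above can be stated in one line each: an OR is indexable only if every child is indexable, while an AND is indexable if at least one child is. A tiny sketch of that rule (illustrative only):

    #include <algorithm>
    #include <vector>

    bool orIndexable(const std::vector<bool>& children) {
        return std::all_of(children.begin(), children.end(), [](bool b) { return b; });
    }

    bool andIndexable(const std::vector<bool>& children) {
        return std::any_of(children.begin(), children.end(), [](bool b) { return b; });
    }
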
+
+ struct PredicateAssignment {
+ PredicateAssignment() : indexToAssign(0) {}
+
+ std::vector<IndexID> first;
// Not owned here.
- const std::vector<IndexEntry>* indices;
+ MatchExpression* expr;
+
+ // Enumeration state. An indexed predicate's possible states are the indices that the
+ // predicate can directly use (the 'first' indices). As such this value ranges from 0
+ // to first.size()-1 inclusive.
+ size_t indexToAssign;
+ };
+
+ struct OrAssignment {
+ OrAssignment() : counter(0) {}
+
+ // Each child of an OR must be indexed for the OR to be indexed. When an OR moves to a
+ // subsequent state it just asks all its children to move their states forward.
- // How many plans are we willing to ouput from an OR? We currently consider
- // all possibly OR plans, which means the product of the number of possibilities
- // for each clause of the OR. This could grow disastrously large.
- size_t maxSolutionsPerOr;
+ // Must use all of subnodes.
+ std::vector<MemoID> subnodes;
- // How many intersect plans are we willing to output from an AND? Given that we pursue an
- // all-pairs approach, we could wind up creating a lot of enumeration possibilities for
- // certain inputs.
- size_t maxIntersectPerAnd;
+ // The number of OR states that we've enumerated so far.
+ size_t counter;
};
+ // This is used by AndAssignment and is not an actual assignment.
+ struct OneIndexAssignment {
+        // 'preds[i]' uses index 'index' at position 'positions[i]'
+ std::vector<MatchExpression*> preds;
+ std::vector<IndexPosition> positions;
+ IndexID index;
+ };
+
+ struct AndEnumerableState {
+ std::vector<OneIndexAssignment> assignments;
+ std::vector<MemoID> subnodesToIndex;
+ };
+
+ struct AndAssignment {
+ AndAssignment() : counter(0) {}
+
+ std::vector<AndEnumerableState> choices;
+
+ // We're on the counter-th member of state.
+ size_t counter;
+ };
+
+ struct ArrayAssignment {
+ ArrayAssignment() : counter(0) {}
+ std::vector<MemoID> subnodes;
+ size_t counter;
+ };
+
+ /**
+ * Associates indices with predicates.
+ */
+ struct NodeAssignment {
+ std::unique_ptr<PredicateAssignment> pred;
+ std::unique_ptr<OrAssignment> orAssignment;
+ std::unique_ptr<AndAssignment> andAssignment;
+ std::unique_ptr<ArrayAssignment> arrayAssignment;
+ std::string toString() const;
+ };
+
+ /**
+ * Allocates a NodeAssignment and associates it with the provided 'expr'.
+ *
+ * The unique MemoID of the new assignment is outputted in '*id'.
+ * The out parameter '*slot' points to the newly allocated NodeAssignment.
+ */
+ void allocateAssignment(MatchExpression* expr, NodeAssignment** slot, MemoID* id);
+
+ /**
+ * Predicates inside $elemMatch's that are semantically "$and of $and"
+ * predicates are not rewritten to the top-level during normalization.
+ * However, we would like to make predicates inside $elemMatch available
+ * for combining index bounds with the top-level $and predicates.
+ *
+ * This function deeply traverses $and and $elemMatch expressions of
+ * the tree rooted at 'node', adding all preds that can use an index
+ * to the output vector 'indexOut'. At the same time, $elemMatch
+ * context information is stashed in the tags so that we don't lose
+ * information due to flattening.
+ *
+ * Nodes that cannot be deeply traversed are returned via the output
+ * vectors 'subnodesOut' and 'mandatorySubnodes'. Subnodes are "mandatory"
+ * if they *must* use an index (TEXT and GEO).
+ *
+ * Does not take ownership of arguments.
+ *
+ * Returns false if the AND cannot be indexed. Otherwise returns true.
+ */
+ bool partitionPreds(MatchExpression* node,
+ PrepMemoContext context,
+ std::vector<MatchExpression*>* indexOut,
+ std::vector<MemoID>* subnodesOut,
+ std::vector<MemoID>* mandatorySubnodes);
+
+ /**
+ * Finds a set of predicates that can be safely compounded with the set
+ * of predicates in 'assigned', under the assumption that we are assigning
+ * predicates to a compound, multikey index.
+ *
+ * The list of candidate predicates that we could compound is passed
+ * in 'couldCompound'. A subset of these predicates that is safe to
+ * combine by compounding is returned in the out-parameter 'out'.
+ *
+ * Does not take ownership of its arguments.
+ *
+ * The rules for when to compound for multikey indices are reasonably
+ * complex, and are dependent on the structure of $elemMatch's used
+ * in the query. Ignoring $elemMatch for the time being, the rule is this:
+ *
+ * "Any set of predicates for which no two predicates share a path
+ * prefix can be compounded."
+ *
+ * Suppose we have predicates over paths 'a.b' and 'a.c'. These cannot
+ * be compounded because they share the prefix 'a'. Similarly, the bounds
+ * for 'a' and 'a.b' cannot be compounded (in the case of multikey index
+ * {a: 1, 'a.b': 1}). You *can* compound predicates over the paths 'a.b.c',
+ * 'd', and 'e.b.c', because there is no shared prefix.
+ *
+ * The rules are different in the presence of $elemMatch. For $elemMatch
+ * {a: {$elemMatch: {<pred1>, ..., <predN>}}}, we are allowed to compound
+ * bounds for pred1 through predN, even though these predicates share the
+ * path prefix 'a'. However, we still cannot compound in the case of
+ * {a: {$elemMatch: {'b.c': {$gt: 1}, 'b.d': 5}}} because 'b.c' and 'b.d'
+ * share a prefix. In other words, what matters inside an $elemMatch is not
+ * the absolute prefix, but rather the "relative prefix" after the shared
+ * $elemMatch part of the path.
+ *
+ * A few more examples:
+ * 1) {'a.b': {$elemMatch: {c: {$gt: 1}, d: 5}}}. In this case, we can
+ * compound, because the $elemMatch is applied to the shared part of
+ * the path 'a.b'.
+ *
+ * 2) {'a.b': 1, a: {$elemMatch: {b: {$gt: 0}}}}. We cannot combine the
+ * bounds here because the prefix 'a' is shared by two predicates which
+ * are not joined together by an $elemMatch.
+ *
+ * NOTE:
+ * Usually 'assigned' has just one predicate. However, in order to support
+ * mandatory predicate assignment (TEXT and GEO_NEAR), we allow multiple
+ * already-assigned predicates to be passed. If a mandatory predicate is over
+ * a trailing field in a multikey compound index, then we assign both a predicate
+ * over the leading field as well as the mandatory predicate prior to calling
+ * this function.
+ *
+ * Ex:
+ * Say we have index {a: 1, b: 1, c: "2dsphere", d: 1} as well as a $near
+ * predicate and a $within predicate over "c". The $near predicate is mandatory
+ * and must be assigned. The $within predicate is not mandatory. Furthermore,
+ * it cannot be assigned in addition to the $near predicate because the index
+ * is multikey.
+ *
+ * In this case the enumerator must assign the $near predicate, and pass it in
+ * in 'assigned'. Otherwise it would be possible to assign the $within predicate,
+ * and then not assign the $near because the $within is already assigned (and
+ * has the same path).
+ */
+ void getMultikeyCompoundablePreds(const std::vector<MatchExpression*>& assigned,
+ const std::vector<MatchExpression*>& couldCompound,
+ std::vector<MatchExpression*>* out);
+
+ /**
+ * 'andAssignment' contains assignments that we've already committed to outputting,
+ * including both single index assignments and ixisect assignments.
+ *
+ * 'ixisectAssigned' is a set of predicates that we are about to add to 'andAssignment'
+ * as an index intersection assignment.
+ *
+     * Returns true if a single index assignment which is already in 'andAssignment'
+ * contains a superset of the predicates in 'ixisectAssigned'. This means that we
+ * can assign the same preds to a compound index rather than using index intersection.
+ *
+ * Ex.
+ * Suppose we have indices {a: 1}, {b: 1}, and {a: 1, b: 1} with query
+ * {a: 2, b: 2}. When we try to intersect {a: 1} and {b: 1} the predicates
+ * a==2 and b==2 will get assigned to respective indices. But then we will
+ * call this function with ixisectAssigned equal to the set {'a==2', 'b==2'},
+ * and notice that we have already assigned this same set of predicates to
+ * the single index {a: 1, b: 1} via compounding.
+ */
+ bool alreadyCompounded(const std::set<MatchExpression*>& ixisectAssigned,
+ const AndAssignment* andAssignment);
+ /**
+ * Output index intersection assignments inside of an AND node.
+ */
+ typedef unordered_map<IndexID, std::vector<MatchExpression*>> IndexToPredMap;
+
/**
- * Provides elements from the power set of possible indices to use. Uses the available
- * predicate information to make better decisions about what indices are best.
+ * Generate index intersection assignments given the predicate/index structure in idxToFirst
+ * and idxToNotFirst (and the sub-trees in 'subnodes'). Outputs the assignments in
+ * 'andAssignment'.
*/
- class PlanEnumerator {
- MONGO_DISALLOW_COPYING(PlanEnumerator);
- public:
- /**
- * Constructs an enumerator for the query specified in 'root' which is tagged with
- * RelevantTag(s). The index patterns mentioned in the tags are described by 'indices'.
- *
- * Does not take ownership of any arguments. They must outlive any calls to getNext(...).
- */
- PlanEnumerator(const PlanEnumeratorParams& params);
-
- ~PlanEnumerator();
-
- /**
- * Returns OK and performs a sanity check on the input parameters and prepares the
- * internal state so that getNext() can be called. Returns an error status with a
- * description if the sanity check failed.
- */
- Status init();
-
- /**
- * Outputs a possible plan. Leaves in the plan are tagged with an index to use.
- * Returns true if a plan was outputted, false if no more plans will be outputted.
- *
- * 'tree' is set to point to the query tree. A QueryAssignment is built from this tree.
- * Caller owns the pointer. Note that 'tree' itself points into data owned by the
- * provided CanonicalQuery.
- *
- * Nodes in 'tree' are tagged with indices that should be used to answer the tagged nodes.
- * Only nodes that have a field name (isLogical() == false) will be tagged.
- */
- bool getNext(MatchExpression** tree);
-
- private:
-
- //
- // Memoization strategy
- //
-
-
- // Everything is really a size_t but it's far more readable to impose a type via typedef.
-
- // An ID we use to index into _memo. An entry in _memo is a NodeAssignment.
- typedef size_t MemoID;
-
- // An index in _indices.
- typedef size_t IndexID;
-
- // The position of a field in a possibly compound index.
- typedef size_t IndexPosition;
-
- struct PrepMemoContext {
- PrepMemoContext() : elemMatchExpr(NULL) { }
- MatchExpression* elemMatchExpr;
- };
-
- /**
- * Traverses the match expression and generates the memo structure from it.
- * Returns true if the provided node uses an index, false otherwise.
- */
- bool prepMemo(MatchExpression* node, PrepMemoContext context);
-
- /**
- * Traverses the memo structure and annotates the tree with IndexTags for the chosen
- * indices.
- */
- void tagMemo(MemoID id);
-
- /**
- * Move to the next enumeration state. Each assignment stores its own enumeration state.
- * See the various ____Assignment classes below for details on enumeration state.
- *
- * Returns true if the memo subtree with root 'node' has no further enumeration states. In
- * this case, that subtree restarts its enumeration at the beginning state. This implies
- * that the parent of node should move to the next state. If 'node' is the root of the
- * tree, we are done with enumeration.
- *
- * The return of this function can be thought of like a 'carry' in addition.
- *
- * Returns false if the memo subtree has moved to the next state.
- */
- bool nextMemo(MemoID id);
-
- /**
- * A short word on the memo structure.
- *
- * The PlanEnumerator is interested in matching predicates and indices. Predicates
- * are leaf nodes in the parse tree. {x:5}, {x: {$geoWithin:...}} are both predicates.
- *
- * When we have simple predicates, like {x:5}, the task is easy: any indices prefixed
- * with 'x' can be used to answer the predicate. This is where the PredicateAssignment
- * is used.
- *
- * With logical operators, things are more complicated. Let's start with OR, the simplest.
- * Since the output of an OR is the union of its results, each of its children must be
- * indexed for the entire OR to be indexed. If each subtree of an OR is indexable, the
- * OR is as well.
- *
- * For an AND to be indexed, only one of its children must be indexed. AND is an
- * intersection of its children, so each of its children describes a superset of the
- * produced results.
- */
-
- struct PredicateAssignment {
- PredicateAssignment() : indexToAssign(0) { }
-
- std::vector<IndexID> first;
- // Not owned here.
- MatchExpression* expr;
-
- // Enumeration state. An indexed predicate's possible states are the indices that the
- // predicate can directly use (the 'first' indices). As such this value ranges from 0
- // to first.size()-1 inclusive.
- size_t indexToAssign;
- };
-
- struct OrAssignment {
- OrAssignment() : counter(0) { }
-
- // Each child of an OR must be indexed for the OR to be indexed. When an OR moves to a
- // subsequent state it just asks all its children to move their states forward.
-
- // Must use all of subnodes.
- std::vector<MemoID> subnodes;
-
- // The number of OR states that we've enumerated so far.
- size_t counter;
- };
-
- // This is used by AndAssignment and is not an actual assignment.
- struct OneIndexAssignment {
- // 'preds[i]' is uses index 'index' at position 'positions[i]'
- std::vector<MatchExpression*> preds;
- std::vector<IndexPosition> positions;
- IndexID index;
- };
-
- struct AndEnumerableState {
- std::vector<OneIndexAssignment> assignments;
- std::vector<MemoID> subnodesToIndex;
- };
-
- struct AndAssignment {
- AndAssignment() : counter(0) { }
-
- std::vector<AndEnumerableState> choices;
-
- // We're on the counter-th member of state.
- size_t counter;
- };
-
- struct ArrayAssignment {
- ArrayAssignment() : counter(0) { }
- std::vector<MemoID> subnodes;
- size_t counter;
- };
-
- /**
- * Associates indices with predicates.
- */
- struct NodeAssignment {
- std::unique_ptr<PredicateAssignment> pred;
- std::unique_ptr<OrAssignment> orAssignment;
- std::unique_ptr<AndAssignment> andAssignment;
- std::unique_ptr<ArrayAssignment> arrayAssignment;
- std::string toString() const;
- };
-
- /**
- * Allocates a NodeAssignment and associates it with the provided 'expr'.
- *
- * The unique MemoID of the new assignment is outputted in '*id'.
- * The out parameter '*slot' points to the newly allocated NodeAssignment.
- */
- void allocateAssignment(MatchExpression* expr, NodeAssignment** slot, MemoID* id);
-
- /**
- * Predicates inside $elemMatch's that are semantically "$and of $and"
- * predicates are not rewritten to the top-level during normalization.
- * However, we would like to make predicates inside $elemMatch available
- * for combining index bounds with the top-level $and predicates.
- *
- * This function deeply traverses $and and $elemMatch expressions of
- * the tree rooted at 'node', adding all preds that can use an index
- * to the output vector 'indexOut'. At the same time, $elemMatch
- * context information is stashed in the tags so that we don't lose
- * information due to flattening.
- *
- * Nodes that cannot be deeply traversed are returned via the output
- * vectors 'subnodesOut' and 'mandatorySubnodes'. Subnodes are "mandatory"
- * if they *must* use an index (TEXT and GEO).
- *
- * Does not take ownership of arguments.
- *
- * Returns false if the AND cannot be indexed. Otherwise returns true.
- */
- bool partitionPreds(MatchExpression* node,
- PrepMemoContext context,
- std::vector<MatchExpression*>* indexOut,
- std::vector<MemoID>* subnodesOut,
- std::vector<MemoID>* mandatorySubnodes);
-
- /**
- * Finds a set of predicates that can be safely compounded with the set
- * of predicates in 'assigned', under the assumption that we are assigning
- * predicates to a compound, multikey index.
- *
- * The list of candidate predicates that we could compound is passed
- * in 'couldCompound'. A subset of these predicates that is safe to
- * combine by compounding is returned in the out-parameter 'out'.
- *
- * Does not take ownership of its arguments.
- *
- * The rules for when to compound for multikey indices are reasonably
- * complex, and are dependent on the structure of $elemMatch's used
- * in the query. Ignoring $elemMatch for the time being, the rule is this:
- *
- * "Any set of predicates for which no two predicates share a path
- * prefix can be compounded."
- *
- * Suppose we have predicates over paths 'a.b' and 'a.c'. These cannot
- * be compounded because they share the prefix 'a'. Similarly, the bounds
- * for 'a' and 'a.b' cannot be compounded (in the case of multikey index
- * {a: 1, 'a.b': 1}). You *can* compound predicates over the paths 'a.b.c',
- * 'd', and 'e.b.c', because there is no shared prefix.
- *
- * The rules are different in the presence of $elemMatch. For $elemMatch
- * {a: {$elemMatch: {<pred1>, ..., <predN>}}}, we are allowed to compound
- * bounds for pred1 through predN, even though these predicates share the
- * path prefix 'a'. However, we still cannot compound in the case of
- * {a: {$elemMatch: {'b.c': {$gt: 1}, 'b.d': 5}}} because 'b.c' and 'b.d'
- * share a prefix. In other words, what matters inside an $elemMatch is not
- * the absolute prefix, but rather the "relative prefix" after the shared
- * $elemMatch part of the path.
- *
- * A few more examples:
- * 1) {'a.b': {$elemMatch: {c: {$gt: 1}, d: 5}}}. In this case, we can
- * compound, because the $elemMatch is applied to the shared part of
- * the path 'a.b'.
- *
- * 2) {'a.b': 1, a: {$elemMatch: {b: {$gt: 0}}}}. We cannot combine the
- * bounds here because the prefix 'a' is shared by two predicates which
- * are not joined together by an $elemMatch.
- *
- * NOTE:
- * Usually 'assigned' has just one predicate. However, in order to support
- * mandatory predicate assignment (TEXT and GEO_NEAR), we allow multiple
- * already-assigned predicates to be passed. If a mandatory predicate is over
- * a trailing field in a multikey compound index, then we assign both a predicate
- * over the leading field as well as the mandatory predicate prior to calling
- * this function.
- *
- * Ex:
- * Say we have index {a: 1, b: 1, c: "2dsphere", d: 1} as well as a $near
- * predicate and a $within predicate over "c". The $near predicate is mandatory
- * and must be assigned. The $within predicate is not mandatory. Furthermore,
- * it cannot be assigned in addition to the $near predicate because the index
- * is multikey.
- *
- * In this case the enumerator must assign the $near predicate, and pass it in
- * in 'assigned'. Otherwise it would be possible to assign the $within predicate,
- * and then not assign the $near because the $within is already assigned (and
- * has the same path).
- */
- void getMultikeyCompoundablePreds(const std::vector<MatchExpression*>& assigned,
- const std::vector<MatchExpression*>& couldCompound,
- std::vector<MatchExpression*>* out);
-
- /**
- * 'andAssignment' contains assignments that we've already committed to outputting,
- * including both single index assignments and ixisect assignments.
- *
- * 'ixisectAssigned' is a set of predicates that we are about to add to 'andAssignment'
- * as an index intersection assignment.
- *
- * Returns true if a single index assignment which is already in 'andAssignment'
- * contains a superset of the predicates in 'ixisectAssigned'. This means that we
- * can assign the same preds to a compound index rather than using index intersection.
- *
- * Ex.
- * Suppose we have indices {a: 1}, {b: 1}, and {a: 1, b: 1} with query
- * {a: 2, b: 2}. When we try to intersect {a: 1} and {b: 1} the predicates
- * a==2 and b==2 will get assigned to respective indices. But then we will
- * call this function with ixisectAssigned equal to the set {'a==2', 'b==2'},
- * and notice that we have already assigned this same set of predicates to
- * the single index {a: 1, b: 1} via compounding.
- */
- bool alreadyCompounded(const std::set<MatchExpression*>& ixisectAssigned,
- const AndAssignment* andAssignment);
- /**
- * Map from IndexID to the list of predicates that can be answered using that index.
- * Describes the predicate/index structure handed to the enumeration routines below.
- */
- typedef unordered_map<IndexID, std::vector<MatchExpression*> > IndexToPredMap;
-
- /**
- * Generate index intersection assignments given the predicate/index structure in idxToFirst
- * and idxToNotFirst (and the sub-trees in 'subnodes'). Outputs the assignments in
- * 'andAssignment'.
- */
- void enumerateAndIntersect(const IndexToPredMap& idxToFirst,
- const IndexToPredMap& idxToNotFirst,
- const std::vector<MemoID>& subnodes,
- AndAssignment* andAssignment);
-
- /**
- * Generate one-index-at-once assignments given the predicate/index structure in idxToFirst
- * and idxToNotFirst (and the sub-trees in 'subnodes'). Outputs the assignments into
- * 'andAssignment'.
- */
- void enumerateOneIndex(const IndexToPredMap& idxToFirst,
+ void enumerateAndIntersect(const IndexToPredMap& idxToFirst,
const IndexToPredMap& idxToNotFirst,
const std::vector<MemoID>& subnodes,
AndAssignment* andAssignment);
- /**
- * Generate single-index assignments for queries which contain mandatory
- * predicates (TEXT and GEO_NEAR, which are required to use a compatible index).
- * Outputs these assignments into 'andAssignment'.
- *
- * Returns true if it generated at least one assignment, and false if no assignment
- * of 'mandatoryPred' is possible.
- */
- bool enumerateMandatoryIndex(const IndexToPredMap& idxToFirst,
- const IndexToPredMap& idxToNotFirst,
- MatchExpression* mandatoryPred,
- const std::set<IndexID>& mandatoryIndices,
- AndAssignment* andAssignment);
-
- /**
- * Try to assign predicates in 'tryCompound' to 'thisIndex' as compound assignments.
- * Output the assignments in 'assign'.
- */
- void compound(const std::vector<MatchExpression*>& tryCompound,
- const IndexEntry& thisIndex,
- OneIndexAssignment* assign);
-
- /**
- * Return the memo entry for 'node'. Does some sanity checking to ensure that a memo entry
- * actually exists.
- */
- MemoID memoIDForNode(MatchExpression* node);
-
- std::string dumpMemo();
-
- // Map from expression to its MemoID.
- unordered_map<MatchExpression*, MemoID> _nodeToId;
-
- // Map from MemoID to its precomputed solution info.
- unordered_map<MemoID, NodeAssignment*> _memo;
-
- // If true, there are no further enumeration states, and getNext should return false.
- // We could be _done immediately after init if we're unable to output an indexed plan.
- bool _done;
-
- //
- // Data used by all enumeration strategies
- //
-
- // Match expression we're planning for. Not owned by us.
- MatchExpression* _root;
-
- // Indices we're allowed to enumerate with. Not owned here.
- const std::vector<IndexEntry>* _indices;
-
- // Do we output >1 index per AND (index intersection)?
- bool _ixisect;
-
- // How many enumerations are we willing to produce from each OR?
- size_t _orLimit;
-
- // How many things do we want from each AND?
- size_t _intersectLimit;
- };
+ /**
+ * Generate one-index-at-once assignments given the predicate/index structure in idxToFirst
+ * and idxToNotFirst (and the sub-trees in 'subnodes'). Outputs the assignments into
+ * 'andAssignment'.
+ */
+ void enumerateOneIndex(const IndexToPredMap& idxToFirst,
+ const IndexToPredMap& idxToNotFirst,
+ const std::vector<MemoID>& subnodes,
+ AndAssignment* andAssignment);
+
+ /**
+ * Generate single-index assignments for queries which contain mandatory
+ * predicates (TEXT and GEO_NEAR, which are required to use a compatible index).
+ * Outputs these assignments into 'andAssignment'.
+ *
+ * Returns true if it generated at least one assignment, and false if no assignment
+ * of 'mandatoryPred' is possible.
+ */
+ bool enumerateMandatoryIndex(const IndexToPredMap& idxToFirst,
+ const IndexToPredMap& idxToNotFirst,
+ MatchExpression* mandatoryPred,
+ const std::set<IndexID>& mandatoryIndices,
+ AndAssignment* andAssignment);
+
+ /**
+ * Try to assign predicates in 'tryCompound' to 'thisIndex' as compound assignments.
+ * Output the assignments in 'assign'.
+ */
+ void compound(const std::vector<MatchExpression*>& tryCompound,
+ const IndexEntry& thisIndex,
+ OneIndexAssignment* assign);
+
+ /**
+ * Return the memo entry for 'node'. Does some sanity checking to ensure that a memo entry
+ * actually exists.
+ */
+ MemoID memoIDForNode(MatchExpression* node);
+
+ std::string dumpMemo();
+
+ // Map from expression to its MemoID.
+ unordered_map<MatchExpression*, MemoID> _nodeToId;
+
+ // Map from MemoID to its precomputed solution info.
+ unordered_map<MemoID, NodeAssignment*> _memo;
+
+ // If true, there are no further enumeration states, and getNext should return false.
+ // We could be _done immediately after init if we're unable to output an indexed plan.
+ bool _done;
+
+ //
+ // Data used by all enumeration strategies
+ //
+
+ // Match expression we're planning for. Not owned by us.
+ MatchExpression* _root;
+
+ // Indices we're allowed to enumerate with. Not owned here.
+ const std::vector<IndexEntry>* _indices;
+
+ // Do we output >1 index per AND (index intersection)?
+ bool _ixisect;
+
+ // How many enumerations are we willing to produce from each OR?
+ size_t _orLimit;
+
+ // How many things do we want from each AND?
+ size_t _intersectLimit;
+};
-} // namespace mongo
+} // namespace mongo
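
[Editorial note] The plan_enumerator.h comments above describe the multikey compounding rule as "no two predicates may share a path prefix" (outside of a shared $elemMatch). The following is a minimal standalone sketch of that check, purely for illustration: the helper names sharesPathPrefix and isCompoundableSet are assumptions, not MongoDB APIs, and the $elemMatch-relative-prefix refinement described above is deliberately ignored here.

    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical helper: two dotted paths share a path prefix exactly when
    // their first path components are equal, e.g. "a.b" and "a.c" share "a".
    static bool sharesPathPrefix(const std::string& a, const std::string& b) {
        return a.substr(0, a.find('.')) == b.substr(0, b.find('.'));
    }

    // Hypothetical helper: a candidate set is compoundable (ignoring $elemMatch)
    // only if no two of its paths share a prefix.
    static bool isCompoundableSet(const std::vector<std::string>& paths) {
        for (size_t i = 0; i < paths.size(); ++i) {
            for (size_t j = i + 1; j < paths.size(); ++j) {
                if (sharesPathPrefix(paths[i], paths[j])) {
                    return false;
                }
            }
        }
        return true;
    }

    int main() {
        // Mirrors the examples in the comment above: {'a.b', 'a.c'} cannot be
        // compounded, while {'a.b.c', 'd', 'e.b.c'} can.
        std::cout << isCompoundableSet({"a.b", "a.c"}) << "\n";          // prints 0
        std::cout << isCompoundableSet({"a.b.c", "d", "e.b.c"}) << "\n"; // prints 1
        return 0;
    }
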
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index 24b01bb704e..f234948fe50 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -48,513 +48,498 @@
namespace mongo {
- using std::shared_ptr;
- using std::string;
- using std::vector;
-
- namespace {
-
- /**
- * Retrieves the first stage of a given type from the plan tree, or NULL
- * if no such stage is found.
- */
- PlanStage* getStageByType(PlanStage* root, StageType type) {
- if (root->stageType() == type) {
- return root;
- }
+using std::shared_ptr;
+using std::string;
+using std::vector;
- vector<PlanStage*> children = root->getChildren();
- for (size_t i = 0; i < children.size(); i++) {
- PlanStage* result = getStageByType(children[i], type);
- if (result) {
- return result;
- }
- }
-
- return NULL;
- }
+namespace {
+/**
+ * Retrieves the first stage of a given type from the plan tree, or NULL
+ * if no such stage is found.
+ */
+PlanStage* getStageByType(PlanStage* root, StageType type) {
+ if (root->stageType() == type) {
+ return root;
}
- // static
- Status PlanExecutor::make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- return PlanExecutor::make(opCtx, ws, rt, NULL, NULL, collection, "", yieldPolicy, out);
+ vector<PlanStage*> children = root->getChildren();
+ for (size_t i = 0; i < children.size(); i++) {
+ PlanStage* result = getStageByType(children[i], type);
+ if (result) {
+ return result;
+ }
}
- // static
- Status PlanExecutor::make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- const std::string& ns,
- YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- return PlanExecutor::make(opCtx, ws, rt, NULL, NULL, NULL, ns, yieldPolicy, out);
+ return NULL;
+}
+}
+
+// static
+Status PlanExecutor::make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ const Collection* collection,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out) {
+ return PlanExecutor::make(opCtx, ws, rt, NULL, NULL, collection, "", yieldPolicy, out);
+}
+
+// static
+Status PlanExecutor::make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ const std::string& ns,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out) {
+ return PlanExecutor::make(opCtx, ws, rt, NULL, NULL, NULL, ns, yieldPolicy, out);
+}
+
+// static
+Status PlanExecutor::make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ CanonicalQuery* cq,
+ const Collection* collection,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out) {
+ return PlanExecutor::make(opCtx, ws, rt, NULL, cq, collection, "", yieldPolicy, out);
+}
+
+// static
+Status PlanExecutor::make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ QuerySolution* qs,
+ CanonicalQuery* cq,
+ const Collection* collection,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out) {
+ return PlanExecutor::make(opCtx, ws, rt, qs, cq, collection, "", yieldPolicy, out);
+}
+
+// static
+Status PlanExecutor::make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ QuerySolution* qs,
+ CanonicalQuery* cq,
+ const Collection* collection,
+ const std::string& ns,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out) {
+ std::unique_ptr<PlanExecutor> exec(new PlanExecutor(opCtx, ws, rt, qs, cq, collection, ns));
+
+ // Perform plan selection, if necessary.
+ Status status = exec->pickBestPlan(yieldPolicy);
+ if (!status.isOK()) {
+ return status;
}
- // static
- Status PlanExecutor::make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- CanonicalQuery* cq,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- return PlanExecutor::make(opCtx, ws, rt, NULL, cq, collection, "", yieldPolicy, out);
+ *out = exec.release();
+ return Status::OK();
+}
+
+PlanExecutor::PlanExecutor(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ QuerySolution* qs,
+ CanonicalQuery* cq,
+ const Collection* collection,
+ const std::string& ns)
+ : _opCtx(opCtx),
+ _collection(collection),
+ _cq(cq),
+ _workingSet(ws),
+ _qs(qs),
+ _root(rt),
+ _ns(ns),
+ _yieldPolicy(new PlanYieldPolicy(this, YIELD_MANUAL)) {
+ // We may still need to initialize _ns from either _collection or _cq.
+ if (!_ns.empty()) {
+ // We already have an _ns set, so there's nothing more to do.
+ return;
}
- // static
- Status PlanExecutor::make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- return PlanExecutor::make(opCtx, ws, rt, qs, cq, collection, "", yieldPolicy, out);
+ if (NULL != _collection) {
+ _ns = _collection->ns().ns();
+ } else {
+ invariant(NULL != _cq.get());
+ _ns = _cq->getParsed().ns();
}
-
- // static
- Status PlanExecutor::make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
- const Collection* collection,
- const std::string& ns,
- YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- std::unique_ptr<PlanExecutor> exec(new PlanExecutor(opCtx, ws, rt, qs, cq, collection, ns));
-
- // Perform plan selection, if necessary.
- Status status = exec->pickBestPlan(yieldPolicy);
- if (!status.isOK()) {
- return status;
- }
-
- *out = exec.release();
- return Status::OK();
+}
+
+Status PlanExecutor::pickBestPlan(YieldPolicy policy) {
+ // For YIELD_AUTO, this will both set an auto yield policy on the PlanExecutor and
+ // register it to receive notifications.
+ this->setYieldPolicy(policy);
+
+ // First check if we need to do subplanning.
+ PlanStage* foundStage = getStageByType(_root.get(), STAGE_SUBPLAN);
+ if (foundStage) {
+ SubplanStage* subplan = static_cast<SubplanStage*>(foundStage);
+ return subplan->pickBestPlan(_yieldPolicy.get());
}
- PlanExecutor::PlanExecutor(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
- const Collection* collection,
- const std::string& ns)
- : _opCtx(opCtx),
- _collection(collection),
- _cq(cq),
- _workingSet(ws),
- _qs(qs),
- _root(rt),
- _ns(ns),
- _yieldPolicy(new PlanYieldPolicy(this, YIELD_MANUAL)) {
- // We may still need to initialize _ns from either _collection or _cq.
- if (!_ns.empty()) {
- // We already have an _ns set, so there's nothing more to do.
- return;
- }
+ // If we didn't have to do subplanning, we might still have to do regular
+ // multi plan selection...
+ foundStage = getStageByType(_root.get(), STAGE_MULTI_PLAN);
+ if (foundStage) {
+ MultiPlanStage* mps = static_cast<MultiPlanStage*>(foundStage);
+ return mps->pickBestPlan(_yieldPolicy.get());
+ }
- if (NULL != _collection) {
- _ns = _collection->ns().ns();
- }
- else {
- invariant(NULL != _cq.get());
- _ns = _cq->getParsed().ns();
- }
+ // ...or, we might have to run a plan from the cache for a trial period, falling back on
+ // regular planning if the cached plan performs poorly.
+ foundStage = getStageByType(_root.get(), STAGE_CACHED_PLAN);
+ if (foundStage) {
+ CachedPlanStage* cachedPlan = static_cast<CachedPlanStage*>(foundStage);
+ return cachedPlan->pickBestPlan(_yieldPolicy.get());
}
- Status PlanExecutor::pickBestPlan(YieldPolicy policy) {
- // For YIELD_AUTO, this will both set an auto yield policy on the PlanExecutor and
- // register it to receive notifications.
- this->setYieldPolicy(policy);
+ // Either we chose a plan, or no plan selection was required. In both cases,
+ // our work has been successfully completed.
+ return Status::OK();
+}
+
+PlanExecutor::~PlanExecutor() {}
+
+// static
+std::string PlanExecutor::statestr(ExecState s) {
+ if (PlanExecutor::ADVANCED == s) {
+ return "ADVANCED";
+ } else if (PlanExecutor::IS_EOF == s) {
+ return "IS_EOF";
+ } else if (PlanExecutor::DEAD == s) {
+ return "DEAD";
+ } else {
+ verify(PlanExecutor::FAILURE == s);
+ return "FAILURE";
+ }
+}
- // First check if we need to do subplanning.
- PlanStage* foundStage = getStageByType(_root.get(), STAGE_SUBPLAN);
- if (foundStage) {
- SubplanStage* subplan = static_cast<SubplanStage*>(foundStage);
- return subplan->pickBestPlan(_yieldPolicy.get());
- }
+WorkingSet* PlanExecutor::getWorkingSet() const {
+ return _workingSet.get();
+}
- // If we didn't have to do subplanning, we might still have to do regular
- // multi plan selection...
- foundStage = getStageByType(_root.get(), STAGE_MULTI_PLAN);
- if (foundStage) {
- MultiPlanStage* mps = static_cast<MultiPlanStage*>(foundStage);
- return mps->pickBestPlan(_yieldPolicy.get());
- }
+PlanStage* PlanExecutor::getRootStage() const {
+ return _root.get();
+}
- // ...or, we might have to run a plan from the cache for a trial period, falling back on
- // regular planning if the cached plan performs poorly.
- foundStage = getStageByType(_root.get(), STAGE_CACHED_PLAN);
- if (foundStage) {
- CachedPlanStage* cachedPlan = static_cast<CachedPlanStage*>(foundStage);
- return cachedPlan->pickBestPlan(_yieldPolicy.get());
- }
+CanonicalQuery* PlanExecutor::getCanonicalQuery() const {
+ return _cq.get();
+}
- // Either we chose a plan, or no plan selection was required. In both cases,
- // our work has been successfully completed.
- return Status::OK();
- }
+PlanStageStats* PlanExecutor::getStats() const {
+ return _root->getStats();
+}
- PlanExecutor::~PlanExecutor() { }
+const Collection* PlanExecutor::collection() const {
+ return _collection;
+}
- // static
- std::string PlanExecutor::statestr(ExecState s) {
- if (PlanExecutor::ADVANCED == s) {
- return "ADVANCED";
- }
- else if (PlanExecutor::IS_EOF == s) {
- return "IS_EOF";
- }
- else if (PlanExecutor::DEAD == s) {
- return "DEAD";
- }
- else {
- verify(PlanExecutor::FAILURE == s);
- return "FAILURE";
- }
- }
+OperationContext* PlanExecutor::getOpCtx() const {
+ return _opCtx;
+}
- WorkingSet* PlanExecutor::getWorkingSet() const {
- return _workingSet.get();
+void PlanExecutor::saveState() {
+ if (!killed()) {
+ _root->saveState();
}
- PlanStage* PlanExecutor::getRootStage() const {
- return _root.get();
+ // Doc-locking storage engines drop their transactional context after saving state.
+ // The query stages inside this stage tree might buffer record ids (e.g. text, geoNear,
+ // mergeSort, sort) which are no longer protected by the storage engine's transactional
+ // boundaries. Force-fetch the documents for any such record ids so that we have our
+ // own copy in the working set.
+ if (supportsDocLocking()) {
+ WorkingSetCommon::prepareForSnapshotChange(_workingSet.get());
}
- CanonicalQuery* PlanExecutor::getCanonicalQuery() const {
- return _cq.get();
- }
+ _opCtx = NULL;
+}
- PlanStageStats* PlanExecutor::getStats() const {
- return _root->getStats();
- }
+bool PlanExecutor::restoreState(OperationContext* opCtx) {
+ try {
+ return restoreStateWithoutRetrying(opCtx);
+ } catch (const WriteConflictException& wce) {
+ if (!_yieldPolicy->allowedToYield())
+ throw;
- const Collection* PlanExecutor::collection() const {
- return _collection;
+ // Handles retries by calling restoreStateWithoutRetrying() in a loop.
+ return _yieldPolicy->yield(NULL);
}
+}
- OperationContext* PlanExecutor::getOpCtx() const {
- return _opCtx;
- }
+bool PlanExecutor::restoreStateWithoutRetrying(OperationContext* opCtx) {
+ invariant(NULL == _opCtx);
+ invariant(opCtx);
- void PlanExecutor::saveState() {
- if (!killed()) {
- _root->saveState();
- }
+ _opCtx = opCtx;
- // Doc-locking storage engines drop their transactional context after saving state.
- // The query stages inside this stage tree might buffer record ids (e.g. text, geoNear,
- // mergeSort, sort) which are no longer protected by the storage engine's transactional
- // boundaries. Force-fetch the documents for any such record ids so that we have our
- // own copy in the working set.
- if (supportsDocLocking()) {
- WorkingSetCommon::prepareForSnapshotChange(_workingSet.get());
- }
+ // We're restoring after a yield or getMore now. If we're a yielding plan executor, reset
+    // the yield timer in order to prevent it from yielding again right away.
+ _yieldPolicy->resetTimer();
- _opCtx = NULL;
+ if (!killed()) {
+ _root->restoreState(opCtx);
}
- bool PlanExecutor::restoreState(OperationContext* opCtx) {
- try {
- return restoreStateWithoutRetrying(opCtx);
- }
- catch (const WriteConflictException& wce) {
- if (!_yieldPolicy->allowedToYield())
- throw;
+ return !killed();
+}
- // Handles retries by calling restoreStateWithoutRetrying() in a loop.
- return _yieldPolicy->yield(NULL);
- }
+void PlanExecutor::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ if (!killed()) {
+ _root->invalidate(txn, dl, type);
}
+}
- bool PlanExecutor::restoreStateWithoutRetrying(OperationContext* opCtx) {
- invariant(NULL == _opCtx);
- invariant(opCtx);
+PlanExecutor::ExecState PlanExecutor::getNext(BSONObj* objOut, RecordId* dlOut) {
+ Snapshotted<BSONObj> snapshotted;
+ ExecState state = getNextSnapshotted(objOut ? &snapshotted : NULL, dlOut);
- _opCtx = opCtx;
-
- // We're restoring after a yield or getMore now. If we're a yielding plan executor, reset
- // the yield timer in order to prevent it from yielding again right away.
- _yieldPolicy->resetTimer();
-
- if (!killed()) {
- _root->restoreState(opCtx);
- }
-
- return !killed();
- }
-
- void PlanExecutor::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- if (!killed()) { _root->invalidate(txn, dl, type); }
+ if (objOut) {
+ *objOut = snapshotted.value();
}
- PlanExecutor::ExecState PlanExecutor::getNext(BSONObj* objOut, RecordId* dlOut) {
- Snapshotted<BSONObj> snapshotted;
- ExecState state = getNextSnapshotted(objOut ? &snapshotted : NULL, dlOut);
-
- if (objOut) {
- *objOut = snapshotted.value();
+ return state;
+}
+
+PlanExecutor::ExecState PlanExecutor::getNextSnapshotted(Snapshotted<BSONObj>* objOut,
+ RecordId* dlOut) {
+ if (killed()) {
+ if (NULL != objOut) {
+ Status status(ErrorCodes::OperationFailed,
+ str::stream() << "Operation aborted because: " << *_killReason);
+ *objOut = Snapshotted<BSONObj>(SnapshotId(),
+ WorkingSetCommon::buildMemberStatusObject(status));
}
-
- return state;
+ return PlanExecutor::DEAD;
}
- PlanExecutor::ExecState PlanExecutor::getNextSnapshotted(Snapshotted<BSONObj>* objOut,
- RecordId* dlOut) {
- if (killed()) {
- if (NULL != objOut) {
- Status status(ErrorCodes::OperationFailed,
- str::stream() << "Operation aborted because: " << *_killReason);
- *objOut = Snapshotted<BSONObj>(SnapshotId(),
- WorkingSetCommon::buildMemberStatusObject(status));
- }
- return PlanExecutor::DEAD;
- }
-
- if (!_stash.empty()) {
- invariant(objOut && !dlOut);
- *objOut = {SnapshotId(), _stash.front()};
- _stash.pop();
- return PlanExecutor::ADVANCED;
- }
+ if (!_stash.empty()) {
+ invariant(objOut && !dlOut);
+ *objOut = {SnapshotId(), _stash.front()};
+ _stash.pop();
+ return PlanExecutor::ADVANCED;
+ }
- // When a stage requests a yield for document fetch, it gives us back a RecordFetcher*
- // to use to pull the record into memory. We take ownership of the RecordFetcher here,
- // deleting it after we've had a chance to do the fetch. For timing-based yields, we
- // just pass a NULL fetcher.
- std::unique_ptr<RecordFetcher> fetcher;
-
- // Incremented on every writeConflict, reset to 0 on any successful call to _root->work.
- size_t writeConflictsInARow = 0;
-
- for (;;) {
- // These are the conditions which can cause us to yield:
- // 1) The yield policy's timer elapsed, or
- // 2) some stage requested a yield due to a document fetch, or
- // 3) we need to yield and retry due to a WriteConflictException.
- // In all cases, the actual yielding happens here.
- if (_yieldPolicy->shouldYield()) {
- _yieldPolicy->yield(fetcher.get());
-
- if (killed()) {
- if (NULL != objOut) {
- Status status(ErrorCodes::OperationFailed,
- str::stream() << "Operation aborted because: "
- << *_killReason);
- *objOut = Snapshotted<BSONObj>(
- SnapshotId(),
- WorkingSetCommon::buildMemberStatusObject(status));
- }
- return PlanExecutor::DEAD;
+ // When a stage requests a yield for document fetch, it gives us back a RecordFetcher*
+ // to use to pull the record into memory. We take ownership of the RecordFetcher here,
+ // deleting it after we've had a chance to do the fetch. For timing-based yields, we
+ // just pass a NULL fetcher.
+ std::unique_ptr<RecordFetcher> fetcher;
+
+ // Incremented on every writeConflict, reset to 0 on any successful call to _root->work.
+ size_t writeConflictsInARow = 0;
+
+ for (;;) {
+ // These are the conditions which can cause us to yield:
+ // 1) The yield policy's timer elapsed, or
+ // 2) some stage requested a yield due to a document fetch, or
+ // 3) we need to yield and retry due to a WriteConflictException.
+ // In all cases, the actual yielding happens here.
+ if (_yieldPolicy->shouldYield()) {
+ _yieldPolicy->yield(fetcher.get());
+
+ if (killed()) {
+ if (NULL != objOut) {
+ Status status(ErrorCodes::OperationFailed,
+ str::stream() << "Operation aborted because: " << *_killReason);
+ *objOut = Snapshotted<BSONObj>(
+ SnapshotId(), WorkingSetCommon::buildMemberStatusObject(status));
}
+ return PlanExecutor::DEAD;
}
+ }
- // We're done using the fetcher, so it should be freed. We don't want to
- // use the same RecordFetcher twice.
- fetcher.reset();
-
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState code = _root->work(&id);
+ // We're done using the fetcher, so it should be freed. We don't want to
+ // use the same RecordFetcher twice.
+ fetcher.reset();
- if (code != PlanStage::NEED_YIELD)
- writeConflictsInARow = 0;
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState code = _root->work(&id);
- if (PlanStage::ADVANCED == code) {
- // Fast count.
- if (WorkingSet::INVALID_ID == id) {
- invariant(NULL == objOut);
- invariant(NULL == dlOut);
- return PlanExecutor::ADVANCED;
- }
+ if (code != PlanStage::NEED_YIELD)
+ writeConflictsInARow = 0;
- WorkingSetMember* member = _workingSet->get(id);
- bool hasRequestedData = true;
+ if (PlanStage::ADVANCED == code) {
+ // Fast count.
+ if (WorkingSet::INVALID_ID == id) {
+ invariant(NULL == objOut);
+ invariant(NULL == dlOut);
+ return PlanExecutor::ADVANCED;
+ }
- if (NULL != objOut) {
- if (WorkingSetMember::LOC_AND_IDX == member->state) {
- if (1 != member->keyData.size()) {
- _workingSet->free(id);
- hasRequestedData = false;
- }
- else {
- // TODO: currently snapshot ids are only associated with documents, and
- // not with index keys.
- *objOut = Snapshotted<BSONObj>(SnapshotId(),
- member->keyData[0].keyData);
- }
- }
- else if (member->hasObj()) {
- *objOut = member->obj;
- }
- else {
- _workingSet->free(id);
- hasRequestedData = false;
- }
- }
+ WorkingSetMember* member = _workingSet->get(id);
+ bool hasRequestedData = true;
- if (NULL != dlOut) {
- if (member->hasLoc()) {
- *dlOut = member->loc;
- }
- else {
+ if (NULL != objOut) {
+ if (WorkingSetMember::LOC_AND_IDX == member->state) {
+ if (1 != member->keyData.size()) {
_workingSet->free(id);
hasRequestedData = false;
+ } else {
+ // TODO: currently snapshot ids are only associated with documents, and
+ // not with index keys.
+ *objOut = Snapshotted<BSONObj>(SnapshotId(), member->keyData[0].keyData);
}
- }
-
- if (hasRequestedData) {
+ } else if (member->hasObj()) {
+ *objOut = member->obj;
+ } else {
_workingSet->free(id);
- return PlanExecutor::ADVANCED;
+ hasRequestedData = false;
}
- // This result didn't have the data the caller wanted, try again.
}
- else if (PlanStage::NEED_YIELD == code) {
- if (id == WorkingSet::INVALID_ID) {
- if (!_yieldPolicy->allowedToYield()) throw WriteConflictException();
- CurOp::get(_opCtx)->debug().writeConflicts++;
- writeConflictsInARow++;
- WriteConflictException::logAndBackoff(writeConflictsInARow,
- "plan execution",
- _collection->ns().ns());
+ if (NULL != dlOut) {
+ if (member->hasLoc()) {
+ *dlOut = member->loc;
+ } else {
+ _workingSet->free(id);
+ hasRequestedData = false;
}
- else {
- WorkingSetMember* member = _workingSet->get(id);
- invariant(member->hasFetcher());
- // Transfer ownership of the fetcher. Next time around the loop a yield will
- // happen.
- fetcher.reset(member->releaseFetcher());
- }
-
- // If we're allowed to, we will yield next time through the loop.
- if (_yieldPolicy->allowedToYield()) _yieldPolicy->forceYield();
}
- else if (PlanStage::NEED_TIME == code) {
- // Fall through to yield check at end of large conditional.
+
+ if (hasRequestedData) {
+ _workingSet->free(id);
+ return PlanExecutor::ADVANCED;
}
- else if (PlanStage::IS_EOF == code) {
- return PlanExecutor::IS_EOF;
+ // This result didn't have the data the caller wanted, try again.
+ } else if (PlanStage::NEED_YIELD == code) {
+ if (id == WorkingSet::INVALID_ID) {
+ if (!_yieldPolicy->allowedToYield())
+ throw WriteConflictException();
+ CurOp::get(_opCtx)->debug().writeConflicts++;
+ writeConflictsInARow++;
+ WriteConflictException::logAndBackoff(
+ writeConflictsInARow, "plan execution", _collection->ns().ns());
+
+ } else {
+ WorkingSetMember* member = _workingSet->get(id);
+ invariant(member->hasFetcher());
+ // Transfer ownership of the fetcher. Next time around the loop a yield will
+ // happen.
+ fetcher.reset(member->releaseFetcher());
}
- else {
- invariant(PlanStage::DEAD == code || PlanStage::FAILURE == code);
- if (NULL != objOut) {
- BSONObj statusObj;
- WorkingSetCommon::getStatusMemberObject(*_workingSet, id, &statusObj);
- *objOut = Snapshotted<BSONObj>(SnapshotId(), statusObj);
- }
+ // If we're allowed to, we will yield next time through the loop.
+ if (_yieldPolicy->allowedToYield())
+ _yieldPolicy->forceYield();
+ } else if (PlanStage::NEED_TIME == code) {
+ // Fall through to yield check at end of large conditional.
+ } else if (PlanStage::IS_EOF == code) {
+ return PlanExecutor::IS_EOF;
+ } else {
+ invariant(PlanStage::DEAD == code || PlanStage::FAILURE == code);
- return (PlanStage::DEAD == code) ? PlanExecutor::DEAD : PlanExecutor::FAILURE;
+ if (NULL != objOut) {
+ BSONObj statusObj;
+ WorkingSetCommon::getStatusMemberObject(*_workingSet, id, &statusObj);
+ *objOut = Snapshotted<BSONObj>(SnapshotId(), statusObj);
}
- }
- }
-
- bool PlanExecutor::isEOF() {
- return killed() || (_stash.empty() && _root->isEOF());
- }
- void PlanExecutor::registerExec() {
- _safety.reset(new ScopedExecutorRegistration(this));
- }
-
- void PlanExecutor::deregisterExec() {
- _safety.reset();
+ return (PlanStage::DEAD == code) ? PlanExecutor::DEAD : PlanExecutor::FAILURE;
+ }
}
-
- void PlanExecutor::kill(std::string reason) {
- _killReason = std::move(reason);
- _collection = NULL;
-
- // XXX: PlanExecutor is designed to wrap a single execution tree. In the case of
- // aggregation queries, PlanExecutor wraps a proxy stage responsible for pulling results
- // from an aggregation pipeline. The aggregation pipeline pulls results from yet another
- // PlanExecutor. Such nested PlanExecutors require us to manually propagate kill() to
- // the "inner" executor. This is bad, and hopefully can be fixed down the line with the
- // unification of agg and query.
- //
- // The CachedPlanStage is another special case. It needs to update the plan cache from
- // its destructor. It needs to know whether it has been killed so that it can avoid
- // touching a potentially invalid plan cache in this case.
- //
- // TODO: get rid of this code block.
- {
- PlanStage* foundStage = getStageByType(_root.get(), STAGE_PIPELINE_PROXY);
- if (foundStage) {
- PipelineProxyStage* proxyStage = static_cast<PipelineProxyStage*>(foundStage);
- shared_ptr<PlanExecutor> childExec = proxyStage->getChildExecutor();
- if (childExec) {
- childExec->kill(*_killReason);
- }
+}
+
+bool PlanExecutor::isEOF() {
+ return killed() || (_stash.empty() && _root->isEOF());
+}
+
+void PlanExecutor::registerExec() {
+ _safety.reset(new ScopedExecutorRegistration(this));
+}
+
+void PlanExecutor::deregisterExec() {
+ _safety.reset();
+}
+
+void PlanExecutor::kill(std::string reason) {
+ _killReason = std::move(reason);
+ _collection = NULL;
+
+ // XXX: PlanExecutor is designed to wrap a single execution tree. In the case of
+ // aggregation queries, PlanExecutor wraps a proxy stage responsible for pulling results
+ // from an aggregation pipeline. The aggregation pipeline pulls results from yet another
+ // PlanExecutor. Such nested PlanExecutors require us to manually propagate kill() to
+ // the "inner" executor. This is bad, and hopefully can be fixed down the line with the
+ // unification of agg and query.
+ //
+ // The CachedPlanStage is another special case. It needs to update the plan cache from
+ // its destructor. It needs to know whether it has been killed so that it can avoid
+ // touching a potentially invalid plan cache in this case.
+ //
+ // TODO: get rid of this code block.
+ {
+ PlanStage* foundStage = getStageByType(_root.get(), STAGE_PIPELINE_PROXY);
+ if (foundStage) {
+ PipelineProxyStage* proxyStage = static_cast<PipelineProxyStage*>(foundStage);
+ shared_ptr<PlanExecutor> childExec = proxyStage->getChildExecutor();
+ if (childExec) {
+ childExec->kill(*_killReason);
}
}
}
+}
- Status PlanExecutor::executePlan() {
- BSONObj obj;
- PlanExecutor::ExecState state = PlanExecutor::ADVANCED;
- while (PlanExecutor::ADVANCED == state) {
- state = this->getNext(&obj, NULL);
- }
-
- if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
- return Status(ErrorCodes::OperationFailed,
- str::stream() << "Exec error: " << WorkingSetCommon::toStatusString(obj)
- << ", state: " << PlanExecutor::statestr(state));
- }
-
- invariant(PlanExecutor::IS_EOF == state);
- return Status::OK();
+Status PlanExecutor::executePlan() {
+ BSONObj obj;
+ PlanExecutor::ExecState state = PlanExecutor::ADVANCED;
+ while (PlanExecutor::ADVANCED == state) {
+ state = this->getNext(&obj, NULL);
}
- const string& PlanExecutor::ns() {
- return _ns;
+ if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
+ return Status(ErrorCodes::OperationFailed,
+ str::stream() << "Exec error: " << WorkingSetCommon::toStatusString(obj)
+ << ", state: " << PlanExecutor::statestr(state));
}
- void PlanExecutor::setYieldPolicy(YieldPolicy policy, bool registerExecutor) {
- _yieldPolicy->setPolicy(policy);
- if (PlanExecutor::YIELD_AUTO == policy) {
- // Runners that yield automatically generally need to be registered so that
- // after yielding, they receive notifications of events like deletions and
- // index drops. The only exception is that a few PlanExecutors get registered
- // by ClientCursor instead of being registered here. This is unneeded if we only do
- // partial "yields" for WriteConflict retrying.
- if (registerExecutor) {
- this->registerExec();
- }
+ invariant(PlanExecutor::IS_EOF == state);
+ return Status::OK();
+}
+
+const string& PlanExecutor::ns() {
+ return _ns;
+}
+
+void PlanExecutor::setYieldPolicy(YieldPolicy policy, bool registerExecutor) {
+ _yieldPolicy->setPolicy(policy);
+ if (PlanExecutor::YIELD_AUTO == policy) {
+ // Runners that yield automatically generally need to be registered so that
+ // after yielding, they receive notifications of events like deletions and
+ // index drops. The only exception is that a few PlanExecutors get registered
+ // by ClientCursor instead of being registered here. This is unneeded if we only do
+ // partial "yields" for WriteConflict retrying.
+ if (registerExecutor) {
+ this->registerExec();
}
}
-
- void PlanExecutor::enqueue(const BSONObj& obj) {
- _stash.push(obj.getOwned());
- }
-
- //
- // ScopedExecutorRegistration
- //
-
- PlanExecutor::ScopedExecutorRegistration::ScopedExecutorRegistration(PlanExecutor* exec)
- : _exec(exec) {
- // Collection can be null for an EOFStage plan, or other places where registration
- // is not needed.
- if (_exec->collection()) {
- _exec->collection()->getCursorManager()->registerExecutor(exec);
- }
+}
+
+void PlanExecutor::enqueue(const BSONObj& obj) {
+ _stash.push(obj.getOwned());
+}
+
+//
+// ScopedExecutorRegistration
+//
+
+PlanExecutor::ScopedExecutorRegistration::ScopedExecutorRegistration(PlanExecutor* exec)
+ : _exec(exec) {
+ // Collection can be null for an EOFStage plan, or other places where registration
+ // is not needed.
+ if (_exec->collection()) {
+ _exec->collection()->getCursorManager()->registerExecutor(exec);
}
+}
- PlanExecutor::ScopedExecutorRegistration::~ScopedExecutorRegistration() {
- if (_exec->collection()) {
- _exec->collection()->getCursorManager()->deregisterExecutor(_exec);
- }
+PlanExecutor::ScopedExecutorRegistration::~ScopedExecutorRegistration() {
+ if (_exec->collection()) {
+ _exec->collection()->getCursorManager()->deregisterExecutor(_exec);
}
+}
-} // namespace mongo
+} // namespace mongo
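
[Editorial note] executePlan() and getNextSnapshotted() above implement a simple driver pattern: work the plan while it returns ADVANCED, then interpret the terminal state (IS_EOF vs. DEAD/FAILURE). The sketch below models only that caller-side state machine; FakeExecutor, runToCompletion, and the simplified ExecState enum are hypothetical stand-ins, not the real PlanExecutor interface.

    #include <iostream>
    #include <queue>
    #include <string>

    // Simplified stand-in for PlanExecutor::ExecState.
    enum class ExecState { ADVANCED, IS_EOF, DEAD, FAILURE };

    // Hypothetical executor that drains a queue of canned results.
    class FakeExecutor {
    public:
        explicit FakeExecutor(std::queue<std::string> results) : _results(std::move(results)) {}

        // Mirrors the shape of getNext(): ADVANCED while results remain,
        // IS_EOF once the plan is exhausted.
        ExecState getNext(std::string* out) {
            if (_results.empty()) {
                return ExecState::IS_EOF;
            }
            *out = _results.front();
            _results.pop();
            return ExecState::ADVANCED;
        }

    private:
        std::queue<std::string> _results;
    };

    // Caller-side loop in the style of executePlan(): keep working the plan
    // while it ADVANCEs, then map the terminal state to success or failure.
    bool runToCompletion(FakeExecutor& exec) {
        std::string obj;
        ExecState state = ExecState::ADVANCED;
        while (state == ExecState::ADVANCED) {
            state = exec.getNext(&obj);
        }
        if (state == ExecState::DEAD || state == ExecState::FAILURE) {
            std::cerr << "exec error in state " << static_cast<int>(state) << "\n";
            return false;
        }
        return state == ExecState::IS_EOF;
    }

    int main() {
        std::queue<std::string> results;
        results.push("{a: 1}");
        results.push("{a: 2}");
        FakeExecutor exec(std::move(results));
        std::cout << (runToCompletion(exec) ? "ok" : "failed") << "\n";
        return 0;
    }
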
diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h
index c3e6fdc94c7..34611eba38f 100644
--- a/src/mongo/db/query/plan_executor.h
+++ b/src/mongo/db/query/plan_executor.h
@@ -38,413 +38,414 @@
namespace mongo {
- class BSONObj;
- class Collection;
- class RecordId;
- class PlanStage;
- class PlanExecutor;
- struct PlanStageStats;
- class PlanYieldPolicy;
- class WorkingSet;
+class BSONObj;
+class Collection;
+class RecordId;
+class PlanStage;
+class PlanExecutor;
+struct PlanStageStats;
+class PlanYieldPolicy;
+class WorkingSet;
+
+/**
+ * A PlanExecutor is the abstraction that knows how to crank a tree of stages into execution.
+ * The executor is usually part of a larger abstraction that is interacting with the cache
+ * and/or the query optimizer.
+ *
+ * Executes a plan. Calls work() on a plan until a result is produced. Stops when the plan is
+ * EOF or if the plan errors.
+ */
+class PlanExecutor {
+public:
+ enum ExecState {
+ // We successfully populated the out parameter.
+ ADVANCED,
+
+ // We're EOF. We won't return any more results (edge case exception: capped+tailable).
+ IS_EOF,
+
+ // We were killed. This is a special failure case in which we cannot rely on the
+ // collection or database to still be valid.
+ // If the underlying PlanStage has any information on the error, it will be available in
+ // the objOut parameter. Call WorkingSetCommon::toStatusString() to retrieve the error
+ // details from the output BSON object.
+ DEAD,
+
+ // getNext was asked for data it cannot provide, or the underlying PlanStage had an
+ // unrecoverable error.
+ // If the underlying PlanStage has any information on the error, it will be available in
+ // the objOut parameter. Call WorkingSetCommon::toStatusString() to retrieve the error
+ // details from the output BSON object.
+ FAILURE,
+ };
/**
- * A PlanExecutor is the abstraction that knows how to crank a tree of stages into execution.
- * The executor is usually part of a larger abstraction that is interacting with the cache
- * and/or the query optimizer.
- *
- * Executes a plan. Calls work() on a plan until a result is produced. Stops when the plan is
- * EOF or if the plan errors.
+ * The yielding policy of the plan executor. By default, an executor does not yield itself
+ * (YIELD_MANUAL).
*/
- class PlanExecutor {
- public:
-
- enum ExecState {
- // We successfully populated the out parameter.
- ADVANCED,
-
- // We're EOF. We won't return any more results (edge case exception: capped+tailable).
- IS_EOF,
-
- // We were killed. This is a special failure case in which we cannot rely on the
- // collection or database to still be valid.
- // If the underlying PlanStage has any information on the error, it will be available in
- // the objOut parameter. Call WorkingSetCommon::toStatusString() to retrieve the error
- // details from the output BSON object.
- DEAD,
-
- // getNext was asked for data it cannot provide, or the underlying PlanStage had an
- // unrecoverable error.
- // If the underlying PlanStage has any information on the error, it will be available in
- // the objOut parameter. Call WorkingSetCommon::toStatusString() to retrieve the error
- // details from the output BSON object.
- FAILURE,
- };
-
- /**
- * The yielding policy of the plan executor. By default, an executor does not yield itself
- * (YIELD_MANUAL).
- */
- enum YieldPolicy {
- // Any call to getNext() may yield. In particular, the executor may be killed during any
- // call to getNext(). If this occurs, getNext() will return DEAD. Additionally, this
- // will handle all WriteConflictExceptions that occur while processing the query.
- YIELD_AUTO,
-
- // This will handle WriteConflictExceptions that occur while processing the query, but
- // will not yield locks. abandonSnapshot() will be called if a WriteConflictException
- // occurs so callers must be prepared to get a new snapshot.
- WRITE_CONFLICT_RETRY_ONLY,
-
- // Owner must yield manually if yields are requested. How to yield yourself:
- //
- // 0. Let's say you have PlanExecutor* exec.
- //
- // 1. Register your PlanExecutor with ClientCursor. Registered executors are informed
- // about RecordId deletions and namespace invalidation, as well as other important
- // events. Do this by calling registerExec() on the executor. Alternatively, this can
- // be done per-yield (as described below).
- //
- // 2. Construct a PlanYieldPolicy 'policy', passing 'exec' to the constructor.
- //
- // 3. Call PlanYieldPolicy::yield() on 'policy'. If your PlanExecutor is not yet
- // registered (because you want to register on a per-yield basis), then pass
- // 'true' to yield().
- //
- // 4. The call to yield() returns a boolean indicating whether or not 'exec' is
- // still alive. If it is false, then 'exec' was killed during the yield and is
- // no longer valid.
- //
- // It is not possible to handle WriteConflictExceptions in this mode without restarting
- // the query.
- YIELD_MANUAL,
- };
-
+ enum YieldPolicy {
+ // Any call to getNext() may yield. In particular, the executor may be killed during any
+ // call to getNext(). If this occurs, getNext() will return DEAD. Additionally, this
+ // will handle all WriteConflictExceptions that occur while processing the query.
+ YIELD_AUTO,
+
+ // This will handle WriteConflictExceptions that occur while processing the query, but
+ // will not yield locks. abandonSnapshot() will be called if a WriteConflictException
+ // occurs so callers must be prepared to get a new snapshot.
+ WRITE_CONFLICT_RETRY_ONLY,
+
+ // Owner must yield manually if yields are requested. How to yield yourself:
//
- // Factory methods.
+ // 0. Let's say you have PlanExecutor* exec.
//
- // On success, return a new PlanExecutor, owned by the caller, through 'out'.
+ // 1. Register your PlanExecutor with ClientCursor. Registered executors are informed
+ // about RecordId deletions and namespace invalidation, as well as other important
+ // events. Do this by calling registerExec() on the executor. Alternatively, this can
+ // be done per-yield (as described below).
//
- // Passing YIELD_AUTO to any of these factories will construct a yielding executor which
- // may yield in the following circumstances:
- // 1) During plan selection inside the call to make().
- // 2) On any call to getNext().
- // 3) While executing the plan inside executePlan().
+ // 2. Construct a PlanYieldPolicy 'policy', passing 'exec' to the constructor.
//
- // The executor will also be automatically registered to receive notifications in the
- // case of YIELD_AUTO, so no further calls to registerExec() or setYieldPolicy() are
- // necessary.
+ // 3. Call PlanYieldPolicy::yield() on 'policy'. If your PlanExecutor is not yet
+ // registered (because you want to register on a per-yield basis), then pass
+ // 'true' to yield().
//
+ // 4. The call to yield() returns a boolean indicating whether or not 'exec' is
+    //    still alive. If it is false, then 'exec' was killed during the yield and is
+ // no longer valid.
+ //
+ // It is not possible to handle WriteConflictExceptions in this mode without restarting
+ // the query.
+ YIELD_MANUAL,
+ };
- /**
- * Used when there is no canonical query and no query solution.
- *
- * Right now this is only for idhack updates which neither canonicalize
- * nor go through normal planning.
- */
- static Status make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out);
-
- /**
- * Used when we have a NULL collection and no canonical query. In this case,
- * we need to explicitly pass a namespace to the plan executor.
- */
- static Status make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- const std::string& ns,
- YieldPolicy yieldPolicy,
- PlanExecutor** out);
-
- /**
- * Used when there is a canonical query but no query solution (e.g. idhack
- * queries, queries against a NULL collection, queries using the subplan stage).
- */
- static Status make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- CanonicalQuery* cq,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out);
-
- /**
- * The constructor for the normal case, when you have both a canonical query
- * and a query solution.
- */
- static Status make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out);
-
- ~PlanExecutor();
+ //
+ // Factory methods.
+ //
+ // On success, return a new PlanExecutor, owned by the caller, through 'out'.
+ //
+ // Passing YIELD_AUTO to any of these factories will construct a yielding executor which
+ // may yield in the following circumstances:
+ // 1) During plan selection inside the call to make().
+ // 2) On any call to getNext().
+ // 3) While executing the plan inside executePlan().
+ //
+ // The executor will also be automatically registered to receive notifications in the
+ // case of YIELD_AUTO, so no further calls to registerExec() or setYieldPolicy() are
+ // necessary.
+ //
- //
- // Accessors
- //
+ /**
+ * Used when there is no canonical query and no query solution.
+ *
+ * Right now this is only for idhack updates which neither canonicalize
+ * nor go through normal planning.
+ */
+ static Status make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ const Collection* collection,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out);
- /**
- * Get the working set used by this executor, without transferring ownership.
- */
- WorkingSet* getWorkingSet() const;
-
- /**
- * Get the stage tree wrapped by this executor, without transferring ownership.
- */
- PlanStage* getRootStage() const;
-
- /**
- * Get the query that this executor is executing, without transferring ownership.
- */
- CanonicalQuery* getCanonicalQuery() const;
-
- /**
- * The collection in which this executor is working.
- */
- const Collection* collection() const;
-
- /**
- * Return the NS that the query is running over.
- */
- const std::string& ns();
-
- /**
- * Return the OperationContext that the plan is currently executing within.
- */
- OperationContext* getOpCtx() const;
-
- /**
- * Generates a tree of stats objects with a separate lifetime from the execution
- * stage tree wrapped by this PlanExecutor. The caller owns the returned pointer.
- *
- * This is OK even if we were killed.
- */
- PlanStageStats* getStats() const;
+ /**
+ * Used when we have a NULL collection and no canonical query. In this case,
+ * we need to explicitly pass a namespace to the plan executor.
+ */
+ static Status make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ const std::string& ns,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out);
- //
- // Methods that just pass down to the PlanStage tree.
- //
+ /**
+ * Used when there is a canonical query but no query solution (e.g. idhack
+ * queries, queries against a NULL collection, queries using the subplan stage).
+ */
+ static Status make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ CanonicalQuery* cq,
+ const Collection* collection,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out);
- /**
- * Save any state required to either
- * 1. hibernate waiting for a getMore, or
- * 2. yield the lock (on applicable storage engines) to allow writes to proceed.
- */
- void saveState();
-
- /**
- * Restores the state saved by a saveState() call.
- *
- * Returns true if the state was successfully restored and the execution tree can be
- * work()'d.
- *
- * If allowed, will yield and retry if a WriteConflictException is encountered.
- *
- * Returns false otherwise. The execution tree cannot be worked and should be deleted.
- */
- bool restoreState(OperationContext* opCtx);
-
- /**
- * Same as restoreState but without the logic to retry if a WriteConflictException is
- * thrown.
- *
- * This is only public for PlanYieldPolicy. DO NOT CALL ANYWHERE ELSE.
- */
- bool restoreStateWithoutRetrying(OperationContext* opCtx);
+ /**
+ * The constructor for the normal case, when you have both a canonical query
+ * and a query solution.
+ */
+ static Status make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ QuerySolution* qs,
+ CanonicalQuery* cq,
+ const Collection* collection,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out);
- //
- // Running Support
- //
+ ~PlanExecutor();
- /**
- * Return the next result from the underlying execution tree.
- *
- * For read operations, objOut or dlOut are populated with another query result.
- *
- * For write operations, the return depends on the particulars of the write stage.
- *
- * If a YIELD_AUTO policy is set, then this method may yield.
- */
- ExecState getNextSnapshotted(Snapshotted<BSONObj>* objOut, RecordId* dlOut);
- ExecState getNext(BSONObj* objOut, RecordId* dlOut);
-
- /**
- * Returns 'true' if the plan is done producing results (or writing), 'false' otherwise.
- *
- * Tailable cursors are a possible exception to this: they may have further results even if
- * isEOF() returns true.
- */
- bool isEOF();
-
- /**
- * Execute the plan to completion, throwing out the results. Used when you want to work the
- * underlying tree without getting results back.
- *
- * If a YIELD_AUTO policy is set on this executor, then this will automatically yield.
- */
- Status executePlan();
+ //
+ // Accessors
+ //
- //
- // Concurrency-related methods.
- //
+ /**
+ * Get the working set used by this executor, without transferring ownership.
+ */
+ WorkingSet* getWorkingSet() const;
+
+ /**
+ * Get the stage tree wrapped by this executor, without transferring ownership.
+ */
+ PlanStage* getRootStage() const;
- /**
- * Register this plan executor with the collection cursor manager so that it
- * receives notifications for events that happen while yielding any locks.
- *
- * Deregistration happens automatically when this plan executor is destroyed.
- */
- void registerExec();
-
- /**
- * Unregister this PlanExecutor. Normally you want the PlanExecutor to be registered
- * for its lifetime, and you shouldn't have to call this explicitly.
- */
- void deregisterExec();
-
- /**
- * If we're yielding locks, the database we're operating over or any collection we're
- * relying on may be dropped. When this happens all cursors and plan executors on that
- * database and collection are killed or deleted in some fashion. Callers must specify
- * the 'reason' for why this executor is being killed.
- */
- void kill(std::string reason);
-
- /**
- * If we're yielding locks, writes may occur to documents that we rely on to keep valid
- * state. As such, if the plan yields, it must be notified of relevant writes so that
- * we can ensure that it doesn't crash if we try to access invalid state.
- */
- void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- /**
- * Helper method to aid in displaying an ExecState for debug or other recreational purposes.
- */
- static std::string statestr(ExecState s);
-
- /**
- * Change the yield policy of the PlanExecutor to 'policy'. If 'registerExecutor' is true,
- * and the yield policy is YIELD_AUTO, then the plan executor gets registered to receive
- * notifications of events from other threads.
- *
- * Everybody who sets the policy to YIELD_AUTO really wants to call registerExec()
- * immediately after EXCEPT commands that create cursors...so we expose the ability to
- * register (or not) here, rather than require all users to have yet another RAII object.
- * Only cursor-creating things like find.cpp set registerExecutor to false.
- */
- void setYieldPolicy(YieldPolicy policy, bool registerExecutor = true);
-
- /**
- * Stash the BSONObj so that it gets returned from the PlanExecutor on a later call to
- * getNext().
- *
- * Enqueued documents are returned in FIFO order. The queued results are exhausted before
- * generating further results from the underlying query plan.
- *
- * Subsequent calls to getNext() must request the BSONObj and *not* the RecordId.
- *
- * If used in combination with getNextSnapshotted(), then the SnapshotId associated with
- * 'obj' will be null when 'obj' is dequeued.
- */
- void enqueue(const BSONObj& obj);
-
- private:
- /**
- * RAII approach to ensuring that plan executors are deregistered.
- *
- * While retrieving the first batch of results, runQuery manually registers the executor
- * with ClientCursor. Certain query execution paths, namely $where, can throw an exception.
- * If we fail to deregister the executor, we will call invalidate/kill on the
- * still-registered-yet-deleted executor.
- *
- * For any subsequent calls to getMore, the executor is already registered with ClientCursor
- * by virtue of being cached, so this exception-proofing is not required.
- */
- struct ScopedExecutorRegistration {
- ScopedExecutorRegistration(PlanExecutor* exec);
- ~ScopedExecutorRegistration();
-
- PlanExecutor* const _exec;
- };
-
- /**
- * New PlanExecutor instances are created with the static make() methods above.
- */
- PlanExecutor(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
- const Collection* collection,
- const std::string& ns);
-
- /**
- * Public factory methods delegate to this private factory to do their work.
- */
- static Status make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
- const Collection* collection,
- const std::string& ns,
- YieldPolicy yieldPolicy,
- PlanExecutor** out);
-
- /**
- * Clients of PlanExecutor expect that on receiving a new instance from one of the make()
- * factory methods, plan selection has already been completed. In order to enforce this
- * property, this function is called to do plan selection prior to returning the new
- * PlanExecutor.
- *
- * If the tree contains plan selection stages, such as MultiPlanStage or SubplanStage,
- * this calls into their underlying plan selection facilities. Otherwise, does nothing.
- *
- * If a YIELD_AUTO policy is set then locks are yielded during plan selection.
- */
- Status pickBestPlan(YieldPolicy policy);
-
- bool killed() { return static_cast<bool>(_killReason); };
-
- // The OperationContext that we're executing within. We need this in order to release
- // locks.
- OperationContext* _opCtx;
-
- // Collection over which this plan executor runs. Used to resolve record ids retrieved by
- // the plan stages. The collection must not be destroyed while there are active plans.
- const Collection* _collection;
-
- std::unique_ptr<CanonicalQuery> _cq;
- std::unique_ptr<WorkingSet> _workingSet;
- std::unique_ptr<QuerySolution> _qs;
- std::unique_ptr<PlanStage> _root;
-
- // Deregisters this executor when it is destroyed.
- std::unique_ptr<ScopedExecutorRegistration> _safety;
-
- // What namespace are we operating over?
- std::string _ns;
-
- // If _killReason has a value, then we have been killed and the value represents the reason
- // for the kill.
- boost::optional<std::string> _killReason;
-
- // This is used to handle automatic yielding when allowed by the YieldPolicy. Never NULL.
- // TODO make this a non-pointer member. This requires some header shuffling so that this
- // file includes plan_yield_policy.h rather than the other way around.
- const std::unique_ptr<PlanYieldPolicy> _yieldPolicy;
-
- // A stash of results generated by this plan that the user of the PlanExecutor didn't want
- // to consume yet. We empty the queue before retrieving further results from the plan
- // stages.
- std::queue<BSONObj> _stash;
+ /**
+ * Get the query that this executor is executing, without transferring ownership.
+ */
+ CanonicalQuery* getCanonicalQuery() const;
+
+ /**
+ * The collection in which this executor is working.
+ */
+ const Collection* collection() const;
+
+ /**
+ * Return the NS that the query is running over.
+ */
+ const std::string& ns();
+
+ /**
+ * Return the OperationContext that the plan is currently executing within.
+ */
+ OperationContext* getOpCtx() const;
+
+ /**
+ * Generates a tree of stats objects with a separate lifetime from the execution
+ * stage tree wrapped by this PlanExecutor. The caller owns the returned pointer.
+ *
+ * This is OK even if we were killed.
+ */
+ PlanStageStats* getStats() const;
+
+ //
+ // Methods that just pass down to the PlanStage tree.
+ //
+
+ /**
+ * Save any state required to either
+ * 1. hibernate waiting for a getMore, or
+ * 2. yield the lock (on applicable storage engines) to allow writes to proceed.
+ */
+ void saveState();
+
+ /**
+ * Restores the state saved by a saveState() call.
+ *
+ * Returns true if the state was successfully restored and the execution tree can be
+ * work()'d.
+ *
+ * If allowed, will yield and retry if a WriteConflictException is encountered.
+ *
+ * Returns false otherwise. The execution tree cannot be worked and should be deleted.
+ */
+ bool restoreState(OperationContext* opCtx);
+
+ /**
+ * Same as restoreState but without the logic to retry if a WriteConflictException is
+ * thrown.
+ *
+ * This is only public for PlanYieldPolicy. DO NOT CALL ANYWHERE ELSE.
+ */
+ bool restoreStateWithoutRetrying(OperationContext* opCtx);
+
+ //
+ // Running Support
+ //
+
+ /**
+ * Return the next result from the underlying execution tree.
+ *
+ * For read operations, objOut or dlOut are populated with another query result.
+ *
+ * For write operations, the return depends on the particulars of the write stage.
+ *
+ * If a YIELD_AUTO policy is set, then this method may yield.
+ */
+ ExecState getNextSnapshotted(Snapshotted<BSONObj>* objOut, RecordId* dlOut);
+ ExecState getNext(BSONObj* objOut, RecordId* dlOut);
+
+ /**
+ * Returns 'true' if the plan is done producing results (or writing), 'false' otherwise.
+ *
+ * Tailable cursors are a possible exception to this: they may have further results even if
+ * isEOF() returns true.
+ */
+ bool isEOF();
+
+ /**
+ * Execute the plan to completion, throwing out the results. Used when you want to work the
+ * underlying tree without getting results back.
+ *
+ * If a YIELD_AUTO policy is set on this executor, then this will automatically yield.
+ */
+ Status executePlan();
+
+ //
+ // Concurrency-related methods.
+ //
+
+ /**
+ * Register this plan executor with the collection cursor manager so that it
+ * receives notifications for events that happen while yielding any locks.
+ *
+ * Deregistration happens automatically when this plan executor is destroyed.
+ */
+ void registerExec();
+
+ /**
+ * Unregister this PlanExecutor. Normally you want the PlanExecutor to be registered
+ * for its lifetime, and you shouldn't have to call this explicitly.
+ */
+ void deregisterExec();
+
+ /**
+ * If we're yielding locks, the database we're operating over or any collection we're
+ * relying on may be dropped. When this happens all cursors and plan executors on that
+ * database and collection are killed or deleted in some fashion. Callers must specify
+ * the 'reason' for why this executor is being killed.
+ */
+ void kill(std::string reason);
+
+ /**
+ * If we're yielding locks, writes may occur to documents that we rely on to keep valid
+ * state. As such, if the plan yields, it must be notified of relevant writes so that
+ * we can ensure that it doesn't crash if we try to access invalid state.
+ */
+ void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+
+ /**
+ * Helper method to aid in displaying an ExecState for debug or other recreational purposes.
+ */
+ static std::string statestr(ExecState s);
+
+ /**
+ * Change the yield policy of the PlanExecutor to 'policy'. If 'registerExecutor' is true,
+ * and the yield policy is YIELD_AUTO, then the plan executor gets registered to receive
+ * notifications of events from other threads.
+ *
+ * Everybody who sets the policy to YIELD_AUTO really wants to call registerExec()
+ * immediately after EXCEPT commands that create cursors...so we expose the ability to
+ * register (or not) here, rather than require all users to have yet another RAII object.
+ * Only cursor-creating things like find.cpp set registerExecutor to false.
+ */
+ void setYieldPolicy(YieldPolicy policy, bool registerExecutor = true);
+
+ /**
+ * Stash the BSONObj so that it gets returned from the PlanExecutor on a later call to
+ * getNext().
+ *
+ * Enqueued documents are returned in FIFO order. The queued results are exhausted before
+ * generating further results from the underlying query plan.
+ *
+ * Subsequent calls to getNext() must request the BSONObj and *not* the RecordId.
+ *
+ * If used in combination with getNextSnapshotted(), then the SnapshotId associated with
+ * 'obj' will be null when 'obj' is dequeued.
+ */
+ void enqueue(const BSONObj& obj);
+
+private:
+ /**
+ * RAII approach to ensuring that plan executors are deregistered.
+ *
+ * While retrieving the first batch of results, runQuery manually registers the executor
+ * with ClientCursor. Certain query execution paths, namely $where, can throw an exception.
+ * If we fail to deregister the executor, we will call invalidate/kill on the
+ * still-registered-yet-deleted executor.
+ *
+ * For any subsequent calls to getMore, the executor is already registered with ClientCursor
+ * by virtue of being cached, so this exception-proofing is not required.
+ */
+ struct ScopedExecutorRegistration {
+ ScopedExecutorRegistration(PlanExecutor* exec);
+ ~ScopedExecutorRegistration();
+
+ PlanExecutor* const _exec;
+ };
+
+ /**
+ * New PlanExecutor instances are created with the static make() methods above.
+ */
+ PlanExecutor(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ QuerySolution* qs,
+ CanonicalQuery* cq,
+ const Collection* collection,
+ const std::string& ns);
+
+ /**
+ * Public factory methods delegate to this private factory to do their work.
+ */
+ static Status make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ QuerySolution* qs,
+ CanonicalQuery* cq,
+ const Collection* collection,
+ const std::string& ns,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out);
+
+ /**
+ * Clients of PlanExecutor expect that on receiving a new instance from one of the make()
+ * factory methods, plan selection has already been completed. In order to enforce this
+ * property, this function is called to do plan selection prior to returning the new
+ * PlanExecutor.
+ *
+ * If the tree contains plan selection stages, such as MultiPlanStage or SubplanStage,
+ * this calls into their underlying plan selection facilities. Otherwise, does nothing.
+ *
+ * If a YIELD_AUTO policy is set then locks are yielded during plan selection.
+ */
+ Status pickBestPlan(YieldPolicy policy);
+
+ bool killed() {
+ return static_cast<bool>(_killReason);
};
+ // The OperationContext that we're executing within. We need this in order to release
+ // locks.
+ OperationContext* _opCtx;
+
+ // Collection over which this plan executor runs. Used to resolve record ids retrieved by
+ // the plan stages. The collection must not be destroyed while there are active plans.
+ const Collection* _collection;
+
+ std::unique_ptr<CanonicalQuery> _cq;
+ std::unique_ptr<WorkingSet> _workingSet;
+ std::unique_ptr<QuerySolution> _qs;
+ std::unique_ptr<PlanStage> _root;
+
+ // Deregisters this executor when it is destroyed.
+ std::unique_ptr<ScopedExecutorRegistration> _safety;
+
+ // What namespace are we operating over?
+ std::string _ns;
+
+ // If _killReason has a value, then we have been killed and the value represents the reason
+ // for the kill.
+ boost::optional<std::string> _killReason;
+
+ // This is used to handle automatic yielding when allowed by the YieldPolicy. Never NULL.
+ // TODO make this a non-pointer member. This requires some header shuffling so that this
+ // file includes plan_yield_policy.h rather than the other way around.
+ const std::unique_ptr<PlanYieldPolicy> _yieldPolicy;
+
+ // A stash of results generated by this plan that the user of the PlanExecutor didn't want
+ // to consume yet. We empty the queue before retrieving further results from the plan
+ // stages.
+ std::queue<BSONObj> _stash;
+};
+
} // namespace mongo
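
A consumer of this interface typically drains the executor with getNext() until IS_EOF, treating DEAD or FAILURE as errors; documents stashed via enqueue() come back first, in FIFO order. The sketch below is illustrative only and is not part of this change: it assumes the plan_executor.h declarations above, and the helper name drainExecutor is hypothetical.

#include "mongo/db/query/plan_executor.h"

namespace mongo {

// Hypothetical helper (sketch only): work an already-built executor to completion.
Status drainExecutor(PlanExecutor* exec) {
    BSONObj obj;
    PlanExecutor::ExecState state;
    // Stashed (enqueue()'d) results are returned before further plan results.
    while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
        // ... consume 'obj' ...
    }
    if (PlanExecutor::IS_EOF == state) {
        return Status::OK();
    }
    // DEAD or FAILURE: report the terminal state to the caller.
    return Status(ErrorCodes::InternalError,
                  "plan executor stopped in state " + PlanExecutor::statestr(state));
}

}  // namespace mongo
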
diff --git a/src/mongo/db/query/plan_ranker.cpp b/src/mongo/db/query/plan_ranker.cpp
index 5a2672bd526..cd2fbde6b03 100644
--- a/src/mongo/db/query/plan_ranker.cpp
+++ b/src/mongo/db/query/plan_ranker.cpp
@@ -48,220 +48,208 @@
namespace {
- /**
- * Comparator for (scores, candidateIndex) in pickBestPlan().
- */
- bool scoreComparator(const std::pair<double, size_t>& lhs,
- const std::pair<double, size_t>& rhs) {
- // Just compare score in lhs.first and rhs.first;
- // Ignore candidate array index in lhs.second and rhs.second.
- return lhs.first > rhs.first;
- }
+/**
+ * Comparator for (scores, candidateIndex) in pickBestPlan().
+ */
+bool scoreComparator(const std::pair<double, size_t>& lhs, const std::pair<double, size_t>& rhs) {
+ // Just compare score in lhs.first and rhs.first;
+ // Ignore candidate array index in lhs.second and rhs.second.
+ return lhs.first > rhs.first;
+}
-} // namespace
+} // namespace
namespace mongo {
- using std::endl;
- using std::vector;
-
- // static
- size_t PlanRanker::pickBestPlan(const vector<CandidatePlan>& candidates,
- PlanRankingDecision* why) {
- invariant(!candidates.empty());
- invariant(why);
-
- // A plan that hits EOF is automatically scored above
- // its peers. If multiple plans hit EOF during the same
- // set of round-robin calls to work(), then all such plans
- // receive the bonus.
- double eofBonus = 1.0;
-
- // Each plan will have a stat tree.
- vector<PlanStageStats*> statTrees;
-
- // Get stat trees from each plan.
- // Copy stats trees instead of transferring ownership
- // because multi plan runner will need its own stats
- // trees for explain.
- for (size_t i = 0; i < candidates.size(); ++i) {
- statTrees.push_back(candidates[i].root->getStats());
- }
+using std::endl;
+using std::vector;
+
+// static
+size_t PlanRanker::pickBestPlan(const vector<CandidatePlan>& candidates, PlanRankingDecision* why) {
+ invariant(!candidates.empty());
+ invariant(why);
+
+ // A plan that hits EOF is automatically scored above
+ // its peers. If multiple plans hit EOF during the same
+ // set of round-robin calls to work(), then all such plans
+ // receive the bonus.
+ double eofBonus = 1.0;
+
+ // Each plan will have a stat tree.
+ vector<PlanStageStats*> statTrees;
+
+ // Get stat trees from each plan.
+ // Copy stats trees instead of transferring ownership
+ // because multi plan runner will need its own stats
+ // trees for explain.
+ for (size_t i = 0; i < candidates.size(); ++i) {
+ statTrees.push_back(candidates[i].root->getStats());
+ }
- // Holds (score, candidateInndex).
- // Used to derive scores and candidate ordering.
- vector<std::pair<double, size_t> > scoresAndCandidateindices;
-
- // Compute score for each tree. Record the best.
- for (size_t i = 0; i < statTrees.size(); ++i) {
- LOG(5) << "Scoring plan " << i << ":" << endl
- << candidates[i].solution->toString() << "Stats:\n"
- << Explain::statsToBSON(*statTrees[i]).jsonString(Strict, true);
- LOG(2) << "Scoring query plan: "
- << Explain::getPlanSummary(candidates[i].root)
- << " planHitEOF=" << statTrees[i]->common.isEOF;
-
- double score = scoreTree(statTrees[i]);
- LOG(5) << "score = " << score << endl;
- if (statTrees[i]->common.isEOF) {
- LOG(5) << "Adding +" << eofBonus << " EOF bonus to score." << endl;
- score += 1;
- }
- scoresAndCandidateindices.push_back(std::make_pair(score, i));
+    // Holds (score, candidateIndex).
+ // Used to derive scores and candidate ordering.
+ vector<std::pair<double, size_t>> scoresAndCandidateindices;
+
+ // Compute score for each tree. Record the best.
+ for (size_t i = 0; i < statTrees.size(); ++i) {
+ LOG(5) << "Scoring plan " << i << ":" << endl
+ << candidates[i].solution->toString() << "Stats:\n"
+ << Explain::statsToBSON(*statTrees[i]).jsonString(Strict, true);
+ LOG(2) << "Scoring query plan: " << Explain::getPlanSummary(candidates[i].root)
+ << " planHitEOF=" << statTrees[i]->common.isEOF;
+
+ double score = scoreTree(statTrees[i]);
+ LOG(5) << "score = " << score << endl;
+ if (statTrees[i]->common.isEOF) {
+ LOG(5) << "Adding +" << eofBonus << " EOF bonus to score." << endl;
+ score += 1;
}
+ scoresAndCandidateindices.push_back(std::make_pair(score, i));
+ }
- // Sort (scores, candidateIndex). Get best child and populate candidate ordering.
- std::stable_sort(scoresAndCandidateindices.begin(), scoresAndCandidateindices.end(),
- scoreComparator);
-
- // Update results in 'why'
- // Stats and scores in 'why' are sorted in descending order by score.
- why->stats.clear();
- why->scores.clear();
- why->candidateOrder.clear();
- for (size_t i = 0; i < scoresAndCandidateindices.size(); ++i) {
- double score = scoresAndCandidateindices[i].first;
- size_t candidateIndex = scoresAndCandidateindices[i].second;
-
- // We shouldn't cache the scores with the EOF bonus included,
- // as this is just a tie-breaking measure for plan selection.
- // Plans not run through the multi plan runner will not receive
- // the bonus.
- //
- // An example of a bad thing that could happen if we stored scores
- // with the EOF bonus included:
- //
- // Let's say Plan A hits EOF, is the highest ranking plan, and gets
- // cached as such. On subsequent runs it will not receive the bonus.
- // Eventually the plan cache feedback mechanism will evict the cache
- // entry---the scores will appear to have fallen due to the missing
- // EOF bonus.
- //
- // This begs the question, why don't we include the EOF bonus in
- // scoring of cached plans as well? The problem here is that the cached
- // plan runner always runs plans to completion before scoring. Queries
- // that don't get the bonus in the multi plan runner might get the bonus
- // after being run from the plan cache.
- if (statTrees[candidateIndex]->common.isEOF) {
- score -= eofBonus;
- }
-
- why->stats.mutableVector().push_back(statTrees[candidateIndex]);
- why->scores.push_back(score);
- why->candidateOrder.push_back(candidateIndex);
+ // Sort (scores, candidateIndex). Get best child and populate candidate ordering.
+ std::stable_sort(
+ scoresAndCandidateindices.begin(), scoresAndCandidateindices.end(), scoreComparator);
+
+ // Update results in 'why'
+ // Stats and scores in 'why' are sorted in descending order by score.
+ why->stats.clear();
+ why->scores.clear();
+ why->candidateOrder.clear();
+ for (size_t i = 0; i < scoresAndCandidateindices.size(); ++i) {
+ double score = scoresAndCandidateindices[i].first;
+ size_t candidateIndex = scoresAndCandidateindices[i].second;
+
+ // We shouldn't cache the scores with the EOF bonus included,
+ // as this is just a tie-breaking measure for plan selection.
+ // Plans not run through the multi plan runner will not receive
+ // the bonus.
+ //
+ // An example of a bad thing that could happen if we stored scores
+ // with the EOF bonus included:
+ //
+ // Let's say Plan A hits EOF, is the highest ranking plan, and gets
+ // cached as such. On subsequent runs it will not receive the bonus.
+ // Eventually the plan cache feedback mechanism will evict the cache
+ // entry---the scores will appear to have fallen due to the missing
+ // EOF bonus.
+ //
+ // This begs the question, why don't we include the EOF bonus in
+ // scoring of cached plans as well? The problem here is that the cached
+ // plan runner always runs plans to completion before scoring. Queries
+ // that don't get the bonus in the multi plan runner might get the bonus
+ // after being run from the plan cache.
+ if (statTrees[candidateIndex]->common.isEOF) {
+ score -= eofBonus;
}
- size_t bestChild = scoresAndCandidateindices[0].second;
- return bestChild;
+ why->stats.mutableVector().push_back(statTrees[candidateIndex]);
+ why->scores.push_back(score);
+ why->candidateOrder.push_back(candidateIndex);
}
- // TODO: Move this out. This is a signal for ranking but will become its own complicated
- // stats-collecting beast.
- double computeSelectivity(const PlanStageStats* stats) {
- if (STAGE_IXSCAN == stats->stageType) {
- IndexScanStats* iss = static_cast<IndexScanStats*>(stats->specific.get());
- return iss->keyPattern.nFields();
- }
- else {
- double sum = 0;
- for (size_t i = 0; i < stats->children.size(); ++i) {
- sum += computeSelectivity(stats->children[i]);
- }
- return sum;
+ size_t bestChild = scoresAndCandidateindices[0].second;
+ return bestChild;
+}
+
+// TODO: Move this out. This is a signal for ranking but will become its own complicated
+// stats-collecting beast.
+double computeSelectivity(const PlanStageStats* stats) {
+ if (STAGE_IXSCAN == stats->stageType) {
+ IndexScanStats* iss = static_cast<IndexScanStats*>(stats->specific.get());
+ return iss->keyPattern.nFields();
+ } else {
+ double sum = 0;
+ for (size_t i = 0; i < stats->children.size(); ++i) {
+ sum += computeSelectivity(stats->children[i]);
}
+ return sum;
}
+}
- bool hasStage(const StageType type, const PlanStageStats* stats) {
- if (type == stats->stageType) {
+bool hasStage(const StageType type, const PlanStageStats* stats) {
+ if (type == stats->stageType) {
+ return true;
+ }
+ for (size_t i = 0; i < stats->children.size(); ++i) {
+ if (hasStage(type, stats->children[i])) {
return true;
}
- for (size_t i = 0; i < stats->children.size(); ++i) {
- if (hasStage(type, stats->children[i])) {
- return true;
- }
- }
- return false;
+ }
+ return false;
+}
+
+// static
+double PlanRanker::scoreTree(const PlanStageStats* stats) {
+ // We start all scores at 1. Our "no plan selected" score is 0 and we want all plans to
+ // be greater than that.
+ double baseScore = 1;
+
+ // How many "units of work" did the plan perform. Each call to work(...)
+ // counts as one unit.
+ size_t workUnits = stats->common.works;
+
+ // How much did a plan produce?
+ // Range: [0, 1]
+ double productivity =
+ static_cast<double>(stats->common.advanced) / static_cast<double>(workUnits);
+
+ // Just enough to break a tie. Must be small enough to ensure that a more productive
+ // plan doesn't lose to a less productive plan due to tie breaking.
+ static const double epsilon = std::min(1.0 / static_cast<double>(10 * workUnits), 1e-4);
+
+ // We prefer covered projections.
+ //
+ // We only do this when we have a projection stage because we have so many jstests that
+ // check bounds even when a collscan plan is just as good as the ixscan'd plan :(
+ double noFetchBonus = epsilon;
+ if (hasStage(STAGE_PROJECTION, stats) && hasStage(STAGE_FETCH, stats)) {
+ noFetchBonus = 0;
}
- // static
- double PlanRanker::scoreTree(const PlanStageStats* stats) {
- // We start all scores at 1. Our "no plan selected" score is 0 and we want all plans to
- // be greater than that.
- double baseScore = 1;
+ // In the case of ties, prefer solutions without a blocking sort
+ // to solutions with a blocking sort.
+ double noSortBonus = epsilon;
+ if (hasStage(STAGE_SORT, stats)) {
+ noSortBonus = 0;
+ }
- // How many "units of work" did the plan perform. Each call to work(...)
- // counts as one unit.
- size_t workUnits = stats->common.works;
+ // In the case of ties, prefer single index solutions to ixisect. Index
+ // intersection solutions are often slower than single-index solutions
+ // because they require examining a superset of index keys that would be
+ // examined by a single index scan.
+ //
+ // On the other hand, index intersection solutions examine the same
+    // number of documents or fewer. In the case that index intersection
+ // allows us to examine fewer documents, the penalty given to ixisect
+ // can be made up via the no fetch bonus.
+ double noIxisectBonus = epsilon;
+ if (hasStage(STAGE_AND_HASH, stats) || hasStage(STAGE_AND_SORTED, stats)) {
+ noIxisectBonus = 0;
+ }
- // How much did a plan produce?
- // Range: [0, 1]
- double productivity = static_cast<double>(stats->common.advanced)
- / static_cast<double>(workUnits);
+ double tieBreakers = noFetchBonus + noSortBonus + noIxisectBonus;
+ double score = baseScore + productivity + tieBreakers;
- // Just enough to break a tie. Must be small enough to ensure that a more productive
- // plan doesn't lose to a less productive plan due to tie breaking.
- static const double epsilon = std::min(1.0 / static_cast<double>(10 * workUnits), 1e-4);
+ mongoutils::str::stream ss;
+ ss << "score(" << score << ") = baseScore(" << baseScore << ")"
+ << " + productivity((" << stats->common.advanced << " advanced)/(" << stats->common.works
+ << " works) = " << productivity << ")"
+ << " + tieBreakers(" << noFetchBonus << " noFetchBonus + " << noSortBonus
+ << " noSortBonus + " << noIxisectBonus << " noIxisectBonus = " << tieBreakers << ")";
+ std::string scoreStr = ss;
+ LOG(2) << scoreStr;
- // We prefer covered projections.
- //
- // We only do this when we have a projection stage because we have so many jstests that
- // check bounds even when a collscan plan is just as good as the ixscan'd plan :(
- double noFetchBonus = epsilon;
- if (hasStage(STAGE_PROJECTION, stats) && hasStage(STAGE_FETCH, stats)) {
- noFetchBonus = 0;
- }
-
- // In the case of ties, prefer solutions without a blocking sort
- // to solutions with a blocking sort.
- double noSortBonus = epsilon;
- if (hasStage(STAGE_SORT, stats)) {
- noSortBonus = 0;
- }
-
- // In the case of ties, prefer single index solutions to ixisect. Index
- // intersection solutions are often slower than single-index solutions
- // because they require examining a superset of index keys that would be
- // examined by a single index scan.
- //
- // On the other hand, index intersection solutions examine the same
- // number or fewer of documents. In the case that index intersection
- // allows us to examine fewer documents, the penalty given to ixisect
- // can be made up via the no fetch bonus.
- double noIxisectBonus = epsilon;
+ if (internalQueryForceIntersectionPlans) {
if (hasStage(STAGE_AND_HASH, stats) || hasStage(STAGE_AND_SORTED, stats)) {
- noIxisectBonus = 0;
- }
-
- double tieBreakers = noFetchBonus + noSortBonus + noIxisectBonus;
- double score = baseScore + productivity + tieBreakers;
-
- mongoutils::str::stream ss;
- ss << "score(" << score << ") = baseScore(" << baseScore << ")"
- << " + productivity((" << stats->common.advanced
- << " advanced)/("
- << stats->common.works
- << " works) = "
- << productivity << ")"
- << " + tieBreakers(" << noFetchBonus
- << " noFetchBonus + "
- << noSortBonus
- << " noSortBonus + "
- << noIxisectBonus
- << " noIxisectBonus = "
- << tieBreakers << ")";
- std::string scoreStr = ss;
- LOG(2) << scoreStr;
-
- if (internalQueryForceIntersectionPlans) {
- if (hasStage(STAGE_AND_HASH, stats) || hasStage(STAGE_AND_SORTED, stats)) {
- // The boost should be >2.001 to make absolutely sure the ixisect plan will win due
- // to the combination of 1) productivity, 2) eof bonus, and 3) no ixisect bonus.
- score += 3;
- LOG(5) << "Score boosted to " << score << " due to intersection forcing." << endl;
- }
+ // The boost should be >2.001 to make absolutely sure the ixisect plan will win due
+ // to the combination of 1) productivity, 2) eof bonus, and 3) no ixisect bonus.
+ score += 3;
+ LOG(5) << "Score boosted to " << score << " due to intersection forcing." << endl;
}
-
- return score;
}
+ return score;
+}
+
} // namespace mongo
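
For reference, scoreTree() above reduces to score = baseScore + productivity + tieBreakers, where productivity = advanced/works and each tie-breaker is either 0 or a small epsilon. The standalone sketch below (illustration only, not code from this change) reproduces that arithmetic from raw counters; it ignores the internalQueryForceIntersectionPlans boost and the EOF bonus that pickBestPlan() applies separately.

#include <algorithm>
#include <cstddef>
#include <iostream>

// Sketch of the scoring formula used by PlanRanker::scoreTree() (illustration only).
double scoreSketch(size_t works,
                   size_t advanced,
                   bool fetchUnderProjection,
                   bool blockingSort,
                   bool ixisect) {
    double baseScore = 1;  // the "no plan selected" score is 0, so real plans start above it
    double productivity = static_cast<double>(advanced) / static_cast<double>(works);
    // Small enough that tie-breakers can never outweigh a productivity difference.
    const double epsilon = std::min(1.0 / static_cast<double>(10 * works), 1e-4);
    double noFetchBonus = fetchUnderProjection ? 0 : epsilon;
    double noSortBonus = blockingSort ? 0 : epsilon;
    double noIxisectBonus = ixisect ? 0 : epsilon;
    return baseScore + productivity + (noFetchBonus + noSortBonus + noIxisectBonus);
}

int main() {
    // A plan that advanced on 80 of its 100 work() calls, covered, with no blocking
    // sort and no index intersection: 1 + 0.8 + 3 * 1e-4 = 1.8003.
    std::cout << scoreSketch(100, 80, false, false, false) << std::endl;
    return 0;
}
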
diff --git a/src/mongo/db/query/plan_ranker.h b/src/mongo/db/query/plan_ranker.h
index ed41c0c1c5a..653fb332f12 100644
--- a/src/mongo/db/query/plan_ranker.h
+++ b/src/mongo/db/query/plan_ranker.h
@@ -39,86 +39,85 @@
namespace mongo {
- struct CandidatePlan;
- struct PlanRankingDecision;
+struct CandidatePlan;
+struct PlanRankingDecision;
+/**
+ * Ranks 2 or more plans.
+ */
+class PlanRanker {
+public:
/**
- * Ranks 2 or more plans.
+ * Returns index in 'candidates' of which plan is best.
+ * Populates 'why' with information relevant to how each plan fared in the ranking process.
+ * Caller owns pointers in 'why'.
+ * 'candidateOrder' holds indices into candidates ordered by score (winner in first element).
*/
- class PlanRanker {
- public:
- /**
- * Returns index in 'candidates' of which plan is best.
- * Populates 'why' with information relevant to how each plan fared in the ranking process.
- * Caller owns pointers in 'why'.
- * 'candidateOrder' holds indices into candidates ordered by score (winner in first element).
- */
- static size_t pickBestPlan(const std::vector<CandidatePlan>& candidates,
- PlanRankingDecision* why);
-
- /**
- * Assign the stats tree a 'goodness' score. The higher the score, the better
- * the plan. The exact value isn't meaningful except for imposing a ranking.
- */
- static double scoreTree(const PlanStageStats* stats);
- };
+ static size_t pickBestPlan(const std::vector<CandidatePlan>& candidates,
+ PlanRankingDecision* why);
/**
- * A container holding one to-be-ranked plan and its associated/relevant data.
- * Does not own any of its pointers.
+ * Assign the stats tree a 'goodness' score. The higher the score, the better
+ * the plan. The exact value isn't meaningful except for imposing a ranking.
*/
- struct CandidatePlan {
- CandidatePlan(QuerySolution* s, PlanStage* r, WorkingSet* w)
- : solution(s), root(r), ws(w), failed(false) { }
+ static double scoreTree(const PlanStageStats* stats);
+};
+
+/**
+ * A container holding one to-be-ranked plan and its associated/relevant data.
+ * Does not own any of its pointers.
+ */
+struct CandidatePlan {
+ CandidatePlan(QuerySolution* s, PlanStage* r, WorkingSet* w)
+ : solution(s), root(r), ws(w), failed(false) {}
- QuerySolution* solution;
- PlanStage* root;
- WorkingSet* ws;
+ QuerySolution* solution;
+ PlanStage* root;
+ WorkingSet* ws;
- // Any results produced during the plan's execution prior to ranking are retained here.
- std::list<WorkingSetID> results;
+ // Any results produced during the plan's execution prior to ranking are retained here.
+ std::list<WorkingSetID> results;
- bool failed;
- };
+ bool failed;
+};
+
+/**
+ * Information about why a plan was picked to be the best. Data here is placed into the cache
+ * and used to compare expected performance with actual.
+ */
+struct PlanRankingDecision {
+ PlanRankingDecision() {}
/**
- * Information about why a plan was picked to be the best. Data here is placed into the cache
- * and used to compare expected performance with actual.
+ * Make a deep copy.
*/
- struct PlanRankingDecision {
-
- PlanRankingDecision() { }
-
- /**
- * Make a deep copy.
- */
- PlanRankingDecision* clone() const {
- PlanRankingDecision* decision = new PlanRankingDecision();
- for (size_t i = 0; i < stats.size(); ++i) {
- PlanStageStats* s = stats.vector()[i];
- invariant(s);
- decision->stats.mutableVector().push_back(s->clone());
- }
- decision->scores = scores;
- decision->candidateOrder = candidateOrder;
- return decision;
+ PlanRankingDecision* clone() const {
+ PlanRankingDecision* decision = new PlanRankingDecision();
+ for (size_t i = 0; i < stats.size(); ++i) {
+ PlanStageStats* s = stats.vector()[i];
+ invariant(s);
+ decision->stats.mutableVector().push_back(s->clone());
}
-
- // Stats of all plans sorted in descending order by score.
- // Owned by us.
- OwnedPointerVector<PlanStageStats> stats;
-
- // The "goodness" score corresponding to 'stats'.
- // Sorted in descending order.
- std::vector<double> scores;
-
- // Ordering of original plans in descending of score.
- // Filled in by PlanRanker::pickBestPlan(candidates, ...)
- // so that candidates[candidateOrder[0]] refers to the best plan
- // with corresponding cores[0] and stats[0]. Runner-up would be
- // candidates[candidateOrder[1]] followed by
- // candidates[candidateOrder[2]], ...
- std::vector<size_t> candidateOrder;
- };
+ decision->scores = scores;
+ decision->candidateOrder = candidateOrder;
+ return decision;
+ }
+
+ // Stats of all plans sorted in descending order by score.
+ // Owned by us.
+ OwnedPointerVector<PlanStageStats> stats;
+
+ // The "goodness" score corresponding to 'stats'.
+ // Sorted in descending order.
+ std::vector<double> scores;
+
+    // Ordering of original plans in descending order of score.
+ // Filled in by PlanRanker::pickBestPlan(candidates, ...)
+ // so that candidates[candidateOrder[0]] refers to the best plan
+    // with corresponding scores[0] and stats[0]. Runner-up would be
+ // candidates[candidateOrder[1]] followed by
+ // candidates[candidateOrder[2]], ...
+ std::vector<size_t> candidateOrder;
+};
} // namespace mongo
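
The relationship between candidateOrder, scores, and stats is easy to get backwards: entry i of scores/stats describes candidates[candidateOrder[i]], with the winner at i == 0. A hedged usage sketch follows (not part of this change; it assumes the declarations above, and dumpRanking is a hypothetical helper).

#include <cstddef>
#include <iostream>
#include <vector>

#include "mongo/db/query/plan_ranker.h"

namespace mongo {

// Hypothetical helper (sketch only): rank candidates and print the resulting ordering.
void dumpRanking(const std::vector<CandidatePlan>& candidates) {
    PlanRankingDecision why;
    const size_t best = PlanRanker::pickBestPlan(candidates, &why);
    for (size_t i = 0; i < why.candidateOrder.size(); ++i) {
        // scores[i] and stats[i] describe candidates[candidateOrder[i]], best first,
        // so candidateOrder[0] == best.
        std::cout << "rank " << i << ": candidate " << why.candidateOrder[i] << " scored "
                  << why.scores[i] << (why.candidateOrder[i] == best ? " (winner)" : "") << "\n";
    }
}

}  // namespace mongo
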
diff --git a/src/mongo/db/query/plan_yield_policy.cpp b/src/mongo/db/query/plan_yield_policy.cpp
index 67eb574ef5c..45d996893a9 100644
--- a/src/mongo/db/query/plan_yield_policy.cpp
+++ b/src/mongo/db/query/plan_yield_policy.cpp
@@ -38,75 +38,73 @@
namespace mongo {
- PlanYieldPolicy::PlanYieldPolicy(PlanExecutor* exec, PlanExecutor::YieldPolicy policy)
- : _policy(policy),
- _forceYield(false),
- _elapsedTracker(internalQueryExecYieldIterations, internalQueryExecYieldPeriodMS),
- _planYielding(exec) { }
-
- bool PlanYieldPolicy::shouldYield() {
- if (!allowedToYield()) return false;
- invariant(!_planYielding->getOpCtx()->lockState()->inAWriteUnitOfWork());
- if (_forceYield) return true;
- return _elapsedTracker.intervalHasElapsed();
- }
-
- void PlanYieldPolicy::resetTimer() {
- _elapsedTracker.resetLastTime();
- }
-
- bool PlanYieldPolicy::yield(RecordFetcher* fetcher) {
- invariant(_planYielding);
- invariant(allowedToYield());
-
- _forceYield = false;
+PlanYieldPolicy::PlanYieldPolicy(PlanExecutor* exec, PlanExecutor::YieldPolicy policy)
+ : _policy(policy),
+ _forceYield(false),
+ _elapsedTracker(internalQueryExecYieldIterations, internalQueryExecYieldPeriodMS),
+ _planYielding(exec) {}
+
+bool PlanYieldPolicy::shouldYield() {
+ if (!allowedToYield())
+ return false;
+ invariant(!_planYielding->getOpCtx()->lockState()->inAWriteUnitOfWork());
+ if (_forceYield)
+ return true;
+ return _elapsedTracker.intervalHasElapsed();
+}
+
+void PlanYieldPolicy::resetTimer() {
+ _elapsedTracker.resetLastTime();
+}
+
+bool PlanYieldPolicy::yield(RecordFetcher* fetcher) {
+ invariant(_planYielding);
+ invariant(allowedToYield());
+
+ _forceYield = false;
+
+ OperationContext* opCtx = _planYielding->getOpCtx();
+ invariant(opCtx);
+ invariant(!opCtx->lockState()->inAWriteUnitOfWork());
+
+ // Can't use MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END since we need to call saveState
+    // before resetting the transaction.
+ for (int attempt = 1; true; attempt++) {
+ try {
+ // All YIELD_AUTO plans will get here eventually when the elapsed tracker triggers
+ // that it's time to yield. Whether or not we will actually yield, we need to check
+ // if this operation has been interrupted. Throws if the interrupt flag is set.
+ if (_policy == PlanExecutor::YIELD_AUTO) {
+ opCtx->checkForInterrupt();
+ }
- OperationContext* opCtx = _planYielding->getOpCtx();
- invariant(opCtx);
- invariant(!opCtx->lockState()->inAWriteUnitOfWork());
+ // No need to yield if the collection is NULL.
+ if (NULL == _planYielding->collection()) {
+ return true;
+ }
- // Can't use MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END since we need to call saveState
- // before reseting the transaction.
- for (int attempt = 1; true; attempt++) {
try {
- // All YIELD_AUTO plans will get here eventually when the elapsed tracker triggers
- // that it's time to yield. Whether or not we will actually yield, we need to check
- // if this operation has been interrupted. Throws if the interrupt flag is set.
- if (_policy == PlanExecutor::YIELD_AUTO) {
- opCtx->checkForInterrupt();
- }
-
- // No need to yield if the collection is NULL.
- if (NULL == _planYielding->collection()) {
- return true;
- }
-
- try {
- _planYielding->saveState();
- }
- catch (const WriteConflictException& wce) {
- invariant(!"WriteConflictException not allowed in saveState");
- }
-
- if (_policy == PlanExecutor::WRITE_CONFLICT_RETRY_ONLY) {
- // Just reset the snapshot. Leave all LockManager locks alone.
- opCtx->recoveryUnit()->abandonSnapshot();
- }
- else {
- // Release and reacquire locks.
- QueryYield::yieldAllLocks(opCtx, fetcher);
- }
-
- return _planYielding->restoreStateWithoutRetrying(opCtx);
+ _planYielding->saveState();
+ } catch (const WriteConflictException& wce) {
+ invariant(!"WriteConflictException not allowed in saveState");
}
- catch (const WriteConflictException& wce) {
- CurOp::get(opCtx)->debug().writeConflicts++;
- WriteConflictException::logAndBackoff(attempt,
- "plan execution restoreState",
- _planYielding->collection()->ns().ns());
- // retry
+
+ if (_policy == PlanExecutor::WRITE_CONFLICT_RETRY_ONLY) {
+ // Just reset the snapshot. Leave all LockManager locks alone.
+ opCtx->recoveryUnit()->abandonSnapshot();
+ } else {
+ // Release and reacquire locks.
+ QueryYield::yieldAllLocks(opCtx, fetcher);
}
+
+ return _planYielding->restoreStateWithoutRetrying(opCtx);
+ } catch (const WriteConflictException& wce) {
+ CurOp::get(opCtx)->debug().writeConflicts++;
+ WriteConflictException::logAndBackoff(
+ attempt, "plan execution restoreState", _planYielding->collection()->ns().ns());
+ // retry
}
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/query/plan_yield_policy.h b/src/mongo/db/query/plan_yield_policy.h
index 8dc5d45447e..0de47c608cd 100644
--- a/src/mongo/db/query/plan_yield_policy.h
+++ b/src/mongo/db/query/plan_yield_policy.h
@@ -34,64 +34,67 @@
namespace mongo {
- class RecordFetcher;
-
- class PlanYieldPolicy {
- public:
- /**
- * If policy == WRITE_CONFLICT_RETRY_ONLY, shouldYield will only return true after
- * forceYield has been called, and yield will only abandonSnapshot without releasing any
- * locks.
- */
- PlanYieldPolicy(PlanExecutor* exec, PlanExecutor::YieldPolicy policy);
-
- /**
- * Used by YIELD_AUTO plan executors in order to check whether it is time to yield.
- * PlanExecutors give up their locks periodically in order to be fair to other
- * threads.
- */
- bool shouldYield();
-
- /**
- * Resets the yield timer so that we wait for a while before yielding again.
- */
- void resetTimer();
-
- /**
- * Used to cause a plan executor to give up locks and go to sleep. The PlanExecutor
- * must *not* be in saved state. Handles calls to save/restore state internally.
- *
- * If 'fetcher' is non-NULL, then we are yielding because the storage engine told us
- * that we will page fault on this record. We use 'fetcher' to retrieve the record
- * after we give up our locks.
- *
- * Returns true if the executor was restored successfully and is still alive. Returns false
- * if the executor got killed during yield.
- */
- bool yield(RecordFetcher* fetcher = NULL);
-
- /**
- * All calls to shouldYield will return true until the next call to yield.
- */
- void forceYield() {
- dassert(allowedToYield());
- _forceYield = true;
- }
-
- bool allowedToYield() const { return _policy != PlanExecutor::YIELD_MANUAL; }
-
- void setPolicy(PlanExecutor::YieldPolicy policy) { _policy = policy; }
-
- private:
- PlanExecutor::YieldPolicy _policy;
-
- bool _forceYield;
- ElapsedTracker _elapsedTracker;
-
- // The plan executor which this yield policy is responsible for yielding. Must
- // not outlive the plan executor.
- PlanExecutor* const _planYielding;
- };
-
-} // namespace mongo
-
+class RecordFetcher;
+
+class PlanYieldPolicy {
+public:
+ /**
+ * If policy == WRITE_CONFLICT_RETRY_ONLY, shouldYield will only return true after
+ * forceYield has been called, and yield will only abandonSnapshot without releasing any
+ * locks.
+ */
+ PlanYieldPolicy(PlanExecutor* exec, PlanExecutor::YieldPolicy policy);
+
+ /**
+ * Used by YIELD_AUTO plan executors in order to check whether it is time to yield.
+ * PlanExecutors give up their locks periodically in order to be fair to other
+ * threads.
+ */
+ bool shouldYield();
+
+ /**
+ * Resets the yield timer so that we wait for a while before yielding again.
+ */
+ void resetTimer();
+
+ /**
+ * Used to cause a plan executor to give up locks and go to sleep. The PlanExecutor
+ * must *not* be in saved state. Handles calls to save/restore state internally.
+ *
+ * If 'fetcher' is non-NULL, then we are yielding because the storage engine told us
+ * that we will page fault on this record. We use 'fetcher' to retrieve the record
+ * after we give up our locks.
+ *
+ * Returns true if the executor was restored successfully and is still alive. Returns false
+ * if the executor got killed during yield.
+ */
+ bool yield(RecordFetcher* fetcher = NULL);
+
+ /**
+ * All calls to shouldYield will return true until the next call to yield.
+ */
+ void forceYield() {
+ dassert(allowedToYield());
+ _forceYield = true;
+ }
+
+ bool allowedToYield() const {
+ return _policy != PlanExecutor::YIELD_MANUAL;
+ }
+
+ void setPolicy(PlanExecutor::YieldPolicy policy) {
+ _policy = policy;
+ }
+
+private:
+ PlanExecutor::YieldPolicy _policy;
+
+ bool _forceYield;
+ ElapsedTracker _elapsedTracker;
+
+ // The plan executor which this yield policy is responsible for yielding. Must
+ // not outlive the plan executor.
+ PlanExecutor* const _planYielding;
+};
+
+} // namespace mongo
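
Internally, a YIELD_AUTO executor drives this policy from its work loop: it asks shouldYield() periodically and, when that fires, calls yield(), treating a false return as "killed while yielded". A minimal sketch of that driver side (illustration only, not part of this change; it assumes the header above, and maybeYield is a hypothetical name):

#include "mongo/db/query/plan_yield_policy.h"

namespace mongo {

// Hypothetical fragment of an executor work loop (sketch only).
// Returns false if the executor was killed while its locks were yielded.
bool maybeYield(PlanYieldPolicy* yieldPolicy, RecordFetcher* fetcher = NULL) {
    if (yieldPolicy->shouldYield()) {
        // yield() saves state, releases locks (or merely abandons the snapshot under
        // WRITE_CONFLICT_RETRY_ONLY), then restores state.
        return yieldPolicy->yield(fetcher);
    }
    return true;  // Not time to yield; keep working the plan.
}

}  // namespace mongo
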
diff --git a/src/mongo/db/query/planner_access.cpp b/src/mongo/db/query/planner_access.cpp
index e0f714716b8..1c059933f17 100644
--- a/src/mongo/db/query/planner_access.cpp
+++ b/src/mongo/db/query/planner_access.cpp
@@ -46,1305 +46,1250 @@
namespace {
- using namespace mongo;
+using namespace mongo;
- /**
- * Text node functors.
- */
- bool isTextNode(const QuerySolutionNode* node) {
- return STAGE_TEXT == node->getType();
- }
+/**
+ * Text node functors.
+ */
+bool isTextNode(const QuerySolutionNode* node) {
+ return STAGE_TEXT == node->getType();
+}
-} // namespace
+} // namespace
namespace mongo {
- using std::unique_ptr;
- using std::vector;
+using std::unique_ptr;
+using std::vector;
- // static
- QuerySolutionNode* QueryPlannerAccess::makeCollectionScan(const CanonicalQuery& query,
- bool tailable,
- const QueryPlannerParams& params) {
- // Make the (only) node, a collection scan.
- CollectionScanNode* csn = new CollectionScanNode();
- csn->name = query.ns();
- csn->filter.reset(query.root()->shallowClone());
- csn->tailable = tailable;
- csn->maxScan = query.getParsed().getMaxScan();
-
- // If the hint is {$natural: +-1} this changes the direction of the collection scan.
- if (!query.getParsed().getHint().isEmpty()) {
- BSONElement natural = query.getParsed().getHint().getFieldDotted("$natural");
- if (!natural.eoo()) {
- csn->direction = natural.numberInt() >= 0 ? 1 : -1;
- }
+// static
+QuerySolutionNode* QueryPlannerAccess::makeCollectionScan(const CanonicalQuery& query,
+ bool tailable,
+ const QueryPlannerParams& params) {
+ // Make the (only) node, a collection scan.
+ CollectionScanNode* csn = new CollectionScanNode();
+ csn->name = query.ns();
+ csn->filter.reset(query.root()->shallowClone());
+ csn->tailable = tailable;
+ csn->maxScan = query.getParsed().getMaxScan();
+
+ // If the hint is {$natural: +-1} this changes the direction of the collection scan.
+ if (!query.getParsed().getHint().isEmpty()) {
+ BSONElement natural = query.getParsed().getHint().getFieldDotted("$natural");
+ if (!natural.eoo()) {
+ csn->direction = natural.numberInt() >= 0 ? 1 : -1;
}
+ }
- // The sort can specify $natural as well. The sort direction should override the hint
- // direction if both are specified.
- const BSONObj& sortObj = query.getParsed().getSort();
- if (!sortObj.isEmpty()) {
- BSONElement natural = sortObj.getFieldDotted("$natural");
- if (!natural.eoo()) {
- csn->direction = natural.numberInt() >= 0 ? 1 : -1;
- }
+ // The sort can specify $natural as well. The sort direction should override the hint
+ // direction if both are specified.
+ const BSONObj& sortObj = query.getParsed().getSort();
+ if (!sortObj.isEmpty()) {
+ BSONElement natural = sortObj.getFieldDotted("$natural");
+ if (!natural.eoo()) {
+ csn->direction = natural.numberInt() >= 0 ? 1 : -1;
}
-
- return csn;
}
- // static
- QuerySolutionNode* QueryPlannerAccess::makeLeafNode(const CanonicalQuery& query,
- const IndexEntry& index,
- size_t pos,
- MatchExpression* expr,
- IndexBoundsBuilder::BoundsTightness* tightnessOut) {
- // We're guaranteed that all GEO_NEARs are first. This slightly violates the "sort index
- // predicates by their position in the compound index" rule but GEO_NEAR isn't an ixscan.
- // This saves our bacon when we have {foo: 1, bar: "2dsphere"} and the predicate on bar is a
- // $near. If we didn't get the GEO_NEAR first we'd create an IndexScanNode and later cast
- // it to a GeoNear2DSphereNode
- //
- // This should gracefully deal with the case where we have a pred over foo but no geo clause
- // over bar. In that case there is no GEO_NEAR to appear first and it's treated like a
- // straight ixscan.
-
- if (MatchExpression::GEO_NEAR == expr->matchType()) {
- // We must not keep the expression node around.
- *tightnessOut = IndexBoundsBuilder::EXACT;
- GeoNearMatchExpression* nearExpr = static_cast<GeoNearMatchExpression*>(expr);
-
- BSONElement elt = index.keyPattern.firstElement();
- bool indexIs2D = (String == elt.type() && "2d" == elt.String());
-
- if (indexIs2D) {
- GeoNear2DNode* ret = new GeoNear2DNode();
- ret->indexKeyPattern = index.keyPattern;
- ret->nq = &nearExpr->getData();
- ret->baseBounds.fields.resize(index.keyPattern.nFields());
- if (NULL != query.getProj()) {
- ret->addPointMeta = query.getProj()->wantGeoNearPoint();
- ret->addDistMeta = query.getProj()->wantGeoNearDistance();
- }
-
- return ret;
- }
- else {
- GeoNear2DSphereNode* ret = new GeoNear2DSphereNode();
- ret->indexKeyPattern = index.keyPattern;
- ret->nq = &nearExpr->getData();
- ret->baseBounds.fields.resize(index.keyPattern.nFields());
- if (NULL != query.getProj()) {
- ret->addPointMeta = query.getProj()->wantGeoNearPoint();
- ret->addDistMeta = query.getProj()->wantGeoNearDistance();
- }
- return ret;
- }
- }
- else if (MatchExpression::TEXT == expr->matchType()) {
- // We must not keep the expression node around.
- *tightnessOut = IndexBoundsBuilder::EXACT;
- TextMatchExpression* textExpr = static_cast<TextMatchExpression*>(expr);
- TextNode* ret = new TextNode();
+ return csn;
+}
+
+// static
+QuerySolutionNode* QueryPlannerAccess::makeLeafNode(
+ const CanonicalQuery& query,
+ const IndexEntry& index,
+ size_t pos,
+ MatchExpression* expr,
+ IndexBoundsBuilder::BoundsTightness* tightnessOut) {
+ // We're guaranteed that all GEO_NEARs are first. This slightly violates the "sort index
+ // predicates by their position in the compound index" rule but GEO_NEAR isn't an ixscan.
+ // This saves our bacon when we have {foo: 1, bar: "2dsphere"} and the predicate on bar is a
+ // $near. If we didn't get the GEO_NEAR first we'd create an IndexScanNode and later cast
+ // it to a GeoNear2DSphereNode
+ //
+ // This should gracefully deal with the case where we have a pred over foo but no geo clause
+ // over bar. In that case there is no GEO_NEAR to appear first and it's treated like a
+ // straight ixscan.
+
+ if (MatchExpression::GEO_NEAR == expr->matchType()) {
+ // We must not keep the expression node around.
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ GeoNearMatchExpression* nearExpr = static_cast<GeoNearMatchExpression*>(expr);
+
+ BSONElement elt = index.keyPattern.firstElement();
+ bool indexIs2D = (String == elt.type() && "2d" == elt.String());
+
+ if (indexIs2D) {
+ GeoNear2DNode* ret = new GeoNear2DNode();
ret->indexKeyPattern = index.keyPattern;
- ret->query = textExpr->getQuery();
- ret->language = textExpr->getLanguage();
- ret->caseSensitive = textExpr->getCaseSensitive();
- return ret;
- }
- else {
- // Note that indexKeyPattern.firstElement().fieldName() may not equal expr->path()
- // because expr might be inside an array operator that provides a path prefix.
- IndexScanNode* isn = new IndexScanNode();
- isn->indexKeyPattern = index.keyPattern;
- isn->indexIsMultiKey = index.multikey;
- isn->bounds.fields.resize(index.keyPattern.nFields());
- isn->maxScan = query.getParsed().getMaxScan();
- isn->addKeyMetadata = query.getParsed().returnKey();
-
- // Get the ixtag->pos-th element of the index key pattern.
- // TODO: cache this instead/with ixtag->pos?
- BSONObjIterator it(index.keyPattern);
- BSONElement keyElt = it.next();
- for (size_t i = 0; i < pos; ++i) {
- verify(it.more());
- keyElt = it.next();
+ ret->nq = &nearExpr->getData();
+ ret->baseBounds.fields.resize(index.keyPattern.nFields());
+ if (NULL != query.getProj()) {
+ ret->addPointMeta = query.getProj()->wantGeoNearPoint();
+ ret->addDistMeta = query.getProj()->wantGeoNearDistance();
}
- verify(!keyElt.eoo());
-
- IndexBoundsBuilder::translate(expr, keyElt, index, &isn->bounds.fields[pos],
- tightnessOut);
- return isn;
+ return ret;
+ } else {
+ GeoNear2DSphereNode* ret = new GeoNear2DSphereNode();
+ ret->indexKeyPattern = index.keyPattern;
+ ret->nq = &nearExpr->getData();
+ ret->baseBounds.fields.resize(index.keyPattern.nFields());
+ if (NULL != query.getProj()) {
+ ret->addPointMeta = query.getProj()->wantGeoNearPoint();
+ ret->addDistMeta = query.getProj()->wantGeoNearDistance();
+ }
+ return ret;
}
- }
+ } else if (MatchExpression::TEXT == expr->matchType()) {
+ // We must not keep the expression node around.
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ TextMatchExpression* textExpr = static_cast<TextMatchExpression*>(expr);
+ TextNode* ret = new TextNode();
+ ret->indexKeyPattern = index.keyPattern;
+ ret->query = textExpr->getQuery();
+ ret->language = textExpr->getLanguage();
+ ret->caseSensitive = textExpr->getCaseSensitive();
+ return ret;
+ } else {
+ // Note that indexKeyPattern.firstElement().fieldName() may not equal expr->path()
+ // because expr might be inside an array operator that provides a path prefix.
+ IndexScanNode* isn = new IndexScanNode();
+ isn->indexKeyPattern = index.keyPattern;
+ isn->indexIsMultiKey = index.multikey;
+ isn->bounds.fields.resize(index.keyPattern.nFields());
+ isn->maxScan = query.getParsed().getMaxScan();
+ isn->addKeyMetadata = query.getParsed().returnKey();
- bool QueryPlannerAccess::shouldMergeWithLeaf(const MatchExpression* expr,
- const ScanBuildingState& scanState) {
- const QuerySolutionNode* node = scanState.currentScan.get();
- if (NULL == node || NULL == expr) {
- return false;
+ // Get the ixtag->pos-th element of the index key pattern.
+ // TODO: cache this instead/with ixtag->pos?
+ BSONObjIterator it(index.keyPattern);
+ BSONElement keyElt = it.next();
+ for (size_t i = 0; i < pos; ++i) {
+ verify(it.more());
+ keyElt = it.next();
}
+ verify(!keyElt.eoo());
- if (NULL == scanState.ixtag) {
- return false;
- }
+ IndexBoundsBuilder::translate(expr, keyElt, index, &isn->bounds.fields[pos], tightnessOut);
- if (scanState.currentIndexNumber != scanState.ixtag->index) {
- return false;
- }
+ return isn;
+ }
+}
- size_t pos = scanState.ixtag->pos;
- const IndexEntry& index = scanState.indices[scanState.currentIndexNumber];
- const MatchExpression::MatchType mergeType = scanState.root->matchType();
+bool QueryPlannerAccess::shouldMergeWithLeaf(const MatchExpression* expr,
+ const ScanBuildingState& scanState) {
+ const QuerySolutionNode* node = scanState.currentScan.get();
+ if (NULL == node || NULL == expr) {
+ return false;
+ }
- const StageType type = node->getType();
- const MatchExpression::MatchType exprType = expr->matchType();
+ if (NULL == scanState.ixtag) {
+ return false;
+ }
- //
- // First handle special solution tree leaf types. In general, normal index bounds
- // building is not used for special leaf types, and hence we cannot merge leaves.
- //
- // This rule is always true for OR, but there are exceptions for AND.
- // Specifically, we can often merge a predicate with a special leaf type
- // by adding a filter to the special leaf type.
- //
+ if (scanState.currentIndexNumber != scanState.ixtag->index) {
+ return false;
+ }
- if (STAGE_TEXT == type) {
- // Currently only one text predicate is allowed, but to be safe, make sure that we
- // do not try to merge two text predicates.
- return MatchExpression::AND == mergeType
- && MatchExpression::TEXT != exprType;
- }
+ size_t pos = scanState.ixtag->pos;
+ const IndexEntry& index = scanState.indices[scanState.currentIndexNumber];
+ const MatchExpression::MatchType mergeType = scanState.root->matchType();
+
+ const StageType type = node->getType();
+ const MatchExpression::MatchType exprType = expr->matchType();
+
+ //
+ // First handle special solution tree leaf types. In general, normal index bounds
+ // building is not used for special leaf types, and hence we cannot merge leaves.
+ //
+ // This rule is always true for OR, but there are exceptions for AND.
+ // Specifically, we can often merge a predicate with a special leaf type
+ // by adding a filter to the special leaf type.
+ //
+
+ if (STAGE_TEXT == type) {
+ // Currently only one text predicate is allowed, but to be safe, make sure that we
+ // do not try to merge two text predicates.
+ return MatchExpression::AND == mergeType && MatchExpression::TEXT != exprType;
+ }
- if (STAGE_GEO_NEAR_2D == type || STAGE_GEO_NEAR_2DSPHERE == type) {
- // Currently only one GEO_NEAR is allowed, but to be safe, make sure that we
- // do not try to merge two GEO_NEAR predicates.
- return MatchExpression::AND == mergeType
- && MatchExpression::GEO_NEAR != exprType;
- }
+ if (STAGE_GEO_NEAR_2D == type || STAGE_GEO_NEAR_2DSPHERE == type) {
+ // Currently only one GEO_NEAR is allowed, but to be safe, make sure that we
+ // do not try to merge two GEO_NEAR predicates.
+ return MatchExpression::AND == mergeType && MatchExpression::GEO_NEAR != exprType;
+ }
- //
- // If we're here, then we're done checking for special leaf nodes, and the leaf
- // must be a regular index scan.
- //
+ //
+ // If we're here, then we're done checking for special leaf nodes, and the leaf
+ // must be a regular index scan.
+ //
- invariant(type == STAGE_IXSCAN);
- const IndexScanNode* scan = static_cast<const IndexScanNode*>(node);
- const IndexBounds* boundsToFillOut = &scan->bounds;
+ invariant(type == STAGE_IXSCAN);
+ const IndexScanNode* scan = static_cast<const IndexScanNode*>(node);
+ const IndexBounds* boundsToFillOut = &scan->bounds;
- if (boundsToFillOut->fields[pos].name.empty()) {
- // The bounds will be compounded. This is OK because the
- // plan enumerator told us that it is OK.
+ if (boundsToFillOut->fields[pos].name.empty()) {
+ // The bounds will be compounded. This is OK because the
+ // plan enumerator told us that it is OK.
+ return true;
+ } else {
+ if (MatchExpression::AND == mergeType) {
+ // The bounds will be intersected. This is OK provided
+ // that the index is NOT multikey.
+ return !index.multikey;
+ } else {
+ // The bounds will be unionized.
return true;
}
- else {
- if (MatchExpression::AND == mergeType) {
- // The bounds will be intersected. This is OK provided
- // that the index is NOT multikey.
- return !index.multikey;
- }
- else {
- // The bounds will be unionized.
- return true;
- }
- }
+ }
+}
+
+void QueryPlannerAccess::mergeWithLeafNode(MatchExpression* expr, ScanBuildingState* scanState) {
+ QuerySolutionNode* node = scanState->currentScan.get();
+ invariant(NULL != node);
+
+ const MatchExpression::MatchType mergeType = scanState->root->matchType();
+ size_t pos = scanState->ixtag->pos;
+ const IndexEntry& index = scanState->indices[scanState->currentIndexNumber];
+ const StageType type = node->getType();
+ // Text data is covered, but not exactly. Text covering is unlike any other covering
+ // so we deal with it in addFilterToSolutionNode.
+ if (STAGE_TEXT == type) {
+ scanState->tightness = IndexBoundsBuilder::INEXACT_COVERED;
+ return;
}
- void QueryPlannerAccess::mergeWithLeafNode(MatchExpression* expr,
- ScanBuildingState* scanState) {
- QuerySolutionNode* node = scanState->currentScan.get();
- invariant(NULL != node);
+ IndexBounds* boundsToFillOut = NULL;
- const MatchExpression::MatchType mergeType = scanState->root->matchType();
- size_t pos = scanState->ixtag->pos;
- const IndexEntry& index = scanState->indices[scanState->currentIndexNumber];
+ if (STAGE_GEO_NEAR_2D == type) {
+ invariant(INDEX_2D == index.type);
- const StageType type = node->getType();
+ // 2D indexes are weird - the "2d" field stores a normally-indexed BinData field, but
+ // additional array fields are *not* exploded into multi-keys - they are stored directly
+ // as arrays in the index. Also, no matter what the index expression, the "2d" field is
+ // always first.
+ // This means that we can only generically accumulate bounds for 2D indexes over the
+ // first "2d" field (pos == 0) - MatchExpressions over other fields in the 2D index may
+ // be covered (can be evaluated using only the 2D index key). The additional fields
+ // must not affect the index scan bounds, since they are not stored in an
+ // IndexScan-compatible format.
- // Text data is covered, but not exactly. Text covering is unlike any other covering
- // so we deal with it in addFilterToSolutionNode.
- if (STAGE_TEXT == type) {
+ if (pos > 0) {
+ // Marking this field as covered allows the planner to accumulate a MatchExpression
+ // over the returned 2D index keys instead of adding to the index bounds.
scanState->tightness = IndexBoundsBuilder::INEXACT_COVERED;
return;
}
- IndexBounds* boundsToFillOut = NULL;
-
- if (STAGE_GEO_NEAR_2D == type) {
-
- invariant(INDEX_2D == index.type);
-
- // 2D indexes are weird - the "2d" field stores a normally-indexed BinData field, but
- // additional array fields are *not* exploded into multi-keys - they are stored directly
- // as arrays in the index. Also, no matter what the index expression, the "2d" field is
- // always first.
- // This means that we can only generically accumulate bounds for 2D indexes over the
- // first "2d" field (pos == 0) - MatchExpressions over other fields in the 2D index may
- // be covered (can be evaluated using only the 2D index key). The additional fields
- // must not affect the index scan bounds, since they are not stored in an
- // IndexScan-compatible format.
-
- if (pos > 0) {
- // Marking this field as covered allows the planner to accumulate a MatchExpression
- // over the returned 2D index keys instead of adding to the index bounds.
- scanState->tightness = IndexBoundsBuilder::INEXACT_COVERED;
- return;
- }
+ // We may have other $geoPredicates on a near index - generate bounds for these
+ GeoNear2DNode* gn = static_cast<GeoNear2DNode*>(node);
+ boundsToFillOut = &gn->baseBounds;
+ } else if (STAGE_GEO_NEAR_2DSPHERE == type) {
+ GeoNear2DSphereNode* gn = static_cast<GeoNear2DSphereNode*>(node);
+ boundsToFillOut = &gn->baseBounds;
+ } else {
+ verify(type == STAGE_IXSCAN);
+ IndexScanNode* scan = static_cast<IndexScanNode*>(node);
- // We may have other $geoPredicates on a near index - generate bounds for these
- GeoNear2DNode* gn = static_cast<GeoNear2DNode*>(node);
- boundsToFillOut = &gn->baseBounds;
- }
- else if (STAGE_GEO_NEAR_2DSPHERE == type) {
- GeoNear2DSphereNode* gn = static_cast<GeoNear2DSphereNode*>(node);
- boundsToFillOut = &gn->baseBounds;
+ // See STAGE_GEO_NEAR_2D above - 2D indexes can only accumulate scan bounds over the
+ // first "2d" field (pos == 0)
+ if (INDEX_2D == index.type && pos > 0) {
+ scanState->tightness = IndexBoundsBuilder::INEXACT_COVERED;
+ return;
}
- else {
- verify(type == STAGE_IXSCAN);
- IndexScanNode* scan = static_cast<IndexScanNode*>(node);
-
- // See STAGE_GEO_NEAR_2D above - 2D indexes can only accumulate scan bounds over the
- // first "2d" field (pos == 0)
- if (INDEX_2D == index.type && pos > 0) {
- scanState->tightness = IndexBoundsBuilder::INEXACT_COVERED;
- return;
- }
- boundsToFillOut = &scan->bounds;
- }
+ boundsToFillOut = &scan->bounds;
+ }
- // Get the ixtag->pos-th element of the index key pattern.
- // TODO: cache this instead/with ixtag->pos?
- BSONObjIterator it(index.keyPattern);
- BSONElement keyElt = it.next();
- for (size_t i = 0; i < pos; ++i) {
- verify(it.more());
- keyElt = it.next();
- }
- verify(!keyElt.eoo());
- scanState->tightness = IndexBoundsBuilder::INEXACT_FETCH;
+ // Get the ixtag->pos-th element of the index key pattern.
+ // TODO: cache this instead/with ixtag->pos?
+ BSONObjIterator it(index.keyPattern);
+ BSONElement keyElt = it.next();
+ for (size_t i = 0; i < pos; ++i) {
+ verify(it.more());
+ keyElt = it.next();
+ }
+ verify(!keyElt.eoo());
+ scanState->tightness = IndexBoundsBuilder::INEXACT_FETCH;
- verify(boundsToFillOut->fields.size() > pos);
+ verify(boundsToFillOut->fields.size() > pos);
- OrderedIntervalList* oil = &boundsToFillOut->fields[pos];
+ OrderedIntervalList* oil = &boundsToFillOut->fields[pos];
- if (boundsToFillOut->fields[pos].name.empty()) {
- IndexBoundsBuilder::translate(expr, keyElt, index, oil, &scanState->tightness);
- }
- else {
- if (MatchExpression::AND == mergeType) {
- IndexBoundsBuilder::translateAndIntersect(expr, keyElt, index, oil,
- &scanState->tightness);
- }
- else {
- verify(MatchExpression::OR == mergeType);
- IndexBoundsBuilder::translateAndUnion(expr, keyElt, index, oil,
- &scanState->tightness);
- }
+ if (boundsToFillOut->fields[pos].name.empty()) {
+ IndexBoundsBuilder::translate(expr, keyElt, index, oil, &scanState->tightness);
+ } else {
+ if (MatchExpression::AND == mergeType) {
+ IndexBoundsBuilder::translateAndIntersect(
+ expr, keyElt, index, oil, &scanState->tightness);
+ } else {
+ verify(MatchExpression::OR == mergeType);
+ IndexBoundsBuilder::translateAndUnion(expr, keyElt, index, oil, &scanState->tightness);
}
}
+}
+
+// static
+void QueryPlannerAccess::finishTextNode(QuerySolutionNode* node, const IndexEntry& index) {
+ TextNode* tn = static_cast<TextNode*>(node);
+
+ // Figure out what positions are prefix positions. We build an index key prefix from
+ // the predicates over the text index prefix keys.
+ // For example, say keyPattern = { a: 1, _fts: "text", _ftsx: 1, b: 1 }
+ // prefixEnd should be 1.
+ size_t prefixEnd = 0;
+ BSONObjIterator it(tn->indexKeyPattern);
+ // Count how many prefix terms we have.
+ while (it.more()) {
+        // We know that the only key pattern field with a type of String is the _fts field
+ // which is immediately after all prefix fields.
+ if (String == it.next().type()) {
+ break;
+ }
+ ++prefixEnd;
+ }
- // static
- void QueryPlannerAccess::finishTextNode(QuerySolutionNode* node, const IndexEntry& index) {
- TextNode* tn = static_cast<TextNode*>(node);
-
- // Figure out what positions are prefix positions. We build an index key prefix from
- // the predicates over the text index prefix keys.
- // For example, say keyPattern = { a: 1, _fts: "text", _ftsx: 1, b: 1 }
- // prefixEnd should be 1.
- size_t prefixEnd = 0;
- BSONObjIterator it(tn->indexKeyPattern);
- // Count how many prefix terms we have.
- while (it.more()) {
- // We know that the only key pattern with a type of String is the _fts field
- // which is immediately after all prefix fields.
- if (String == it.next().type()) {
- break;
+ // If there's no prefix, the filter is already on the node and the index prefix is null.
+ // We can just return.
+ if (!prefixEnd) {
+ return;
+ }
+
+ // We can't create a text stage if there aren't EQ predicates on its prefix terms. So
+ // if we've made it this far, we should have collected the prefix predicates in the
+ // filter.
+ invariant(NULL != tn->filter.get());
+ MatchExpression* textFilterMe = tn->filter.get();
+
+ BSONObjBuilder prefixBob;
+
+ if (MatchExpression::AND != textFilterMe->matchType()) {
+ // Only one prefix term.
+ invariant(1 == prefixEnd);
+ // Sanity check: must be an EQ.
+ invariant(MatchExpression::EQ == textFilterMe->matchType());
+
+ EqualityMatchExpression* eqExpr = static_cast<EqualityMatchExpression*>(textFilterMe);
+ prefixBob.append(eqExpr->getData());
+ tn->filter.reset();
+ } else {
+ invariant(MatchExpression::AND == textFilterMe->matchType());
+
+ // Indexed by the keyPattern position index assignment. We want to add
+ // prefixes in order but we must order them first.
+ vector<MatchExpression*> prefixExprs(prefixEnd, NULL);
+
+ AndMatchExpression* amExpr = static_cast<AndMatchExpression*>(textFilterMe);
+ invariant(amExpr->numChildren() >= prefixEnd);
+
+ // Look through the AND children. The prefix children we want to
+ // stash in prefixExprs.
+ size_t curChild = 0;
+ while (curChild < amExpr->numChildren()) {
+ MatchExpression* child = amExpr->getChild(curChild);
+ IndexTag* ixtag = static_cast<IndexTag*>(child->getTag());
+ invariant(NULL != ixtag);
+ // Skip this child if it's not part of a prefix, or if we've already assigned a
+ // predicate to this prefix position.
+ if (ixtag->pos >= prefixEnd || prefixExprs[ixtag->pos] != NULL) {
+ ++curChild;
+ continue;
}
- ++prefixEnd;
+ // prefixExprs takes ownership of 'child'.
+ prefixExprs[ixtag->pos] = child;
+ amExpr->getChildVector()->erase(amExpr->getChildVector()->begin() + curChild);
+ // Don't increment curChild.
+ }
+
+ // Go through the prefix equalities in order and create an index prefix out of them.
+ for (size_t i = 0; i < prefixExprs.size(); ++i) {
+ MatchExpression* prefixMe = prefixExprs[i];
+ invariant(NULL != prefixMe);
+ invariant(MatchExpression::EQ == prefixMe->matchType());
+ EqualityMatchExpression* eqExpr = static_cast<EqualityMatchExpression*>(prefixMe);
+ prefixBob.append(eqExpr->getData());
+ // We removed this from the AND expression that owned it, so we must clean it
+ // up ourselves.
+ delete prefixMe;
}
- // If there's no prefix, the filter is already on the node and the index prefix is null.
- // We can just return.
- if (!prefixEnd) {
- return;
+ // Clear out an empty $and.
+ if (0 == amExpr->numChildren()) {
+ tn->filter.reset();
+ } else if (1 == amExpr->numChildren()) {
+            // An $and with a single child is just that child; hoist it and drop the $and.
+ MatchExpression* child = amExpr->getChild(0);
+ amExpr->getChildVector()->clear();
+ // Deletes current filter which is amExpr.
+ tn->filter.reset(child);
}
+ }
- // We can't create a text stage if there aren't EQ predicates on its prefix terms. So
- // if we've made it this far, we should have collected the prefix predicates in the
- // filter.
- invariant(NULL != tn->filter.get());
- MatchExpression* textFilterMe = tn->filter.get();
-
- BSONObjBuilder prefixBob;
-
- if (MatchExpression::AND != textFilterMe->matchType()) {
- // Only one prefix term.
- invariant(1 == prefixEnd);
- // Sanity check: must be an EQ.
- invariant(MatchExpression::EQ == textFilterMe->matchType());
+ tn->indexPrefix = prefixBob.obj();
+}
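// A minimal sketch of the prefix handling above, assuming a hypothetical text index
// { city: 1, _fts: "text", _ftsx: 1 } and the query { city: "NY", $text: { $search: "coffee" } }:
// prefixEnd is 1 (only 'city' precedes the _fts field), the EQ predicate on 'city' is pulled
// out of the filter, and prefixBob yields the index prefix that constrains the text scan to
// the city == "NY" portion of the index. Counting the prefix fields in isolation would look
// roughly like:
//
//     size_t prefixEnd = 0;
//     BSONObjIterator it(BSON("city" << 1 << "_fts" << "text" << "_ftsx" << 1));
//     while (it.more() && String != it.next().type()) {
//         ++prefixEnd;  // 'city' is counted; the loop stops at the _fts field.
//     }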
- EqualityMatchExpression* eqExpr = static_cast<EqualityMatchExpression*>(textFilterMe);
- prefixBob.append(eqExpr->getData());
- tn->filter.reset();
+// static
+bool QueryPlannerAccess::orNeedsFetch(const ScanBuildingState* scanState) {
+ if (scanState->loosestBounds == IndexBoundsBuilder::EXACT) {
+ return false;
+ } else if (scanState->loosestBounds == IndexBoundsBuilder::INEXACT_FETCH) {
+ return true;
+ } else {
+ invariant(scanState->loosestBounds == IndexBoundsBuilder::INEXACT_COVERED);
+ const IndexEntry& index = scanState->indices[scanState->currentIndexNumber];
+ return index.multikey;
+ }
+}
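// Roughly, for a hypothetical index {a: 1} and the query {$or: [{a: /foo/}, {a: /bar/}]}:
//   - If every branch produced EXACT bounds (e.g. {$or: [{a: 1}, {a: 2}]}), no fetch is needed.
//   - With the regex branches on a non-multikey {a: 1}, the loosest bounds are INEXACT_COVERED,
//     so the whole $or can run as a filter on the index scan itself (see finishAndOutputLeaf).
//   - If {a: 1} is multikey, each index key carries a single array element, so key-level
//     filtering is not guaranteed to match document-level semantics; we fetch and re-check.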
+
+// static
+void QueryPlannerAccess::finishAndOutputLeaf(ScanBuildingState* scanState,
+ vector<QuerySolutionNode*>* out) {
+ finishLeafNode(scanState->currentScan.get(), scanState->indices[scanState->currentIndexNumber]);
+
+ if (MatchExpression::OR == scanState->root->matchType()) {
+ if (orNeedsFetch(scanState)) {
+ // In order to correctly evaluate the predicates for this index, we have to
+ // fetch the full documents. Add a fetch node above the index scan whose filter
+ // includes *all* of the predicates used to generate the ixscan.
+ FetchNode* fetch = new FetchNode();
+ // Takes ownership.
+ fetch->filter.reset(scanState->curOr.release());
+ // Takes ownership.
+ fetch->children.push_back(scanState->currentScan.release());
+
+ scanState->currentScan.reset(fetch);
+ } else if (scanState->loosestBounds == IndexBoundsBuilder::INEXACT_COVERED) {
+            // This is an OR: at least one of the predicates used to generate 'currentScan'
+ // is inexact covered, but none is inexact fetch. This means that we can put
+ // these predicates, joined by an $or, as filters on the index scan. This avoids
+ // a fetch and allows the predicates to be covered by the index.
+ //
+ // Ex.
+ // Say we have index {a: 1} and query {$or: [{a: /foo/}, {a: /bar/}]}.
+ // The entire query, {$or: [{a: /foo/}, {a: /bar/}]}, should be a filter
+ // in the index scan stage itself.
+ scanState->currentScan->filter.reset(scanState->curOr.release());
}
- else {
- invariant(MatchExpression::AND == textFilterMe->matchType());
-
- // Indexed by the keyPattern position index assignment. We want to add
- // prefixes in order but we must order them first.
- vector<MatchExpression*> prefixExprs(prefixEnd, NULL);
-
- AndMatchExpression* amExpr = static_cast<AndMatchExpression*>(textFilterMe);
- invariant(amExpr->numChildren() >= prefixEnd);
-
- // Look through the AND children. The prefix children we want to
- // stash in prefixExprs.
- size_t curChild = 0;
- while (curChild < amExpr->numChildren()) {
- MatchExpression* child = amExpr->getChild(curChild);
- IndexTag* ixtag = static_cast<IndexTag*>(child->getTag());
- invariant(NULL != ixtag);
- // Skip this child if it's not part of a prefix, or if we've already assigned a
- // predicate to this prefix position.
- if (ixtag->pos >= prefixEnd || prefixExprs[ixtag->pos] != NULL) {
- ++curChild;
- continue;
- }
- // prefixExprs takes ownership of 'child'.
- prefixExprs[ixtag->pos] = child;
- amExpr->getChildVector()->erase(amExpr->getChildVector()->begin() + curChild);
- // Don't increment curChild.
- }
+ }
- // Go through the prefix equalities in order and create an index prefix out of them.
- for (size_t i = 0; i < prefixExprs.size(); ++i) {
- MatchExpression* prefixMe = prefixExprs[i];
- invariant(NULL != prefixMe);
- invariant(MatchExpression::EQ == prefixMe->matchType());
- EqualityMatchExpression* eqExpr = static_cast<EqualityMatchExpression*>(prefixMe);
- prefixBob.append(eqExpr->getData());
- // We removed this from the AND expression that owned it, so we must clean it
- // up ourselves.
- delete prefixMe;
- }
+ out->push_back(scanState->currentScan.release());
+}
- // Clear out an empty $and.
- if (0 == amExpr->numChildren()) {
- tn->filter.reset();
- }
- else if (1 == amExpr->numChildren()) {
- // Clear out unsightly only child of $and
- MatchExpression* child = amExpr->getChild(0);
- amExpr->getChildVector()->clear();
- // Deletes current filter which is amExpr.
- tn->filter.reset(child);
- }
- }
+// static
+void QueryPlannerAccess::finishLeafNode(QuerySolutionNode* node, const IndexEntry& index) {
+ const StageType type = node->getType();
- tn->indexPrefix = prefixBob.obj();
+ if (STAGE_TEXT == type) {
+ finishTextNode(node, index);
+ return;
}
- // static
- bool QueryPlannerAccess::orNeedsFetch(const ScanBuildingState* scanState) {
- if (scanState->loosestBounds == IndexBoundsBuilder::EXACT) {
- return false;
- }
- else if (scanState->loosestBounds == IndexBoundsBuilder::INEXACT_FETCH) {
- return true;
- }
- else {
- invariant(scanState->loosestBounds == IndexBoundsBuilder::INEXACT_COVERED);
- const IndexEntry& index = scanState->indices[scanState->currentIndexNumber];
- return index.multikey;
- }
+ IndexBounds* bounds = NULL;
+
+ if (STAGE_GEO_NEAR_2D == type) {
+ GeoNear2DNode* gnode = static_cast<GeoNear2DNode*>(node);
+ bounds = &gnode->baseBounds;
+ } else if (STAGE_GEO_NEAR_2DSPHERE == type) {
+ GeoNear2DSphereNode* gnode = static_cast<GeoNear2DSphereNode*>(node);
+ bounds = &gnode->baseBounds;
+ } else {
+ verify(type == STAGE_IXSCAN);
+ IndexScanNode* scan = static_cast<IndexScanNode*>(node);
+ bounds = &scan->bounds;
}
- // static
- void QueryPlannerAccess::finishAndOutputLeaf(ScanBuildingState* scanState,
- vector<QuerySolutionNode*>* out) {
- finishLeafNode(scanState->currentScan.get(),
- scanState->indices[scanState->currentIndexNumber]);
-
- if (MatchExpression::OR == scanState->root->matchType()) {
- if (orNeedsFetch(scanState)) {
- // In order to correctly evaluate the predicates for this index, we have to
- // fetch the full documents. Add a fetch node above the index scan whose filter
- // includes *all* of the predicates used to generate the ixscan.
- FetchNode* fetch = new FetchNode();
- // Takes ownership.
- fetch->filter.reset(scanState->curOr.release());
- // Takes ownership.
- fetch->children.push_back(scanState->currentScan.release());
-
- scanState->currentScan.reset(fetch);
- }
- else if (scanState->loosestBounds == IndexBoundsBuilder::INEXACT_COVERED) {
- // This an OR, at least one of the predicates used to generate 'currentScan'
- // is inexact covered, but none is inexact fetch. This means that we can put
- // these predicates, joined by an $or, as filters on the index scan. This avoids
- // a fetch and allows the predicates to be covered by the index.
- //
- // Ex.
- // Say we have index {a: 1} and query {$or: [{a: /foo/}, {a: /bar/}]}.
- // The entire query, {$or: [{a: /foo/}, {a: /bar/}]}, should be a filter
- // in the index scan stage itself.
- scanState->currentScan->filter.reset(scanState->curOr.release());
- }
+ // Find the first field in the scan's bounds that was not filled out.
+ // TODO: could cache this.
+ size_t firstEmptyField = 0;
+ for (firstEmptyField = 0; firstEmptyField < bounds->fields.size(); ++firstEmptyField) {
+ if ("" == bounds->fields[firstEmptyField].name) {
+ verify(bounds->fields[firstEmptyField].intervals.empty());
+ break;
}
-
- out->push_back(scanState->currentScan.release());
}
- // static
- void QueryPlannerAccess::finishLeafNode(QuerySolutionNode* node, const IndexEntry& index) {
- const StageType type = node->getType();
+ // All fields are filled out with bounds, nothing to do.
+ if (firstEmptyField == bounds->fields.size()) {
+ IndexBoundsBuilder::alignBounds(bounds, index.keyPattern);
+ return;
+ }
- if (STAGE_TEXT == type) {
- finishTextNode(node, index);
- return;
- }
+ // Skip ahead to the firstEmptyField-th element, where we begin filling in bounds.
+ BSONObjIterator it(index.keyPattern);
+ for (size_t i = 0; i < firstEmptyField; ++i) {
+ verify(it.more());
+ it.next();
+ }
- IndexBounds* bounds = NULL;
+ // For each field in the key...
+ while (it.more()) {
+ BSONElement kpElt = it.next();
+ // There may be filled-in fields to the right of the firstEmptyField.
+ // Example:
+ // The index {loc:"2dsphere", x:1}
+ // With a predicate over x and a near search over loc.
+ if ("" == bounds->fields[firstEmptyField].name) {
+ verify(bounds->fields[firstEmptyField].intervals.empty());
+ // ...build the "all values" interval.
+ IndexBoundsBuilder::allValuesForField(kpElt, &bounds->fields[firstEmptyField]);
+ }
+ ++firstEmptyField;
+ }
- if (STAGE_GEO_NEAR_2D == type) {
- GeoNear2DNode* gnode = static_cast<GeoNear2DNode*>(node);
- bounds = &gnode->baseBounds;
- }
- else if (STAGE_GEO_NEAR_2DSPHERE == type) {
- GeoNear2DSphereNode* gnode = static_cast<GeoNear2DSphereNode*>(node);
- bounds = &gnode->baseBounds;
+    // Make sure the number of key pattern fields matches the number of bounds fields we started with.
+ verify(firstEmptyField == bounds->fields.size());
+
+ // We create bounds assuming a forward direction but can easily reverse bounds to align
+ // according to our desired direction.
+ IndexBoundsBuilder::alignBounds(bounds, index.keyPattern);
+}
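// A small example of the bounds completion above, assuming a hypothetical index {a: 1, b: -1}
// and the single predicate {a: 5}: only fields[0] has bounds ([5, 5]), so fields[1] receives
// the "all values" interval for 'b', and alignBounds() then orients each interval to match the
// direction of its key pattern field (descending for 'b').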
+
+// static
+void QueryPlannerAccess::findElemMatchChildren(const MatchExpression* node,
+ vector<MatchExpression*>* out,
+ vector<MatchExpression*>* subnodesOut) {
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ MatchExpression* child = node->getChild(i);
+ if (Indexability::isBoundsGenerating(child) && NULL != child->getTag()) {
+ out->push_back(child);
+ } else if (MatchExpression::AND == child->matchType() ||
+ Indexability::arrayUsesIndexOnChildren(child)) {
+ findElemMatchChildren(child, out, subnodesOut);
+ } else if (NULL != child->getTag()) {
+ subnodesOut->push_back(child);
}
- else {
- verify(type == STAGE_IXSCAN);
- IndexScanNode* scan = static_cast<IndexScanNode*>(node);
- bounds = &scan->bounds;
- }
-
- // Find the first field in the scan's bounds that was not filled out.
- // TODO: could cache this.
- size_t firstEmptyField = 0;
- for (firstEmptyField = 0; firstEmptyField < bounds->fields.size(); ++firstEmptyField) {
- if ("" == bounds->fields[firstEmptyField].name) {
- verify(bounds->fields[firstEmptyField].intervals.empty());
- break;
+ }
+}
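// For example, for {arr: {$elemMatch: {x: 1, y: {$gt: 2}, $or: [...]}}} tagged against a
// hypothetical index {"arr.x": 1, "arr.y": 1}: the bounds-generating predicates on 'x' and
// 'y' end up in 'out' (candidates for compounding into the current scan), while a tagged $or,
// which cannot use the index directly, ends up in 'subnodesOut' and gets its own indexed
// data access subtree.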
+
+// static
+bool QueryPlannerAccess::processIndexScans(const CanonicalQuery& query,
+ MatchExpression* root,
+ bool inArrayOperator,
+ const std::vector<IndexEntry>& indices,
+ const QueryPlannerParams& params,
+ std::vector<QuerySolutionNode*>* out) {
+ // Initialize the ScanBuildingState.
+ ScanBuildingState scanState(root, inArrayOperator, indices);
+
+ while (scanState.curChild < root->numChildren()) {
+ MatchExpression* child = root->getChild(scanState.curChild);
+
+ // If there is no tag, it's not using an index. We've sorted our children such that the
+ // children with tags are first, so we stop now.
+ if (NULL == child->getTag()) {
+ break;
+ }
+
+ scanState.ixtag = static_cast<IndexTag*>(child->getTag());
+ // If there's a tag it must be valid.
+ verify(IndexTag::kNoIndex != scanState.ixtag->index);
+
+ // If the child can't use an index on its own field (and the child is not a negation
+ // of a bounds-generating expression), then it's indexed by virtue of one of
+ // its children having an index.
+ //
+        // NOTE: If the child is logical, it could possibly collapse into a single ixscan. We
+ // ignore this for now.
+ if (!Indexability::isBoundsGenerating(child)) {
+ // If we're here, then the child is indexed by virtue of its children.
+ // In most cases this means that we recursively build indexed data
+ // access on 'child'.
+ if (!processIndexScansSubnode(query, &scanState, params, out)) {
+ return false;
}
+ continue;
}
- // All fields are filled out with bounds, nothing to do.
- if (firstEmptyField == bounds->fields.size()) {
- IndexBoundsBuilder::alignBounds(bounds, index.keyPattern);
- return;
- }
+ // If we're here, we now know that 'child' can use an index directly and the index is
+ // over the child's field.
- // Skip ahead to the firstEmptyField-th element, where we begin filling in bounds.
- BSONObjIterator it(index.keyPattern);
- for (size_t i = 0; i < firstEmptyField; ++i) {
- verify(it.more());
- it.next();
+ // If 'child' is a NOT, then the tag we're interested in is on the NOT's
+ // child node.
+ if (MatchExpression::NOT == child->matchType()) {
+ scanState.ixtag = static_cast<IndexTag*>(child->getChild(0)->getTag());
+ invariant(IndexTag::kNoIndex != scanState.ixtag->index);
}
- // For each field in the key...
- while (it.more()) {
- BSONElement kpElt = it.next();
- // There may be filled-in fields to the right of the firstEmptyField.
- // Example:
- // The index {loc:"2dsphere", x:1}
- // With a predicate over x and a near search over loc.
- if ("" == bounds->fields[firstEmptyField].name) {
- verify(bounds->fields[firstEmptyField].intervals.empty());
- // ...build the "all values" interval.
- IndexBoundsBuilder::allValuesForField(kpElt,
- &bounds->fields[firstEmptyField]);
+ // If the child we're looking at uses a different index than the current index scan, add
+ // the current index scan to the output as we're done with it. The index scan created
+ // by the child then becomes our new current index scan. Note that the current scan
+ // could be NULL, in which case we don't output it. The rest of the logic is identical.
+ //
+ // If the child uses the same index as the current index scan, we may be able to merge
+ // the bounds for the two scans.
+ //
+ // Guiding principle: must the values we're testing come from the same array in the
+ // document? If so, we can combine bounds (via intersection or compounding). If not,
+ // we can't.
+ //
+ // If the index is NOT multikey, it's always semantically correct to combine bounds,
+ // as there are no arrays to worry about.
+ //
+ // If the index is multikey, there are arrays of values. There are several
+ // complications in the multikey case that have to be obeyed both by the enumerator
+ // and here as we try to merge predicates into query solution leaves. The hairy
+ // details of these rules are documented near the top of planner_access.h.
+ if (shouldMergeWithLeaf(child, scanState)) {
+ // The child uses the same index we're currently building a scan for. Merge
+ // the bounds and filters.
+ verify(scanState.currentIndexNumber == scanState.ixtag->index);
+ scanState.tightness = IndexBoundsBuilder::INEXACT_FETCH;
+ mergeWithLeafNode(child, &scanState);
+ handleFilter(&scanState);
+ } else {
+ if (NULL != scanState.currentScan.get()) {
+                // Output the current scan before starting to construct a new one.
+ finishAndOutputLeaf(&scanState, out);
+ } else {
+ verify(IndexTag::kNoIndex == scanState.currentIndexNumber);
}
- ++firstEmptyField;
- }
- // Make sure that the length of the key is the length of the bounds we started.
- verify(firstEmptyField == bounds->fields.size());
+ // Reset state before producing a new leaf.
+ scanState.resetForNextScan(scanState.ixtag);
- // We create bounds assuming a forward direction but can easily reverse bounds to align
- // according to our desired direction.
- IndexBoundsBuilder::alignBounds(bounds, index.keyPattern);
- }
+ scanState.currentScan.reset(makeLeafNode(query,
+ indices[scanState.currentIndexNumber],
+ scanState.ixtag->pos,
+ child,
+ &scanState.tightness));
- // static
- void QueryPlannerAccess::findElemMatchChildren(const MatchExpression* node,
- vector<MatchExpression*>* out,
- vector<MatchExpression*>* subnodesOut) {
- for (size_t i = 0; i < node->numChildren(); ++i) {
- MatchExpression* child = node->getChild(i);
- if (Indexability::isBoundsGenerating(child) &&
- NULL != child->getTag()) {
- out->push_back(child);
- }
- else if (MatchExpression::AND == child->matchType() ||
- Indexability::arrayUsesIndexOnChildren(child)) {
- findElemMatchChildren(child, out, subnodesOut);
- }
- else if (NULL != child->getTag()) {
- subnodesOut->push_back(child);
- }
+ handleFilter(&scanState);
}
}
- // static
- bool QueryPlannerAccess::processIndexScans(const CanonicalQuery& query,
- MatchExpression* root,
- bool inArrayOperator,
- const std::vector<IndexEntry>& indices,
- const QueryPlannerParams& params,
- std::vector<QuerySolutionNode*>* out) {
- // Initialize the ScanBuildingState.
- ScanBuildingState scanState(root, inArrayOperator, indices);
-
- while (scanState.curChild < root->numChildren()) {
- MatchExpression* child = root->getChild(scanState.curChild);
-
- // If there is no tag, it's not using an index. We've sorted our children such that the
- // children with tags are first, so we stop now.
- if (NULL == child->getTag()) { break; }
-
- scanState.ixtag = static_cast<IndexTag*>(child->getTag());
- // If there's a tag it must be valid.
- verify(IndexTag::kNoIndex != scanState.ixtag->index);
-
- // If the child can't use an index on its own field (and the child is not a negation
- // of a bounds-generating expression), then it's indexed by virtue of one of
- // its children having an index.
- //
- // NOTE: If the child is logical, it could possibly collapse into a single ixscan. we
- // ignore this for now.
- if (!Indexability::isBoundsGenerating(child)) {
- // If we're here, then the child is indexed by virtue of its children.
- // In most cases this means that we recursively build indexed data
- // access on 'child'.
- if (!processIndexScansSubnode(query, &scanState, params, out)) {
- return false;
- }
- continue;
- }
-
- // If we're here, we now know that 'child' can use an index directly and the index is
- // over the child's field.
-
- // If 'child' is a NOT, then the tag we're interested in is on the NOT's
- // child node.
- if (MatchExpression::NOT == child->matchType()) {
- scanState.ixtag = static_cast<IndexTag*>(child->getChild(0)->getTag());
- invariant(IndexTag::kNoIndex != scanState.ixtag->index);
- }
-
- // If the child we're looking at uses a different index than the current index scan, add
- // the current index scan to the output as we're done with it. The index scan created
- // by the child then becomes our new current index scan. Note that the current scan
- // could be NULL, in which case we don't output it. The rest of the logic is identical.
- //
- // If the child uses the same index as the current index scan, we may be able to merge
- // the bounds for the two scans.
- //
- // Guiding principle: must the values we're testing come from the same array in the
- // document? If so, we can combine bounds (via intersection or compounding). If not,
- // we can't.
- //
- // If the index is NOT multikey, it's always semantically correct to combine bounds,
- // as there are no arrays to worry about.
- //
- // If the index is multikey, there are arrays of values. There are several
- // complications in the multikey case that have to be obeyed both by the enumerator
- // and here as we try to merge predicates into query solution leaves. The hairy
- // details of these rules are documented near the top of planner_access.h.
- if (shouldMergeWithLeaf(child, scanState)) {
- // The child uses the same index we're currently building a scan for. Merge
- // the bounds and filters.
- verify(scanState.currentIndexNumber == scanState.ixtag->index);
- scanState.tightness = IndexBoundsBuilder::INEXACT_FETCH;
- mergeWithLeafNode(child, &scanState);
- handleFilter(&scanState);
- }
- else {
- if (NULL != scanState.currentScan.get()) {
- // Output the current scan before starting to construct a new out.
- finishAndOutputLeaf(&scanState, out);
- }
- else {
- verify(IndexTag::kNoIndex == scanState.currentIndexNumber);
- }
-
- // Reset state before producing a new leaf.
- scanState.resetForNextScan(scanState.ixtag);
-
- scanState.currentScan.reset(makeLeafNode(query,
- indices[scanState.currentIndexNumber],
- scanState.ixtag->pos, child,
- &scanState.tightness));
+ // Output the scan we're done with, if it exists.
+ if (NULL != scanState.currentScan.get()) {
+ finishAndOutputLeaf(&scanState, out);
+ }
- handleFilter(&scanState);
+ return true;
+}
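// A rough walk-through of the loop above, assuming hypothetical indexes I0 = {a: 1, b: 1} and
// I1 = {c: 1} and the query {a: 1, b: {$gt: 0}, c: 3, d: 4}, with 'a' and 'b' tagged to I0 and
// 'c' tagged to I1:
//   1. 'a' starts a new scan on I0; 'b' is tagged to the same index, so its bounds are merged
//      into that scan (shouldMergeWithLeaf() permitting).
//   2. 'c' uses a different index, so the I0 scan is finished and output, and a new scan on
//      I1 is started.
//   3. 'd' has no tag, so the loop stops; the untagged predicate is left for the caller to
//      attach via a fetch/filter.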
+
+// static
+bool QueryPlannerAccess::processIndexScansElemMatch(const CanonicalQuery& query,
+ ScanBuildingState* scanState,
+ const QueryPlannerParams& params,
+ std::vector<QuerySolutionNode*>* out) {
+ MatchExpression* root = scanState->root;
+ MatchExpression* child = root->getChild(scanState->curChild);
+ const vector<IndexEntry>& indices = scanState->indices;
+
+ // We have an AND with an ELEM_MATCH_OBJECT child. The plan enumerator produces
+ // index taggings which indicate that we should try to compound with
+ // predicates retrieved from inside the subtree rooted at the ELEM_MATCH.
+ // In order to obey the enumerator's tagging, we need to retrieve these
+ // predicates from inside the $elemMatch, and try to merge them with
+ // the current index scan.
+
+ // Contains tagged predicates from inside the tree rooted at 'child'
+ // which are logically part of the AND.
+ vector<MatchExpression*> emChildren;
+
+ // Contains tagged nodes that are not logically part of the AND and
+ // cannot use the index directly (e.g. OR nodes which are tagged to
+ // be indexed).
+ vector<MatchExpression*> emSubnodes;
+
+ // Populate 'emChildren' and 'emSubnodes'.
+ findElemMatchChildren(child, &emChildren, &emSubnodes);
+
+ // Recursively build data access for the nodes inside 'emSubnodes'.
+ for (size_t i = 0; i < emSubnodes.size(); ++i) {
+ MatchExpression* subnode = emSubnodes[i];
+
+ if (!Indexability::isBoundsGenerating(subnode)) {
+ // Must pass true for 'inArrayOperator' because the subnode is
+ // beneath an ELEM_MATCH_OBJECT.
+ QuerySolutionNode* childSolution =
+ buildIndexedDataAccess(query, subnode, true, indices, params);
+
+ // buildIndexedDataAccess(...) returns NULL in error conditions, when
+ // it is unable to construct a query solution from a tagged match
+ // expression tree. If we are unable to construct a solution according
+ // to the instructions from the enumerator, then we bail out early
+ // (by returning false) rather than continuing on and potentially
+ // constructing an invalid solution tree.
+ if (NULL == childSolution) {
+ return false;
}
- }
- // Output the scan we're done with, if it exists.
- if (NULL != scanState.currentScan.get()) {
- finishAndOutputLeaf(&scanState, out);
+ // Output the resulting solution tree.
+ out->push_back(childSolution);
}
-
- return true;
}
- // static
- bool QueryPlannerAccess::processIndexScansElemMatch(const CanonicalQuery& query,
- ScanBuildingState* scanState,
- const QueryPlannerParams& params,
- std::vector<QuerySolutionNode*>* out) {
- MatchExpression* root = scanState->root;
- MatchExpression* child = root->getChild(scanState->curChild);
- const vector<IndexEntry>& indices = scanState->indices;
-
- // We have an AND with an ELEM_MATCH_OBJECT child. The plan enumerator produces
- // index taggings which indicate that we should try to compound with
- // predicates retrieved from inside the subtree rooted at the ELEM_MATCH.
- // In order to obey the enumerator's tagging, we need to retrieve these
- // predicates from inside the $elemMatch, and try to merge them with
- // the current index scan.
-
- // Contains tagged predicates from inside the tree rooted at 'child'
- // which are logically part of the AND.
- vector<MatchExpression*> emChildren;
-
- // Contains tagged nodes that are not logically part of the AND and
- // cannot use the index directly (e.g. OR nodes which are tagged to
- // be indexed).
- vector<MatchExpression*> emSubnodes;
-
- // Populate 'emChildren' and 'emSubnodes'.
- findElemMatchChildren(child, &emChildren, &emSubnodes);
-
- // Recursively build data access for the nodes inside 'emSubnodes'.
- for (size_t i = 0; i < emSubnodes.size(); ++i) {
- MatchExpression* subnode = emSubnodes[i];
-
- if (!Indexability::isBoundsGenerating(subnode)) {
- // Must pass true for 'inArrayOperator' because the subnode is
- // beneath an ELEM_MATCH_OBJECT.
- QuerySolutionNode* childSolution = buildIndexedDataAccess(query,
- subnode,
- true,
- indices,
- params);
-
- // buildIndexedDataAccess(...) returns NULL in error conditions, when
- // it is unable to construct a query solution from a tagged match
- // expression tree. If we are unable to construct a solution according
- // to the instructions from the enumerator, then we bail out early
- // (by returning false) rather than continuing on and potentially
- // constructing an invalid solution tree.
- if (NULL == childSolution) { return false; }
-
- // Output the resulting solution tree.
- out->push_back(childSolution);
- }
- }
-
- // For each predicate in 'emChildren', try to merge it with the current index scan.
- //
- // This loop is similar to that in processIndexScans(...), except it does not call into
- // handleFilters(...). Instead, we leave the entire $elemMatch filter intact. This way,
- // the complete $elemMatch expression will be affixed as a filter later on.
- for (size_t i = 0; i < emChildren.size(); ++i) {
- MatchExpression* emChild = emChildren[i];
- invariant(NULL != emChild->getTag());
- scanState->ixtag = static_cast<IndexTag*>(emChild->getTag());
-
- // If 'emChild' is a NOT, then the tag we're interested in is on the NOT's
- // child node.
- if (MatchExpression::NOT == emChild->matchType()) {
- invariant(NULL != emChild->getChild(0)->getTag());
- scanState->ixtag = static_cast<IndexTag*>(emChild->getChild(0)->getTag());
- invariant(IndexTag::kNoIndex != scanState->ixtag->index);
+ // For each predicate in 'emChildren', try to merge it with the current index scan.
+ //
+ // This loop is similar to that in processIndexScans(...), except it does not call into
+    // handleFilter(...). Instead, we leave the entire $elemMatch filter intact. This way,
+ // the complete $elemMatch expression will be affixed as a filter later on.
+ for (size_t i = 0; i < emChildren.size(); ++i) {
+ MatchExpression* emChild = emChildren[i];
+ invariant(NULL != emChild->getTag());
+ scanState->ixtag = static_cast<IndexTag*>(emChild->getTag());
+
+ // If 'emChild' is a NOT, then the tag we're interested in is on the NOT's
+ // child node.
+ if (MatchExpression::NOT == emChild->matchType()) {
+ invariant(NULL != emChild->getChild(0)->getTag());
+ scanState->ixtag = static_cast<IndexTag*>(emChild->getChild(0)->getTag());
+ invariant(IndexTag::kNoIndex != scanState->ixtag->index);
+ }
+
+ if (shouldMergeWithLeaf(emChild, *scanState)) {
+ // The child uses the same index we're currently building a scan for. Merge
+ // the bounds and filters.
+ verify(scanState->currentIndexNumber == scanState->ixtag->index);
+
+ scanState->tightness = IndexBoundsBuilder::INEXACT_FETCH;
+ mergeWithLeafNode(emChild, scanState);
+ } else {
+ if (NULL != scanState->currentScan.get()) {
+ finishAndOutputLeaf(scanState, out);
+ } else {
+ verify(IndexTag::kNoIndex == scanState->currentIndexNumber);
}
- if (shouldMergeWithLeaf(emChild, *scanState)) {
- // The child uses the same index we're currently building a scan for. Merge
- // the bounds and filters.
- verify(scanState->currentIndexNumber == scanState->ixtag->index);
+ scanState->currentIndexNumber = scanState->ixtag->index;
- scanState->tightness = IndexBoundsBuilder::INEXACT_FETCH;
- mergeWithLeafNode(emChild, scanState);
- }
- else {
- if (NULL != scanState->currentScan.get()) {
- finishAndOutputLeaf(scanState, out);
- }
- else {
- verify(IndexTag::kNoIndex == scanState->currentIndexNumber);
- }
-
- scanState->currentIndexNumber = scanState->ixtag->index;
-
- scanState->tightness = IndexBoundsBuilder::INEXACT_FETCH;
- scanState->currentScan.reset(makeLeafNode(query, indices[scanState->currentIndexNumber],
- scanState->ixtag->pos,
- emChild, &scanState->tightness));
- }
+ scanState->tightness = IndexBoundsBuilder::INEXACT_FETCH;
+ scanState->currentScan.reset(makeLeafNode(query,
+ indices[scanState->currentIndexNumber],
+ scanState->ixtag->pos,
+ emChild,
+ &scanState->tightness));
}
+ }
- // We're done processing the $elemMatch child. We leave it hanging off
- // it's AND parent so that it will be affixed as a filter later on,
- // and move on to the next child of the AND.
+ // We're done processing the $elemMatch child. We leave it hanging off
+    // its AND parent so that it will be affixed as a filter later on,
+ // and move on to the next child of the AND.
+ ++scanState->curChild;
+ return true;
+}
+
+// static
+bool QueryPlannerAccess::processIndexScansSubnode(const CanonicalQuery& query,
+ ScanBuildingState* scanState,
+ const QueryPlannerParams& params,
+ std::vector<QuerySolutionNode*>* out) {
+ MatchExpression* root = scanState->root;
+ MatchExpression* child = root->getChild(scanState->curChild);
+ const vector<IndexEntry>& indices = scanState->indices;
+ bool inArrayOperator = scanState->inArrayOperator;
+
+ if (MatchExpression::AND == root->matchType() &&
+ MatchExpression::ELEM_MATCH_OBJECT == child->matchType()) {
+ return processIndexScansElemMatch(query, scanState, params, out);
+ } else if (!inArrayOperator) {
+ // The logical sub-tree is responsible for fully evaluating itself. Any
+ // required filters or fetches are already hung on it. As such, we remove the
+ // filter branch from our tree. buildIndexedDataAccess takes ownership of the
+ // child.
+ root->getChildVector()->erase(root->getChildVector()->begin() + scanState->curChild);
+        // After the erase, 'curChild' already refers to the next child, so we don't increment it.
+ } else {
++scanState->curChild;
- return true;
}
- // static
- bool QueryPlannerAccess::processIndexScansSubnode(const CanonicalQuery& query,
- ScanBuildingState* scanState,
- const QueryPlannerParams& params,
- std::vector<QuerySolutionNode*>* out) {
- MatchExpression* root = scanState->root;
- MatchExpression* child = root->getChild(scanState->curChild);
- const vector<IndexEntry>& indices = scanState->indices;
- bool inArrayOperator = scanState->inArrayOperator;
-
- if (MatchExpression::AND == root->matchType() &&
- MatchExpression::ELEM_MATCH_OBJECT == child->matchType()) {
- return processIndexScansElemMatch(query, scanState, params, out);
- }
- else if (!inArrayOperator) {
- // The logical sub-tree is responsible for fully evaluating itself. Any
- // required filters or fetches are already hung on it. As such, we remove the
- // filter branch from our tree. buildIndexedDataAccess takes ownership of the
- // child.
- root->getChildVector()->erase(root->getChildVector()->begin() + scanState->curChild);
- // The curChild of today is the curChild+1 of yesterday.
- }
- else {
- ++scanState->curChild;
- }
-
- // If inArrayOperator: takes ownership of child, which is OK, since we detached
- // child from root.
- QuerySolutionNode* childSolution = buildIndexedDataAccess(query,
- child,
- inArrayOperator,
- indices,
- params);
- if (NULL == childSolution) { return false; }
- out->push_back(childSolution);
- return true;
+ // If inArrayOperator: takes ownership of child, which is OK, since we detached
+ // child from root.
+ QuerySolutionNode* childSolution =
+ buildIndexedDataAccess(query, child, inArrayOperator, indices, params);
+ if (NULL == childSolution) {
+ return false;
+ }
+ out->push_back(childSolution);
+ return true;
+}
+
+// static
+QuerySolutionNode* QueryPlannerAccess::buildIndexedAnd(const CanonicalQuery& query,
+ MatchExpression* root,
+ bool inArrayOperator,
+ const vector<IndexEntry>& indices,
+ const QueryPlannerParams& params) {
+ unique_ptr<MatchExpression> autoRoot;
+ if (!inArrayOperator) {
+ autoRoot.reset(root);
}
- // static
- QuerySolutionNode* QueryPlannerAccess::buildIndexedAnd(const CanonicalQuery& query,
- MatchExpression* root,
- bool inArrayOperator,
- const vector<IndexEntry>& indices,
- const QueryPlannerParams& params) {
- unique_ptr<MatchExpression> autoRoot;
- if (!inArrayOperator) {
- autoRoot.reset(root);
- }
-
- // If we are not allowed to trim for ixisect, then clone the match expression before
- // passing it to processIndexScans(), which may do the trimming. If we end up with
- // an index intersection solution, then we use our copy of the match expression to be
- // sure that the FETCH stage will recheck the entire predicate.
- //
- // XXX: This block is a hack to accommodate the storage layer concurrency model.
- std::unique_ptr<MatchExpression> clonedRoot;
- if (params.options & QueryPlannerParams::CANNOT_TRIM_IXISECT) {
- clonedRoot.reset(root->shallowClone());
- }
-
- vector<QuerySolutionNode*> ixscanNodes;
- if (!processIndexScans(query, root, inArrayOperator, indices, params, &ixscanNodes)) {
- return NULL;
- }
-
- //
- // Process all non-indexed predicates. We hang these above the AND with a fetch and
- // filter.
- //
-
- // This is the node we're about to return.
- QuerySolutionNode* andResult;
+ // If we are not allowed to trim for ixisect, then clone the match expression before
+ // passing it to processIndexScans(), which may do the trimming. If we end up with
+ // an index intersection solution, then we use our copy of the match expression to be
+ // sure that the FETCH stage will recheck the entire predicate.
+ //
+ // XXX: This block is a hack to accommodate the storage layer concurrency model.
+ std::unique_ptr<MatchExpression> clonedRoot;
+ if (params.options & QueryPlannerParams::CANNOT_TRIM_IXISECT) {
+ clonedRoot.reset(root->shallowClone());
+ }
- // We must use an index for at least one child of the AND. We shouldn't be here if this
- // isn't the case.
- verify(ixscanNodes.size() >= 1);
+ vector<QuerySolutionNode*> ixscanNodes;
+ if (!processIndexScans(query, root, inArrayOperator, indices, params, &ixscanNodes)) {
+ return NULL;
+ }
- // Short-circuit: an AND of one child is just the child.
- if (ixscanNodes.size() == 1) {
- andResult = ixscanNodes[0];
+ //
+ // Process all non-indexed predicates. We hang these above the AND with a fetch and
+ // filter.
+ //
+
+ // This is the node we're about to return.
+ QuerySolutionNode* andResult;
+
+ // We must use an index for at least one child of the AND. We shouldn't be here if this
+ // isn't the case.
+ verify(ixscanNodes.size() >= 1);
+
+ // Short-circuit: an AND of one child is just the child.
+ if (ixscanNodes.size() == 1) {
+ andResult = ixscanNodes[0];
+ } else {
+ // Figure out if we want AndHashNode or AndSortedNode.
+ bool allSortedByDiskLoc = true;
+ for (size_t i = 0; i < ixscanNodes.size(); ++i) {
+ if (!ixscanNodes[i]->sortedByDiskLoc()) {
+ allSortedByDiskLoc = false;
+ break;
+ }
}
- else {
- // Figure out if we want AndHashNode or AndSortedNode.
- bool allSortedByDiskLoc = true;
- for (size_t i = 0; i < ixscanNodes.size(); ++i) {
- if (!ixscanNodes[i]->sortedByDiskLoc()) {
- allSortedByDiskLoc = false;
+ if (allSortedByDiskLoc) {
+ AndSortedNode* asn = new AndSortedNode();
+ asn->children.swap(ixscanNodes);
+ andResult = asn;
+ } else if (internalQueryPlannerEnableHashIntersection) {
+ AndHashNode* ahn = new AndHashNode();
+ ahn->children.swap(ixscanNodes);
+ andResult = ahn;
+ // The AndHashNode provides the sort order of its last child. If any of the
+ // possible subnodes of AndHashNode provides the sort order we care about, we put
+ // that one last.
+ for (size_t i = 0; i < ahn->children.size(); ++i) {
+ ahn->children[i]->computeProperties();
+ const BSONObjSet& sorts = ahn->children[i]->getSort();
+ if (sorts.end() != sorts.find(query.getParsed().getSort())) {
+ std::swap(ahn->children[i], ahn->children.back());
break;
}
}
- if (allSortedByDiskLoc) {
- AndSortedNode* asn = new AndSortedNode();
- asn->children.swap(ixscanNodes);
- andResult = asn;
- }
- else if (internalQueryPlannerEnableHashIntersection) {
- AndHashNode* ahn = new AndHashNode();
- ahn->children.swap(ixscanNodes);
- andResult = ahn;
- // The AndHashNode provides the sort order of its last child. If any of the
- // possible subnodes of AndHashNode provides the sort order we care about, we put
- // that one last.
- for (size_t i = 0; i < ahn->children.size(); ++i) {
- ahn->children[i]->computeProperties();
- const BSONObjSet& sorts = ahn->children[i]->getSort();
- if (sorts.end() != sorts.find(query.getParsed().getSort())) {
- std::swap(ahn->children[i], ahn->children.back());
- break;
- }
- }
- }
- else {
- // We can't use sort-based intersection, and hash-based intersection is disabled.
- // Clean up the index scans and bail out by returning NULL.
- LOG(5) << "Can't build index intersection solution: "
- << "AND_SORTED is not possible and AND_HASH is disabled.";
-
- for (size_t i = 0; i < ixscanNodes.size(); i++) {
- delete ixscanNodes[i];
- }
- return NULL;
- }
- }
-
- // Don't bother doing any kind of fetch analysis lite if we're doing it anyway above us.
- if (inArrayOperator) {
- return andResult;
- }
-
- // XXX: This block is a hack to accommodate the storage layer concurrency model.
- if ((params.options & QueryPlannerParams::CANNOT_TRIM_IXISECT) &&
- (andResult->getType() == STAGE_AND_HASH || andResult->getType() == STAGE_AND_SORTED)) {
- // We got an index intersection solution, and we aren't allowed to answer predicates
- // using the index. We add a fetch with the entire filter.
- invariant(clonedRoot.get());
- FetchNode* fetch = new FetchNode();
- fetch->filter.reset(clonedRoot.release());
- // Takes ownership of 'andResult'.
- fetch->children.push_back(andResult);
- return fetch;
- }
-
- // If there are any nodes still attached to the AND, we can't answer them using the
- // index, so we put a fetch with filter.
- if (root->numChildren() > 0) {
- FetchNode* fetch = new FetchNode();
- verify(NULL != autoRoot.get());
- if (autoRoot->numChildren() == 1) {
- // An $and of one thing is that thing.
- MatchExpression* child = autoRoot->getChild(0);
- autoRoot->getChildVector()->clear();
- // Takes ownership.
- fetch->filter.reset(child);
- // 'autoRoot' will delete the empty $and.
+ } else {
+ // We can't use sort-based intersection, and hash-based intersection is disabled.
+ // Clean up the index scans and bail out by returning NULL.
+ LOG(5) << "Can't build index intersection solution: "
+ << "AND_SORTED is not possible and AND_HASH is disabled.";
+
+ for (size_t i = 0; i < ixscanNodes.size(); i++) {
+ delete ixscanNodes[i];
}
- else { // root->numChildren() > 1
- // Takes ownership.
- fetch->filter.reset(autoRoot.release());
- }
- // takes ownership
- fetch->children.push_back(andResult);
- andResult = fetch;
- }
- else {
- // root has no children, let autoRoot get rid of it when it goes out of scope.
+ return NULL;
}
+ }
+ // Don't bother doing any kind of fetch analysis lite if we're doing it anyway above us.
+ if (inArrayOperator) {
return andResult;
}
- // static
- QuerySolutionNode* QueryPlannerAccess::buildIndexedOr(const CanonicalQuery& query,
- MatchExpression* root,
- bool inArrayOperator,
- const vector<IndexEntry>& indices,
- const QueryPlannerParams& params) {
- unique_ptr<MatchExpression> autoRoot;
- if (!inArrayOperator) {
- autoRoot.reset(root);
- }
+ // XXX: This block is a hack to accommodate the storage layer concurrency model.
+ if ((params.options & QueryPlannerParams::CANNOT_TRIM_IXISECT) &&
+ (andResult->getType() == STAGE_AND_HASH || andResult->getType() == STAGE_AND_SORTED)) {
+ // We got an index intersection solution, and we aren't allowed to answer predicates
+ // using the index. We add a fetch with the entire filter.
+ invariant(clonedRoot.get());
+ FetchNode* fetch = new FetchNode();
+ fetch->filter.reset(clonedRoot.release());
+ // Takes ownership of 'andResult'.
+ fetch->children.push_back(andResult);
+ return fetch;
+ }
- vector<QuerySolutionNode*> ixscanNodes;
- if (!processIndexScans(query, root, inArrayOperator, indices, params, &ixscanNodes)) {
- return NULL;
- }
+ // If there are any nodes still attached to the AND, we can't answer them using the
+ // index, so we put a fetch with filter.
+ if (root->numChildren() > 0) {
+ FetchNode* fetch = new FetchNode();
+ verify(NULL != autoRoot.get());
+ if (autoRoot->numChildren() == 1) {
+ // An $and of one thing is that thing.
+ MatchExpression* child = autoRoot->getChild(0);
+ autoRoot->getChildVector()->clear();
+ // Takes ownership.
+ fetch->filter.reset(child);
+ // 'autoRoot' will delete the empty $and.
+ } else { // root->numChildren() > 1
+ // Takes ownership.
+ fetch->filter.reset(autoRoot.release());
+ }
+ // takes ownership
+ fetch->children.push_back(andResult);
+ andResult = fetch;
+ } else {
+ // root has no children, let autoRoot get rid of it when it goes out of scope.
+ }
- // Unlike an AND, an OR cannot have filters hanging off of it. We stop processing
- // when any of our children lack index tags. If a node lacks an index tag it cannot
- // be answered via an index.
- if (!inArrayOperator && 0 != root->numChildren()) {
- warning() << "planner OR error, non-indexed child of OR.";
- // We won't enumerate an OR without indices for each child, so this isn't an issue, even
- // if we have an AND with an OR child -- we won't get here unless the OR is fully
- // indexed.
- return NULL;
- }
+ return andResult;
+}
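// Sketch of the intersection choice above, assuming hypothetical indexes {a: 1} and {b: 1}
// with the query {a: 5, b: 7} tagged for index intersection: two index scans are produced;
// if both report sortedByDiskLoc() they are joined under an AndSortedNode, otherwise under an
// AndHashNode (when internalQueryPlannerEnableHashIntersection is set). Any predicates left
// on the AND afterwards are re-checked by a FetchNode placed above the result.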
+
+// static
+QuerySolutionNode* QueryPlannerAccess::buildIndexedOr(const CanonicalQuery& query,
+ MatchExpression* root,
+ bool inArrayOperator,
+ const vector<IndexEntry>& indices,
+ const QueryPlannerParams& params) {
+ unique_ptr<MatchExpression> autoRoot;
+ if (!inArrayOperator) {
+ autoRoot.reset(root);
+ }
- QuerySolutionNode* orResult = NULL;
+ vector<QuerySolutionNode*> ixscanNodes;
+ if (!processIndexScans(query, root, inArrayOperator, indices, params, &ixscanNodes)) {
+ return NULL;
+ }
- // An OR of one node is just that node.
- if (1 == ixscanNodes.size()) {
- orResult = ixscanNodes[0];
- }
- else {
- bool shouldMergeSort = false;
-
- if (!query.getParsed().getSort().isEmpty()) {
- const BSONObj& desiredSort = query.getParsed().getSort();
-
- // If there exists a sort order that is present in each child, we can merge them and
- // maintain that sort order / those sort orders.
- ixscanNodes[0]->computeProperties();
- BSONObjSet sharedSortOrders = ixscanNodes[0]->getSort();
-
- if (!sharedSortOrders.empty()) {
- for (size_t i = 1; i < ixscanNodes.size(); ++i) {
- ixscanNodes[i]->computeProperties();
- BSONObjSet isect;
- set_intersection(sharedSortOrders.begin(),
- sharedSortOrders.end(),
- ixscanNodes[i]->getSort().begin(),
- ixscanNodes[i]->getSort().end(),
- std::inserter(isect, isect.end()),
- BSONObjCmp());
- sharedSortOrders = isect;
- if (sharedSortOrders.empty()) {
- break;
- }
+ // Unlike an AND, an OR cannot have filters hanging off of it. We stop processing
+ // when any of our children lack index tags. If a node lacks an index tag it cannot
+ // be answered via an index.
+ if (!inArrayOperator && 0 != root->numChildren()) {
+ warning() << "planner OR error, non-indexed child of OR.";
+ // We won't enumerate an OR without indices for each child, so this isn't an issue, even
+ // if we have an AND with an OR child -- we won't get here unless the OR is fully
+ // indexed.
+ return NULL;
+ }
+
+ QuerySolutionNode* orResult = NULL;
+
+ // An OR of one node is just that node.
+ if (1 == ixscanNodes.size()) {
+ orResult = ixscanNodes[0];
+ } else {
+ bool shouldMergeSort = false;
+
+ if (!query.getParsed().getSort().isEmpty()) {
+ const BSONObj& desiredSort = query.getParsed().getSort();
+
+ // If there exists a sort order that is present in each child, we can merge them and
+ // maintain that sort order / those sort orders.
+ ixscanNodes[0]->computeProperties();
+ BSONObjSet sharedSortOrders = ixscanNodes[0]->getSort();
+
+ if (!sharedSortOrders.empty()) {
+ for (size_t i = 1; i < ixscanNodes.size(); ++i) {
+ ixscanNodes[i]->computeProperties();
+ BSONObjSet isect;
+ set_intersection(sharedSortOrders.begin(),
+ sharedSortOrders.end(),
+ ixscanNodes[i]->getSort().begin(),
+ ixscanNodes[i]->getSort().end(),
+ std::inserter(isect, isect.end()),
+ BSONObjCmp());
+ sharedSortOrders = isect;
+ if (sharedSortOrders.empty()) {
+ break;
}
}
-
- // TODO: If we're looking for the reverse of one of these sort orders we could
- // possibly reverse the ixscan nodes.
- shouldMergeSort = (sharedSortOrders.end() != sharedSortOrders.find(desiredSort));
}
- if (shouldMergeSort) {
- MergeSortNode* msn = new MergeSortNode();
- msn->sort = query.getParsed().getSort();
- msn->children.swap(ixscanNodes);
- orResult = msn;
- }
- else {
- OrNode* orn = new OrNode();
- orn->children.swap(ixscanNodes);
- orResult = orn;
- }
+ // TODO: If we're looking for the reverse of one of these sort orders we could
+ // possibly reverse the ixscan nodes.
+ shouldMergeSort = (sharedSortOrders.end() != sharedSortOrders.find(desiredSort));
}
- // Evaluate text nodes first to ensure that text scores are available.
- // Move text nodes to front of vector.
- std::stable_partition(orResult->children.begin(), orResult->children.end(), isTextNode);
-
- // OR must have an index for each child, so we should have detached all children from
- // 'root', and there's nothing useful to do with an empty or MatchExpression. We let it die
- // via autoRoot.
-
- return orResult;
+ if (shouldMergeSort) {
+ MergeSortNode* msn = new MergeSortNode();
+ msn->sort = query.getParsed().getSort();
+ msn->children.swap(ixscanNodes);
+ orResult = msn;
+ } else {
+ OrNode* orn = new OrNode();
+ orn->children.swap(ixscanNodes);
+ orResult = orn;
+ }
}
- // static
- QuerySolutionNode* QueryPlannerAccess::buildIndexedDataAccess(const CanonicalQuery& query,
- MatchExpression* root,
- bool inArrayOperator,
- const vector<IndexEntry>& indices,
- const QueryPlannerParams& params) {
- if (root->isLogical() && !Indexability::isBoundsGeneratingNot(root)) {
- if (MatchExpression::AND == root->matchType()) {
- // Takes ownership of root.
- return buildIndexedAnd(query, root, inArrayOperator, indices, params);
- }
- else if (MatchExpression::OR == root->matchType()) {
- // Takes ownership of root.
- return buildIndexedOr(query, root, inArrayOperator, indices, params);
- }
- else {
- // Can't do anything with negated logical nodes index-wise.
- if (!inArrayOperator) {
- delete root;
- }
- return NULL;
- }
- }
- else {
- unique_ptr<MatchExpression> autoRoot;
- if (!inArrayOperator) {
- autoRoot.reset(root);
- }
+ // Evaluate text nodes first to ensure that text scores are available.
+ // Move text nodes to front of vector.
+ std::stable_partition(orResult->children.begin(), orResult->children.end(), isTextNode);
- // isArray or isLeaf is true. Either way, it's over one field, and the bounds builder
- // deals with it.
- if (NULL == root->getTag()) {
- // No index to use here, not in the context of logical operator, so we're SOL.
- return NULL;
- }
- else if (Indexability::isBoundsGenerating(root)) {
- // Make an index scan over the tagged index #.
- IndexTag* tag = static_cast<IndexTag*>(root->getTag());
-
- IndexBoundsBuilder::BoundsTightness tightness = IndexBoundsBuilder::EXACT;
- QuerySolutionNode* soln = makeLeafNode(query, indices[tag->index], tag->pos,
- root, &tightness);
- verify(NULL != soln);
- finishLeafNode(soln, indices[tag->index]);
-
- if (inArrayOperator) {
- return soln;
- }
+ // OR must have an index for each child, so we should have detached all children from
+ // 'root', and there's nothing useful to do with an empty or MatchExpression. We let it die
+ // via autoRoot.
- // If the bounds are exact, the set of documents that satisfy the predicate is
- // exactly equal to the set of documents that the scan provides.
- //
- // If the bounds are not exact, the set of documents returned from the scan is a
- // superset of documents that satisfy the predicate, and we must check the
- // predicate.
+ return orResult;
+}
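// For instance, with a hypothetical index {a: 1}, the query {$or: [{a: {$lt: 5}}, {a: {$gt: 10}}]}
// and a requested sort of {a: 1}: each child index scan already provides the sort on 'a', so
// the children are combined under a MergeSortNode and the sort order is preserved. Without a
// shared sort order (or with no requested sort), a plain OrNode is used instead.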
- if (tightness == IndexBoundsBuilder::EXACT) {
- return soln;
- }
- else if (tightness == IndexBoundsBuilder::INEXACT_COVERED
- && !indices[tag->index].multikey) {
- verify(NULL == soln->filter.get());
- soln->filter.reset(autoRoot.release());
- return soln;
- }
- else {
- FetchNode* fetch = new FetchNode();
- verify(NULL != autoRoot.get());
- fetch->filter.reset(autoRoot.release());
- fetch->children.push_back(soln);
- return fetch;
- }
+// static
+QuerySolutionNode* QueryPlannerAccess::buildIndexedDataAccess(const CanonicalQuery& query,
+ MatchExpression* root,
+ bool inArrayOperator,
+ const vector<IndexEntry>& indices,
+ const QueryPlannerParams& params) {
+ if (root->isLogical() && !Indexability::isBoundsGeneratingNot(root)) {
+ if (MatchExpression::AND == root->matchType()) {
+ // Takes ownership of root.
+ return buildIndexedAnd(query, root, inArrayOperator, indices, params);
+ } else if (MatchExpression::OR == root->matchType()) {
+ // Takes ownership of root.
+ return buildIndexedOr(query, root, inArrayOperator, indices, params);
+ } else {
+ // Can't do anything with negated logical nodes index-wise.
+ if (!inArrayOperator) {
+ delete root;
}
- else if (Indexability::arrayUsesIndexOnChildren(root)) {
- QuerySolutionNode* solution = NULL;
-
- invariant(MatchExpression::ELEM_MATCH_OBJECT);
- // The child is an AND.
- invariant(1 == root->numChildren());
- solution = buildIndexedDataAccess(query, root->getChild(0), true, indices, params);
- if (NULL == solution) {
- return NULL;
- }
+ return NULL;
+ }
+ } else {
+ unique_ptr<MatchExpression> autoRoot;
+ if (!inArrayOperator) {
+ autoRoot.reset(root);
+ }
- // There may be an array operator above us.
- if (inArrayOperator) { return solution; }
+ // isArray or isLeaf is true. Either way, it's over one field, and the bounds builder
+ // deals with it.
+ if (NULL == root->getTag()) {
+            // No index to use here, not in the context of a logical operator, so we're SOL.
+ return NULL;
+ } else if (Indexability::isBoundsGenerating(root)) {
+ // Make an index scan over the tagged index #.
+ IndexTag* tag = static_cast<IndexTag*>(root->getTag());
+
+ IndexBoundsBuilder::BoundsTightness tightness = IndexBoundsBuilder::EXACT;
+ QuerySolutionNode* soln =
+ makeLeafNode(query, indices[tag->index], tag->pos, root, &tightness);
+ verify(NULL != soln);
+ finishLeafNode(soln, indices[tag->index]);
+
+ if (inArrayOperator) {
+ return soln;
+ }
+ // If the bounds are exact, the set of documents that satisfy the predicate is
+ // exactly equal to the set of documents that the scan provides.
+ //
+ // If the bounds are not exact, the set of documents returned from the scan is a
+ // superset of documents that satisfy the predicate, and we must check the
+ // predicate.
+
+ if (tightness == IndexBoundsBuilder::EXACT) {
+ return soln;
+ } else if (tightness == IndexBoundsBuilder::INEXACT_COVERED &&
+ !indices[tag->index].multikey) {
+ verify(NULL == soln->filter.get());
+ soln->filter.reset(autoRoot.release());
+ return soln;
+ } else {
FetchNode* fetch = new FetchNode();
- // Takes ownership of 'root'.
verify(NULL != autoRoot.get());
fetch->filter.reset(autoRoot.release());
- fetch->children.push_back(solution);
+ fetch->children.push_back(soln);
return fetch;
}
- }
+ } else if (Indexability::arrayUsesIndexOnChildren(root)) {
+ QuerySolutionNode* solution = NULL;
+
+        invariant(MatchExpression::ELEM_MATCH_OBJECT == root->matchType());
+ // The child is an AND.
+ invariant(1 == root->numChildren());
+ solution = buildIndexedDataAccess(query, root->getChild(0), true, indices, params);
+ if (NULL == solution) {
+ return NULL;
+ }
- if (!inArrayOperator) {
- delete root;
- }
+ // There may be an array operator above us.
+ if (inArrayOperator) {
+ return solution;
+ }
- return NULL;
+ FetchNode* fetch = new FetchNode();
+ // Takes ownership of 'root'.
+ verify(NULL != autoRoot.get());
+ fetch->filter.reset(autoRoot.release());
+ fetch->children.push_back(solution);
+ return fetch;
+ }
}
- QuerySolutionNode* QueryPlannerAccess::scanWholeIndex(const IndexEntry& index,
- const CanonicalQuery& query,
- const QueryPlannerParams& params,
- int direction) {
- QuerySolutionNode* solnRoot = NULL;
-
- // Build an ixscan over the id index, use it, and return it.
- IndexScanNode* isn = new IndexScanNode();
- isn->indexKeyPattern = index.keyPattern;
- isn->indexIsMultiKey = index.multikey;
- isn->maxScan = query.getParsed().getMaxScan();
- isn->addKeyMetadata = query.getParsed().returnKey();
+ if (!inArrayOperator) {
+ delete root;
+ }
- IndexBoundsBuilder::allValuesBounds(index.keyPattern, &isn->bounds);
+ return NULL;
+}
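// A quick summary of the tightness handling above for a single tagged leaf, assuming a
// hypothetical index {a: 1}:
//   - {a: 5}: EXACT bounds, so the index scan alone answers the predicate.
//   - {a: /foo/} on a non-multikey index: INEXACT_COVERED, so the predicate becomes the
//     scan's filter and no fetch is required.
//   - INEXACT_FETCH bounds, or INEXACT_COVERED on a multikey index: a FetchNode is added
//     above the scan and the full predicate is re-checked against the fetched documents.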
- if (-1 == direction) {
- QueryPlannerCommon::reverseScans(isn);
- isn->direction = -1;
- }
+QuerySolutionNode* QueryPlannerAccess::scanWholeIndex(const IndexEntry& index,
+ const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ int direction) {
+ QuerySolutionNode* solnRoot = NULL;
- MatchExpression* filter = query.root()->shallowClone();
+    // Build an ixscan over the index, use it, and return it.
+ IndexScanNode* isn = new IndexScanNode();
+ isn->indexKeyPattern = index.keyPattern;
+ isn->indexIsMultiKey = index.multikey;
+ isn->maxScan = query.getParsed().getMaxScan();
+ isn->addKeyMetadata = query.getParsed().returnKey();
- // If it's find({}) remove the no-op root.
- if (MatchExpression::AND == filter->matchType() && (0 == filter->numChildren())) {
- delete filter;
- solnRoot = isn;
- }
- else {
- // TODO: We may not need to do the fetch if the predicates in root are covered. But
- // for now it's safe (though *maybe* slower).
- FetchNode* fetch = new FetchNode();
- fetch->filter.reset(filter);
- fetch->children.push_back(isn);
- solnRoot = fetch;
- }
+ IndexBoundsBuilder::allValuesBounds(index.keyPattern, &isn->bounds);
- return solnRoot;
+ if (-1 == direction) {
+ QueryPlannerCommon::reverseScans(isn);
+ isn->direction = -1;
}
- // static
- void QueryPlannerAccess::addFilterToSolutionNode(QuerySolutionNode* node,
- MatchExpression* match,
- MatchExpression::MatchType type) {
- if (NULL == node->filter) {
- node->filter.reset(match);
- }
- else if (type == node->filter->matchType()) {
- // The 'node' already has either an AND or OR filter that matches 'type'. Add 'match' as
- // another branch of the filter.
- ListOfMatchExpression* listFilter =
- static_cast<ListOfMatchExpression*>(node->filter.get());
- listFilter->add(match);
- }
- else {
- // The 'node' already has a filter that does not match 'type'. If 'type' is AND, then
- // combine 'match' with the existing filter by adding an AND. If 'type' is OR, combine
- // by adding an OR node.
- ListOfMatchExpression* listFilter;
- if (MatchExpression::AND == type) {
- listFilter = new AndMatchExpression();
- }
- else {
- verify(MatchExpression::OR == type);
- listFilter = new OrMatchExpression();
- }
- MatchExpression* oldFilter = node->filter->shallowClone();
- listFilter->add(oldFilter);
- listFilter->add(match);
- node->filter.reset(listFilter);
- }
+ MatchExpression* filter = query.root()->shallowClone();
+
+ // If it's find({}) remove the no-op root.
+ if (MatchExpression::AND == filter->matchType() && (0 == filter->numChildren())) {
+ delete filter;
+ solnRoot = isn;
+ } else {
+ // TODO: We may not need to do the fetch if the predicates in root are covered. But
+ // for now it's safe (though *maybe* slower).
+ FetchNode* fetch = new FetchNode();
+ fetch->filter.reset(filter);
+ fetch->children.push_back(isn);
+ solnRoot = fetch;
}
- // static
- void QueryPlannerAccess::handleFilter(ScanBuildingState* scanState) {
- if (MatchExpression::OR == scanState->root->matchType()) {
- handleFilterOr(scanState);
- }
- else if (MatchExpression::AND == scanState->root->matchType()) {
- handleFilterAnd(scanState);
- }
- else {
- // We must be building leaves for either and AND or an OR.
- invariant(0);
- }
+ return solnRoot;
+}
+
+// static
+void QueryPlannerAccess::addFilterToSolutionNode(QuerySolutionNode* node,
+ MatchExpression* match,
+ MatchExpression::MatchType type) {
+ if (NULL == node->filter) {
+ node->filter.reset(match);
+ } else if (type == node->filter->matchType()) {
+ // The 'node' already has either an AND or OR filter that matches 'type'. Add 'match' as
+ // another branch of the filter.
+ ListOfMatchExpression* listFilter = static_cast<ListOfMatchExpression*>(node->filter.get());
+ listFilter->add(match);
+ } else {
+ // The 'node' already has a filter that does not match 'type'. If 'type' is AND, then
+ // combine 'match' with the existing filter by adding an AND. If 'type' is OR, combine
+ // by adding an OR node.
+ ListOfMatchExpression* listFilter;
+ if (MatchExpression::AND == type) {
+ listFilter = new AndMatchExpression();
+ } else {
+ verify(MatchExpression::OR == type);
+ listFilter = new OrMatchExpression();
+ }
+ MatchExpression* oldFilter = node->filter->shallowClone();
+ listFilter->add(oldFilter);
+ listFilter->add(match);
+ node->filter.reset(listFilter);
+ }
+}
+
+// static
+void QueryPlannerAccess::handleFilter(ScanBuildingState* scanState) {
+ if (MatchExpression::OR == scanState->root->matchType()) {
+ handleFilterOr(scanState);
+ } else if (MatchExpression::AND == scanState->root->matchType()) {
+ handleFilterAnd(scanState);
+ } else {
+ // We must be building leaves for either an AND or an OR.
+ invariant(0);
}
+}
- // static
- void QueryPlannerAccess::handleFilterOr(ScanBuildingState* scanState) {
- MatchExpression* root = scanState->root;
- MatchExpression* child = root->getChild(scanState->curChild);
+// static
+void QueryPlannerAccess::handleFilterOr(ScanBuildingState* scanState) {
+ MatchExpression* root = scanState->root;
+ MatchExpression* child = root->getChild(scanState->curChild);
- if (scanState->inArrayOperator) {
- // We're inside an array operator. The entire array operator expression
- // should always be affixed as a filter. We keep 'curChild' in the $and
- // for affixing later.
- ++scanState->curChild;
+ if (scanState->inArrayOperator) {
+ // We're inside an array operator. The entire array operator expression
+ // should always be affixed as a filter. We keep 'curChild' in the $and
+ // for affixing later.
+ ++scanState->curChild;
+ } else {
+ if (scanState->tightness < scanState->loosestBounds) {
+ scanState->loosestBounds = scanState->tightness;
}
- else {
- if (scanState->tightness < scanState->loosestBounds) {
- scanState->loosestBounds = scanState->tightness;
- }
- // Detach 'child' and add it to 'curOr'.
- root->getChildVector()->erase(root->getChildVector()->begin() + scanState->curChild);
- scanState->curOr->getChildVector()->push_back(child);
- }
+ // Detach 'child' and add it to 'curOr'.
+ root->getChildVector()->erase(root->getChildVector()->begin() + scanState->curChild);
+ scanState->curOr->getChildVector()->push_back(child);
}
-
- // static
- void QueryPlannerAccess::handleFilterAnd(ScanBuildingState* scanState) {
- MatchExpression* root = scanState->root;
- MatchExpression* child = root->getChild(scanState->curChild);
- const IndexEntry& index = scanState->indices[scanState->currentIndexNumber];
-
- if (scanState->inArrayOperator) {
- // We're inside an array operator. The entire array operator expression
- // should always be affixed as a filter. We keep 'curChild' in the $and
- // for affixing later.
- ++scanState->curChild;
- }
- else if (scanState->tightness == IndexBoundsBuilder::EXACT) {
- root->getChildVector()->erase(root->getChildVector()->begin() + scanState->curChild);
- delete child;
- }
- else if (scanState->tightness == IndexBoundsBuilder::INEXACT_COVERED
- && (INDEX_TEXT == index.type || !index.multikey)) {
- // The bounds are not exact, but the information needed to
- // evaluate the predicate is in the index key. Remove the
- // MatchExpression from its parent and attach it to the filter
- // of the index scan we're building.
- //
- // We can only use this optimization if the index is NOT multikey.
- // Suppose that we had the multikey index {x: 1} and a document
- // {x: ["a", "b"]}. Now if we query for {x: /b/} the filter might
- // ever only be applied to the index key "a". We'd incorrectly
- // conclude that the document does not match the query :( so we
- // gotta stick to non-multikey indices.
- root->getChildVector()->erase(root->getChildVector()->begin() + scanState->curChild);
-
- addFilterToSolutionNode(scanState->currentScan.get(), child, root->matchType());
- }
- else {
- // We keep curChild in the AND for affixing later.
- ++scanState->curChild;
- }
+}
+
+// static
+void QueryPlannerAccess::handleFilterAnd(ScanBuildingState* scanState) {
+ MatchExpression* root = scanState->root;
+ MatchExpression* child = root->getChild(scanState->curChild);
+ const IndexEntry& index = scanState->indices[scanState->currentIndexNumber];
+
+ if (scanState->inArrayOperator) {
+ // We're inside an array operator. The entire array operator expression
+ // should always be affixed as a filter. We keep 'curChild' in the $and
+ // for affixing later.
+ ++scanState->curChild;
+ } else if (scanState->tightness == IndexBoundsBuilder::EXACT) {
+ root->getChildVector()->erase(root->getChildVector()->begin() + scanState->curChild);
+ delete child;
+ } else if (scanState->tightness == IndexBoundsBuilder::INEXACT_COVERED &&
+ (INDEX_TEXT == index.type || !index.multikey)) {
+ // The bounds are not exact, but the information needed to
+ // evaluate the predicate is in the index key. Remove the
+ // MatchExpression from its parent and attach it to the filter
+ // of the index scan we're building.
+ //
+ // We can only use this optimization if the index is NOT multikey.
+ // Suppose that we had the multikey index {x: 1} and a document
+ // {x: ["a", "b"]}. Now if we query for {x: /b/} the filter might
+ // ever only be applied to the index key "a". We'd incorrectly
+ // conclude that the document does not match the query :( so we
+ // gotta stick to non-multikey indices.
+ root->getChildVector()->erase(root->getChildVector()->begin() + scanState->curChild);
+
+ addFilterToSolutionNode(scanState->currentScan.get(), child, root->matchType());
+ } else {
+ // We keep curChild in the AND for affixing later.
+ ++scanState->curChild;
}
-
- QuerySolutionNode* QueryPlannerAccess::makeIndexScan(const IndexEntry& index,
- const CanonicalQuery& query,
- const QueryPlannerParams& params,
- const BSONObj& startKey,
- const BSONObj& endKey) {
- QuerySolutionNode* solnRoot = NULL;
-
- // Build an ixscan over the id index, use it, and return it.
- IndexScanNode* isn = new IndexScanNode();
- isn->indexKeyPattern = index.keyPattern;
- isn->indexIsMultiKey = index.multikey;
- isn->direction = 1;
- isn->maxScan = query.getParsed().getMaxScan();
- isn->addKeyMetadata = query.getParsed().returnKey();
- isn->bounds.isSimpleRange = true;
- isn->bounds.startKey = startKey;
- isn->bounds.endKey = endKey;
- isn->bounds.endKeyInclusive = false;
-
- MatchExpression* filter = query.root()->shallowClone();
-
- // If it's find({}) remove the no-op root.
- if (MatchExpression::AND == filter->matchType() && (0 == filter->numChildren())) {
- delete filter;
- solnRoot = isn;
- }
- else {
- // TODO: We may not need to do the fetch if the predicates in root are covered. But
- // for now it's safe (though *maybe* slower).
- FetchNode* fetch = new FetchNode();
- fetch->filter.reset(filter);
- fetch->children.push_back(isn);
- solnRoot = fetch;
- }
-
- return solnRoot;
+}
+
+QuerySolutionNode* QueryPlannerAccess::makeIndexScan(const IndexEntry& index,
+ const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ const BSONObj& startKey,
+ const BSONObj& endKey) {
+ QuerySolutionNode* solnRoot = NULL;
+
+ // Build an ixscan over the provided index, use it, and return it.
+ IndexScanNode* isn = new IndexScanNode();
+ isn->indexKeyPattern = index.keyPattern;
+ isn->indexIsMultiKey = index.multikey;
+ isn->direction = 1;
+ isn->maxScan = query.getParsed().getMaxScan();
+ isn->addKeyMetadata = query.getParsed().returnKey();
+ isn->bounds.isSimpleRange = true;
+ isn->bounds.startKey = startKey;
+ isn->bounds.endKey = endKey;
+ isn->bounds.endKeyInclusive = false;
+
+ MatchExpression* filter = query.root()->shallowClone();
+
+ // If it's find({}) remove the no-op root.
+ if (MatchExpression::AND == filter->matchType() && (0 == filter->numChildren())) {
+ delete filter;
+ solnRoot = isn;
+ } else {
+ // TODO: We may not need to do the fetch if the predicates in root are covered. But
+ // for now it's safe (though *maybe* slower).
+ FetchNode* fetch = new FetchNode();
+ fetch->filter.reset(filter);
+ fetch->children.push_back(isn);
+ solnRoot = fetch;
}
+ return solnRoot;
+}
+
} // namespace mongo
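A minimal standalone sketch of the tightness-driven branch in buildIndexedDataAccess above: return the scan as-is for EXACT bounds, attach the predicate to the scan for INEXACT_COVERED on a non-multikey index, and otherwise add a FETCH that re-checks the predicate. The BoundsTightness names mirror IndexBoundsBuilder::BoundsTightness, but the Node struct and attachFilterIfNeeded helper are hypothetical stand-ins for illustration, not the real QuerySolutionNode/FetchNode types.

#include <cassert>
#include <memory>
#include <string>
#include <utility>

// Simplified stand-ins for illustration only; just enough structure to show the decision.
enum class BoundsTightness { INEXACT_FETCH, INEXACT_COVERED, EXACT };

struct Node {
    std::string name;
    std::string filter;              // predicate re-checked at this node, if any
    std::unique_ptr<Node> child;
};

// Mirrors the choice made after makeLeafNode()/finishLeafNode(): EXACT bounds need no
// residual predicate, INEXACT_COVERED on a non-multikey index can be checked against the
// index key alone, and everything else gets a FETCH that re-applies the predicate to the
// fetched document.
std::unique_ptr<Node> attachFilterIfNeeded(std::unique_ptr<Node> scan,
                                           BoundsTightness tightness,
                                           bool multikey,
                                           std::string predicate) {
    if (tightness == BoundsTightness::EXACT) {
        return scan;                                   // scan output == matching docs
    }
    if (tightness == BoundsTightness::INEXACT_COVERED && !multikey) {
        scan->filter = std::move(predicate);           // filter on the index key
        return scan;
    }
    auto fetch = std::make_unique<Node>();             // superset of matches: re-check
    fetch->name = "FETCH";
    fetch->filter = std::move(predicate);
    fetch->child = std::move(scan);
    return fetch;
}

int main() {
    auto ixscan = std::make_unique<Node>();
    ixscan->name = "IXSCAN {x: 1}";
    // A regex predicate over a multikey index cannot be answered from a single
    // index key, so the sketch (like the planner) falls back to FETCH + filter.
    auto plan = attachFilterIfNeeded(std::move(ixscan),
                                     BoundsTightness::INEXACT_COVERED,
                                     /*multikey=*/true,
                                     "{x: /b/}");
    assert(plan->name == "FETCH" && plan->child != nullptr);
    return 0;
}

The same three-way choice recurs in handleFilterAnd above, where an INEXACT_COVERED child is attached to the scan's filter only when the index is a text index or is not multikey.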
diff --git a/src/mongo/db/query/planner_access.h b/src/mongo/db/query/planner_access.h
index 433e2259bb6..55a05ff5161 100644
--- a/src/mongo/db/query/planner_access.h
+++ b/src/mongo/db/query/planner_access.h
@@ -35,376 +35,375 @@
namespace mongo {
- /**
- * MULTIKEY INDEX BOUNDS RULES
- *
- * 1. In general for a multikey index, we cannot intersect bounds
- * even if the index is not compound.
- * Example:
- * Let's say we have the document {a: [5, 7]}.
- * This document satisfies the query {$and: [ {a: 5}, {a: 7} ] }
- * For the index {a:1} we have the keys {"": 5} and {"": 7}.
- * Each child of the AND is tagged with the index {a: 1}
- * The interval for the {a: 5} branch is [5, 5]. It is exact.
- * The interval for the {a: 7} branch is [7, 7]. It is exact.
- * The intersection of the intervals is {}.
- * If we scan over {}, the intersection of the intervals, we will retrieve nothing.
- *
- * 2. In general for a multikey compound index, we *can* compound the bounds.
- * For example, if we have multikey index {a: 1, b: 1} and query {a: 2, b: 3},
- * we can use the bounds {a: [[2, 2]], b: [[3, 3]]}.
- *
- * 3. Despite rule #2, if fields in the compound index share a prefix, then it
- * is not safe to compound the bounds. We can only specify bounds for the first
- * field.
- * Example:
- * Let's say we have the document {a: [ {b: 3}, {c: 4} ] }
- * This document satisfies the query {'a.b': 3, 'a.c': 4}.
- * For the index {'a.b': 1, 'a.c': 1} we have the keys {"": 3, "": null} and
- * {"": null, "": 4}.
- * Let's use the aforementioned index to answer the query.
- * The bounds for 'a.b' are [3,3], and the bounds for 'a.c' are [4,4].
- * If we combine the bounds, we would only look at keys {"": 3, "":4 }.
- * Therefore we wouldn't look at the document's keys in the index.
- * Therefore we don't combine bounds.
- *
- * 4. There is an exception to rule #1, and that is when we're evaluating
- * an $elemMatch.
- * Example:
- * Let's say that we have the same document from (1), {a: [5, 7]}.
- * This document satisfies {a: {$lte: 5, $gte: 7}}, but it does not
- * satisfy {a: {$elemMatch: {$lte: 5, $gte: 7}}}. The $elemMatch indicates
- * that we are allowed to intersect the bounds, which means that we will
- * scan over the empty interval {} and retrieve nothing. This is the
- * expected result because there is no entry in the array "a" that
- * simultaneously satisfies the predicates a<=5 and a>=7.
- *
- * 5. There is also an exception to rule #3, and that is when we're evaluating
- * an $elemMatch. The bounds can be compounded for predicates that share a prefix
- * so long as the shared prefix is the path for which there is an $elemMatch.
- * Example:
- * Suppose we have the same document from (3), {a: [{b: 3}, {c: 4}]}. As discussed
- * above, we cannot compound the index bounds for query {'a.b': 1, 'a.c': 1}.
- * However, for the query {a: {$elemMatch: {b: 1, c: 1}} we can compound the
- * bounds because the $elemMatch is applied to the shared prefix "a".
- */
+/**
+ * MULTIKEY INDEX BOUNDS RULES
+ *
+ * 1. In general for a multikey index, we cannot intersect bounds
+ * even if the index is not compound.
+ * Example:
+ * Let's say we have the document {a: [5, 7]}.
+ * This document satisfies the query {$and: [ {a: 5}, {a: 7} ] }
+ * For the index {a:1} we have the keys {"": 5} and {"": 7}.
+ * Each child of the AND is tagged with the index {a: 1}
+ * The interval for the {a: 5} branch is [5, 5]. It is exact.
+ * The interval for the {a: 7} branch is [7, 7]. It is exact.
+ * The intersection of the intervals is {}.
+ * If we scan over {}, the intersection of the intervals, we will retrieve nothing.
+ *
+ * 2. In general for a multikey compound index, we *can* compound the bounds.
+ * For example, if we have multikey index {a: 1, b: 1} and query {a: 2, b: 3},
+ * we can use the bounds {a: [[2, 2]], b: [[3, 3]]}.
+ *
+ * 3. Despite rule #2, if fields in the compound index share a prefix, then it
+ * is not safe to compound the bounds. We can only specify bounds for the first
+ * field.
+ * Example:
+ * Let's say we have the document {a: [ {b: 3}, {c: 4} ] }
+ * This document satisfies the query {'a.b': 3, 'a.c': 4}.
+ * For the index {'a.b': 1, 'a.c': 1} we have the keys {"": 3, "": null} and
+ * {"": null, "": 4}.
+ * Let's use the aforementioned index to answer the query.
+ * The bounds for 'a.b' are [3,3], and the bounds for 'a.c' are [4,4].
+ * If we combine the bounds, we would only look at keys {"": 3, "":4 }.
+ * Therefore we wouldn't look at the document's keys in the index.
+ * Therefore we don't combine bounds.
+ *
+ * 4. There is an exception to rule #1, and that is when we're evaluating
+ * an $elemMatch.
+ * Example:
+ * Let's say that we have the same document from (1), {a: [5, 7]}.
+ * This document satisfies {a: {$lte: 5, $gte: 7}}, but it does not
+ * satisfy {a: {$elemMatch: {$lte: 5, $gte: 7}}}. The $elemMatch indicates
+ * that we are allowed to intersect the bounds, which means that we will
+ * scan over the empty interval {} and retrieve nothing. This is the
+ * expected result because there is no entry in the array "a" that
+ * simultaneously satisfies the predicates a<=5 and a>=7.
+ *
+ * 5. There is also an exception to rule #3, and that is when we're evaluating
+ * an $elemMatch. The bounds can be compounded for predicates that share a prefix
+ * so long as the shared prefix is the path for which there is an $elemMatch.
+ * Example:
+ * Suppose we have the same document from (3), {a: [{b: 3}, {c: 4}]}. As discussed
+ * above, we cannot compound the index bounds for query {'a.b': 1, 'a.c': 1}.
+ * However, for the query {a: {$elemMatch: {b: 1, c: 1}} we can compound the
+ * bounds because the $elemMatch is applied to the shared prefix "a".
+ */
+/**
+ * Methods for creating a QuerySolutionNode tree that accesses the data required by the query.
+ */
+class QueryPlannerAccess {
+public:
/**
- * Methods for creating a QuerySolutionNode tree that accesses the data required by the query.
+ * Building the leaves (i.e. the index scans) is done by looping through
+ * predicates one at a time. During the process, there is a fair amount of state
+ * information to keep track of, which we consolidate into this data structure.
*/
- class QueryPlannerAccess {
- public:
+ struct ScanBuildingState {
+ ScanBuildingState(MatchExpression* theRoot,
+ bool inArrayOp,
+ const std::vector<IndexEntry>& indexList)
+ : root(theRoot),
+ inArrayOperator(inArrayOp),
+ indices(indexList),
+ currentScan(nullptr),
+ curChild(0),
+ currentIndexNumber(IndexTag::kNoIndex),
+ ixtag(NULL),
+ tightness(IndexBoundsBuilder::INEXACT_FETCH),
+ curOr(nullptr),
+ loosestBounds(IndexBoundsBuilder::EXACT) {}
+
/**
- * Building the leaves (i.e. the index scans) is done by looping through
- * predicates one at a time. During the process, there is a fair amount of state
- * information to keep track of, which we consolidate into this data structure.
+ * Reset the scan building state in preparation for building a new scan.
+ *
+ * This should always be called prior to allocating a new 'currentScan'.
*/
- struct ScanBuildingState {
-
- ScanBuildingState(MatchExpression* theRoot,
- bool inArrayOp,
- const std::vector<IndexEntry>& indexList)
- : root(theRoot),
- inArrayOperator(inArrayOp),
- indices(indexList),
- currentScan(nullptr),
- curChild(0),
- currentIndexNumber(IndexTag::kNoIndex),
- ixtag(NULL),
- tightness(IndexBoundsBuilder::INEXACT_FETCH),
- curOr(nullptr),
- loosestBounds(IndexBoundsBuilder::EXACT) {
+ void resetForNextScan(IndexTag* newTag) {
+ currentScan.reset(NULL);
+ currentIndexNumber = newTag->index;
+ tightness = IndexBoundsBuilder::INEXACT_FETCH;
+ loosestBounds = IndexBoundsBuilder::EXACT;
+
+ if (MatchExpression::OR == root->matchType()) {
+ curOr.reset(new OrMatchExpression());
}
+ }
- /**
- * Reset the scan building state in preparation for building a new scan.
- *
- * This always should be called prior to allocating a new 'currentScan'.
- */
- void resetForNextScan(IndexTag* newTag) {
- currentScan.reset(NULL);
- currentIndexNumber = newTag->index;
- tightness = IndexBoundsBuilder::INEXACT_FETCH;
- loosestBounds = IndexBoundsBuilder::EXACT;
-
- if (MatchExpression::OR == root->matchType()) {
- curOr.reset(new OrMatchExpression());
- }
- }
+ // The root of the MatchExpression tree for which we are currently building index
+ // scans. Should be either an AND node or an OR node.
+ MatchExpression* root;
- // The root of the MatchExpression tree for which we are currently building index
- // scans. Should be either an AND node or an OR node.
- MatchExpression* root;
-
- // Are we inside an array operator such as $elemMatch or $all?
- bool inArrayOperator;
-
- // A list of relevant indices which 'root' may be tagged to use.
- const std::vector<IndexEntry>& indices;
-
- // The index access node that we are currently constructing. We may merge
- // multiple tagged predicates into a single index scan.
- std::unique_ptr<QuerySolutionNode> currentScan;
-
- // An index into the child vector of 'root'. Indicates the child MatchExpression
- // for which we are currently either constructing a new scan or which we are about
- // to merge with 'currentScan'.
- size_t curChild;
-
- // An index into the 'indices', so that 'indices[currentIndexNumber]' gives the
- // index used by 'currentScan'. If there is no currentScan, this should be set
- // to 'IndexTag::kNoIndex'.
- size_t currentIndexNumber;
-
- // The tag on 'curChild'.
- IndexTag* ixtag;
-
- // Whether the bounds for predicate 'curChild' are exact, inexact and covered by
- // the index, or inexact with a fetch required.
- IndexBoundsBuilder::BoundsTightness tightness;
-
- // If 'root' is an $or, the child predicates which are tagged with the same index are
- // detached from the original root and added here. 'curOr' may be attached as a filter
- // later on, or ignored and cleaned up by the unique_ptr.
- std::unique_ptr<MatchExpression> curOr;
-
- // The values of BoundsTightness range from loosest to tightest in this order:
- //
- // INEXACT_FETCH < INEXACT_COVERED < EXACT
- //
- // 'loosestBounds' stores the smallest of these three values encountered so far for
- // the current scan. If at least one of the child predicates assigned to the current
- // index is INEXACT_FETCH, then 'loosestBounds' is INEXACT_FETCH. If at least one of
- // the child predicates assigned to the current index is INEXACT_COVERED but none are
- // INEXACT_FETCH, then 'loosestBounds' is INEXACT_COVERED.
- IndexBoundsBuilder::BoundsTightness loosestBounds;
-
- private:
- // Default constructor is not allowed.
- ScanBuildingState();
- };
+ // Are we inside an array operator such as $elemMatch or $all?
+ bool inArrayOperator;
- /**
- * Return a CollectionScanNode that scans as requested in 'query'.
- */
- static QuerySolutionNode* makeCollectionScan(const CanonicalQuery& query,
- bool tailable,
- const QueryPlannerParams& params);
+ // A list of relevant indices which 'root' may be tagged to use.
+ const std::vector<IndexEntry>& indices;
- /**
- * Return a plan that uses the provided index as a proxy for a collection scan.
- */
- static QuerySolutionNode* scanWholeIndex(const IndexEntry& index,
- const CanonicalQuery& query,
- const QueryPlannerParams& params,
- int direction = 1);
+ // The index access node that we are currently constructing. We may merge
+ // multiple tagged predicates into a single index scan.
+ std::unique_ptr<QuerySolutionNode> currentScan;
- /**
- * Return a plan that scans the provided index from [startKey to endKey).
- */
- static QuerySolutionNode* makeIndexScan(const IndexEntry& index,
- const CanonicalQuery& query,
- const QueryPlannerParams& params,
- const BSONObj& startKey,
- const BSONObj& endKey);
+ // An index into the child vector of 'root'. Indicates the child MatchExpression
+ // for which we are currently either constructing a new scan or which we are about
+ // to merge with 'currentScan'.
+ size_t curChild;
+ // An index into the 'indices', so that 'indices[currentIndexNumber]' gives the
+ // index used by 'currentScan'. If there is no currentScan, this should be set
+ // to 'IndexTag::kNoIndex'.
+ size_t currentIndexNumber;
+
+ // The tag on 'curChild'.
+ IndexTag* ixtag;
+
+ // Whether the bounds for predicate 'curChild' are exact, inexact and covered by
+ // the index, or inexact with a fetch required.
+ IndexBoundsBuilder::BoundsTightness tightness;
+
+ // If 'root' is an $or, the child predicates which are tagged with the same index are
+ // detached from the original root and added here. 'curOr' may be attached as a filter
+ // later on, or ignored and cleaned up by the unique_ptr.
+ std::unique_ptr<MatchExpression> curOr;
+
+ // The values of BoundsTightness range from loosest to tightest in this order:
//
- // Indexed Data Access methods.
- //
- // The inArrayOperator flag deserves some attention. It is set when we're processing a
- // child of an MatchExpression::ELEM_MATCH_OBJECT.
- //
- // When true, the following behavior changes for all methods below that take it as an argument:
- // 0. No deletion of MatchExpression(s). In fact,
- // 1. No mutation of the MatchExpression at all. We need the tree as-is in order to perform
- // a filter on the entire tree.
- // 2. No fetches performed. There will be a final fetch by the caller of buildIndexedDataAccess
- // who set the value of inArrayOperator to true.
- // 3. No compound indices are used and no bounds are combined. These are incorrect in the context
- // of these operators.
+ // INEXACT_FETCH < INEXACT_COVERED < EXACT
//
+ // 'loosestBounds' stores the smallest of these three values encountered so far for
+ // the current scan. If at least one of the child predicates assigned to the current
+ // index is INEXACT_FETCH, then 'loosestBounds' is INEXACT_FETCH. If at least one of
+ // the child predicates assigned to the current index is INEXACT_COVERED but none are
+ // INEXACT_FETCH, then 'loosestBounds' is INEXACT_COVERED.
+ IndexBoundsBuilder::BoundsTightness loosestBounds;
+
+ private:
+ // Default constructor is not allowed.
+ ScanBuildingState();
+ };
- /**
- * If 'inArrayOperator' is false, takes ownership of 'root'.
- */
- static QuerySolutionNode* buildIndexedDataAccess(const CanonicalQuery& query,
- MatchExpression* root,
- bool inArrayOperator,
- const std::vector<IndexEntry>& indices,
- const QueryPlannerParams& params);
+ /**
+ * Return a CollectionScanNode that scans as requested in 'query'.
+ */
+ static QuerySolutionNode* makeCollectionScan(const CanonicalQuery& query,
+ bool tailable,
+ const QueryPlannerParams& params);
- /**
- * Takes ownership of 'root'.
- */
- static QuerySolutionNode* buildIndexedAnd(const CanonicalQuery& query,
- MatchExpression* root,
- bool inArrayOperator,
- const std::vector<IndexEntry>& indices,
- const QueryPlannerParams& params);
+ /**
+ * Return a plan that uses the provided index as a proxy for a collection scan.
+ */
+ static QuerySolutionNode* scanWholeIndex(const IndexEntry& index,
+ const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ int direction = 1);
- /**
- * Takes ownership of 'root'.
- */
- static QuerySolutionNode* buildIndexedOr(const CanonicalQuery& query,
- MatchExpression* root,
- bool inArrayOperator,
- const std::vector<IndexEntry>& indices,
- const QueryPlannerParams& params);
+ /**
+ * Return a plan that scans the provided index from [startKey to endKey).
+ */
+ static QuerySolutionNode* makeIndexScan(const IndexEntry& index,
+ const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ const BSONObj& startKey,
+ const BSONObj& endKey);
+
+ //
+ // Indexed Data Access methods.
+ //
+ // The inArrayOperator flag deserves some attention. It is set when we're processing a
+ // child of a MatchExpression::ELEM_MATCH_OBJECT.
+ //
+ // When true, the following behavior changes for all methods below that take it as an argument:
+ // 0. No deletion of MatchExpression(s). In fact,
+ // 1. No mutation of the MatchExpression at all. We need the tree as-is in order to perform
+ // a filter on the entire tree.
+ // 2. No fetches performed. There will be a final fetch by the caller of buildIndexedDataAccess
+ // who set the value of inArrayOperator to true.
+ // 3. No compound indices are used and no bounds are combined. These are incorrect in the context
+ // of these operators.
+ //
- /**
- * Traverses the tree rooted at the $elemMatch expression 'node',
- * finding all predicates that can use an index directly and returning
- * them in the out-parameter vector 'out'.
- *
- * Traverses only through AND and ELEM_MATCH_OBJECT nodes.
- *
- * Other nodes (i.e. nodes which cannot use an index directly, and which are
- * neither AND nor ELEM_MATCH_OBJECT) are returned in 'subnodesOut' if they are
- * tagged to use an index.
- */
- static void findElemMatchChildren(const MatchExpression* node,
- std::vector<MatchExpression*>* out,
- std::vector<MatchExpression*>* subnodesOut);
+ /**
+ * If 'inArrayOperator' is false, takes ownership of 'root'.
+ */
+ static QuerySolutionNode* buildIndexedDataAccess(const CanonicalQuery& query,
+ MatchExpression* root,
+ bool inArrayOperator,
+ const std::vector<IndexEntry>& indices,
+ const QueryPlannerParams& params);
- /**
- * Helper used by buildIndexedAnd and buildIndexedOr.
- *
- * The children of AND and OR nodes are sorted by the index that the subtree rooted at
- * that node uses. Child nodes that use the same index are adjacent to one another to
- * facilitate grouping of index scans. As such, the processing for AND and OR is
- * almost identical.
- *
- * See tagForSort and sortUsingTags in index_tag.h for details on ordering the children
- * of OR and AND.
- *
- * Does not take ownership of 'root' but may remove children from it.
- */
- static bool processIndexScans(const CanonicalQuery& query,
- MatchExpression* root,
- bool inArrayOperator,
- const std::vector<IndexEntry>& indices,
- const QueryPlannerParams& params,
- std::vector<QuerySolutionNode*>* out);
+ /**
+ * Takes ownership of 'root'.
+ */
+ static QuerySolutionNode* buildIndexedAnd(const CanonicalQuery& query,
+ MatchExpression* root,
+ bool inArrayOperator,
+ const std::vector<IndexEntry>& indices,
+ const QueryPlannerParams& params);
- /**
- * Used by processIndexScans(...) in order to recursively build a data access
- * plan for a "subnode", a node in the MatchExpression tree which is indexed by
- * virtue of its children.
- *
- * The resulting scans are outputted in the out-parameter 'out'.
- */
- static bool processIndexScansSubnode(const CanonicalQuery& query,
- ScanBuildingState* scanState,
- const QueryPlannerParams& params,
- std::vector<QuerySolutionNode*>* out);
+ /**
+ * Takes ownership of 'root'.
+ */
+ static QuerySolutionNode* buildIndexedOr(const CanonicalQuery& query,
+ MatchExpression* root,
+ bool inArrayOperator,
+ const std::vector<IndexEntry>& indices,
+ const QueryPlannerParams& params);
- /**
- * Used by processIndexScansSubnode(...) to build the leaves of the solution tree for an
- * ELEM_MATCH_OBJECT node beneath an AND.
- *
- * The resulting scans are outputted in the out-parameter 'out'.
- */
- static bool processIndexScansElemMatch(const CanonicalQuery& query,
- ScanBuildingState* scanState,
- const QueryPlannerParams& params,
- std::vector<QuerySolutionNode*>* out);
+ /**
+ * Traverses the tree rooted at the $elemMatch expression 'node',
+ * finding all predicates that can use an index directly and returning
+ * them in the out-parameter vector 'out'.
+ *
+ * Traverses only through AND and ELEM_MATCH_OBJECT nodes.
+ *
+ * Other nodes (i.e. nodes which cannot use an index directly, and which are
+ * neither AND nor ELEM_MATCH_OBJECT) are returned in 'subnodesOut' if they are
+ * tagged to use an index.
+ */
+ static void findElemMatchChildren(const MatchExpression* node,
+ std::vector<MatchExpression*>* out,
+ std::vector<MatchExpression*>* subnodesOut);
- //
- // Helpers for creating an index scan.
- //
+ /**
+ * Helper used by buildIndexedAnd and buildIndexedOr.
+ *
+ * The children of AND and OR nodes are sorted by the index that the subtree rooted at
+ * that node uses. Child nodes that use the same index are adjacent to one another to
+ * facilitate grouping of index scans. As such, the processing for AND and OR is
+ * almost identical.
+ *
+ * See tagForSort and sortUsingTags in index_tag.h for details on ordering the children
+ * of OR and AND.
+ *
+ * Does not take ownership of 'root' but may remove children from it.
+ */
+ static bool processIndexScans(const CanonicalQuery& query,
+ MatchExpression* root,
+ bool inArrayOperator,
+ const std::vector<IndexEntry>& indices,
+ const QueryPlannerParams& params,
+ std::vector<QuerySolutionNode*>* out);
- /**
- * Create a new data access node.
- *
- * If the node is an index scan, the bounds for 'expr' are computed and placed into the
- * first field's OIL position. The rest of the OILs are allocated but uninitialized.
- *
- * If the node is a geo node, grab the geo data from 'expr' and stuff it into the
- * geo solution node of the appropriate type.
- */
- static QuerySolutionNode* makeLeafNode(const CanonicalQuery& query,
- const IndexEntry& index,
- size_t pos,
- MatchExpression* expr,
- IndexBoundsBuilder::BoundsTightness* tightnessOut);
+ /**
+ * Used by processIndexScans(...) in order to recursively build a data access
+ * plan for a "subnode", a node in the MatchExpression tree which is indexed by
+ * virtue of its children.
+ *
+ * The resulting scans are outputted in the out-parameter 'out'.
+ */
+ static bool processIndexScansSubnode(const CanonicalQuery& query,
+ ScanBuildingState* scanState,
+ const QueryPlannerParams& params,
+ std::vector<QuerySolutionNode*>* out);
- /**
- * Merge the predicate 'expr' with the leaf node 'node'.
- */
- static void mergeWithLeafNode(MatchExpression* expr, ScanBuildingState* scanState);
+ /**
+ * Used by processIndexScansSubnode(...) to build the leaves of the solution tree for an
+ * ELEM_MATCH_OBJECT node beneath an AND.
+ *
+ * The resulting scans are outputted in the out-parameter 'out'.
+ */
+ static bool processIndexScansElemMatch(const CanonicalQuery& query,
+ ScanBuildingState* scanState,
+ const QueryPlannerParams& params,
+ std::vector<QuerySolutionNode*>* out);
- /**
- * Determines whether it is safe to merge the expression 'expr' with
- * the leaf node of the query solution contained in 'scanState'.
- *
- * Does not take ownership of its arguments.
- */
- static bool shouldMergeWithLeaf(const MatchExpression* expr,
- const ScanBuildingState& scanState);
+ //
+ // Helpers for creating an index scan.
+ //
- /**
- * If index scan (regular or expression index), fill in any bounds that are missing in
- * 'node' with the "all values for this field" interval.
- *
- * If geo, do nothing.
- * If text, punt to finishTextNode.
- */
- static void finishLeafNode(QuerySolutionNode* node, const IndexEntry& index);
+ /**
+ * Create a new data access node.
+ *
+ * If the node is an index scan, the bounds for 'expr' are computed and placed into the
+ * first field's OIL position. The rest of the OILs are allocated but uninitialized.
+ *
+ * If the node is a geo node, grab the geo data from 'expr' and stuff it into the
+ * geo solution node of the appropriate type.
+ */
+ static QuerySolutionNode* makeLeafNode(const CanonicalQuery& query,
+ const IndexEntry& index,
+ size_t pos,
+ MatchExpression* expr,
+ IndexBoundsBuilder::BoundsTightness* tightnessOut);
- /**
- * Fills in any missing bounds by calling finishLeafNode(...) for the scan contained in
- * 'scanState'. The resulting scan is outputted in the out-parameter 'out', transferring
- * ownership in the process.
- *
- * If 'scanState' is building an index scan for OR-related predicates, filters
- * may be affixed to the scan as necessary.
- */
- static void finishAndOutputLeaf(ScanBuildingState* scanState,
- std::vector<QuerySolutionNode*>* out);
+ /**
+ * Merge the predicate 'expr' with the leaf node 'node'.
+ */
+ static void mergeWithLeafNode(MatchExpression* expr, ScanBuildingState* scanState);
- /**
- * Returns true if the current scan in 'scanState' requires a FetchNode.
- */
- static bool orNeedsFetch(const ScanBuildingState* scanState);
+ /**
+ * Determines whether it is safe to merge the expression 'expr' with
+ * the leaf node of the query solution contained in 'scanState'.
+ *
+ * Does not take ownership of its arguments.
+ */
+ static bool shouldMergeWithLeaf(const MatchExpression* expr,
+ const ScanBuildingState& scanState);
- static void finishTextNode(QuerySolutionNode* node, const IndexEntry& index);
+ /**
+ * If index scan (regular or expression index), fill in any bounds that are missing in
+ * 'node' with the "all values for this field" interval.
+ *
+ * If geo, do nothing.
+ * If text, punt to finishTextNode.
+ */
+ static void finishLeafNode(QuerySolutionNode* node, const IndexEntry& index);
- /**
- * Add the filter 'match' to the query solution node 'node'. Takes
- * ownership of 'match'.
- *
- * The MatchType, 'type', indicates whether 'match' is a child of an
- * AND or an OR match expression.
- */
- static void addFilterToSolutionNode(QuerySolutionNode* node, MatchExpression* match,
- MatchExpression::MatchType type);
+ /**
+ * Fills in any missing bounds by calling finishLeafNode(...) for the scan contained in
+ * 'scanState'. The resulting scan is outputted in the out-parameter 'out', transferring
+ * ownership in the process.
+ *
+ * If 'scanState' is building an index scan for OR-related predicates, filters
+ * may be affixed to the scan as necessary.
+ */
+ static void finishAndOutputLeaf(ScanBuildingState* scanState,
+ std::vector<QuerySolutionNode*>* out);
- /**
- * Once a predicate is merged into the current scan, there are a few things we might
- * want to do with the filter:
- * 1) Detach the filter from its parent and delete it because the predicate is
- * answered by exact index bounds.
- * 2) Leave the filter alone so that it can be affixed as part of a fetch node later.
- * 3) Detach the filter from its parent and attach it directly to an index scan node.
- * We can sometimes due this for INEXACT_COVERED predicates which are not answered exactly
- * by the bounds, but can be answered by examing the data in the index key.
- * 4) Detach the filter from its parent and attach it as a child of a separate
- * MatchExpression tree. This is done for proper handling of inexact bounds for $or
- * queries.
- *
- * This executes one of the four options above, according to the data in 'scanState'.
- */
- static void handleFilter(ScanBuildingState* scanState);
+ /**
+ * Returns true if the current scan in 'scanState' requires a FetchNode.
+ */
+ static bool orNeedsFetch(const ScanBuildingState* scanState);
- /**
- * Implements handleFilter(...) for OR queries.
- */
- static void handleFilterAnd(ScanBuildingState* scanState);
+ static void finishTextNode(QuerySolutionNode* node, const IndexEntry& index);
- /**
- * Implements handleFilter(...) for AND queries.
- */
- static void handleFilterOr(ScanBuildingState* scanState);
- };
+ /**
+ * Add the filter 'match' to the query solution node 'node'. Takes
+ * ownership of 'match'.
+ *
+ * The MatchType, 'type', indicates whether 'match' is a child of an
+ * AND or an OR match expression.
+ */
+ static void addFilterToSolutionNode(QuerySolutionNode* node,
+ MatchExpression* match,
+ MatchExpression::MatchType type);
+
+ /**
+ * Once a predicate is merged into the current scan, there are a few things we might
+ * want to do with the filter:
+ * 1) Detach the filter from its parent and delete it because the predicate is
+ * answered by exact index bounds.
+ * 2) Leave the filter alone so that it can be affixed as part of a fetch node later.
+ * 3) Detach the filter from its parent and attach it directly to an index scan node.
+ * We can sometimes do this for INEXACT_COVERED predicates which are not answered exactly
+ * by the bounds, but can be answered by examining the data in the index key.
+ * 4) Detach the filter from its parent and attach it as a child of a separate
+ * MatchExpression tree. This is done for proper handling of inexact bounds for $or
+ * queries.
+ *
+ * This executes one of the four options above, according to the data in 'scanState'.
+ */
+ static void handleFilter(ScanBuildingState* scanState);
+
+ /**
+ * Implements handleFilter(...) for AND queries.
+ */
+ static void handleFilterAnd(ScanBuildingState* scanState);
+
+ /**
+ * Implements handleFilter(...) for AND queries.
+ */
+ static void handleFilterOr(ScanBuildingState* scanState);
+};
} // namespace mongo
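To make rules 1 and 4 from the MULTIKEY INDEX BOUNDS RULES comment above concrete, here is a minimal standalone sketch: intersecting the per-predicate point bounds for {a: 5} and {a: 7} yields an empty interval, which would miss the document {a: [5, 7]} for a plain $and over a multikey index, but is exactly the semantics $elemMatch asks for. The Interval struct below is a stand-in invented for the illustration, not the planner's Interval class.

#include <algorithm>
#include <iostream>

// Hypothetical one-dimensional inclusive interval, invented for this sketch.
struct Interval {
    double lo, hi;
    bool empty() const { return lo > hi; }
};

Interval intersect(const Interval& a, const Interval& b) {
    return {std::max(a.lo, b.lo), std::min(a.hi, b.hi)};
}

int main() {
    // Rule 1: for {$and: [{a: 5}, {a: 7}]} over the multikey index {a: 1},
    // the per-branch bounds are the points [5,5] and [7,7]. Their intersection
    // is empty, so scanning it would miss the matching document {a: [5, 7]}.
    Interval branch5{5, 5}, branch7{7, 7};
    std::cout << std::boolalpha
              << "intersection empty: " << intersect(branch5, branch7).empty() << "\n";

    // Rule 4: under {a: {$elemMatch: {$gte: 7, $lte: 5}}} the same empty
    // intersection is the intended result, because a single array element must
    // satisfy both predicates simultaneously.
    return 0;
}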
diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp
index eee3093b98a..4026d62572a 100644
--- a/src/mongo/db/query/planner_analysis.cpp
+++ b/src/mongo/db/query/planner_analysis.cpp
@@ -39,733 +39,713 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::string;
- using std::vector;
+using std::unique_ptr;
+using std::endl;
+using std::string;
+using std::vector;
- //
- // Helpers for bounds explosion AKA quick-and-dirty SERVER-1205.
- //
+//
+// Helpers for bounds explosion AKA quick-and-dirty SERVER-1205.
+//
- namespace {
+namespace {
- /**
- * Walk the tree 'root' and output all leaf nodes into 'leafNodes'.
- */
- void getLeafNodes(QuerySolutionNode* root, vector<QuerySolutionNode*>* leafNodes) {
- if (0 == root->children.size()) {
- leafNodes->push_back(root);
- }
- else {
- for (size_t i = 0; i < root->children.size(); ++i) {
- getLeafNodes(root->children[i], leafNodes);
- }
- }
+/**
+ * Walk the tree 'root' and output all leaf nodes into 'leafNodes'.
+ */
+void getLeafNodes(QuerySolutionNode* root, vector<QuerySolutionNode*>* leafNodes) {
+ if (0 == root->children.size()) {
+ leafNodes->push_back(root);
+ } else {
+ for (size_t i = 0; i < root->children.size(); ++i) {
+ getLeafNodes(root->children[i], leafNodes);
}
+ }
+}
- /**
- * Returns true if every interval in 'oil' is a point, false otherwise.
- */
- bool isUnionOfPoints(const OrderedIntervalList& oil) {
- // We can't explode if there are empty bounds. Don't consider the
- // oil a union of points if there are no intervals.
- if (0 == oil.intervals.size()) {
- return false;
- }
+/**
+ * Returns true if every interval in 'oil' is a point, false otherwise.
+ */
+bool isUnionOfPoints(const OrderedIntervalList& oil) {
+ // We can't explode if there are empty bounds. Don't consider the
+ // oil a union of points if there are no intervals.
+ if (0 == oil.intervals.size()) {
+ return false;
+ }
- for (size_t i = 0; i < oil.intervals.size(); ++i) {
- if (!oil.intervals[i].isPoint()) {
- return false;
- }
- }
+ for (size_t i = 0; i < oil.intervals.size(); ++i) {
+ if (!oil.intervals[i].isPoint()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * Should we try to expand the index scan(s) in 'solnRoot' to pull out an indexed sort?
+ *
+ * Returns the node which should be replaced by the merge sort of exploded scans
+ * in the out-parameter 'toReplace'.
+ */
+bool structureOKForExplode(QuerySolutionNode* solnRoot, QuerySolutionNode** toReplace) {
+ // For now we only explode if we *know* we will pull the sort out. We can look at
+ // more structure (or just explode and recalculate properties and see what happens)
+ // but for now we just explode if it's a sure bet.
+ //
+ // TODO: Can also try exploding if root is AND_HASH (last child dictates order.),
+ // or other less obvious cases...
+ if (STAGE_IXSCAN == solnRoot->getType()) {
+ *toReplace = solnRoot;
+ return true;
+ }
+ if (STAGE_FETCH == solnRoot->getType()) {
+ if (STAGE_IXSCAN == solnRoot->children[0]->getType()) {
+ *toReplace = solnRoot->children[0];
return true;
}
+ }
- /**
- * Should we try to expand the index scan(s) in 'solnRoot' to pull out an indexed sort?
- *
- * Returns the node which should be replaced by the merge sort of exploded scans
- * in the out-parameter 'toReplace'.
- */
- bool structureOKForExplode(QuerySolutionNode* solnRoot, QuerySolutionNode** toReplace) {
- // For now we only explode if we *know* we will pull the sort out. We can look at
- // more structure (or just explode and recalculate properties and see what happens)
- // but for now we just explode if it's a sure bet.
- //
- // TODO: Can also try exploding if root is AND_HASH (last child dictates order.),
- // or other less obvious cases...
- if (STAGE_IXSCAN == solnRoot->getType()) {
- *toReplace = solnRoot;
- return true;
+ if (STAGE_OR == solnRoot->getType()) {
+ for (size_t i = 0; i < solnRoot->children.size(); ++i) {
+ if (STAGE_IXSCAN != solnRoot->children[i]->getType()) {
+ return false;
}
+ }
+ *toReplace = solnRoot;
+ return true;
+ }
- if (STAGE_FETCH == solnRoot->getType()) {
- if (STAGE_IXSCAN == solnRoot->children[0]->getType()) {
- *toReplace = solnRoot->children[0];
- return true;
- }
- }
+ return false;
+}
- if (STAGE_OR == solnRoot->getType()) {
- for (size_t i = 0; i < solnRoot->children.size(); ++i) {
- if (STAGE_IXSCAN != solnRoot->children[i]->getType()) {
- return false;
- }
- }
- *toReplace = solnRoot;
- return true;
- }
+// vectors of vectors can be > > annoying.
+typedef vector<Interval> PointPrefix;
- return false;
- }
+/**
+ * The first 'fieldsToExplode' fields of 'bounds' are points. Compute the Cartesian product
+ * of those fields and place it in 'prefixOut'.
+ */
+void makeCartesianProduct(const IndexBounds& bounds,
+ size_t fieldsToExplode,
+ vector<PointPrefix>* prefixOut) {
+ vector<PointPrefix> prefixForScans;
+
+ // We dump the Cartesian product of bounds into prefixForScans, starting w/the first
+ // field's points.
+ verify(fieldsToExplode >= 1);
+ const OrderedIntervalList& firstOil = bounds.fields[0];
+ verify(firstOil.intervals.size() >= 1);
+ for (size_t i = 0; i < firstOil.intervals.size(); ++i) {
+ const Interval& ival = firstOil.intervals[i];
+ verify(ival.isPoint());
+ PointPrefix pfix;
+ pfix.push_back(ival);
+ prefixForScans.push_back(pfix);
+ }
- // vectors of vectors can be > > annoying.
- typedef vector<Interval> PointPrefix;
-
- /**
- * The first 'fieldsToExplode' fields of 'bounds' are points. Compute the Cartesian product
- * of those fields and place it in 'prefixOut'.
- */
- void makeCartesianProduct(const IndexBounds& bounds,
- size_t fieldsToExplode,
- vector<PointPrefix>* prefixOut) {
-
- vector<PointPrefix> prefixForScans;
-
- // We dump the Cartesian product of bounds into prefixForScans, starting w/the first
- // field's points.
- verify(fieldsToExplode >= 1);
- const OrderedIntervalList& firstOil = bounds.fields[0];
- verify(firstOil.intervals.size() >= 1);
- for (size_t i = 0; i < firstOil.intervals.size(); ++i) {
- const Interval& ival = firstOil.intervals[i];
- verify(ival.isPoint());
- PointPrefix pfix;
+ // For each subsequent field...
+ for (size_t i = 1; i < fieldsToExplode; ++i) {
+ vector<PointPrefix> newPrefixForScans;
+ const OrderedIntervalList& oil = bounds.fields[i];
+ verify(oil.intervals.size() >= 1);
+ // For each point interval in that field (all ivals must be points)...
+ for (size_t j = 0; j < oil.intervals.size(); ++j) {
+ const Interval& ival = oil.intervals[j];
+ verify(ival.isPoint());
+ // Make a new scan by appending it to all scans in prefixForScans.
+ for (size_t k = 0; k < prefixForScans.size(); ++k) {
+ PointPrefix pfix = prefixForScans[k];
pfix.push_back(ival);
- prefixForScans.push_back(pfix);
+ newPrefixForScans.push_back(pfix);
}
-
- // For each subsequent field...
- for (size_t i = 1; i < fieldsToExplode; ++i) {
- vector<PointPrefix> newPrefixForScans;
- const OrderedIntervalList& oil = bounds.fields[i];
- verify(oil.intervals.size() >= 1);
- // For each point interval in that field (all ivals must be points)...
- for (size_t j = 0; j < oil.intervals.size(); ++j) {
- const Interval& ival = oil.intervals[j];
- verify(ival.isPoint());
- // Make a new scan by appending it to all scans in prefixForScans.
- for (size_t k = 0; k < prefixForScans.size(); ++k) {
- PointPrefix pfix = prefixForScans[k];
- pfix.push_back(ival);
- newPrefixForScans.push_back(pfix);
- }
- }
- // And update prefixForScans.
- newPrefixForScans.swap(prefixForScans);
- }
-
- prefixOut->swap(prefixForScans);
}
+ // And update prefixForScans.
+ newPrefixForScans.swap(prefixForScans);
+ }
- /**
- * Take the provided index scan node 'isn'. Returns a list of index scans which are
- * logically equivalent to 'isn' if joined by a MergeSort through the out-parameter
- * 'explosionResult'. These index scan instances are owned by the caller.
- *
- * fieldsToExplode is a count of how many fields in the scan's bounds are the union of point
- * intervals. This is computed beforehand and provided as a small optimization.
- *
- * Example:
- *
- * For the query find({a: {$in: [1,2]}}).sort({b: 1}) using the index {a:1, b:1}:
- * 'isn' will be scan with bounds a:[[1,1],[2,2]] & b: [MinKey, MaxKey]
- * 'sort' will be {b: 1}
- * 'fieldsToExplode' will be 1 (as only one field isUnionOfPoints).
- *
- * On return, 'explosionResult' will contain the following two scans:
- * a:[[1,1]], b:[MinKey, MaxKey]
- * a:[[2,2]], b:[MinKey, MaxKey]
- */
- void explodeScan(IndexScanNode* isn,
- const BSONObj& sort,
- size_t fieldsToExplode,
- vector<QuerySolutionNode*>* explosionResult) {
-
- // Turn the compact bounds in 'isn' into a bunch of points...
- vector<PointPrefix> prefixForScans;
- makeCartesianProduct(isn->bounds, fieldsToExplode, &prefixForScans);
-
- for (size_t i = 0; i < prefixForScans.size(); ++i) {
- const PointPrefix& prefix = prefixForScans[i];
- verify(prefix.size() == fieldsToExplode);
-
- // Copy boring fields into new child.
- IndexScanNode* child = new IndexScanNode();
- child->indexKeyPattern = isn->indexKeyPattern;
- child->direction = isn->direction;
- child->maxScan = isn->maxScan;
- child->addKeyMetadata = isn->addKeyMetadata;
- child->indexIsMultiKey = isn->indexIsMultiKey;
-
- // Copy the filter, if there is one.
- if (isn->filter.get()) {
- child->filter.reset(isn->filter->shallowClone());
- }
+ prefixOut->swap(prefixForScans);
+}
- // Create child bounds.
- child->bounds.fields.resize(isn->bounds.fields.size());
- for (size_t j = 0; j < fieldsToExplode; ++j) {
- child->bounds.fields[j].intervals.push_back(prefix[j]);
- child->bounds.fields[j].name = isn->bounds.fields[j].name;
- }
- for (size_t j = fieldsToExplode; j < isn->bounds.fields.size(); ++j) {
- child->bounds.fields[j] = isn->bounds.fields[j];
- }
- explosionResult->push_back(child);
- }
+/**
+ * Take the provided index scan node 'isn'. Returns a list of index scans which are
+ * logically equivalent to 'isn' if joined by a MergeSort through the out-parameter
+ * 'explosionResult'. These index scan instances are owned by the caller.
+ *
+ * fieldsToExplode is a count of how many fields in the scan's bounds are the union of point
+ * intervals. This is computed beforehand and provided as a small optimization.
+ *
+ * Example:
+ *
+ * For the query find({a: {$in: [1,2]}}).sort({b: 1}) using the index {a:1, b:1}:
+ * 'isn' will be scan with bounds a:[[1,1],[2,2]] & b: [MinKey, MaxKey]
+ * 'sort' will be {b: 1}
+ * 'fieldsToExplode' will be 1 (as only one field isUnionOfPoints).
+ *
+ * On return, 'explosionResult' will contain the following two scans:
+ * a:[[1,1]], b:[MinKey, MaxKey]
+ * a:[[2,2]], b:[MinKey, MaxKey]
+ */
+void explodeScan(IndexScanNode* isn,
+ const BSONObj& sort,
+ size_t fieldsToExplode,
+ vector<QuerySolutionNode*>* explosionResult) {
+ // Turn the compact bounds in 'isn' into a bunch of points...
+ vector<PointPrefix> prefixForScans;
+ makeCartesianProduct(isn->bounds, fieldsToExplode, &prefixForScans);
+
+ for (size_t i = 0; i < prefixForScans.size(); ++i) {
+ const PointPrefix& prefix = prefixForScans[i];
+ verify(prefix.size() == fieldsToExplode);
+
+ // Copy boring fields into new child.
+ IndexScanNode* child = new IndexScanNode();
+ child->indexKeyPattern = isn->indexKeyPattern;
+ child->direction = isn->direction;
+ child->maxScan = isn->maxScan;
+ child->addKeyMetadata = isn->addKeyMetadata;
+ child->indexIsMultiKey = isn->indexIsMultiKey;
+
+ // Copy the filter, if there is one.
+ if (isn->filter.get()) {
+ child->filter.reset(isn->filter->shallowClone());
}
- /**
- * In the tree '*root', replace 'oldNode' with 'newNode'.
- */
- void replaceNodeInTree(QuerySolutionNode** root,
- QuerySolutionNode* oldNode,
- QuerySolutionNode* newNode) {
- if (*root == oldNode) {
- *root = newNode;
- }
- else {
- for (size_t i = 0 ; i < (*root)->children.size(); ++i) {
- replaceNodeInTree(&(*root)->children[i], oldNode, newNode);
- }
- }
+ // Create child bounds.
+ child->bounds.fields.resize(isn->bounds.fields.size());
+ for (size_t j = 0; j < fieldsToExplode; ++j) {
+ child->bounds.fields[j].intervals.push_back(prefix[j]);
+ child->bounds.fields[j].name = isn->bounds.fields[j].name;
}
-
- bool hasNode(QuerySolutionNode* root, StageType type) {
- if (type == root->getType()) {
- return true;
- }
-
- for (size_t i = 0; i < root->children.size(); ++i) {
- if (hasNode(root->children[i], type)) {
- return true;
- }
- }
-
- return false;
+ for (size_t j = fieldsToExplode; j < isn->bounds.fields.size(); ++j) {
+ child->bounds.fields[j] = isn->bounds.fields[j];
}
+ explosionResult->push_back(child);
+ }
+}
- } // namespace
-
- // static
- BSONObj QueryPlannerAnalysis::getSortPattern(const BSONObj& indexKeyPattern) {
- BSONObjBuilder sortBob;
- BSONObjIterator kpIt(indexKeyPattern);
- while (kpIt.more()) {
- BSONElement elt = kpIt.next();
- if (elt.type() == mongo::String) {
- break;
- }
- long long val = elt.safeNumberLong();
- int sortOrder = val >= 0 ? 1 : -1;
- sortBob.append(elt.fieldName(), sortOrder);
+/**
+ * In the tree '*root', replace 'oldNode' with 'newNode'.
+ */
+void replaceNodeInTree(QuerySolutionNode** root,
+ QuerySolutionNode* oldNode,
+ QuerySolutionNode* newNode) {
+ if (*root == oldNode) {
+ *root = newNode;
+ } else {
+ for (size_t i = 0; i < (*root)->children.size(); ++i) {
+ replaceNodeInTree(&(*root)->children[i], oldNode, newNode);
}
- return sortBob.obj();
}
+}
- // static
- bool QueryPlannerAnalysis::explodeForSort(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- QuerySolutionNode** solnRoot) {
- vector<QuerySolutionNode*> leafNodes;
+bool hasNode(QuerySolutionNode* root, StageType type) {
+ if (type == root->getType()) {
+ return true;
+ }
- QuerySolutionNode* toReplace;
- if (!structureOKForExplode(*solnRoot, &toReplace)) {
- return false;
+ for (size_t i = 0; i < root->children.size(); ++i) {
+ if (hasNode(root->children[i], type)) {
+ return true;
}
+ }
- getLeafNodes(*solnRoot, &leafNodes);
+ return false;
+}
- const BSONObj& desiredSort = query.getParsed().getSort();
+} // namespace
- // How many scan leaves will result from our expansion?
- size_t totalNumScans = 0;
+// static
+BSONObj QueryPlannerAnalysis::getSortPattern(const BSONObj& indexKeyPattern) {
+ BSONObjBuilder sortBob;
+ BSONObjIterator kpIt(indexKeyPattern);
+ while (kpIt.more()) {
+ BSONElement elt = kpIt.next();
+ if (elt.type() == mongo::String) {
+ break;
+ }
+ long long val = elt.safeNumberLong();
+ int sortOrder = val >= 0 ? 1 : -1;
+ sortBob.append(elt.fieldName(), sortOrder);
+ }
+ return sortBob.obj();
+}
+
+// static
+bool QueryPlannerAnalysis::explodeForSort(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ QuerySolutionNode** solnRoot) {
+ vector<QuerySolutionNode*> leafNodes;
+
+ QuerySolutionNode* toReplace;
+ if (!structureOKForExplode(*solnRoot, &toReplace)) {
+ return false;
+ }
- // The value of entry i is how many scans we want to blow up for leafNodes[i].
- // We calculate this in the loop below and might as well reuse it if we blow up
- // that scan.
- vector<size_t> fieldsToExplode;
+ getLeafNodes(*solnRoot, &leafNodes);
- // The sort order we're looking for has to possibly be provided by each of the index scans
- // upon explosion.
- for (size_t i = 0; i < leafNodes.size(); ++i) {
- // We can do this because structureOKForExplode is only true if the leaves are index
- // scans.
- IndexScanNode* isn = static_cast<IndexScanNode*>(leafNodes[i]);
- const IndexBounds& bounds = isn->bounds;
+ const BSONObj& desiredSort = query.getParsed().getSort();
- // Not a point interval prefix, can't try to rewrite.
- if (bounds.isSimpleRange) {
- return false;
- }
+ // How many scan leaves will result from our expansion?
+ size_t totalNumScans = 0;
- // How many scans will we create if we blow up this ixscan?
- size_t numScans = 1;
+ // The value of entry i is how many scans we want to blow up for leafNodes[i].
+ // We calculate this in the loop below and might as well reuse it if we blow up
+ // that scan.
+ vector<size_t> fieldsToExplode;
- // Skip every field that is a union of point intervals and build the resulting sort
- // order from the remaining fields.
- BSONObjIterator kpIt(isn->indexKeyPattern);
- size_t boundsIdx = 0;
- while (kpIt.more()) {
- const OrderedIntervalList& oil = bounds.fields[boundsIdx];
- if (!isUnionOfPoints(oil)) {
- break;
- }
- numScans *= oil.intervals.size();
- kpIt.next();
- ++boundsIdx;
- }
-
- // There's no sort order left to gain by exploding. Just go home. TODO: verify nothing
- // clever we can do here.
- if (!kpIt.more()) {
- return false;
- }
+ // The sort order we're looking for has to possibly be provided by each of the index scans
+ // upon explosion.
+ for (size_t i = 0; i < leafNodes.size(); ++i) {
+ // We can do this because structureOKForExplode is only true if the leaves are index
+ // scans.
+ IndexScanNode* isn = static_cast<IndexScanNode*>(leafNodes[i]);
+ const IndexBounds& bounds = isn->bounds;
- // Only explode if there's at least one field to explode for this scan.
- if (0 == boundsIdx) {
- return false;
- }
+ // Not a point interval prefix, can't try to rewrite.
+ if (bounds.isSimpleRange) {
+ return false;
+ }
- // The rest of the fields define the sort order we could obtain by exploding
- // the bounds.
- BSONObjBuilder resultingSortBob;
- while (kpIt.more()) {
- resultingSortBob.append(kpIt.next());
- }
+ // How many scans will we create if we blow up this ixscan?
+ size_t numScans = 1;
- // See if it's the order we're looking for.
- BSONObj possibleSort = resultingSortBob.obj();
- if (!desiredSort.isPrefixOf(possibleSort)) {
- // We can't get the sort order from the index scan. See if we can
- // get the sort by reversing the scan.
- BSONObj reversePossibleSort = QueryPlannerCommon::reverseSortObj(possibleSort);
- if (!desiredSort.isPrefixOf(reversePossibleSort)) {
- // Can't get the sort order from the reversed index scan either. Give up.
- return false;
- }
- else {
- // We can get the sort order we need if we reverse the scan.
- QueryPlannerCommon::reverseScans(isn);
- }
+ // Skip every field that is a union of point intervals and build the resulting sort
+ // order from the remaining fields.
+ BSONObjIterator kpIt(isn->indexKeyPattern);
+ size_t boundsIdx = 0;
+ while (kpIt.more()) {
+ const OrderedIntervalList& oil = bounds.fields[boundsIdx];
+ if (!isUnionOfPoints(oil)) {
+ break;
}
+ numScans *= oil.intervals.size();
+ kpIt.next();
+ ++boundsIdx;
+ }
- // Do some bookkeeping to see how many ixscans we'll create total.
- totalNumScans += numScans;
-
- // And for this scan how many fields we expand.
- fieldsToExplode.push_back(boundsIdx);
+ // There's no sort order left to gain by exploding. Just go home. TODO: verify nothing
+ // clever we can do here.
+ if (!kpIt.more()) {
+ return false;
}
- // Too many ixscans spoil the performance.
- if (totalNumScans > (size_t)internalQueryMaxScansToExplode) {
- LOG(5) << "Could expand ixscans to pull out sort order but resulting scan count"
- << "(" << totalNumScans << ") is too high.";
+ // Only explode if there's at least one field to explode for this scan.
+ if (0 == boundsIdx) {
return false;
}
- // If we're here, we can (probably? depends on how restrictive the structure check is)
- // get our sort order via ixscan blow-up.
- MergeSortNode* merge = new MergeSortNode();
- merge->sort = desiredSort;
- for (size_t i = 0; i < leafNodes.size(); ++i) {
- IndexScanNode* isn = static_cast<IndexScanNode*>(leafNodes[i]);
- explodeScan(isn, desiredSort, fieldsToExplode[i], &merge->children);
+ // The rest of the fields define the sort order we could obtain by exploding
+ // the bounds.
+ BSONObjBuilder resultingSortBob;
+ while (kpIt.more()) {
+ resultingSortBob.append(kpIt.next());
+ }
+
+ // See if it's the order we're looking for.
+ BSONObj possibleSort = resultingSortBob.obj();
+ if (!desiredSort.isPrefixOf(possibleSort)) {
+ // We can't get the sort order from the index scan. See if we can
+ // get the sort by reversing the scan.
+ BSONObj reversePossibleSort = QueryPlannerCommon::reverseSortObj(possibleSort);
+ if (!desiredSort.isPrefixOf(reversePossibleSort)) {
+ // Can't get the sort order from the reversed index scan either. Give up.
+ return false;
+ } else {
+ // We can get the sort order we need if we reverse the scan.
+ QueryPlannerCommon::reverseScans(isn);
+ }
}
- merge->computeProperties();
+ // Do some bookkeeping to see how many ixscans we'll create total.
+ totalNumScans += numScans;
- // Replace 'toReplace' with the new merge sort node.
- replaceNodeInTree(solnRoot, toReplace, merge);
- // And get rid of the node that got replaced.
- delete toReplace;
+ // And for this scan how many fields we expand.
+ fieldsToExplode.push_back(boundsIdx);
+ }
- return true;
+ // Too many ixscans spoil the performance.
+ if (totalNumScans > (size_t)internalQueryMaxScansToExplode) {
+ LOG(5) << "Could expand ixscans to pull out sort order but resulting scan count"
+ << "(" << totalNumScans << ") is too high.";
+ return false;
}
- // static
- QuerySolutionNode* QueryPlannerAnalysis::analyzeSort(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- QuerySolutionNode* solnRoot,
- bool* blockingSortOut) {
- *blockingSortOut = false;
+ // If we're here, we can (probably? depends on how restrictive the structure check is)
+ // get our sort order via ixscan blow-up.
+ MergeSortNode* merge = new MergeSortNode();
+ merge->sort = desiredSort;
+ for (size_t i = 0; i < leafNodes.size(); ++i) {
+ IndexScanNode* isn = static_cast<IndexScanNode*>(leafNodes[i]);
+ explodeScan(isn, desiredSort, fieldsToExplode[i], &merge->children);
+ }
- const LiteParsedQuery& lpq = query.getParsed();
- const BSONObj& sortObj = lpq.getSort();
+ merge->computeProperties();
- if (sortObj.isEmpty()) {
- return solnRoot;
- }
+ // Replace 'toReplace' with the new merge sort node.
+ replaceNodeInTree(solnRoot, toReplace, merge);
+ // And get rid of the node that got replaced.
+ delete toReplace;
- // TODO: We could check sortObj for any projections other than :1 and :-1
- // and short-cut some of this.
+ return true;
+}
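
Editor's note: the transformation implemented above can be illustrated with plain C++ containers. The following is a minimal, self-contained sketch (it uses none of the planner types, and the values are invented): a single scan over the compound key (a, b) with a in {1, 2} returns rows ordered by (a, b), but splitting it into one point scan per value of 'a' and merge-sorting the pieces by 'b' produces the {b: 1} order the single scan cannot provide.

    #include <algorithm>
    #include <iostream>
    #include <iterator>
    #include <utility>
    #include <vector>

    // One "row" of index data: (a, b). Purely illustrative.
    using Row = std::pair<int, int>;

    int main() {
        // What a single index scan with bounds a: [[1,1],[2,2]] would return:
        // ordered by (a, b), *not* by b alone.
        std::vector<Row> singleScan = {{1, 3}, {1, 7}, {2, 1}, {2, 5}};

        // "Exploding" the scan: one point scan per value of 'a'. Because the
        // bounds on 'a' are a single point, each piece is already sorted by 'b'.
        std::vector<Row> scanA1 = {{1, 3}, {1, 7}};
        std::vector<Row> scanA2 = {{2, 1}, {2, 5}};

        // Merge the pieces by 'b' (the role played by the merge-sort node).
        std::vector<Row> merged;
        std::merge(scanA1.begin(), scanA1.end(),
                   scanA2.begin(), scanA2.end(),
                   std::back_inserter(merged),
                   [](const Row& lhs, const Row& rhs) { return lhs.second < rhs.second; });

        for (const Row& r : merged) {
            std::cout << "a: " << r.first << ", b: " << r.second << "\n";
        }
        // Prints b ascending (1, 3, 5, 7), which the single scan above cannot
        // guarantee without a blocking sort.
        return 0;
    }
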
- // If the sort is $natural, we ignore it, assuming that the caller has detected that and
- // outputted a collscan to satisfy the desired order.
- BSONElement natural = sortObj.getFieldDotted("$natural");
- if (!natural.eoo()) {
- return solnRoot;
- }
+// static
+QuerySolutionNode* QueryPlannerAnalysis::analyzeSort(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ QuerySolutionNode* solnRoot,
+ bool* blockingSortOut) {
+ *blockingSortOut = false;
- // See if solnRoot gives us the sort. If so, we're done.
- BSONObjSet sorts = solnRoot->getSort();
+ const LiteParsedQuery& lpq = query.getParsed();
+ const BSONObj& sortObj = lpq.getSort();
- // If the sort we want is in the set of sort orders provided already, bail out.
- if (sorts.end() != sorts.find(sortObj)) {
- return solnRoot;
- }
+ if (sortObj.isEmpty()) {
+ return solnRoot;
+ }
- // Sort is not provided. See if we provide the reverse of our sort pattern.
- // If so, we can reverse the scan direction(s).
- BSONObj reverseSort = QueryPlannerCommon::reverseSortObj(sortObj);
- if (sorts.end() != sorts.find(reverseSort)) {
- QueryPlannerCommon::reverseScans(solnRoot);
- LOG(5) << "Reversing ixscan to provide sort. Result: "
- << solnRoot->toString() << endl;
- return solnRoot;
- }
+ // TODO: We could check sortObj for any projections other than :1 and :-1
+ // and short-cut some of this.
- // Sort not provided, can't reverse scans to get the sort. One last trick: We can "explode"
- // index scans over point intervals to an OR of sub-scans in order to pull out a sort.
- // Let's try this.
- if (explodeForSort(query, params, &solnRoot)) {
- return solnRoot;
- }
+ // If the sort is $natural, we ignore it, assuming that the caller has detected that and
+ // outputted a collscan to satisfy the desired order.
+ BSONElement natural = sortObj.getFieldDotted("$natural");
+ if (!natural.eoo()) {
+ return solnRoot;
+ }
- // If we're here, we need to add a sort stage.
+ // See if solnRoot gives us the sort. If so, we're done.
+ BSONObjSet sorts = solnRoot->getSort();
- // If we're not allowed to put a blocking sort in, bail out.
- if (params.options & QueryPlannerParams::NO_BLOCKING_SORT) {
- delete solnRoot;
- return NULL;
- }
+ // If the sort we want is in the set of sort orders provided already, bail out.
+ if (sorts.end() != sorts.find(sortObj)) {
+ return solnRoot;
+ }
- // Add a fetch stage so we have the full object when we hit the sort stage. TODO: Can we
- // pull the values that we sort by out of the key and if so in what cases? Perhaps we can
- // avoid a fetch.
- if (!solnRoot->fetched()) {
- FetchNode* fetch = new FetchNode();
- fetch->children.push_back(solnRoot);
- solnRoot = fetch;
- }
+ // Sort is not provided. See if we provide the reverse of our sort pattern.
+ // If so, we can reverse the scan direction(s).
+ BSONObj reverseSort = QueryPlannerCommon::reverseSortObj(sortObj);
+ if (sorts.end() != sorts.find(reverseSort)) {
+ QueryPlannerCommon::reverseScans(solnRoot);
+ LOG(5) << "Reversing ixscan to provide sort. Result: " << solnRoot->toString() << endl;
+ return solnRoot;
+ }
- // And build the full sort stage.
- SortNode* sort = new SortNode();
- sort->pattern = sortObj;
- sort->query = lpq.getFilter();
- sort->children.push_back(solnRoot);
- solnRoot = sort;
- // When setting the limit on the sort, we need to consider both
- // the limit N and skip count M. The sort should return an ordered list
- // N + M items so that the skip stage can discard the first M results.
- if (lpq.getLimit()) {
- // We have a true limit. The limit can be combined with the SORT stage.
- sort->limit = static_cast<size_t>(*lpq.getLimit()) + static_cast<size_t>(lpq.getSkip());
- }
- else if (!lpq.isFromFindCommand() && lpq.getBatchSize()) {
- // We have an ntoreturn specified by an OP_QUERY style find. This is used
- // by clients to mean both batchSize and limit.
- //
- // Overflow here would be bad and could cause a nonsense limit. Cast
- // skip and limit values to unsigned ints to make sure that the
- // sum is never stored as signed. (See SERVER-13537).
- sort->limit = static_cast<size_t>(*lpq.getBatchSize()) +
- static_cast<size_t>(lpq.getSkip());
-
- // This is a SORT with a limit. The wire protocol has a single quantity
- // called "numToReturn" which could mean either limit or batchSize.
- // We have no idea what the client intended. One way to handle the ambiguity
- // of a limited OR stage is to use the SPLIT_LIMITED_SORT hack.
- //
- // If wantMore is false (meaning that 'ntoreturn' was initially passed to
- // the server as a negative value), then we treat numToReturn as a limit.
- // Since there is no limit-batchSize ambiguity in this case, we do not use the
- // SPLIT_LIMITED_SORT hack.
- //
- // If numToReturn is really a limit, then we want to add a limit to this
- // SORT stage, and hence perform a topK.
- //
- // If numToReturn is really a batchSize, then we want to perform a regular
- // blocking sort.
- //
- // Since we don't know which to use, just join the two options with an OR,
- // with the topK first. If the client wants a limit, they'll get the efficiency
- // of topK. If they want a batchSize, the other OR branch will deliver the missing
- // results. The OR stage handles deduping.
- if (lpq.wantMore()
- && params.options & QueryPlannerParams::SPLIT_LIMITED_SORT
- && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)
- && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO)
- && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)) {
- // If we're here then the SPLIT_LIMITED_SORT hack is turned on,
- // and the query is of a type that allows the hack.
- //
- // Not allowed for geo or text, because we assume elsewhere that those
- // stages appear just once.
- OrNode* orn = new OrNode();
- orn->children.push_back(sort);
- SortNode* sortClone = static_cast<SortNode*>(sort->clone());
- sortClone->limit = 0;
- orn->children.push_back(sortClone);
- solnRoot = orn;
- }
- }
- else {
- sort->limit = 0;
- }
+ // Sort not provided, can't reverse scans to get the sort. One last trick: We can "explode"
+ // index scans over point intervals to an OR of sub-scans in order to pull out a sort.
+ // Let's try this.
+ if (explodeForSort(query, params, &solnRoot)) {
+ return solnRoot;
+ }
- *blockingSortOut = true;
+ // If we're here, we need to add a sort stage.
- return solnRoot;
+ // If we're not allowed to put a blocking sort in, bail out.
+ if (params.options & QueryPlannerParams::NO_BLOCKING_SORT) {
+ delete solnRoot;
+ return NULL;
}
- // static
- QuerySolution* QueryPlannerAnalysis::analyzeDataAccess(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- QuerySolutionNode* solnRoot) {
- unique_ptr<QuerySolution> soln(new QuerySolution());
- soln->filterData = query.getQueryObj();
- soln->indexFilterApplied = params.indexFiltersApplied;
+ // Add a fetch stage so we have the full object when we hit the sort stage. TODO: Can we
+ // pull the values that we sort by out of the key and if so in what cases? Perhaps we can
+ // avoid a fetch.
+ if (!solnRoot->fetched()) {
+ FetchNode* fetch = new FetchNode();
+ fetch->children.push_back(solnRoot);
+ solnRoot = fetch;
+ }
- solnRoot->computeProperties();
+ // And build the full sort stage.
+ SortNode* sort = new SortNode();
+ sort->pattern = sortObj;
+ sort->query = lpq.getFilter();
+ sort->children.push_back(solnRoot);
+ solnRoot = sort;
+ // When setting the limit on the sort, we need to consider both
+ // the limit N and skip count M. The sort should return an ordered list
+ // N + M items so that the skip stage can discard the first M results.
+ if (lpq.getLimit()) {
+ // We have a true limit. The limit can be combined with the SORT stage.
+ sort->limit = static_cast<size_t>(*lpq.getLimit()) + static_cast<size_t>(lpq.getSkip());
+ } else if (!lpq.isFromFindCommand() && lpq.getBatchSize()) {
+ // We have an ntoreturn specified by an OP_QUERY style find. This is used
+ // by clients to mean both batchSize and limit.
+ //
+ // Overflow here would be bad and could cause a nonsense limit. Cast
+ // skip and limit values to unsigned ints to make sure that the
+ // sum is never stored as signed. (See SERVER-13537).
+ sort->limit = static_cast<size_t>(*lpq.getBatchSize()) + static_cast<size_t>(lpq.getSkip());
+
+ // This is a SORT with a limit. The wire protocol has a single quantity
+ // called "numToReturn" which could mean either limit or batchSize.
+ // We have no idea what the client intended. One way to handle the ambiguity
+ // of a limited OR stage is to use the SPLIT_LIMITED_SORT hack.
+ //
+ // If wantMore is false (meaning that 'ntoreturn' was initially passed to
+ // the server as a negative value), then we treat numToReturn as a limit.
+ // Since there is no limit-batchSize ambiguity in this case, we do not use the
+ // SPLIT_LIMITED_SORT hack.
+ //
+ // If numToReturn is really a limit, then we want to add a limit to this
+ // SORT stage, and hence perform a topK.
+ //
+ // If numToReturn is really a batchSize, then we want to perform a regular
+ // blocking sort.
+ //
+ // Since we don't know which to use, just join the two options with an OR,
+ // with the topK first. If the client wants a limit, they'll get the efficiency
+ // of topK. If they want a batchSize, the other OR branch will deliver the missing
+ // results. The OR stage handles deduping.
+ if (lpq.wantMore() && params.options & QueryPlannerParams::SPLIT_LIMITED_SORT &&
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT) &&
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO) &&
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)) {
+ // If we're here then the SPLIT_LIMITED_SORT hack is turned on,
+ // and the query is of a type that allows the hack.
+ //
+ // Not allowed for geo or text, because we assume elsewhere that those
+ // stages appear just once.
+ OrNode* orn = new OrNode();
+ orn->children.push_back(sort);
+ SortNode* sortClone = static_cast<SortNode*>(sort->clone());
+ sortClone->limit = 0;
+ orn->children.push_back(sortClone);
+ solnRoot = orn;
+ }
+ } else {
+ sort->limit = 0;
+ }
- // solnRoot finds all our results. Let's see what transformations we must perform to the
- // data.
+ *blockingSortOut = true;
- // If we're answering a query on a sharded system, we need to drop documents that aren't
- // logically part of our shard.
- if (params.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
+ return solnRoot;
+}
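
Editor's note: the limit bookkeeping above (sort limit = limit + skip, with 0 meaning "no limit") and the topK idea mentioned in the numToReturn discussion can be shown with standard-library tools. This sketch is illustrative only and assumes nothing about the server; std::partial_sort stands in for a limited (topK) sort, std::sort for a fully blocking one.

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Mirror of the rule above: the sort must produce limit + skip ordered
    // results so a later skip stage can discard the first 'skip' of them.
    // A limit of 0 is treated as "sort everything".
    std::size_t sortLimit(std::size_t limit, std::size_t skip) {
        return limit == 0 ? 0 : limit + skip;
    }

    int main() {
        std::vector<int> values = {9, 4, 7, 1, 8, 3, 6};

        const std::size_t limit = 2;                   // client asked for 2 results...
        const std::size_t skip = 1;                    // ...after skipping 1
        const std::size_t k = sortLimit(limit, skip);  // 3

        // topK: only the first k positions end up ordered; cheaper than a full sort.
        std::vector<int> topK = values;
        std::partial_sort(topK.begin(), topK.begin() + k, topK.end());

        // Blocking sort with no limit: everything is ordered.
        std::vector<int> full = values;
        std::sort(full.begin(), full.end());

        std::cout << "first " << k << " of topK: ";
        for (std::size_t i = 0; i < k; ++i) {
            std::cout << topK[i] << ' ';
        }
        std::cout << "\n(skip " << skip << ", return " << limit << " -> 3 and 4)\n";
        return 0;
    }
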
- if (!solnRoot->fetched()) {
+// static
+QuerySolution* QueryPlannerAnalysis::analyzeDataAccess(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ QuerySolutionNode* solnRoot) {
+ unique_ptr<QuerySolution> soln(new QuerySolution());
+ soln->filterData = query.getQueryObj();
+ soln->indexFilterApplied = params.indexFiltersApplied;
- // See if we need to fetch information for our shard key.
- // NOTE: Solution nodes only list ordinary, non-transformed index keys for now
+ solnRoot->computeProperties();
- bool fetch = false;
- BSONObjIterator it(params.shardKey);
- while (it.more()) {
- BSONElement nextEl = it.next();
- if (!solnRoot->hasField(nextEl.fieldName())) {
- fetch = true;
- break;
- }
- }
+ // solnRoot finds all our results. Let's see what transformations we must perform to the
+ // data.
- if (fetch) {
- FetchNode* fetch = new FetchNode();
- fetch->children.push_back(solnRoot);
- solnRoot = fetch;
+ // If we're answering a query on a sharded system, we need to drop documents that aren't
+ // logically part of our shard.
+ if (params.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
+ if (!solnRoot->fetched()) {
+ // See if we need to fetch information for our shard key.
+ // NOTE: Solution nodes only list ordinary, non-transformed index keys for now
+
+ bool fetch = false;
+ BSONObjIterator it(params.shardKey);
+ while (it.more()) {
+ BSONElement nextEl = it.next();
+ if (!solnRoot->hasField(nextEl.fieldName())) {
+ fetch = true;
+ break;
}
}
- ShardingFilterNode* sfn = new ShardingFilterNode();
- sfn->children.push_back(solnRoot);
- solnRoot = sfn;
+ if (fetch) {
+ FetchNode* fetch = new FetchNode();
+ fetch->children.push_back(solnRoot);
+ solnRoot = fetch;
+ }
}
- bool hasSortStage = false;
- solnRoot = analyzeSort(query, params, solnRoot, &hasSortStage);
+ ShardingFilterNode* sfn = new ShardingFilterNode();
+ sfn->children.push_back(solnRoot);
+ solnRoot = sfn;
+ }
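
Editor's note: the shard-filter block above reduces to a set-membership question: does the index data flowing up the plan contain every shard-key field, or must a fetch be added before the sharding filter? A standalone sketch of that check follows; the field names are invented for illustration.

    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    // Returns true if every shard-key field is available from the plan's
    // index data, i.e. no FETCH is needed before the sharding filter.
    bool shardKeyIsCovered(const std::vector<std::string>& shardKeyFields,
                           const std::set<std::string>& fieldsProvidedByPlan) {
        for (const std::string& field : shardKeyFields) {
            if (fieldsProvidedByPlan.count(field) == 0) {
                return false;  // same role as the 'fetch = true; break;' above
            }
        }
        return true;
    }

    int main() {
        const std::vector<std::string> shardKey = {"region", "userId"};
        const std::set<std::string> coveredByIndex = {"region", "userId", "ts"};
        const std::set<std::string> notCovered = {"region"};

        std::cout << shardKeyIsCovered(shardKey, coveredByIndex) << "\n";  // 1: no fetch needed
        std::cout << shardKeyIsCovered(shardKey, notCovered) << "\n";      // 0: add a FETCH first
        return 0;
    }
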
- // This can happen if we need to create a blocking sort stage and we're not allowed to.
- if (NULL == solnRoot) { return NULL; }
+ bool hasSortStage = false;
+ solnRoot = analyzeSort(query, params, solnRoot, &hasSortStage);
- // A solution can be blocking if it has a blocking sort stage or
- // a hashed AND stage.
- bool hasAndHashStage = hasNode(solnRoot, STAGE_AND_HASH);
- soln->hasBlockingStage = hasSortStage || hasAndHashStage;
+ // This can happen if we need to create a blocking sort stage and we're not allowed to.
+ if (NULL == solnRoot) {
+ return NULL;
+ }
- const LiteParsedQuery& lpq = query.getParsed();
+ // A solution can be blocking if it has a blocking sort stage or
+ // a hashed AND stage.
+ bool hasAndHashStage = hasNode(solnRoot, STAGE_AND_HASH);
+ soln->hasBlockingStage = hasSortStage || hasAndHashStage;
- // If we can (and should), add the keep mutations stage.
+ const LiteParsedQuery& lpq = query.getParsed();
- // We cannot keep mutated documents if:
- //
- // 1. The query requires an index to evaluate the predicate ($text). We can't tell whether
- // or not the doc actually satisfies the $text predicate since we can't evaluate a
- // text MatchExpression.
- //
- // 2. The query implies a sort ($geoNear). It would be rather expensive and hacky to merge
- // the document at the right place.
- //
- // 3. There is an index-provided sort. Ditto above comment about merging.
- //
- // TODO: do we want some kind of pre-planning step where we look for certain nodes and cache
- // them? We do lookups in the tree a few times. This may not matter as most trees are
- // shallow in terms of query nodes.
- bool cannotKeepFlagged = hasNode(solnRoot, STAGE_TEXT)
- || hasNode(solnRoot, STAGE_GEO_NEAR_2D)
- || hasNode(solnRoot, STAGE_GEO_NEAR_2DSPHERE)
- || (!lpq.getSort().isEmpty() && !hasSortStage);
-
- // Only these stages can produce flagged results. A stage has to hold state past one call
- // to work(...) in order to possibly flag a result.
- bool couldProduceFlagged = hasAndHashStage
- || hasNode(solnRoot, STAGE_AND_SORTED)
- || hasNode(solnRoot, STAGE_FETCH);
-
- bool shouldAddMutation = !cannotKeepFlagged && couldProduceFlagged;
-
- if (shouldAddMutation && (params.options & QueryPlannerParams::KEEP_MUTATIONS)) {
- KeepMutationsNode* keep = new KeepMutationsNode();
-
- // We must run the entire expression tree to make sure the document is still valid.
- keep->filter.reset(query.root()->shallowClone());
-
- if (STAGE_SORT == solnRoot->getType()) {
- // We want to insert the invalidated results before the sort stage, if there is one.
- verify(1 == solnRoot->children.size());
- keep->children.push_back(solnRoot->children[0]);
- solnRoot->children[0] = keep;
- }
- else {
- keep->children.push_back(solnRoot);
- solnRoot = keep;
- }
+ // If we can (and should), add the keep mutations stage.
+
+ // We cannot keep mutated documents if:
+ //
+ // 1. The query requires an index to evaluate the predicate ($text). We can't tell whether
+ // or not the doc actually satisfies the $text predicate since we can't evaluate a
+ // text MatchExpression.
+ //
+ // 2. The query implies a sort ($geoNear). It would be rather expensive and hacky to merge
+ // the document at the right place.
+ //
+ // 3. There is an index-provided sort. Ditto above comment about merging.
+ //
+ // TODO: do we want some kind of pre-planning step where we look for certain nodes and cache
+ // them? We do lookups in the tree a few times. This may not matter as most trees are
+ // shallow in terms of query nodes.
+ bool cannotKeepFlagged = hasNode(solnRoot, STAGE_TEXT) ||
+ hasNode(solnRoot, STAGE_GEO_NEAR_2D) || hasNode(solnRoot, STAGE_GEO_NEAR_2DSPHERE) ||
+ (!lpq.getSort().isEmpty() && !hasSortStage);
+
+ // Only these stages can produce flagged results. A stage has to hold state past one call
+ // to work(...) in order to possibly flag a result.
+ bool couldProduceFlagged =
+ hasAndHashStage || hasNode(solnRoot, STAGE_AND_SORTED) || hasNode(solnRoot, STAGE_FETCH);
+
+ bool shouldAddMutation = !cannotKeepFlagged && couldProduceFlagged;
+
+ if (shouldAddMutation && (params.options & QueryPlannerParams::KEEP_MUTATIONS)) {
+ KeepMutationsNode* keep = new KeepMutationsNode();
+
+ // We must run the entire expression tree to make sure the document is still valid.
+ keep->filter.reset(query.root()->shallowClone());
+
+ if (STAGE_SORT == solnRoot->getType()) {
+ // We want to insert the invalidated results before the sort stage, if there is one.
+ verify(1 == solnRoot->children.size());
+ keep->children.push_back(solnRoot->children[0]);
+ solnRoot->children[0] = keep;
+ } else {
+ keep->children.push_back(solnRoot);
+ solnRoot = keep;
}
+ }
- // Project the results.
- if (NULL != query.getProj()) {
- LOG(5) << "PROJECTION: fetched status: " << solnRoot->fetched() << endl;
- LOG(5) << "PROJECTION: Current plan is:\n" << solnRoot->toString() << endl;
-
- ProjectionNode::ProjectionType projType = ProjectionNode::DEFAULT;
- BSONObj coveredKeyObj;
-
- if (query.getProj()->requiresDocument()) {
- LOG(5) << "PROJECTION: claims to require doc adding fetch.\n";
- // If the projection requires the entire document, somebody must fetch.
- if (!solnRoot->fetched()) {
- FetchNode* fetch = new FetchNode();
- fetch->children.push_back(solnRoot);
- solnRoot = fetch;
- }
+ // Project the results.
+ if (NULL != query.getProj()) {
+ LOG(5) << "PROJECTION: fetched status: " << solnRoot->fetched() << endl;
+ LOG(5) << "PROJECTION: Current plan is:\n" << solnRoot->toString() << endl;
+
+ ProjectionNode::ProjectionType projType = ProjectionNode::DEFAULT;
+ BSONObj coveredKeyObj;
+
+ if (query.getProj()->requiresDocument()) {
+ LOG(5) << "PROJECTION: claims to require doc adding fetch.\n";
+ // If the projection requires the entire document, somebody must fetch.
+ if (!solnRoot->fetched()) {
+ FetchNode* fetch = new FetchNode();
+ fetch->children.push_back(solnRoot);
+ solnRoot = fetch;
}
- else if (!query.getProj()->wantIndexKey()) {
- // The only way we're here is if it's a simple projection. That is, we can pick out
- // the fields we want to include and they're not dotted. So we want to execute the
- // projection in the fast-path simple fashion. Just don't know which fast path yet.
- LOG(5) << "PROJECTION: requires fields\n";
- const vector<string>& fields = query.getProj()->getRequiredFields();
- bool covered = true;
- for (size_t i = 0; i < fields.size(); ++i) {
- if (!solnRoot->hasField(fields[i])) {
- LOG(5) << "PROJECTION: not covered due to field "
- << fields[i] << endl;
- covered = false;
- break;
- }
+ } else if (!query.getProj()->wantIndexKey()) {
+ // The only way we're here is if it's a simple projection. That is, we can pick out
+ // the fields we want to include and they're not dotted. So we want to execute the
+ // projection in the fast-path simple fashion. Just don't know which fast path yet.
+ LOG(5) << "PROJECTION: requires fields\n";
+ const vector<string>& fields = query.getProj()->getRequiredFields();
+ bool covered = true;
+ for (size_t i = 0; i < fields.size(); ++i) {
+ if (!solnRoot->hasField(fields[i])) {
+ LOG(5) << "PROJECTION: not covered due to field " << fields[i] << endl;
+ covered = false;
+ break;
}
+ }
- LOG(5) << "PROJECTION: is covered?: = " << covered << endl;
+ LOG(5) << "PROJECTION: is covered?: = " << covered << endl;
- // If any field is missing from the list of fields the projection wants,
- // a fetch is required.
- if (!covered) {
- FetchNode* fetch = new FetchNode();
- fetch->children.push_back(solnRoot);
- solnRoot = fetch;
+ // If any field is missing from the list of fields the projection wants,
+ // a fetch is required.
+ if (!covered) {
+ FetchNode* fetch = new FetchNode();
+ fetch->children.push_back(solnRoot);
+ solnRoot = fetch;
- // It's simple but we'll have the full document and we should just iterate
- // over that.
+ // It's simple but we'll have the full document and we should just iterate
+ // over that.
+ projType = ProjectionNode::SIMPLE_DOC;
+ LOG(5) << "PROJECTION: not covered, fetching.";
+ } else {
+ if (solnRoot->fetched()) {
+ // Fetched implies hasObj() so let's run with that.
projType = ProjectionNode::SIMPLE_DOC;
- LOG(5) << "PROJECTION: not covered, fetching.";
- }
- else {
- if (solnRoot->fetched()) {
- // Fetched implies hasObj() so let's run with that.
- projType = ProjectionNode::SIMPLE_DOC;
- LOG(5) << "PROJECTION: covered via FETCH, using SIMPLE_DOC fast path";
- }
- else {
- // If we're here we're not fetched so we're covered. Let's see if we can
- // get out of using the default projType. If there's only one leaf
- // underneath and it's giving us index data we can use the faster covered
- // impl.
- vector<QuerySolutionNode*> leafNodes;
- getLeafNodes(solnRoot, &leafNodes);
-
- if (1 == leafNodes.size()) {
- // Both the IXSCAN and DISTINCT stages provide covered key data.
- if (STAGE_IXSCAN == leafNodes[0]->getType()) {
- projType = ProjectionNode::COVERED_ONE_INDEX;
- IndexScanNode* ixn = static_cast<IndexScanNode*>(leafNodes[0]);
- coveredKeyObj = ixn->indexKeyPattern;
- LOG(5) << "PROJECTION: covered via IXSCAN, using COVERED fast path";
- }
- else if (STAGE_DISTINCT_SCAN == leafNodes[0]->getType()) {
- projType = ProjectionNode::COVERED_ONE_INDEX;
- DistinctNode* dn = static_cast<DistinctNode*>(leafNodes[0]);
- coveredKeyObj = dn->indexKeyPattern;
- LOG(5) << "PROJECTION: covered via DISTINCT, using COVERED fast path";
- }
+ LOG(5) << "PROJECTION: covered via FETCH, using SIMPLE_DOC fast path";
+ } else {
+ // If we're here we're not fetched so we're covered. Let's see if we can
+ // get out of using the default projType. If there's only one leaf
+ // underneath and it's giving us index data we can use the faster covered
+ // impl.
+ vector<QuerySolutionNode*> leafNodes;
+ getLeafNodes(solnRoot, &leafNodes);
+
+ if (1 == leafNodes.size()) {
+ // Both the IXSCAN and DISTINCT stages provide covered key data.
+ if (STAGE_IXSCAN == leafNodes[0]->getType()) {
+ projType = ProjectionNode::COVERED_ONE_INDEX;
+ IndexScanNode* ixn = static_cast<IndexScanNode*>(leafNodes[0]);
+ coveredKeyObj = ixn->indexKeyPattern;
+ LOG(5) << "PROJECTION: covered via IXSCAN, using COVERED fast path";
+ } else if (STAGE_DISTINCT_SCAN == leafNodes[0]->getType()) {
+ projType = ProjectionNode::COVERED_ONE_INDEX;
+ DistinctNode* dn = static_cast<DistinctNode*>(leafNodes[0]);
+ coveredKeyObj = dn->indexKeyPattern;
+ LOG(5) << "PROJECTION: covered via DISTINCT, using COVERED fast path";
}
}
}
}
-
- // We now know we have whatever data is required for the projection.
- ProjectionNode* projNode = new ProjectionNode();
- projNode->children.push_back(solnRoot);
- projNode->fullExpression = query.root();
- projNode->projection = lpq.getProj();
- projNode->projType = projType;
- projNode->coveredKeyObj = coveredKeyObj;
- solnRoot = projNode;
- }
- else {
- // If there's no projection, we must fetch, as the user wants the entire doc.
- if (!solnRoot->fetched()) {
- FetchNode* fetch = new FetchNode();
- fetch->children.push_back(solnRoot);
- solnRoot = fetch;
- }
}
- if (0 != lpq.getSkip()) {
- SkipNode* skip = new SkipNode();
- skip->skip = lpq.getSkip();
- skip->children.push_back(solnRoot);
- solnRoot = skip;
+ // We now know we have whatever data is required for the projection.
+ ProjectionNode* projNode = new ProjectionNode();
+ projNode->children.push_back(solnRoot);
+ projNode->fullExpression = query.root();
+ projNode->projection = lpq.getProj();
+ projNode->projType = projType;
+ projNode->coveredKeyObj = coveredKeyObj;
+ solnRoot = projNode;
+ } else {
+ // If there's no projection, we must fetch, as the user wants the entire doc.
+ if (!solnRoot->fetched()) {
+ FetchNode* fetch = new FetchNode();
+ fetch->children.push_back(solnRoot);
+ solnRoot = fetch;
}
+ }
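
Editor's note: the projection branch above picks between a default projection over a fetched document, a simple-document fast path, and a covered fast path fed directly by index keys. As a rough standalone sketch (the names are invented, and the real decision also inspects the shape of the plan tree and the number of leaves):

    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    enum class ProjStrategy { Default, SimpleDoc, CoveredOneIndex };

    // Illustrative only: decide how a projection might be executed given
    // which fields a single index scan can supply.
    ProjStrategy chooseStrategy(bool requiresFullDocument,
                                const std::vector<std::string>& requiredFields,
                                const std::set<std::string>& indexFields) {
        if (requiresFullDocument) {
            return ProjStrategy::Default;  // fetch first, then run the general projection
        }
        for (const std::string& f : requiredFields) {
            if (indexFields.count(f) == 0) {
                return ProjStrategy::SimpleDoc;  // not covered: fetch, project from the doc
            }
        }
        return ProjStrategy::CoveredOneIndex;  // covered: project straight from index keys
    }

    int main() {
        const std::set<std::string> keyPatternFields = {"a", "b"};

        // Covered: a projection needing only "a" against an index on {a: 1, b: 1}.
        std::cout << (chooseStrategy(false, {"a"}, keyPatternFields) ==
                      ProjStrategy::CoveredOneIndex) << "\n";  // 1

        // Not covered: "c" is not in the index, so a fetch is required.
        std::cout << (chooseStrategy(false, {"a", "c"}, keyPatternFields) ==
                      ProjStrategy::SimpleDoc) << "\n";        // 1
        return 0;
    }
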
- // When there is both a blocking sort and a limit, the limit will
- // be enforced by the blocking sort.
- // Otherwise, we need to limit the results in the case of a hard limit
- // (ie. limit in raw query is negative)
- if (!hasSortStage) {
- // We don't have a sort stage. This means that, if there is a limit, we will have
- // to enforce it ourselves since it's not handled inside SORT.
- if (lpq.getLimit()) {
- LimitNode* limit = new LimitNode();
- limit->limit = *lpq.getLimit();
- limit->children.push_back(solnRoot);
- solnRoot = limit;
- }
- else if (!lpq.isFromFindCommand() && lpq.getBatchSize() && !lpq.wantMore()) {
- // We have a "legacy limit", i.e. a negative ntoreturn value from an OP_QUERY style
- // find.
- LimitNode* limit = new LimitNode();
- limit->limit = *lpq.getBatchSize();
- limit->children.push_back(solnRoot);
- solnRoot = limit;
- }
- }
+ if (0 != lpq.getSkip()) {
+ SkipNode* skip = new SkipNode();
+ skip->skip = lpq.getSkip();
+ skip->children.push_back(solnRoot);
+ solnRoot = skip;
+ }
- soln->root.reset(solnRoot);
- return soln.release();
+ // When there is both a blocking sort and a limit, the limit will
+ // be enforced by the blocking sort.
+ // Otherwise, we need to limit the results in the case of a hard limit
+ // (ie. limit in raw query is negative)
+ if (!hasSortStage) {
+ // We don't have a sort stage. This means that, if there is a limit, we will have
+ // to enforce it ourselves since it's not handled inside SORT.
+ if (lpq.getLimit()) {
+ LimitNode* limit = new LimitNode();
+ limit->limit = *lpq.getLimit();
+ limit->children.push_back(solnRoot);
+ solnRoot = limit;
+ } else if (!lpq.isFromFindCommand() && lpq.getBatchSize() && !lpq.wantMore()) {
+ // We have a "legacy limit", i.e. a negative ntoreturn value from an OP_QUERY style
+ // find.
+ LimitNode* limit = new LimitNode();
+ limit->limit = *lpq.getBatchSize();
+ limit->children.push_back(solnRoot);
+ solnRoot = limit;
+ }
}
+ soln->root.reset(solnRoot);
+ return soln.release();
+}
+
} // namespace mongo
diff --git a/src/mongo/db/query/planner_analysis.h b/src/mongo/db/query/planner_analysis.h
index b7591bb31b5..6cbb48df136 100644
--- a/src/mongo/db/query/planner_analysis.h
+++ b/src/mongo/db/query/planner_analysis.h
@@ -34,77 +34,77 @@
namespace mongo {
- class Collection;
+class Collection;
- class QueryPlannerAnalysis {
- public:
- /**
- * Takes an index key pattern and returns an object describing the "maximal sort" that this
- * index can provide. Returned object is in normalized sort form (all elements have value 1
- * or -1).
- *
- * Examples:
- * - {a: 1, b: -1} => {a: 1, b: -1}
- * - {a: true} => {a: 1}
- * - {a: "hashed"} => {}
- * - {a: 1, b: "text", c: 1} => {a: 1}
- */
- static BSONObj getSortPattern(const BSONObj& indexKeyPattern);
+class QueryPlannerAnalysis {
+public:
+ /**
+ * Takes an index key pattern and returns an object describing the "maximal sort" that this
+ * index can provide. Returned object is in normalized sort form (all elements have value 1
+ * or -1).
+ *
+ * Examples:
+ * - {a: 1, b: -1} => {a: 1, b: -1}
+ * - {a: true} => {a: 1}
+ * - {a: "hashed"} => {}
+ * - {a: 1, b: "text", c: 1} => {a: 1}
+ */
+ static BSONObj getSortPattern(const BSONObj& indexKeyPattern);
- /**
- * In brief: performs sort and covering analysis.
- *
- * The solution rooted at 'solnRoot' provides data for the query, whether through some
- * configuration of indices or through a collection scan. Additional stages may be required
- * to perform sorting, projection, or other operations that are independent of the source
- * of the data. These stages are added atop 'solnRoot'.
- *
- * 'taggedRoot' is a copy of the parse tree. Nodes in 'solnRoot' may point into it.
- *
- * Takes ownership of 'solnRoot' and 'taggedRoot'.
- *
- * Returns NULL if a solution cannot be constructed given the requirements in 'params'.
- *
- * Caller owns the returned QuerySolution.
- */
- static QuerySolution* analyzeDataAccess(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- QuerySolutionNode* solnRoot);
+ /**
+ * In brief: performs sort and covering analysis.
+ *
+ * The solution rooted at 'solnRoot' provides data for the query, whether through some
+ * configuration of indices or through a collection scan. Additional stages may be required
+ * to perform sorting, projection, or other operations that are independent of the source
+ * of the data. These stages are added atop 'solnRoot'.
+ *
+ * 'taggedRoot' is a copy of the parse tree. Nodes in 'solnRoot' may point into it.
+ *
+ * Takes ownership of 'solnRoot' and 'taggedRoot'.
+ *
+ * Returns NULL if a solution cannot be constructed given the requirements in 'params'.
+ *
+ * Caller owns the returned QuerySolution.
+ */
+ static QuerySolution* analyzeDataAccess(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ QuerySolutionNode* solnRoot);
- /**
- * Sort the results, if there is a sort required.
- */
- static QuerySolutionNode* analyzeSort(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- QuerySolutionNode* solnRoot,
- bool* blockingSortOut);
+ /**
+ * Sort the results, if there is a sort required.
+ */
+ static QuerySolutionNode* analyzeSort(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ QuerySolutionNode* solnRoot,
+ bool* blockingSortOut);
- /**
- * Internal helper function used by analyzeSort.
- *
- * Rewrites an index scan over many point intervals as an OR of many index scans in order to
- * obtain an indexed sort. For full details, see SERVER-1205.
- *
- * Here is an example:
- *
- * Consider the query find({a: {$in: [1,2]}}).sort({b: 1}) with using the index {a:1, b:1}.
- *
- * Our default solution will be to construct one index scan with the bounds a:[[1,1],[2,2]]
- * and b: [MinKey, MaxKey].
- *
- * However, this is logically equivalent to the union of the following scans:
- * a:[1,1], b:[MinKey, MaxKey]
- * a:[2,2], b:[MinKey, MaxKey]
- *
- * Since the bounds on 'a' are a point, each scan provides the sort order {b:1} in addition
- * to {a:1, b:1}.
- *
- * If we union these scans with a merge sort instead of a normal hashing OR, we can preserve
- * the sort order that each scan provides.
- */
- static bool explodeForSort(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- QuerySolutionNode** solnRoot);
- };
+ /**
+ * Internal helper function used by analyzeSort.
+ *
+ * Rewrites an index scan over many point intervals as an OR of many index scans in order to
+ * obtain an indexed sort. For full details, see SERVER-1205.
+ *
+ * Here is an example:
+ *
+ * Consider the query find({a: {$in: [1,2]}}).sort({b: 1}) using the index {a:1, b:1}.

+ *
+ * Our default solution will be to construct one index scan with the bounds a:[[1,1],[2,2]]
+ * and b: [MinKey, MaxKey].
+ *
+ * However, this is logically equivalent to the union of the following scans:
+ * a:[1,1], b:[MinKey, MaxKey]
+ * a:[2,2], b:[MinKey, MaxKey]
+ *
+ * Since the bounds on 'a' are a point, each scan provides the sort order {b:1} in addition
+ * to {a:1, b:1}.
+ *
+ * If we union these scans with a merge sort instead of a normal hashing OR, we can preserve
+ * the sort order that each scan provides.
+ */
+ static bool explodeForSort(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ QuerySolutionNode** solnRoot);
+};
} // namespace mongo
diff --git a/src/mongo/db/query/planner_analysis_test.cpp b/src/mongo/db/query/planner_analysis_test.cpp
index 18fc86d6f1d..8849e67af5a 100644
--- a/src/mongo/db/query/planner_analysis_test.cpp
+++ b/src/mongo/db/query/planner_analysis_test.cpp
@@ -36,130 +36,118 @@ using namespace mongo;
namespace {
- TEST(QueryPlannerAnalysis, GetSortPatternBasic) {
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1}")));
- ASSERT_EQUALS(fromjson("{a: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1}")));
- ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1}")));
- ASSERT_EQUALS(fromjson("{a: 1, b: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: -1}")));
- ASSERT_EQUALS(fromjson("{a: -1, b: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: 1}")));
- ASSERT_EQUALS(fromjson("{a: -1, b: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: -1}")));
- }
-
- TEST(QueryPlannerAnalysis, GetSortPatternOtherElements) {
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 0}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 100}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: Infinity}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: true}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: false}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: []}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: {}}")));
-
- ASSERT_EQUALS(fromjson("{a: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -100}")));
- ASSERT_EQUALS(fromjson("{a: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -Infinity}")));
-
- ASSERT_EQUALS(fromjson("{}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{}")));
- }
-
- TEST(QueryPlannerAnalysis, GetSortPatternSpecialIndexTypes) {
- ASSERT_EQUALS(fromjson("{}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'hashed'}")));
- ASSERT_EQUALS(fromjson("{}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'text'}")));
- ASSERT_EQUALS(fromjson("{}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: '2dsphere'}")));
- ASSERT_EQUALS(fromjson("{}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: ''}")));
- ASSERT_EQUALS(fromjson("{}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'foo'}")));
-
- ASSERT_EQUALS(fromjson("{a: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: 'text'}")));
- ASSERT_EQUALS(fromjson("{a: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: '2dsphere'}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 'text'}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: '2dsphere'}")));
-
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 'text', c: 1}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: '2dsphere',"
- " c: 1}")));
-
- ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1, c: 'text'}")));
- ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1, c: 'text',"
- " d: 1}")));
- }
-
- // Test the generation of sort orders provided by an index scan done by
- // IndexScanNode::computeProperties().
- TEST(QueryPlannerAnalysis, IxscanSortOrdersBasic) {
- IndexScanNode ixscan;
- ixscan.indexKeyPattern = fromjson("{a: 1, b: 1, c: 1, d: 1, e: 1}");
-
- // Bounds are {a: [[1,1]], b: [[2,2]], c: [[3,3]], d: [[1,5]], e:[[1,1],[2,2]]},
- // all inclusive.
- OrderedIntervalList oil1("a");
- oil1.intervals.push_back(Interval(fromjson("{'': 1, '': 1}"), true, true));
- ixscan.bounds.fields.push_back(oil1);
-
- OrderedIntervalList oil2("b");
- oil2.intervals.push_back(Interval(fromjson("{'': 2, '': 2}"), true, true));
- ixscan.bounds.fields.push_back(oil2);
-
- OrderedIntervalList oil3("c");
- oil3.intervals.push_back(Interval(fromjson("{'': 3, '': 3}"), true, true));
- ixscan.bounds.fields.push_back(oil3);
-
- OrderedIntervalList oil4("d");
- oil4.intervals.push_back(Interval(fromjson("{'': 1, '': 5}"), true, true));
- ixscan.bounds.fields.push_back(oil4);
-
- OrderedIntervalList oil5("e");
- oil5.intervals.push_back(Interval(fromjson("{'': 1, '': 1}"), true, true));
- oil5.intervals.push_back(Interval(fromjson("{'': 2, '': 2}"), true, true));
- ixscan.bounds.fields.push_back(oil5);
-
- // Compute and retrieve the set of sorts.
- ixscan.computeProperties();
- const BSONObjSet& sorts = ixscan.getSort();
-
- // One possible sort is the index key pattern.
- ASSERT(sorts.find(fromjson("{a: 1, b: 1, c: 1, d: 1, e: 1}")) != sorts.end());
-
- // All prefixes of the key pattern.
- ASSERT(sorts.find(fromjson("{a: 1}")) != sorts.end());
- ASSERT(sorts.find(fromjson("{a: 1, b: 1}")) != sorts.end());
- ASSERT(sorts.find(fromjson("{a: 1, b: 1, c: 1}")) != sorts.end());
- ASSERT(sorts.find(fromjson("{a: 1, b: 1, c: 1, d: 1}")) != sorts.end());
-
- // Additional sorts considered due to point intervals on 'a', 'b', and 'c'.
- ASSERT(sorts.find(fromjson("{b: 1, c: 1, d: 1, e: 1}")) != sorts.end());
- ASSERT(sorts.find(fromjson("{c: 1, d: 1, e: 1}")) != sorts.end());
- ASSERT(sorts.find(fromjson("{d: 1, e: 1}")) != sorts.end());
- ASSERT(sorts.find(fromjson("{d: 1}")) != sorts.end());
-
- // There should be 9 total sorts: make sure no other ones snuck their way in.
- ASSERT_EQ(9U, sorts.size());
- }
+TEST(QueryPlannerAnalysis, GetSortPatternBasic) {
+ ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1}")));
+ ASSERT_EQUALS(fromjson("{a: -1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1}")));
+ ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1}")));
+ ASSERT_EQUALS(fromjson("{a: 1, b: -1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: -1}")));
+ ASSERT_EQUALS(fromjson("{a: -1, b: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: 1}")));
+ ASSERT_EQUALS(fromjson("{a: -1, b: -1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: -1}")));
+}
+
+TEST(QueryPlannerAnalysis, GetSortPatternOtherElements) {
+ ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 0}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 100}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: Infinity}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: true}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: false}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: []}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: {}}")));
+
+ ASSERT_EQUALS(fromjson("{a: -1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: -100}")));
+ ASSERT_EQUALS(fromjson("{a: -1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: -Infinity}")));
+
+ ASSERT_EQUALS(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{}")));
+}
+
+TEST(QueryPlannerAnalysis, GetSortPatternSpecialIndexTypes) {
+ ASSERT_EQUALS(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'hashed'}")));
+ ASSERT_EQUALS(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'text'}")));
+ ASSERT_EQUALS(fromjson("{}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: '2dsphere'}")));
+ ASSERT_EQUALS(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: ''}")));
+ ASSERT_EQUALS(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'foo'}")));
+
+ ASSERT_EQUALS(fromjson("{a: -1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: 'text'}")));
+ ASSERT_EQUALS(fromjson("{a: -1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: '2dsphere'}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 'text'}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: '2dsphere'}")));
+
+ ASSERT_EQUALS(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 'text', c: 1}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson(
+ "{a: 1, b: '2dsphere',"
+ " c: 1}")));
+
+ ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1, c: 'text'}")));
+ ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson(
+ "{a: 1, b: 1, c: 'text',"
+ " d: 1}")));
+}
+
+// Test the generation of sort orders provided by an index scan done by
+// IndexScanNode::computeProperties().
+TEST(QueryPlannerAnalysis, IxscanSortOrdersBasic) {
+ IndexScanNode ixscan;
+ ixscan.indexKeyPattern = fromjson("{a: 1, b: 1, c: 1, d: 1, e: 1}");
+
+ // Bounds are {a: [[1,1]], b: [[2,2]], c: [[3,3]], d: [[1,5]], e:[[1,1],[2,2]]},
+ // all inclusive.
+ OrderedIntervalList oil1("a");
+ oil1.intervals.push_back(Interval(fromjson("{'': 1, '': 1}"), true, true));
+ ixscan.bounds.fields.push_back(oil1);
+
+ OrderedIntervalList oil2("b");
+ oil2.intervals.push_back(Interval(fromjson("{'': 2, '': 2}"), true, true));
+ ixscan.bounds.fields.push_back(oil2);
+
+ OrderedIntervalList oil3("c");
+ oil3.intervals.push_back(Interval(fromjson("{'': 3, '': 3}"), true, true));
+ ixscan.bounds.fields.push_back(oil3);
+
+ OrderedIntervalList oil4("d");
+ oil4.intervals.push_back(Interval(fromjson("{'': 1, '': 5}"), true, true));
+ ixscan.bounds.fields.push_back(oil4);
+
+ OrderedIntervalList oil5("e");
+ oil5.intervals.push_back(Interval(fromjson("{'': 1, '': 1}"), true, true));
+ oil5.intervals.push_back(Interval(fromjson("{'': 2, '': 2}"), true, true));
+ ixscan.bounds.fields.push_back(oil5);
+
+ // Compute and retrieve the set of sorts.
+ ixscan.computeProperties();
+ const BSONObjSet& sorts = ixscan.getSort();
+
+ // One possible sort is the index key pattern.
+ ASSERT(sorts.find(fromjson("{a: 1, b: 1, c: 1, d: 1, e: 1}")) != sorts.end());
+
+ // All prefixes of the key pattern.
+ ASSERT(sorts.find(fromjson("{a: 1}")) != sorts.end());
+ ASSERT(sorts.find(fromjson("{a: 1, b: 1}")) != sorts.end());
+ ASSERT(sorts.find(fromjson("{a: 1, b: 1, c: 1}")) != sorts.end());
+ ASSERT(sorts.find(fromjson("{a: 1, b: 1, c: 1, d: 1}")) != sorts.end());
+
+ // Additional sorts considered due to point intervals on 'a', 'b', and 'c'.
+ ASSERT(sorts.find(fromjson("{b: 1, c: 1, d: 1, e: 1}")) != sorts.end());
+ ASSERT(sorts.find(fromjson("{c: 1, d: 1, e: 1}")) != sorts.end());
+ ASSERT(sorts.find(fromjson("{d: 1, e: 1}")) != sorts.end());
+ ASSERT(sorts.find(fromjson("{d: 1}")) != sorts.end());
+
+ // There should be 9 total sorts: make sure no other ones snuck their way in.
+ ASSERT_EQ(9U, sorts.size());
+}
} // namespace
diff --git a/src/mongo/db/query/planner_ixselect.cpp b/src/mongo/db/query/planner_ixselect.cpp
index f883b5468be..f1f133c8ee5 100644
--- a/src/mongo/db/query/planner_ixselect.cpp
+++ b/src/mongo/db/query/planner_ixselect.cpp
@@ -44,689 +44,650 @@
namespace mongo {
- static double fieldWithDefault(const BSONObj& infoObj, const string& name, double def) {
- BSONElement e = infoObj[name];
- if (e.isNumber()) { return e.numberDouble(); }
- return def;
+static double fieldWithDefault(const BSONObj& infoObj, const string& name, double def) {
+ BSONElement e = infoObj[name];
+ if (e.isNumber()) {
+ return e.numberDouble();
}
+ return def;
+}
- /**
- * 2d indices don't handle wrapping so we can't use them for queries that wrap.
- */
- static bool twoDWontWrap(const Circle& circle, const IndexEntry& index) {
-
- GeoHashConverter::Parameters hashParams;
- Status paramStatus = GeoHashConverter::parseParameters(index.infoObj, &hashParams);
- verify(paramStatus.isOK()); // we validated the params on index creation
-
- GeoHashConverter conv(hashParams);
-
- // FYI: old code used flat not spherical error.
- double yscandist = rad2deg(circle.radius) + conv.getErrorSphere();
- double xscandist = computeXScanDistance(circle.center.y, yscandist);
- bool ret = circle.center.x + xscandist < 180
- && circle.center.x - xscandist > -180
- && circle.center.y + yscandist < 90
- && circle.center.y - yscandist > -90;
- return ret;
+/**
+ * 2d indices don't handle wrapping so we can't use them for queries that wrap.
+ */
+static bool twoDWontWrap(const Circle& circle, const IndexEntry& index) {
+ GeoHashConverter::Parameters hashParams;
+ Status paramStatus = GeoHashConverter::parseParameters(index.infoObj, &hashParams);
+ verify(paramStatus.isOK()); // we validated the params on index creation
+
+ GeoHashConverter conv(hashParams);
+
+ // FYI: old code used flat not spherical error.
+ double yscandist = rad2deg(circle.radius) + conv.getErrorSphere();
+ double xscandist = computeXScanDistance(circle.center.y, yscandist);
+ bool ret = circle.center.x + xscandist < 180 && circle.center.x - xscandist > -180 &&
+ circle.center.y + yscandist < 90 && circle.center.y - yscandist > -90;
+ return ret;
+}
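
Editor's note: the wrap check above rejects 2d plans whose scan window would cross the edges of the [-180, 180] x [-90, 90] plane. A self-contained sketch of the same box test follows; the x and y scan distances are passed in as plain numbers here (the real code derives them from computeXScanDistance and the hash converter's error bound), so treat the values as illustrative.

    #include <iostream>

    // Illustrative only: does a scan window of the given half-widths, centered
    // at (centerX, centerY), stay strictly inside the legal 2d plane?
    bool scanWindowFitsWithoutWrapping(double centerX,
                                       double centerY,
                                       double xScanDist,
                                       double yScanDist) {
        return centerX + xScanDist < 180 && centerX - xScanDist > -180 &&
            centerY + yScanDist < 90 && centerY - yScanDist > -90;
    }

    int main() {
        // A small circle near the origin fits comfortably.
        std::cout << scanWindowFitsWithoutWrapping(10.0, 20.0, 1.5, 1.0) << "\n";  // 1

        // A circle centered near the antimeridian would wrap, so a 2d index
        // cannot be used for it.
        std::cout << scanWindowFitsWithoutWrapping(179.5, 0.0, 1.5, 1.0) << "\n";  // 0
        return 0;
    }
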
+
+// static
+void QueryPlannerIXSelect::getFields(const MatchExpression* node,
+ string prefix,
+ unordered_set<string>* out) {
+ // Do not traverse tree beyond a NOR negation node
+ MatchExpression::MatchType exprtype = node->matchType();
+ if (exprtype == MatchExpression::NOR) {
+ return;
}
- // static
- void QueryPlannerIXSelect::getFields(const MatchExpression* node,
- string prefix,
- unordered_set<string>* out) {
- // Do not traverse tree beyond a NOR negation node
- MatchExpression::MatchType exprtype = node->matchType();
- if (exprtype == MatchExpression::NOR) {
- return;
- }
-
- // Leaf nodes with a path and some array operators.
- if (Indexability::nodeCanUseIndexOnOwnField(node)) {
- out->insert(prefix + node->path().toString());
- }
- else if (Indexability::arrayUsesIndexOnChildren(node)) {
- // If the array uses an index on its children, it's something like
- // {foo : {$elemMatch: { bar: 1}}}, in which case the predicate is really over
- // foo.bar.
- //
- // When we have {foo: {$all: [{$elemMatch: {a:1}}], the path of the embedded elemMatch
- // is empty. We don't want to append a dot in that case as the field would be foo..a.
- if (!node->path().empty()) {
- prefix += node->path().toString() + ".";
- }
+ // Leaf nodes with a path and some array operators.
+ if (Indexability::nodeCanUseIndexOnOwnField(node)) {
+ out->insert(prefix + node->path().toString());
+ } else if (Indexability::arrayUsesIndexOnChildren(node)) {
+ // If the array uses an index on its children, it's something like
+ // {foo : {$elemMatch: { bar: 1}}}, in which case the predicate is really over
+ // foo.bar.
+ //
+ // When we have {foo: {$all: [{$elemMatch: {a:1}}]}}, the path of the embedded elemMatch
+ // is empty. We don't want to append a dot in that case as the field would be foo..a.
+ if (!node->path().empty()) {
+ prefix += node->path().toString() + ".";
+ }
- for (size_t i = 0; i < node->numChildren(); ++i) {
- getFields(node->getChild(i), prefix, out);
- }
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ getFields(node->getChild(i), prefix, out);
}
- else if (node->isLogical()) {
- for (size_t i = 0; i < node->numChildren(); ++i) {
- getFields(node->getChild(i), prefix, out);
- }
+ } else if (node->isLogical()) {
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ getFields(node->getChild(i), prefix, out);
}
}
-
- // static
- void QueryPlannerIXSelect::findRelevantIndices(const unordered_set<string>& fields,
- const vector<IndexEntry>& allIndices,
- vector<IndexEntry>* out) {
- for (size_t i = 0; i < allIndices.size(); ++i) {
- BSONObjIterator it(allIndices[i].keyPattern);
- verify(it.more());
- BSONElement elt = it.next();
- if (fields.end() != fields.find(elt.fieldName())) {
- out->push_back(allIndices[i]);
- }
+}
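
Editor's note: the prefix handling in getFields above (and the dot it deliberately avoids doubling) can be seen with a toy expression tree. This sketch uses invented node types rather than the MatchExpression hierarchy, purely to show how a node over 'foo' with an $elemMatch child over 'bar' yields the field "foo.bar".

    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    // A toy stand-in for a match expression node.
    struct Node {
        std::string path;                  // may be empty (e.g. an $elemMatch inside $all)
        bool indexableLeaf;                // comparable to nodeCanUseIndexOnOwnField()
        bool arrayOverChildren;            // comparable to arrayUsesIndexOnChildren()
        std::vector<const Node*> children;
    };

    void collectFields(const Node& node, std::string prefix, std::set<std::string>* out) {
        if (node.indexableLeaf) {
            out->insert(prefix + node.path);
        } else if (node.arrayOverChildren) {
            // Only extend the prefix when the node has a path, so an embedded
            // $elemMatch with an empty path does not produce "foo..a".
            if (!node.path.empty()) {
                prefix += node.path + ".";
            }
            for (const Node* child : node.children) {
                collectFields(*child, prefix, out);
            }
        } else {
            for (const Node* child : node.children) {
                collectFields(*child, prefix, out);
            }
        }
    }

    int main() {
        // Roughly {foo: {$elemMatch: {bar: 1}}}: the predicate is really on "foo.bar".
        Node bar{"bar", true, false, {}};
        Node foo{"foo", false, true, {&bar}};

        std::set<std::string> fields;
        collectFields(foo, "", &fields);
        for (const std::string& f : fields) {
            std::cout << f << "\n";  // prints: foo.bar
        }
        return 0;
    }
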
+
+// static
+void QueryPlannerIXSelect::findRelevantIndices(const unordered_set<string>& fields,
+ const vector<IndexEntry>& allIndices,
+ vector<IndexEntry>* out) {
+ for (size_t i = 0; i < allIndices.size(); ++i) {
+ BSONObjIterator it(allIndices[i].keyPattern);
+ verify(it.more());
+ BSONElement elt = it.next();
+ if (fields.end() != fields.find(elt.fieldName())) {
+ out->push_back(allIndices[i]);
}
}
+}
+
+// static
+bool QueryPlannerIXSelect::compatible(const BSONElement& elt,
+ const IndexEntry& index,
+ MatchExpression* node) {
+ // Historically one could create indices with any particular value for the index spec,
+ // including values that now indicate a special index. As such we have to make sure the
+ // index type wasn't overridden before we pay attention to the string in the index key
+ // pattern element.
+ //
+ // e.g. long ago we could have created an index {a: "2dsphere"} and it would
+ // be treated as a btree index by an ancient version of MongoDB. To try to run
+ // 2dsphere queries over it would be folly.
+ string indexedFieldType;
+ if (String != elt.type() || (INDEX_BTREE == index.type)) {
+ indexedFieldType = "";
+ } else {
+ indexedFieldType = elt.String();
+ }
- // static
- bool QueryPlannerIXSelect::compatible(const BSONElement& elt,
- const IndexEntry& index,
- MatchExpression* node) {
- // Historically one could create indices with any particular value for the index spec,
- // including values that now indicate a special index. As such we have to make sure the
- // index type wasn't overridden before we pay attention to the string in the index key
- // pattern element.
- //
- // e.g. long ago we could have created an index {a: "2dsphere"} and it would
- // be treated as a btree index by an ancient version of MongoDB. To try to run
- // 2dsphere queries over it would be folly.
- string indexedFieldType;
- if (String != elt.type() || (INDEX_BTREE == index.type)) {
- indexedFieldType = "";
- }
- else {
- indexedFieldType = elt.String();
- }
-
- // We know elt.fieldname() == node->path().
- MatchExpression::MatchType exprtype = node->matchType();
-
- if (indexedFieldType.empty()) {
- // Can't check for null w/a sparse index.
- if (exprtype == MatchExpression::EQ && index.sparse) {
- const EqualityMatchExpression* expr
- = static_cast<const EqualityMatchExpression*>(node);
- if (expr->getData().isNull()) {
- return false;
- }
- }
+ // We know elt.fieldname() == node->path().
+ MatchExpression::MatchType exprtype = node->matchType();
- // Can't check for $in w/ null element w/a sparse index.
- if (exprtype == MatchExpression::MATCH_IN && index.sparse) {
- const InMatchExpression* expr = static_cast<const InMatchExpression*>(node);
- if (expr->getData().hasNull()) {
- return false;
- }
+ if (indexedFieldType.empty()) {
+ // Can't check for null w/a sparse index.
+ if (exprtype == MatchExpression::EQ && index.sparse) {
+ const EqualityMatchExpression* expr = static_cast<const EqualityMatchExpression*>(node);
+ if (expr->getData().isNull()) {
+ return false;
}
+ }
- // We can't use a btree-indexed field for geo expressions.
- if (exprtype == MatchExpression::GEO || exprtype == MatchExpression::GEO_NEAR) {
+ // Can't check for $in w/ null element w/a sparse index.
+ if (exprtype == MatchExpression::MATCH_IN && index.sparse) {
+ const InMatchExpression* expr = static_cast<const InMatchExpression*>(node);
+ if (expr->getData().hasNull()) {
return false;
}
+ }
- // There are restrictions on when we can use the index if
- // the expression is a NOT.
- if (exprtype == MatchExpression::NOT) {
- // Don't allow indexed NOT on special index types such as geo or text indices.
- if (INDEX_BTREE != index.type) {
- return false;
- }
-
- // Prevent negated preds from using sparse indices. Doing so would cause us to
- // miss documents which do not contain the indexed fields.
- if (index.sparse) {
- return false;
- }
-
- // Can't index negations of MOD, REGEX, TYPE_OPERATOR, or ELEM_MATCH_VALUE.
- MatchExpression::MatchType childtype = node->getChild(0)->matchType();
- if (MatchExpression::REGEX == childtype ||
- MatchExpression::MOD == childtype ||
- MatchExpression::TYPE_OPERATOR == childtype ||
- MatchExpression::ELEM_MATCH_VALUE == childtype) {
- return false;
- }
+ // We can't use a btree-indexed field for geo expressions.
+ if (exprtype == MatchExpression::GEO || exprtype == MatchExpression::GEO_NEAR) {
+ return false;
+ }
- // If it's a negated $in, it can't have any REGEX's inside.
- if (MatchExpression::MATCH_IN == childtype) {
- InMatchExpression* ime = static_cast<InMatchExpression*>(node->getChild(0));
- if (ime->getData().numRegexes() != 0) {
- return false;
- }
- }
+ // There are restrictions on when we can use the index if
+ // the expression is a NOT.
+ if (exprtype == MatchExpression::NOT) {
+ // Don't allow indexed NOT on special index types such as geo or text indices.
+ if (INDEX_BTREE != index.type) {
+ return false;
}
- // We can only index EQ using text indices. This is an artificial limitation imposed by
- // FTSSpec::getIndexPrefix() which will fail if there is not an EQ predicate on each
- // index prefix field of the text index.
- //
- // Example for key pattern {a: 1, b: "text"}:
- // - Allowed: node = {a: 7}
- // - Not allowed: node = {a: {$gt: 7}}
-
- if (INDEX_TEXT != index.type) {
- return true;
+ // Prevent negated preds from using sparse indices. Doing so would cause us to
+ // miss documents which do not contain the indexed fields.
+ if (index.sparse) {
+ return false;
}
- // If we're here we know it's a text index. Equalities are OK anywhere in a text index.
- if (MatchExpression::EQ == exprtype) {
- return true;
+ // Can't index negations of MOD, REGEX, TYPE_OPERATOR, or ELEM_MATCH_VALUE.
+ MatchExpression::MatchType childtype = node->getChild(0)->matchType();
+ if (MatchExpression::REGEX == childtype || MatchExpression::MOD == childtype ||
+ MatchExpression::TYPE_OPERATOR == childtype ||
+ MatchExpression::ELEM_MATCH_VALUE == childtype) {
+ return false;
}
- // Not-equalities can only go in a suffix field of an index kp. We look through the key
- // pattern to see if the field we're looking at now appears as a prefix. If so, we
- // can't use this index for it.
- BSONObjIterator specIt(index.keyPattern);
- while (specIt.more()) {
- BSONElement elt = specIt.next();
- // We hit the dividing mark between prefix and suffix, so whatever field we're
- // looking at is a suffix, since it appears *after* the dividing mark between the
- // two. As such, we can use the index.
- if (String == elt.type()) {
- return true;
- }
-
- // If we're here, we're still looking at prefix elements. We know that exprtype
- // isn't EQ so we can't use this index.
- if (node->path() == elt.fieldNameStringData()) {
+ // If it's a negated $in, it can't have any regexes inside.
+ if (MatchExpression::MATCH_IN == childtype) {
+ InMatchExpression* ime = static_cast<InMatchExpression*>(node->getChild(0));
+ if (ime->getData().numRegexes() != 0) {
return false;
}
}
+ }
- // NOTE: This shouldn't be reached. Text index implies there is a separator implies we
- // will always hit the 'return true' above.
- invariant(0);
+ // We can only index EQ using text indices. This is an artificial limitation imposed by
+ // FTSSpec::getIndexPrefix() which will fail if there is not an EQ predicate on each
+ // index prefix field of the text index.
+ //
+ // Example for key pattern {a: 1, b: "text"}:
+ // - Allowed: node = {a: 7}
+ // - Not allowed: node = {a: {$gt: 7}}
+
+ if (INDEX_TEXT != index.type) {
return true;
}
- else if (IndexNames::HASHED == indexedFieldType) {
- return exprtype == MatchExpression::MATCH_IN || exprtype == MatchExpression::EQ;
+
+ // If we're here we know it's a text index. Equalities are OK anywhere in a text index.
+ if (MatchExpression::EQ == exprtype) {
+ return true;
}
- else if (IndexNames::GEO_2DSPHERE == indexedFieldType) {
- if (exprtype == MatchExpression::GEO) {
- // within or intersect.
- GeoMatchExpression* gme = static_cast<GeoMatchExpression*>(node);
- const GeoExpression& gq = gme->getGeoExpression();
- const GeometryContainer& gc = gq.getGeometry();
- return gc.hasS2Region();
+
+ // Not-equalities can only go in a suffix field of an index kp. We look through the key
+ // pattern to see if the field we're looking at now appears as a prefix. If so, we
+ // can't use this index for it.
+ BSONObjIterator specIt(index.keyPattern);
+ while (specIt.more()) {
+ BSONElement elt = specIt.next();
+ // We hit the dividing mark between prefix and suffix, so whatever field we're
+ // looking at is a suffix, since it appears *after* the dividing mark between the
+ // two. As such, we can use the index.
+ if (String == elt.type()) {
+ return true;
}
- else if (exprtype == MatchExpression::GEO_NEAR) {
- GeoNearMatchExpression* gnme = static_cast<GeoNearMatchExpression*>(node);
- // Make sure the near query is compatible with 2dsphere.
- return gnme->getData().centroid->crs == SPHERE;
+
+ // If we're here, we're still looking at prefix elements. We know that exprtype
+ // isn't EQ so we can't use this index.
+ if (node->path() == elt.fieldNameStringData()) {
+ return false;
}
- return false;
}
- else if (IndexNames::GEO_2D == indexedFieldType) {
- if (exprtype == MatchExpression::GEO_NEAR) {
- GeoNearMatchExpression* gnme = static_cast<GeoNearMatchExpression*>(node);
- // Make sure the near query is compatible with 2d index
- return gnme->getData().centroid->crs == FLAT || !gnme->getData().isWrappingQuery;
+
+ // NOTE: This shouldn't be reached. A text index implies there is a separator, which
+ // implies we will always hit the 'return true' above.
+ invariant(0);
+ return true;
+ } else if (IndexNames::HASHED == indexedFieldType) {
+ return exprtype == MatchExpression::MATCH_IN || exprtype == MatchExpression::EQ;
+ } else if (IndexNames::GEO_2DSPHERE == indexedFieldType) {
+ if (exprtype == MatchExpression::GEO) {
+ // within or intersect.
+ GeoMatchExpression* gme = static_cast<GeoMatchExpression*>(node);
+ const GeoExpression& gq = gme->getGeoExpression();
+ const GeometryContainer& gc = gq.getGeometry();
+ return gc.hasS2Region();
+ } else if (exprtype == MatchExpression::GEO_NEAR) {
+ GeoNearMatchExpression* gnme = static_cast<GeoNearMatchExpression*>(node);
+ // Make sure the near query is compatible with 2dsphere.
+ return gnme->getData().centroid->crs == SPHERE;
+ }
+ return false;
+ } else if (IndexNames::GEO_2D == indexedFieldType) {
+ if (exprtype == MatchExpression::GEO_NEAR) {
+ GeoNearMatchExpression* gnme = static_cast<GeoNearMatchExpression*>(node);
+ // Make sure the near query is compatible with a 2d index.
+ return gnme->getData().centroid->crs == FLAT || !gnme->getData().isWrappingQuery;
+ } else if (exprtype == MatchExpression::GEO) {
+ // 2d only supports within.
+ GeoMatchExpression* gme = static_cast<GeoMatchExpression*>(node);
+ const GeoExpression& gq = gme->getGeoExpression();
+ if (GeoExpression::WITHIN != gq.getPred()) {
+ return false;
}
- else if (exprtype == MatchExpression::GEO) {
- // 2d only supports within.
- GeoMatchExpression* gme = static_cast<GeoMatchExpression*>(node);
- const GeoExpression& gq = gme->getGeoExpression();
- if (GeoExpression::WITHIN != gq.getPred()) {
- return false;
- }
- const GeometryContainer& gc = gq.getGeometry();
+ const GeometryContainer& gc = gq.getGeometry();
- // 2d indices require an R2 covering
- if (gc.hasR2Region()) {
- return true;
- }
+ // 2d indices require an R2 covering
+ if (gc.hasR2Region()) {
+ return true;
+ }
- const CapWithCRS* cap = gc.getCapGeometryHack();
+ const CapWithCRS* cap = gc.getCapGeometryHack();
- // 2d indices can answer centerSphere queries.
- if (NULL == cap) {
- return false;
- }
+ // 2d indices can answer centerSphere queries.
+ if (NULL == cap) {
+ return false;
+ }
- verify(SPHERE == cap->crs);
- const Circle& circle = cap->circle;
+ verify(SPHERE == cap->crs);
+ const Circle& circle = cap->circle;
- // No wrapping around the edge of the world is allowed in 2d centerSphere.
- return twoDWontWrap(circle, index);
- }
- return false;
- }
- else if (IndexNames::TEXT == indexedFieldType) {
- return (exprtype == MatchExpression::TEXT);
- }
- else if (IndexNames::GEO_HAYSTACK == indexedFieldType) {
- return false;
- }
- else {
- warning() << "Unknown indexing for node " << node->toString()
- << " and field " << elt.toString() << endl;
- verify(0);
+ // No wrapping around the edge of the world is allowed in 2d centerSphere.
+ return twoDWontWrap(circle, index);
}
+ return false;
+ } else if (IndexNames::TEXT == indexedFieldType) {
+ return (exprtype == MatchExpression::TEXT);
+ } else if (IndexNames::GEO_HAYSTACK == indexedFieldType) {
+ return false;
+ } else {
+ warning() << "Unknown indexing for node " << node->toString() << " and field "
+ << elt.toString() << endl;
+ verify(0);
+ }
+}
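// Illustrative sketch (simplified and standalone, not the MongoDB API): the text-index
// rule applied above. An equality may use any field of a text index, while any other
// predicate may only use fields appearing *after* the "text" element in the key pattern.
// Key patterns are modelled as (field, spec) pairs; all names are hypothetical.
#include <string>
#include <utility>
#include <vector>

bool textIndexFieldUsable(const std::vector<std::pair<std::string, std::string>>& keyPattern,
                          const std::string& path,
                          bool isEquality) {
    if (isEquality) {
        return true;  // Equalities are OK anywhere in a text index.
    }
    for (const auto& elt : keyPattern) {
        if (elt.second == "text") {
            return true;  // 'path' was not a prefix field, so it must be a suffix field.
        }
        if (elt.first == path) {
            return false;  // Non-equality over a prefix field: the index can't be used.
        }
    }
    return false;  // Unreachable for a well-formed text index key pattern.
}

// For key pattern {a: 1, b: "text", c: 1}: {a: 7} is usable, {a: {$gt: 7}} is not,
// and {c: {$gt: 7}} is, because 'c' is a suffix field.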
+
+// static
+void QueryPlannerIXSelect::rateIndices(MatchExpression* node,
+ string prefix,
+ const vector<IndexEntry>& indices) {
+ // Do not traverse tree beyond logical NOR node
+ MatchExpression::MatchType exprtype = node->matchType();
+ if (exprtype == MatchExpression::NOR) {
+ return;
}
- // static
- void QueryPlannerIXSelect::rateIndices(MatchExpression* node,
- string prefix,
- const vector<IndexEntry>& indices) {
- // Do not traverse tree beyond logical NOR node
- MatchExpression::MatchType exprtype = node->matchType();
- if (exprtype == MatchExpression::NOR) {
- return;
- }
-
- // Every indexable node is tagged even when no compatible index is
- // available.
- if (Indexability::isBoundsGenerating(node)) {
- string fullPath;
- if (MatchExpression::NOT == node->matchType()) {
- fullPath = prefix + node->getChild(0)->path().toString();
- }
- else {
- fullPath = prefix + node->path().toString();
- }
+ // Every indexable node is tagged even when no compatible index is
+ // available.
+ if (Indexability::isBoundsGenerating(node)) {
+ string fullPath;
+ if (MatchExpression::NOT == node->matchType()) {
+ fullPath = prefix + node->getChild(0)->path().toString();
+ } else {
+ fullPath = prefix + node->path().toString();
+ }
- verify(NULL == node->getTag());
- RelevantTag* rt = new RelevantTag();
- node->setTag(rt);
- rt->path = fullPath;
+ verify(NULL == node->getTag());
+ RelevantTag* rt = new RelevantTag();
+ node->setTag(rt);
+ rt->path = fullPath;
- // TODO: This is slow, with all the string compares.
- for (size_t i = 0; i < indices.size(); ++i) {
- BSONObjIterator it(indices[i].keyPattern);
- BSONElement elt = it.next();
+ // TODO: This is slow, with all the string compares.
+ for (size_t i = 0; i < indices.size(); ++i) {
+ BSONObjIterator it(indices[i].keyPattern);
+ BSONElement elt = it.next();
+ if (elt.fieldName() == fullPath && compatible(elt, indices[i], node)) {
+ rt->first.push_back(i);
+ }
+ while (it.more()) {
+ elt = it.next();
if (elt.fieldName() == fullPath && compatible(elt, indices[i], node)) {
- rt->first.push_back(i);
- }
- while (it.more()) {
- elt = it.next();
- if (elt.fieldName() == fullPath && compatible(elt, indices[i], node)) {
- rt->notFirst.push_back(i);
- }
+ rt->notFirst.push_back(i);
}
}
+ }
- // If this is a NOT, we have to clone the tag and attach
- // it to the NOT's child.
- if (MatchExpression::NOT == node->matchType()) {
- RelevantTag* childRt = static_cast<RelevantTag*>(rt->clone());
- childRt->path = rt->path;
- node->getChild(0)->setTag(childRt);
- }
+ // If this is a NOT, we have to clone the tag and attach
+ // it to the NOT's child.
+ if (MatchExpression::NOT == node->matchType()) {
+ RelevantTag* childRt = static_cast<RelevantTag*>(rt->clone());
+ childRt->path = rt->path;
+ node->getChild(0)->setTag(childRt);
}
- else if (Indexability::arrayUsesIndexOnChildren(node)) {
- // See comment in getFields about all/elemMatch and paths.
- if (!node->path().empty()) {
- prefix += node->path().toString() + ".";
- }
- for (size_t i = 0; i < node->numChildren(); ++i) {
- rateIndices(node->getChild(i), prefix, indices);
- }
+ } else if (Indexability::arrayUsesIndexOnChildren(node)) {
+ // See comment in getFields about all/elemMatch and paths.
+ if (!node->path().empty()) {
+ prefix += node->path().toString() + ".";
}
- else if (node->isLogical()) {
- for (size_t i = 0; i < node->numChildren(); ++i) {
- rateIndices(node->getChild(i), prefix, indices);
- }
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ rateIndices(node->getChild(i), prefix, indices);
+ }
+ } else if (node->isLogical()) {
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ rateIndices(node->getChild(i), prefix, indices);
}
}
+}
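// Illustrative sketch (simplified and standalone, not the MongoDB types above): what a
// RelevantTag ends up holding for a single predicate path, ignoring the compatible()
// check that the real code also performs. 'first' holds indices whose leading field
// matches the path; 'notFirst' holds indices where the path appears in a trailing
// position. All names are hypothetical.
#include <cstddef>
#include <string>
#include <vector>

struct SimpleRelevantTag {
    std::vector<std::size_t> first;
    std::vector<std::size_t> notFirst;
};

SimpleRelevantTag ratePath(const std::string& path,
                           const std::vector<std::vector<std::string>>& keyPatterns) {
    SimpleRelevantTag tag;
    for (std::size_t i = 0; i < keyPatterns.size(); ++i) {
        const std::vector<std::string>& kp = keyPatterns[i];
        if (!kp.empty() && kp.front() == path) {
            tag.first.push_back(i);
        }
        for (std::size_t j = 1; j < kp.size(); ++j) {
            if (kp[j] == path) {
                tag.notFirst.push_back(i);
            }
        }
    }
    return tag;
}

// ratePath("b", {{"b", "c"}, {"a", "b"}}) yields first = [0], notFirst = [1].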
- // static
- void QueryPlannerIXSelect::stripInvalidAssignments(MatchExpression* node,
- const vector<IndexEntry>& indices) {
+// static
+void QueryPlannerIXSelect::stripInvalidAssignments(MatchExpression* node,
+ const vector<IndexEntry>& indices) {
+ stripInvalidAssignmentsToTextIndexes(node, indices);
- stripInvalidAssignmentsToTextIndexes(node, indices);
+ if (MatchExpression::GEO != node->matchType() &&
+ MatchExpression::GEO_NEAR != node->matchType()) {
+ stripInvalidAssignmentsTo2dsphereIndices(node, indices);
+ }
+}
- if (MatchExpression::GEO != node->matchType() &&
- MatchExpression::GEO_NEAR != node->matchType()) {
+namespace {
- stripInvalidAssignmentsTo2dsphereIndices(node, indices);
- }
+/**
+ * For every node in the subtree rooted at 'node' that has a RelevantTag, removes index
+ * assignments from that tag.
+ *
+ * Used as a helper for stripUnneededAssignments().
+ */
+void clearAssignments(MatchExpression* node) {
+ if (node->getTag()) {
+ RelevantTag* rt = static_cast<RelevantTag*>(node->getTag());
+ rt->first.clear();
+ rt->notFirst.clear();
}
- namespace {
-
- /**
- * For every node in the subtree rooted at 'node' that has a RelevantTag, removes index
- * assignments from that tag.
- *
- * Used as a helper for stripUnneededAssignments().
- */
- void clearAssignments(MatchExpression* node) {
- if (node->getTag()) {
- RelevantTag* rt = static_cast<RelevantTag*>(node->getTag());
- rt->first.clear();
- rt->notFirst.clear();
- }
+ for (size_t i = 0; i < node->numChildren(); i++) {
+ clearAssignments(node->getChild(i));
+ }
+}
- for (size_t i = 0; i < node->numChildren(); i++) {
- clearAssignments(node->getChild(i));
+} // namespace
+
+// static
+void QueryPlannerIXSelect::stripUnneededAssignments(MatchExpression* node,
+ const std::vector<IndexEntry>& indices) {
+ if (MatchExpression::AND == node->matchType()) {
+ for (size_t i = 0; i < node->numChildren(); i++) {
+ MatchExpression* child = node->getChild(i);
+
+ if (MatchExpression::EQ != child->matchType()) {
+ continue;
}
- }
- } // namespace
+ if (!child->getTag()) {
+ continue;
+ }
- // static
- void QueryPlannerIXSelect::stripUnneededAssignments(MatchExpression* node,
- const std::vector<IndexEntry>& indices) {
- if (MatchExpression::AND == node->matchType()) {
- for (size_t i = 0; i < node->numChildren(); i++) {
- MatchExpression* child = node->getChild(i);
+ // We found an EQ child of an AND which is tagged.
+ RelevantTag* rt = static_cast<RelevantTag*>(child->getTag());
- if (MatchExpression::EQ != child->matchType()) {
- continue;
- }
+ // Look through all of the indices for which this predicate can be answered with
+ // the leading field of the index.
+ for (std::vector<size_t>::const_iterator i = rt->first.begin(); i != rt->first.end();
+ ++i) {
+ size_t index = *i;
- if (!child->getTag()) {
- continue;
- }
+ if (indices[index].unique && 1 == indices[index].keyPattern.nFields()) {
+ // Found an EQ predicate which can use a single-field unique index.
+ // Clear assignments from the entire tree, and add back a single assignment
+ // for 'child' to the unique index.
+ clearAssignments(node);
+ RelevantTag* newRt = static_cast<RelevantTag*>(child->getTag());
+ newRt->first.push_back(index);
- // We found a EQ child of an AND which is tagged.
- RelevantTag* rt = static_cast<RelevantTag*>(child->getTag());
-
- // Look through all of the indices for which this predicate can be answered with
- // the leading field of the index.
- for (std::vector<size_t>::const_iterator i = rt->first.begin();
- i != rt->first.end(); ++i) {
- size_t index = *i;
-
- if (indices[index].unique && 1 == indices[index].keyPattern.nFields()) {
- // Found an EQ predicate which can use a single-field unique index.
- // Clear assignments from the entire tree, and add back a single assignment
- // for 'child' to the unique index.
- clearAssignments(node);
- RelevantTag* newRt = static_cast<RelevantTag*>(child->getTag());
- newRt->first.push_back(index);
-
- // Tag state has been reset in the entire subtree at 'root'; nothing
- // else for us to do.
- return;
- }
+ // Tag state has been reset in the entire subtree rooted at 'node'; nothing
+ // else for us to do.
+ return;
}
}
}
+ }
- for (size_t i = 0; i < node->numChildren(); i++) {
- stripUnneededAssignments(node->getChild(i), indices);
- }
+ for (size_t i = 0; i < node->numChildren(); i++) {
+ stripUnneededAssignments(node->getChild(i), indices);
}
+}
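// Illustrative sketch (simplified and standalone, not the MongoDB types above): the
// pruning rule for an AND node. If any equality child can be answered by a single-field
// unique index, every other assignment is dropped so that only the point-lookup plan
// survives. All types and names are hypothetical stand-ins.
#include <cstddef>
#include <vector>

struct SimpleIndex {
    bool unique;
    int nFields;
};

struct AndChild {
    bool isEquality;
    std::vector<std::size_t> assignedIndices;  // indices usable via their leading field
};

void pruneForUniquePointLookup(std::vector<AndChild>& children,
                               const std::vector<SimpleIndex>& indices) {
    for (std::size_t c = 0; c < children.size(); ++c) {
        if (!children[c].isEquality) {
            continue;
        }
        for (std::size_t k = 0; k < children[c].assignedIndices.size(); ++k) {
            std::size_t idx = children[c].assignedIndices[k];
            if (indices[idx].unique && indices[idx].nFields == 1) {
                // Clear every assignment, then re-assign only this child to 'idx'.
                for (std::size_t j = 0; j < children.size(); ++j) {
                    children[j].assignedIndices.clear();
                }
                children[c].assignedIndices.push_back(idx);
                return;
            }
        }
    }
}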
- //
- // Helpers used by stripInvalidAssignments
- //
+//
+// Helpers used by stripInvalidAssignments
+//
- /**
- * Remove 'idx' from the RelevantTag lists for 'node'. 'node' must be a leaf.
- */
- static void removeIndexRelevantTag(MatchExpression* node, size_t idx) {
- RelevantTag* tag = static_cast<RelevantTag*>(node->getTag());
- verify(tag);
- vector<size_t>::iterator firstIt = std::find(tag->first.begin(),
- tag->first.end(),
- idx);
- if (firstIt != tag->first.end()) {
- tag->first.erase(firstIt);
- }
-
- vector<size_t>::iterator notFirstIt = std::find(tag->notFirst.begin(),
- tag->notFirst.end(),
- idx);
- if (notFirstIt != tag->notFirst.end()) {
- tag->notFirst.erase(notFirstIt);
- }
+/**
+ * Remove 'idx' from the RelevantTag lists for 'node'. 'node' must be a leaf.
+ */
+static void removeIndexRelevantTag(MatchExpression* node, size_t idx) {
+ RelevantTag* tag = static_cast<RelevantTag*>(node->getTag());
+ verify(tag);
+ vector<size_t>::iterator firstIt = std::find(tag->first.begin(), tag->first.end(), idx);
+ if (firstIt != tag->first.end()) {
+ tag->first.erase(firstIt);
}
- //
- // Text index quirks
- //
-
- /**
- * Traverse the subtree rooted at 'node' to remove invalid RelevantTag assignments to text index
- * 'idx', which has prefix paths 'prefixPaths'.
- */
- static void stripInvalidAssignmentsToTextIndex(MatchExpression* node,
- size_t idx,
- const unordered_set<StringData, StringData::Hasher>& prefixPaths) {
+ vector<size_t>::iterator notFirstIt =
+ std::find(tag->notFirst.begin(), tag->notFirst.end(), idx);
+ if (notFirstIt != tag->notFirst.end()) {
+ tag->notFirst.erase(notFirstIt);
+ }
+}
- // If we're here, there are prefixPaths and node is either:
- // 1. a text pred which we can't use as we have nothing over its prefix, or
- // 2. a non-text pred which we can't use as we don't have a text pred AND-related.
- if (Indexability::nodeCanUseIndexOnOwnField(node)) {
- removeIndexRelevantTag(node, idx);
- return;
- }
+//
+// Text index quirks
+//
- // Do not traverse tree beyond negation node.
- if (node->matchType() == MatchExpression::NOT
- || node->matchType() == MatchExpression::NOR) {
+/**
+ * Traverse the subtree rooted at 'node' to remove invalid RelevantTag assignments to text index
+ * 'idx', which has prefix paths 'prefixPaths'.
+ */
+static void stripInvalidAssignmentsToTextIndex(
+ MatchExpression* node,
+ size_t idx,
+ const unordered_set<StringData, StringData::Hasher>& prefixPaths) {
+ // If we're here, there are prefixPaths and node is either:
+ // 1. a text pred which we can't use as we have nothing over its prefix, or
+ // 2. a non-text pred which we can't use as we don't have a text pred AND-related.
+ if (Indexability::nodeCanUseIndexOnOwnField(node)) {
+ removeIndexRelevantTag(node, idx);
+ return;
+ }
- return;
- }
+ // Do not traverse tree beyond negation node.
+ if (node->matchType() == MatchExpression::NOT || node->matchType() == MatchExpression::NOR) {
+ return;
+ }
- // For anything to use a text index with prefixes, we require that:
- // 1. The text pred exists in an AND,
- // 2. The non-text preds that use the text index's prefixes are also in that AND.
+ // For anything to use a text index with prefixes, we require that:
+ // 1. The text pred exists in an AND,
+ // 2. The non-text preds that use the text index's prefixes are also in that AND.
- if (node->matchType() != MatchExpression::AND) {
- // It's an OR or some kind of array operator.
- for (size_t i = 0; i < node->numChildren(); ++i) {
- stripInvalidAssignmentsToTextIndex(node->getChild(i), idx, prefixPaths);
- }
- return;
+ if (node->matchType() != MatchExpression::AND) {
+ // It's an OR or some kind of array operator.
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ stripInvalidAssignmentsToTextIndex(node->getChild(i), idx, prefixPaths);
}
+ return;
+ }
- // If we're here, we're an AND. Determine whether the children satisfy the index prefix for
- // the text index.
- invariant(node->matchType() == MatchExpression::AND);
+ // If we're here, we're an AND. Determine whether the children satisfy the index prefix for
+ // the text index.
+ invariant(node->matchType() == MatchExpression::AND);
- bool hasText = false;
+ bool hasText = false;
- // The AND must have an EQ predicate for each prefix path. When we encounter a child with a
- // tag we remove it from childrenPrefixPaths. All children exist if this set is empty at
- // the end.
- unordered_set<StringData, StringData::Hasher> childrenPrefixPaths = prefixPaths;
+ // The AND must have an EQ predicate for each prefix path. When we encounter a tagged
+ // child we remove its path from childrenPrefixPaths. Every prefix path is covered if
+ // this set is empty at the end.
+ unordered_set<StringData, StringData::Hasher> childrenPrefixPaths = prefixPaths;
- for (size_t i = 0; i < node->numChildren(); ++i) {
- MatchExpression* child = node->getChild(i);
- RelevantTag* tag = static_cast<RelevantTag*>(child->getTag());
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ MatchExpression* child = node->getChild(i);
+ RelevantTag* tag = static_cast<RelevantTag*>(child->getTag());
- if (NULL == tag) {
- // 'child' could be a logical operator. Maybe there are some assignments hiding
- // inside.
- stripInvalidAssignmentsToTextIndex(child, idx, prefixPaths);
- continue;
- }
+ if (NULL == tag) {
+ // 'child' could be a logical operator. Maybe there are some assignments hiding
+ // inside.
+ stripInvalidAssignmentsToTextIndex(child, idx, prefixPaths);
+ continue;
+ }
- bool inFirst = tag->first.end() != std::find(tag->first.begin(),
- tag->first.end(),
- idx);
+ bool inFirst = tag->first.end() != std::find(tag->first.begin(), tag->first.end(), idx);
- bool inNotFirst = tag->notFirst.end() != std::find(tag->notFirst.begin(),
- tag->notFirst.end(),
- idx);
+ bool inNotFirst =
+ tag->notFirst.end() != std::find(tag->notFirst.begin(), tag->notFirst.end(), idx);
- if (inFirst || inNotFirst) {
- // Great! 'child' was assigned to our index.
- if (child->matchType() == MatchExpression::TEXT) {
- hasText = true;
- }
- else {
- childrenPrefixPaths.erase(child->path());
- // One fewer prefix we're looking for, possibly. Note that we could have a
- // suffix assignment on the index and wind up here. In this case the erase
- // above won't do anything since a suffix isn't a prefix.
- }
- }
- else {
- // Recurse on the children to ensure that they're not hiding any assignments
- // to idx.
- stripInvalidAssignmentsToTextIndex(child, idx, prefixPaths);
+ if (inFirst || inNotFirst) {
+ // Great! 'child' was assigned to our index.
+ if (child->matchType() == MatchExpression::TEXT) {
+ hasText = true;
+ } else {
+ childrenPrefixPaths.erase(child->path());
+ // One fewer prefix we're looking for, possibly. Note that we could have a
+ // suffix assignment on the index and wind up here. In this case the erase
+ // above won't do anything since a suffix isn't a prefix.
}
+ } else {
+ // Recurse on the children to ensure that they're not hiding any assignments
+ // to idx.
+ stripInvalidAssignmentsToTextIndex(child, idx, prefixPaths);
}
+ }
- // Our prereqs for using the text index were not satisfied so we remove the assignments from
- // all children of the AND.
- if (!hasText || !childrenPrefixPaths.empty()) {
- for (size_t i = 0; i < node->numChildren(); ++i) {
- stripInvalidAssignmentsToTextIndex(node->getChild(i), idx, prefixPaths);
- }
+ // Our prereqs for using the text index were not satisfied so we remove the assignments from
+ // all children of the AND.
+ if (!hasText || !childrenPrefixPaths.empty()) {
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ stripInvalidAssignmentsToTextIndex(node->getChild(i), idx, prefixPaths);
}
}
+}
- // static
- void QueryPlannerIXSelect::stripInvalidAssignmentsToTextIndexes(
- MatchExpression* node,
- const vector<IndexEntry>& indices) {
+// static
+void QueryPlannerIXSelect::stripInvalidAssignmentsToTextIndexes(MatchExpression* node,
+ const vector<IndexEntry>& indices) {
+ for (size_t i = 0; i < indices.size(); ++i) {
+ const IndexEntry& index = indices[i];
- for (size_t i = 0; i < indices.size(); ++i) {
- const IndexEntry& index = indices[i];
+ // We only care about text indices.
+ if (INDEX_TEXT != index.type) {
+ continue;
+ }
- // We only care about text indices.
- if (INDEX_TEXT != index.type) {
- continue;
- }
+ // Gather the set of paths that comprise the index prefix for this text index.
+ // Each of those paths must have an equality assignment, otherwise we can't assign
+ // *anything* to this index.
+ unordered_set<StringData, StringData::Hasher> textIndexPrefixPaths;
+ BSONObjIterator it(index.keyPattern);
- // Gather the set of paths that comprise the index prefix for this text index.
- // Each of those paths must have an equality assignment, otherwise we can't assign
- // *anything* to this index.
- unordered_set<StringData, StringData::Hasher> textIndexPrefixPaths;
- BSONObjIterator it(index.keyPattern);
-
- // We stop when we see the first string in the key pattern. We know that
- // the prefix precedes "text".
- for (BSONElement elt = it.next(); elt.type() != String; elt = it.next()) {
- textIndexPrefixPaths.insert(elt.fieldName());
- verify(it.more());
- }
+ // We stop when we see the first string in the key pattern. We know that
+ // the prefix precedes "text".
+ for (BSONElement elt = it.next(); elt.type() != String; elt = it.next()) {
+ textIndexPrefixPaths.insert(elt.fieldName());
+ verify(it.more());
+ }
- // If the index prefix is non-empty, remove invalid assignments to it.
- if (!textIndexPrefixPaths.empty()) {
- stripInvalidAssignmentsToTextIndex(node, i, textIndexPrefixPaths);
- }
+ // If the index prefix is non-empty, remove invalid assignments to it.
+ if (!textIndexPrefixPaths.empty()) {
+ stripInvalidAssignmentsToTextIndex(node, i, textIndexPrefixPaths);
}
}
+}
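// Illustrative sketch (simplified and standalone, not the MongoDB types above): the
// acceptance test applied to one AND node. Assignments to a text index with a non-empty
// prefix survive only when the AND contains the $text predicate and an equality over
// every prefix path. The types are hypothetical stand-ins for the tagged children.
#include <string>
#include <unordered_set>
#include <vector>

struct TaggedChild {
    bool isText;       // the $text predicate, assigned to the index
    bool isEquality;   // an equality predicate, assigned to the index
    std::string path;  // path of the predicate
};

bool andSatisfiesTextPrefix(const std::vector<TaggedChild>& children,
                            std::unordered_set<std::string> remainingPrefixPaths) {
    bool hasText = false;
    for (const TaggedChild& child : children) {
        if (child.isText) {
            hasText = true;
        } else if (child.isEquality) {
            remainingPrefixPaths.erase(child.path);  // one prefix requirement satisfied
        }
    }
    return hasText && remainingPrefixPaths.empty();
}

// For index {a: 1, b: "text"}: children {$text, a == 7} pass; {$text} alone or
// {a == 7} alone fail, so their assignments would be stripped.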
+
+//
+// 2dsphere V2 sparse quirks
+//
+
+static void stripInvalidAssignmentsTo2dsphereIndex(MatchExpression* node, size_t idx) {
+ if (Indexability::nodeCanUseIndexOnOwnField(node) &&
+ MatchExpression::GEO != node->matchType() &&
+ MatchExpression::GEO_NEAR != node->matchType()) {
+ // We found a non-geo predicate tagged to use a V2 2dsphere index which is not
+ // and-related to a geo predicate that can use the index.
+ removeIndexRelevantTag(node, idx);
+ return;
+ }
- //
- // 2dsphere V2 sparse quirks
- //
+ const MatchExpression::MatchType nodeType = node->matchType();
- static void stripInvalidAssignmentsTo2dsphereIndex(MatchExpression* node, size_t idx) {
+ // Don't bother peeking inside of negations.
+ if (MatchExpression::NOT == nodeType || MatchExpression::NOR == nodeType) {
+ return;
+ }
- if (Indexability::nodeCanUseIndexOnOwnField(node)
- && MatchExpression::GEO != node->matchType()
- && MatchExpression::GEO_NEAR != node->matchType()) {
- // We found a non-geo predicate tagged to use a V2 2dsphere index which is not
- // and-related to a geo predicate that can use the index.
- removeIndexRelevantTag(node, idx);
- return;
+ if (MatchExpression::AND != nodeType) {
+ // It's an OR or some kind of array operator.
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ stripInvalidAssignmentsTo2dsphereIndex(node->getChild(i), idx);
}
+ return;
+ }
- const MatchExpression::MatchType nodeType = node->matchType();
+ bool hasGeoField = false;
- // Don't bother peeking inside of negations.
- if (MatchExpression::NOT == nodeType || MatchExpression::NOR == nodeType) {
- return;
- }
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ MatchExpression* child = node->getChild(i);
+ RelevantTag* tag = static_cast<RelevantTag*>(child->getTag());
- if (MatchExpression::AND != nodeType) {
- // It's an OR or some kind of array operator.
- for (size_t i = 0; i < node->numChildren(); ++i) {
- stripInvalidAssignmentsTo2dsphereIndex(node->getChild(i), idx);
- }
- return;
+ if (NULL == tag) {
+ // 'child' could be a logical operator. Maybe there are some assignments hiding
+ // inside.
+ stripInvalidAssignmentsTo2dsphereIndex(child, idx);
+ continue;
}
- bool hasGeoField = false;
+ bool inFirst = tag->first.end() != std::find(tag->first.begin(), tag->first.end(), idx);
- for (size_t i = 0; i < node->numChildren(); ++i) {
- MatchExpression* child = node->getChild(i);
- RelevantTag* tag = static_cast<RelevantTag*>(child->getTag());
+ bool inNotFirst =
+ tag->notFirst.end() != std::find(tag->notFirst.begin(), tag->notFirst.end(), idx);
- if (NULL == tag) {
- // 'child' could be a logical operator. Maybe there are some assignments hiding
- // inside.
- stripInvalidAssignmentsTo2dsphereIndex(child, idx);
- continue;
- }
-
- bool inFirst = tag->first.end() != std::find(tag->first.begin(),
- tag->first.end(),
- idx);
-
- bool inNotFirst = tag->notFirst.end() != std::find(tag->notFirst.begin(),
- tag->notFirst.end(),
- idx);
-
- // If there is an index assignment...
- if (inFirst || inNotFirst) {
- // And it's a geo predicate...
- if (MatchExpression::GEO == child->matchType() ||
- MatchExpression::GEO_NEAR == child->matchType()) {
-
- hasGeoField = true;
- }
- }
- else {
- // Recurse on the children to ensure that they're not hiding any assignments
- // to idx.
- stripInvalidAssignmentsTo2dsphereIndex(child, idx);
+ // If there is an index assignment...
+ if (inFirst || inNotFirst) {
+ // And it's a geo predicate...
+ if (MatchExpression::GEO == child->matchType() ||
+ MatchExpression::GEO_NEAR == child->matchType()) {
+ hasGeoField = true;
}
+ } else {
+ // Recurse on the children to ensure that they're not hiding any assignments
+ // to idx.
+ stripInvalidAssignmentsTo2dsphereIndex(child, idx);
}
+ }
- // If there isn't a geo predicate our results aren't a subset of what's in the geo index, so
- // if we use the index we'll miss results.
- if (!hasGeoField) {
- for (size_t i = 0; i < node->numChildren(); ++i) {
- stripInvalidAssignmentsTo2dsphereIndex(node->getChild(i), idx);
- }
+ // If there isn't a geo predicate our results aren't a subset of what's in the geo index, so
+ // if we use the index we'll miss results.
+ if (!hasGeoField) {
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ stripInvalidAssignmentsTo2dsphereIndex(node->getChild(i), idx);
}
}
+}
- // static
- void QueryPlannerIXSelect::stripInvalidAssignmentsTo2dsphereIndices(
- MatchExpression* node,
- const vector<IndexEntry>& indices) {
-
- for (size_t i = 0; i < indices.size(); ++i) {
- const IndexEntry& index = indices[i];
+// static
+void QueryPlannerIXSelect::stripInvalidAssignmentsTo2dsphereIndices(
+ MatchExpression* node, const vector<IndexEntry>& indices) {
+ for (size_t i = 0; i < indices.size(); ++i) {
+ const IndexEntry& index = indices[i];
- // We only worry about 2dsphere indices.
- if (INDEX_2DSPHERE != index.type) {
- continue;
- }
+ // We only worry about 2dsphere indices.
+ if (INDEX_2DSPHERE != index.type) {
+ continue;
+ }
- // They also have to be V2. Both ignore the sparse flag but V1 is
- // never-sparse, V2 geo-sparse.
- BSONElement elt = index.infoObj["2dsphereIndexVersion"];
- if (elt.eoo()) {
- continue;
- }
- if (!elt.isNumber()) {
- continue;
- }
- if (2 != elt.numberInt()) {
- continue;
- }
+ // They also have to be V2. Both versions ignore the sparse flag, but V1 indices are
+ // never sparse while V2 indices are geo-sparse.
+ BSONElement elt = index.infoObj["2dsphereIndexVersion"];
+ if (elt.eoo()) {
+ continue;
+ }
+ if (!elt.isNumber()) {
+ continue;
+ }
+ if (2 != elt.numberInt()) {
+ continue;
+ }
- // If every field is geo don't bother doing anything.
- bool allFieldsGeo = true;
- BSONObjIterator it(index.keyPattern);
- while (it.more()) {
- BSONElement elt = it.next();
- if (String != elt.type()) {
- allFieldsGeo = false;
- break;
- }
- }
- if (allFieldsGeo) {
- continue;
+ // If every field is geo don't bother doing anything.
+ bool allFieldsGeo = true;
+ BSONObjIterator it(index.keyPattern);
+ while (it.more()) {
+ BSONElement elt = it.next();
+ if (String != elt.type()) {
+ allFieldsGeo = false;
+ break;
}
-
- // Remove bad assignments from this index.
- stripInvalidAssignmentsTo2dsphereIndex(node, i);
}
+ if (allFieldsGeo) {
+ continue;
+ }
+
+ // Remove bad assignments from this index.
+ stripInvalidAssignmentsTo2dsphereIndex(node, i);
}
+}
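// Illustrative sketch (simplified and standalone, not the MongoDB types above): the V2
// 2dsphere rule for one AND node. Non-geo assignments to a geo-sparse index are kept
// only when some sibling geo predicate is assigned to the same index; otherwise using
// the index could silently miss documents. Types are hypothetical stand-ins.
#include <vector>

struct AssignedPred {
    bool isGeo;            // GEO or GEO_NEAR predicate
    bool assignedToIndex;  // tagged to use the 2dsphere index in question
};

bool keepNonGeoAssignments(const std::vector<AssignedPred>& andChildren) {
    for (const AssignedPred& pred : andChildren) {
        if (pred.isGeo && pred.assignedToIndex) {
            return true;  // the results are a subset of what the geo index contains
        }
    }
    return false;  // no geo sibling assigned: using the index would miss documents
}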
} // namespace mongo
diff --git a/src/mongo/db/query/planner_ixselect.h b/src/mongo/db/query/planner_ixselect.h
index ad5912222a1..bbef9748d3a 100644
--- a/src/mongo/db/query/planner_ixselect.h
+++ b/src/mongo/db/query/planner_ixselect.h
@@ -34,151 +34,149 @@
namespace mongo {
+/**
+ * Methods for determining what fields and predicates can use indices.
+ */
+class QueryPlannerIXSelect {
+public:
/**
- * Methods for determining what fields and predicates can use indices.
+ * Return all the fields in the tree rooted at 'node' that we can use an index on
+ * in order to answer the query.
+ *
+ * The 'prefix' argument is a path prefix to be prepended to any fields mentioned in
+ * predicates encountered. Some array operators specify a path prefix.
*/
- class QueryPlannerIXSelect {
- public:
- /**
- * Return all the fields in the tree rooted at 'node' that we can use an index on
- * in order to answer the query.
- *
- * The 'prefix' argument is a path prefix to be prepended to any fields mentioned in
- * predicates encountered. Some array operators specify a path prefix.
- */
- static void getFields(const MatchExpression* node,
- std::string prefix,
- unordered_set<std::string>* out);
+ static void getFields(const MatchExpression* node,
+ std::string prefix,
+ unordered_set<std::string>* out);
- /**
- * Find all indices prefixed by fields we have predicates over. Only these indices are
- * useful in answering the query.
- */
- static void findRelevantIndices(const unordered_set<std::string>& fields,
- const std::vector<IndexEntry>& indices,
- std::vector<IndexEntry>* out);
+ /**
+ * Find all indices prefixed by fields we have predicates over. Only these indices are
+ * useful in answering the query.
+ */
+ static void findRelevantIndices(const unordered_set<std::string>& fields,
+ const std::vector<IndexEntry>& indices,
+ std::vector<IndexEntry>* out);
- /**
- * Return true if the index key pattern field 'elt' (which belongs to 'index') can be used
- * to answer the predicate 'node'.
- *
- * For example, {field: "hashed"} can only be used with sets of equalities.
- * {field: "2d"} can only be used with some geo predicates.
- * {field: "2dsphere"} can only be used with some other geo predicates.
- */
- static bool compatible(const BSONElement& elt,
- const IndexEntry& index,
- MatchExpression* node);
+ /**
+ * Return true if the index key pattern field 'elt' (which belongs to 'index') can be used
+ * to answer the predicate 'node'.
+ *
+ * For example, {field: "hashed"} can only be used with sets of equalities.
+ * {field: "2d"} can only be used with some geo predicates.
+ * {field: "2dsphere"} can only be used with some other geo predicates.
+ */
+ static bool compatible(const BSONElement& elt, const IndexEntry& index, MatchExpression* node);
+
+ /**
+ * Determine how useful all of our relevant 'indices' are to all predicates in the subtree
+ * rooted at 'node'. Affixes a RelevantTag to all predicate nodes which can use an index.
+ *
+ * 'prefix' is a path prefix that should be prepended to any path (certain array operators
+ * imply a path prefix).
+ *
+ * For an index to be useful to a predicate, the index must be compatible (see above).
+ *
+ * If an index is prefixed by the predicate's path, it's always useful.
+ *
+ * If an index is compound but not prefixed by a predicate's path, it's only useful if
+ * there exists another predicate that 1. will use that index and 2. is related to the
+ * original predicate by having an AND as a parent.
+ */
+ static void rateIndices(MatchExpression* node,
+ std::string prefix,
+ const std::vector<IndexEntry>& indices);
- /**
- * Determine how useful all of our relevant 'indices' are to all predicates in the subtree
- * rooted at 'node'. Affixes a RelevantTag to all predicate nodes which can use an index.
- *
- * 'prefix' is a path prefix that should be prepended to any path (certain array operators
- * imply a path prefix).
- *
- * For an index to be useful to a predicate, the index must be compatible (see above).
- *
- * If an index is prefixed by the predicate's path, it's always useful.
- *
- * If an index is compound but not prefixed by a predicate's path, it's only useful if
- * there exists another predicate that 1. will use that index and 2. is related to the
- * original predicate by having an AND as a parent.
- */
- static void rateIndices(MatchExpression* node,
- std::string prefix,
- const std::vector<IndexEntry>& indices);
+ /**
+ * Amend the RelevantTag lists for all predicates in the subtree rooted at 'node' to remove
+ * invalid assignments to text and geo indices.
+ *
+ * See the body of this function and the specific stripInvalidAssignments functions for details.
+ */
+ static void stripInvalidAssignments(MatchExpression* node,
+ const std::vector<IndexEntry>& indices);
- /**
- * Amend the RelevantTag lists for all predicates in the subtree rooted at 'node' to remove
- * invalid assignments to text and geo indices.
- *
- * See the body of this function and the specific stripInvalidAssignments functions for details.
- */
- static void stripInvalidAssignments(MatchExpression* node,
- const std::vector<IndexEntry>& indices);
+ /**
+ * In some special cases, we can strip most of the index assignments from the tree early
+ * on. Specifically, if we find an AND which has a child tagged for equality over a
+ * single-field unique index, then all other predicate-to-index assignments can be
+ * stripped off the subtree rooted at 'node'.
+ *
+ * This is used to ensure that we always favor key-value lookup plans over any
+ * more complex plan.
+ *
+ * Example:
+ * Suppose you have match expression OR (AND (a==1, b==2), AND (c==3, d==4)).
+ * There are indices on fields, 'a', 'b', 'c', and 'd'. The index on 'd' is
+ * the only unique index.
+ *
+ * This code will find that the subtree AND (c==3, d==4) can be answered by
+ * looking up the value of 'd' in the unique index. Since no better plan than
+ * a single key lookup is ever available, all assignments in this subtree
+ * are stripped, except for the assignment of d==4 to the unique 'd' index.
+ *
+ * Stripping the assignment for 'c' causes the planner to generate just two
+ * possible plans:
+ * 1) an OR of an index scan over 'a' and an index scan over 'd'
+ * 2) an OR of an index scan over 'b' and an index scan over 'd'
+ */
+ static void stripUnneededAssignments(MatchExpression* node,
+ const std::vector<IndexEntry>& indices);
- /**
- * In some special cases, we can strip most of the index assignments from the tree early
- * on. Specifically, if we find an AND which has a child tagged for equality over a
- * single-field unique index, then all other predicate-to-index assignments can be
- * stripped off the subtree rooted at 'node'.
- *
- * This is used to ensure that we always favor key-value lookup plans over any
- * more complex plan.
- *
- * Example:
- * Suppose you have match expression OR (AND (a==1, b==2), AND (c==3, d==4)).
- * There are indices on fields, 'a', 'b', 'c', and 'd'. The index on 'd' is
- * the only unique index.
- *
- * This code will find that the subtree AND (c==3, d==4) can be answered by
- * looking up the value of 'd' in the unique index. Since no better plan than
- * a single key lookup is ever available, all assignments in this subtree
- * are stripped, except for the assignment of d==4 to the unique 'd' index.
- *
- * Stripping the assignment for 'c' causes the planner to generate just two
- * possible plans:
- * 1) an OR of an index scan over 'a' and an index scan over 'd'
- * 2) an OR of an index scan over 'b' and an index scan over 'd'
- */
- static void stripUnneededAssignments(MatchExpression* node,
- const std::vector<IndexEntry>& indices);
+private:
+ /**
+ * Amend the RelevantTag lists for all predicates in the subtree rooted at 'node' to remove
+ * invalid assignments to text indexes.
+ *
+ * A predicate on a field from a compound text index with a non-empty index prefix
+ * (e.g. pred {a: 1, b: 1} on index {a: 1, b: 1, c: "text"}) is only considered valid to
+ * assign to the text index if it is a direct child of an AND with the following properties:
+ * - it has a TEXT child
+ * - for every index prefix component, it has an EQ child on that component's path
+ *
+ * Note that compatible() enforces the precondition that only EQ nodes are considered
+ * relevant to text index prefixes.
+ * If there is a relevant compound text index with a non-empty "index prefix" (e.g. the
+ * prefix {a: 1, b: 1} for the index {a: 1, b: 1, c: "text"}), amend the RelevantTag(s)
+ * created above to remove assignments to the text index where the query does not have
+ * predicates over each indexed field of the prefix.
+ *
+ * This is necessary because text indices do not obey the normal rules of sparseness, in
+ * that they generate no index keys for documents without indexable text data in at least
+ * one text field (in fact, text indices ignore the sparse option entirely). For example,
+ * given the text index {a: 1, b: 1, c: "text"}:
+ *
+ * - Document {a: 1, b: 6, c: "hello world"} generates 2 index keys
+ * - Document {a: 1, b: 7, c: {d: 1}} generates 0 index keys
+ * - Document {a: 1, b: 8} generates 0 index keys
+ *
+ * As a result, the query {a: 1} *cannot* be satisfied by the text index {a: 1, b: 1, c:
+ * "text"}, since documents without indexed text data would not be returned by the query.
+ * rateIndices() above will eagerly annotate the pred {a: 1} as relevant to the text index;
+ * those annotations get removed here.
+ */
+ static void stripInvalidAssignmentsToTextIndexes(MatchExpression* node,
+ const std::vector<IndexEntry>& indices);
- private:
- /**
- * Amend the RelevantTag lists for all predicates in the subtree rooted at 'node' to remove
- * invalid assignments to text indexes.
- *
- * A predicate on a field from a compound text index with a non-empty index prefix
- * (e.g. pred {a: 1, b: 1} on index {a: 1, b: 1, c: "text"}) is only considered valid to
- * assign to the text index if it is a direct child of an AND with the following properties:
- * - it has a TEXT child
- * - for every index prefix component, it has an EQ child on that component's path
- *
- * Note that compatible() enforces the precondition that only EQ nodes are considered
- * relevant to text index prefixes.
- * If there is a relevant compound text index with a non-empty "index prefix" (e.g. the
- * prefix {a: 1, b: 1} for the index {a: 1, b: 1, c: "text"}), amend the RelevantTag(s)
- * created above to remove assignments to the text index where the query does not have
- * predicates over each indexed field of the prefix.
- *
- * This is necessary because text indices do not obey the normal rules of sparseness, in
- * that they generate no index keys for documents without indexable text data in at least
- * one text field (in fact, text indices ignore the sparse option entirely). For example,
- * given the text index {a: 1, b: 1, c: "text"}:
- *
- * - Document {a: 1, b: 6, c: "hello world"} generates 2 index keys
- * - Document {a: 1, b: 7, c: {d: 1}} generates 0 index keys
- * - Document {a: 1, b: 8} generates 0 index keys
- *
- * As a result, the query {a: 1} *cannot* be satisfied by the text index {a: 1, b: 1, c:
- * "text"}, since documents without indexed text data would not be returned by the query.
- * rateIndices() above will eagerly annotate the pred {a: 1} as relevant to the text index;
- * those annotations get removed here.
- */
- static void stripInvalidAssignmentsToTextIndexes(MatchExpression* node,
+ /**
+ * For V1 2dsphere indices we ignore the sparse option. As such we can use an index
+ * like {nongeo: 1, geo: "2dsphere"} to answer queries only involving nongeo.
+ *
+ * V2 2dsphere indices also ignore the sparse flag, but their indexing behavior differs
+ * from V1. If all of the geo fields are missing from the document we do not index
+ * it. As such we cannot use V2 sparse indices unless we have a predicate over a geo
+ * field.
+ *
+ * 2dsphere indices V2 are "geo-sparse." That is, if there aren't any geo-indexed fields in
+ * a document it won't be indexed. As such we can't use an index like {foo:1, geo:
+ * "2dsphere"} to answer a query on 'foo' if the index is V2 as it will not contain the
+ * document {foo:1}.
+ *
+ * We *can* use it to answer a query on 'foo' if the predicate on 'foo' is AND-related to a
+ * predicate on every geo field in the index.
+ */
+ static void stripInvalidAssignmentsTo2dsphereIndices(MatchExpression* node,
const std::vector<IndexEntry>& indices);
-
- /**
- * For V1 2dsphere indices we ignore the sparse option. As such we can use an index
- * like {nongeo: 1, geo: "2dsphere"} to answer queries only involving nongeo.
- *
- * For V2 2dsphere indices also ignore the sparse flag but indexing behavior as compared to
- * V1 is different. If all of the geo fields are missing from the document we do not index
- * it. As such we cannot use V2 sparse indices unless we have a predicate over a geo
- * field.
- *
- * 2dsphere indices V2 are "geo-sparse." That is, if there aren't any geo-indexed fields in
- * a document it won't be indexed. As such we can't use an index like {foo:1, geo:
- * "2dsphere"} to answer a query on 'foo' if the index is V2 as it will not contain the
- * document {foo:1}.
- *
- * We *can* use it to answer a query on 'foo' if the predicate on 'foo' is AND-related to a
- * predicate on every geo field in the index.
- */
- static void stripInvalidAssignmentsTo2dsphereIndices(MatchExpression* node,
- const std::vector<IndexEntry>& indices);
- };
+};
} // namespace mongo
diff --git a/src/mongo/db/query/planner_ixselect_test.cpp b/src/mongo/db/query/planner_ixselect_test.cpp
index ba1f11e93d0..555a9db11ea 100644
--- a/src/mongo/db/query/planner_ixselect_test.cpp
+++ b/src/mongo/db/query/planner_ixselect_test.cpp
@@ -43,227 +43,230 @@ using namespace mongo;
namespace {
- using std::unique_ptr;
- using std::string;
- using std::vector;
+using std::unique_ptr;
+using std::string;
+using std::vector;
- /**
- * Utility function to create MatchExpression
- */
- MatchExpression* parseMatchExpression(const BSONObj& obj) {
- StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
- ASSERT_TRUE(status.isOK());
- MatchExpression* expr(status.getValue());
- return expr;
- }
+/**
+ * Utility function to create MatchExpression
+ */
+MatchExpression* parseMatchExpression(const BSONObj& obj) {
+ StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
+ ASSERT_TRUE(status.isOK());
+ MatchExpression* expr(status.getValue());
+ return expr;
+}
- /**
- * Utility function to join elements in iterator range with comma
- */
- template <typename Iter> string toString(Iter begin, Iter end) {
- mongoutils::str::stream ss;
- ss << "[";
- for (Iter i = begin; i != end; i++) {
- if (i != begin) {
- ss << " ";
- }
- ss << *i;
+/**
+ * Utility function to join elements in an iterator range into a bracketed, space-separated string
+ */
+template <typename Iter>
+string toString(Iter begin, Iter end) {
+ mongoutils::str::stream ss;
+ ss << "[";
+ for (Iter i = begin; i != end; i++) {
+ if (i != begin) {
+ ss << " ";
}
- ss << "]";
- return ss;
+ ss << *i;
}
+ ss << "]";
+ return ss;
+}
- /**
- * Test function for getFields()
- * Parses query string to obtain MatchExpression which is passed together with prefix
- * to QueryPlannerIXSelect::getFields()
- * Results are compared with expected fields (parsed from expectedFieldsStr)
- */
- void testGetFields(const char* query, const char* prefix, const char* expectedFieldsStr) {
- BSONObj obj = fromjson(query);
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- unordered_set<string> fields;
- QueryPlannerIXSelect::getFields(expr.get(), prefix, &fields);
-
- // Verify results
- // First, check that results contain a superset of expected fields.
- vector<string> expectedFields = StringSplitter::split(expectedFieldsStr, ",");
- for (vector<string>::const_iterator i = expectedFields.begin(); i != expectedFields.end();
- i++) {
- if (fields.find(*i) == fields.end()) {
- mongoutils::str::stream ss;
- ss << "getFields(query=" << query << ", prefix=" << prefix << "): unable to find "
- << *i << " in result: " << toString(fields.begin(), fields.end());
- FAIL(ss);
- }
- }
+/**
+ * Test function for getFields()
+ * Parses query string to obtain MatchExpression which is passed together with prefix
+ * to QueryPlannerIXSelect::getFields()
+ * Results are compared with expected fields (parsed from expectedFieldsStr)
+ */
+void testGetFields(const char* query, const char* prefix, const char* expectedFieldsStr) {
+ BSONObj obj = fromjson(query);
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ unordered_set<string> fields;
+ QueryPlannerIXSelect::getFields(expr.get(), prefix, &fields);
- // Next, confirm that results do not contain any unexpected fields.
- if (fields.size() != expectedFields.size()) {
+ // Verify results
+ // First, check that results contain a superset of expected fields.
+ vector<string> expectedFields = StringSplitter::split(expectedFieldsStr, ",");
+ for (vector<string>::const_iterator i = expectedFields.begin(); i != expectedFields.end();
+ i++) {
+ if (fields.find(*i) == fields.end()) {
mongoutils::str::stream ss;
- ss << "getFields(query=" << query << ", prefix=" << prefix
- << "): unexpected fields in result. expected: "
- << toString(expectedFields.begin(), expectedFields.end())
- << ". actual: " << toString(fields.begin(), fields.end());
+ ss << "getFields(query=" << query << ", prefix=" << prefix << "): unable to find " << *i
+ << " in result: " << toString(fields.begin(), fields.end());
FAIL(ss);
}
}
- /**
- * Basic test cases for getFields()
- * Includes logical operators
- */
- TEST(QueryPlannerIXSelectTest, GetFieldsBasic) {
- // Arguments to test function: query, prefix, comma-delimited list of expected fields
- testGetFields("{}", "", "");
- testGetFields("{a: 1}", "", "a");
- testGetFields("{a: 1}", "c.", "c.a");
- testGetFields("{a: 1, b: 1}", "", "a,b");
- testGetFields("{a: {$in: [1]}}", "", "a");
- testGetFields("{$or: [{a: 1}, {b: 1}]}", "", "a,b");
+ // Next, confirm that results do not contain any unexpected fields.
+ if (fields.size() != expectedFields.size()) {
+ mongoutils::str::stream ss;
+ ss << "getFields(query=" << query << ", prefix=" << prefix
+ << "): unexpected fields in result. expected: "
+ << toString(expectedFields.begin(), expectedFields.end())
+ << ". actual: " << toString(fields.begin(), fields.end());
+ FAIL(ss);
}
+}
- /**
- * Array test cases for getFields
- */
- TEST(QueryPlannerIXSelectTest, GetFieldsArray) {
- testGetFields("{a: {$elemMatch: {b: 1}}}", "", "a.b");
- testGetFields("{a: {$all: [{$elemMatch: {b: 1}}]}}", "", "a.b");
- }
+/**
+ * Basic test cases for getFields()
+ * Includes logical operators
+ */
+TEST(QueryPlannerIXSelectTest, GetFieldsBasic) {
+ // Arguments to test function: query, prefix, comma-delimited list of expected fields
+ testGetFields("{}", "", "");
+ testGetFields("{a: 1}", "", "a");
+ testGetFields("{a: 1}", "c.", "c.a");
+ testGetFields("{a: 1, b: 1}", "", "a,b");
+ testGetFields("{a: {$in: [1]}}", "", "a");
+ testGetFields("{$or: [{a: 1}, {b: 1}]}", "", "a,b");
+}
- /**
- * Negation test cases for getFields()
- * $ne, $nin, $nor
- */
- TEST(QueryPlannerIXSelectTest, GetFieldsNegation) {
- testGetFields("{a: {$ne: 1}}", "", "a");
- testGetFields("{a: {$nin: [1]}}", "", "a");
- testGetFields("{$nor: [{a: 1}, {b: 1}]}", "", "");
- testGetFields("{$and: [{a: 1}, {a: {$ne: 2}}]}", "", "a");
- }
+/**
+ * Array test cases for getFields
+ */
+TEST(QueryPlannerIXSelectTest, GetFieldsArray) {
+ testGetFields("{a: {$elemMatch: {b: 1}}}", "", "a.b");
+ testGetFields("{a: {$all: [{$elemMatch: {b: 1}}]}}", "", "a.b");
+}
- /**
- * Array negation test cases for getFields
- */
- TEST(QueryPlannerIXSelectTest, GetFieldsArrayNegation) {
- testGetFields("{a: {$elemMatch: {b: {$ne: 1}}}}", "", "a.b");
- testGetFields("{a: {$all: [{$elemMatch: {b: {$ne: 1}}}]}}", "", "a.b");
- }
+/**
+ * Negation test cases for getFields()
+ * $ne, $nin, $nor
+ */
+TEST(QueryPlannerIXSelectTest, GetFieldsNegation) {
+ testGetFields("{a: {$ne: 1}}", "", "a");
+ testGetFields("{a: {$nin: [1]}}", "", "a");
+ testGetFields("{$nor: [{a: 1}, {b: 1}]}", "", "");
+ testGetFields("{$and: [{a: 1}, {a: {$ne: 2}}]}", "", "a");
+}
- /**
- * Performs a pre-order traversal of expression tree. Validates
- * that all tagged nodes contain an instance of RelevantTag.
- */
- void findRelevantTaggedNodePaths(MatchExpression* root, vector<string>* paths) {
- MatchExpression::TagData* tag = root->getTag();
- if (tag) {
- StringBuilder buf;
- tag->debugString(&buf);
- RelevantTag* r = dynamic_cast<RelevantTag*>(tag);
- if (!r) {
- mongoutils::str::stream ss;
- ss << "tag is not instance of RelevantTag. tree: " << root->toString()
- << "; tag: " << buf.str();
- FAIL(ss);
- }
- paths->push_back(r->path);
- }
- for (size_t i = 0; i < root->numChildren(); ++i) {
- findRelevantTaggedNodePaths(root->getChild(i), paths);
+/**
+ * Array negation test cases for getFields
+ */
+TEST(QueryPlannerIXSelectTest, GetFieldsArrayNegation) {
+ testGetFields("{a: {$elemMatch: {b: {$ne: 1}}}}", "", "a.b");
+ testGetFields("{a: {$all: [{$elemMatch: {b: {$ne: 1}}}]}}", "", "a.b");
+}
+
+/**
+ * Performs a pre-order traversal of expression tree. Validates
+ * that all tagged nodes contain an instance of RelevantTag.
+ */
+void findRelevantTaggedNodePaths(MatchExpression* root, vector<string>* paths) {
+ MatchExpression::TagData* tag = root->getTag();
+ if (tag) {
+ StringBuilder buf;
+ tag->debugString(&buf);
+ RelevantTag* r = dynamic_cast<RelevantTag*>(tag);
+ if (!r) {
+ mongoutils::str::stream ss;
+ ss << "tag is not instance of RelevantTag. tree: " << root->toString()
+ << "; tag: " << buf.str();
+ FAIL(ss);
}
+ paths->push_back(r->path);
+ }
+ for (size_t i = 0; i < root->numChildren(); ++i) {
+ findRelevantTaggedNodePaths(root->getChild(i), paths);
}
-
- /**
- * Parses a MatchExpression from query string and passes that along with
- * prefix to rateIndices.
- * Verifies results against list of expected paths.
- * For now, we're only interested in which nodes are tagged.
- * In future, we may expand this test function to include
- * validate which indices are assigned to a node.
- */
- void testRateIndicesTaggedNodePaths(const char* query, const char* prefix,
- const char* expectedPathsStr) {
- // Parse and rate query. Some of the nodes in the rated tree
- // will be tagged after the rating process.
- BSONObj obj = fromjson(query);
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+}
- // Currently, we tag every indexable node even when no compatible
- // index is available. Hence, it is fine to pass an empty vector of
- // indices to rateIndices().
- vector<IndexEntry> indices;
- QueryPlannerIXSelect::rateIndices(expr.get(), prefix, indices);
+/**
+ * Parses a MatchExpression from query string and passes that along with
+ * prefix to rateIndices.
+ * Verifies results against list of expected paths.
+ * For now, we're only interested in which nodes are tagged.
+ * In the future, we may expand this test function to also
+ * validate which indices are assigned to a node.
+ */
+void testRateIndicesTaggedNodePaths(const char* query,
+ const char* prefix,
+ const char* expectedPathsStr) {
+ // Parse and rate query. Some of the nodes in the rated tree
+ // will be tagged after the rating process.
+ BSONObj obj = fromjson(query);
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- // Retrieve a list of paths embedded in
- // tagged nodes.
- vector<string> paths;
- findRelevantTaggedNodePaths(expr.get(), &paths);
+ // Currently, we tag every indexable node even when no compatible
+ // index is available. Hence, it is fine to pass an empty vector of
+ // indices to rateIndices().
+ vector<IndexEntry> indices;
+ QueryPlannerIXSelect::rateIndices(expr.get(), prefix, indices);
- // Compare with expected list of paths.
- // First verify number of paths retrieved.
- vector<string> expectedPaths = StringSplitter::split(expectedPathsStr, ",");
- if (paths.size() != expectedPaths.size()) {
- mongoutils::str::stream ss;
- ss << "rateIndices(query=" << query << ", prefix=" << prefix
- << "): unexpected number of tagged nodes found. expected: "
- << toString(expectedPaths.begin(), expectedPaths.end()) << ". actual: "
- << toString(paths.begin(), paths.end());
- FAIL(ss);
- }
+ // Retrieve a list of paths embedded in
+ // tagged nodes.
+ vector<string> paths;
+ findRelevantTaggedNodePaths(expr.get(), &paths);
- // Next, check that value and order of each element match between the two lists.
- for (vector<string>::const_iterator i = paths.begin(), j = expectedPaths.begin();
- i != paths.end(); i++, j++) {
- if (*i == *j) {
- continue;
- }
- mongoutils::str::stream ss;
- ss << "rateIndices(query=" << query << ", prefix=" << prefix
- << "): unexpected path found. expected: " << *j << " "
- << toString(expectedPaths.begin(), expectedPaths.end()) << ". actual: "
- << *i << " " << toString(paths.begin(), paths.end());
- FAIL(ss);
- }
+ // Compare with expected list of paths.
+ // First verify number of paths retrieved.
+ vector<string> expectedPaths = StringSplitter::split(expectedPathsStr, ",");
+ if (paths.size() != expectedPaths.size()) {
+ mongoutils::str::stream ss;
+ ss << "rateIndices(query=" << query << ", prefix=" << prefix
+ << "): unexpected number of tagged nodes found. expected: "
+ << toString(expectedPaths.begin(), expectedPaths.end())
+ << ". actual: " << toString(paths.begin(), paths.end());
+ FAIL(ss);
}
- /**
- * Basic test cases for rateIndices().
- * Includes logical operators.
- */
- TEST(QueryPlannerIXSelectTest, RateIndicesTaggedNodePathsBasic) {
- // Test arguments: query, prefix, comma-delimited list of expected paths
- testRateIndicesTaggedNodePaths("{}", "", "");
- testRateIndicesTaggedNodePaths("{a: 1}", "", "a");
- testRateIndicesTaggedNodePaths("{a: 1}", "c.", "c.a");
- testRateIndicesTaggedNodePaths("{a: 1, b: 1}", "", "a,b");
- testRateIndicesTaggedNodePaths("{a: {$in: [1]}}", "", "a");
- testRateIndicesTaggedNodePaths("{$or: [{a: 1}, {b: 1}]}", "", "a,b");
+ // Next, check that value and order of each element match between the two lists.
+ for (vector<string>::const_iterator i = paths.begin(), j = expectedPaths.begin();
+ i != paths.end();
+ i++, j++) {
+ if (*i == *j) {
+ continue;
+ }
+ mongoutils::str::stream ss;
+ ss << "rateIndices(query=" << query << ", prefix=" << prefix
+ << "): unexpected path found. expected: " << *j << " "
+ << toString(expectedPaths.begin(), expectedPaths.end()) << ". actual: " << *i << " "
+ << toString(paths.begin(), paths.end());
+ FAIL(ss);
}
+}
- /**
- * Array test cases for rateIndices().
- */
- TEST(QueryPlannerIXSelectTest, RateIndicesTaggedNodePathArray) {
- testRateIndicesTaggedNodePaths("{a: {$elemMatch: {b: 1}}}", "", "a.b");
- testRateIndicesTaggedNodePaths("{a: {$all: [{$elemMatch: {b: 1}}]}}", "", "a.b");
- }
+/**
+ * Basic test cases for rateIndices().
+ * Includes logical operators.
+ */
+TEST(QueryPlannerIXSelectTest, RateIndicesTaggedNodePathsBasic) {
+ // Test arguments: query, prefix, comma-delimited list of expected paths
+ testRateIndicesTaggedNodePaths("{}", "", "");
+ testRateIndicesTaggedNodePaths("{a: 1}", "", "a");
+ testRateIndicesTaggedNodePaths("{a: 1}", "c.", "c.a");
+ testRateIndicesTaggedNodePaths("{a: 1, b: 1}", "", "a,b");
+ testRateIndicesTaggedNodePaths("{a: {$in: [1]}}", "", "a");
+ testRateIndicesTaggedNodePaths("{$or: [{a: 1}, {b: 1}]}", "", "a,b");
+}
- /**
- * Negation test cases for rateIndices().
- */
- TEST(QueryPlannerIXSelectTest, RateIndicesTaggedNodePathsNegation) {
- testRateIndicesTaggedNodePaths("{a: {$ne: 1}}", "", "a,a");
- testRateIndicesTaggedNodePaths("{a: {$nin: [1]}}", "", "a,a");
- testRateIndicesTaggedNodePaths("{$nor: [{a: 1}, {b: 1}]}", "", "");
- testRateIndicesTaggedNodePaths("{$and: [{a: 1}, {a: {$ne: 2}}]}", "", "a,a,a");
- }
+/**
+ * Array test cases for rateIndices().
+ */
+TEST(QueryPlannerIXSelectTest, RateIndicesTaggedNodePathArray) {
+ testRateIndicesTaggedNodePaths("{a: {$elemMatch: {b: 1}}}", "", "a.b");
+ testRateIndicesTaggedNodePaths("{a: {$all: [{$elemMatch: {b: 1}}]}}", "", "a.b");
+}
- /**
- * Array negation test cases for rateIndices().
- */
- TEST(QueryPlannerIXSelectTest, RateIndicesTaggedNodePathArrayNegation) {
- testRateIndicesTaggedNodePaths("{a: {$elemMatch: {b: {$ne: 1}}}}", "", "a.b,a.b");
- testRateIndicesTaggedNodePaths("{a: {$all: [{$elemMatch: {b: {$ne: 1}}}]}}", "", "a.b,a.b");
- }
+/**
+ * Negation test cases for rateIndices().
+ */
+TEST(QueryPlannerIXSelectTest, RateIndicesTaggedNodePathsNegation) {
+ testRateIndicesTaggedNodePaths("{a: {$ne: 1}}", "", "a,a");
+ testRateIndicesTaggedNodePaths("{a: {$nin: [1]}}", "", "a,a");
+ testRateIndicesTaggedNodePaths("{$nor: [{a: 1}, {b: 1}]}", "", "");
+ testRateIndicesTaggedNodePaths("{$and: [{a: 1}, {a: {$ne: 2}}]}", "", "a,a,a");
+}
+
+/**
+ * Array negation test cases for rateIndices().
+ */
+TEST(QueryPlannerIXSelectTest, RateIndicesTaggedNodePathArrayNegation) {
+ testRateIndicesTaggedNodePaths("{a: {$elemMatch: {b: {$ne: 1}}}}", "", "a.b,a.b");
+ testRateIndicesTaggedNodePaths("{a: {$all: [{$elemMatch: {b: {$ne: 1}}}]}}", "", "a.b,a.b");
+}
} // namespace
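The comment on testRateIndicesTaggedNodePaths() notes that the helper may later be expanded to validate which indices are assigned to each node. A minimal sketch of that extension is shown below; it assumes only what this diff already exposes (RelevantTag's 'first' and 'notFirst' index-number vectors, which QueryPlanner consults later in this change), and the function name is hypothetical.

    // Sketch only: collect the index numbers that rateIndices() attached to each
    // tagged node, mirroring findRelevantTaggedNodePaths() above.
    void findRelevantTaggedNodeIndices(MatchExpression* root, vector<size_t>* indices) {
        if (RelevantTag* r = dynamic_cast<RelevantTag*>(root->getTag())) {
            // 'first' holds indices usable as the first field, 'notFirst' the rest.
            indices->insert(indices->end(), r->first.begin(), r->first.end());
            indices->insert(indices->end(), r->notFirst.begin(), r->notFirst.end());
        }
        for (size_t i = 0; i < root->numChildren(); ++i) {
            findRelevantTaggedNodeIndices(root->getChild(i), indices);
        }
    }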
diff --git a/src/mongo/db/query/query_knobs.cpp b/src/mongo/db/query/query_knobs.cpp
index fc8ade631c5..c73759cebd4 100644
--- a/src/mongo/db/query/query_knobs.cpp
+++ b/src/mongo/db/query/query_knobs.cpp
@@ -32,40 +32,40 @@
namespace mongo {
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlanEvaluationWorks, int, 10000);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlanEvaluationWorks, int, 10000);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlanEvaluationCollFraction, double, 0.3);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlanEvaluationCollFraction, double, 0.3);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlanEvaluationMaxResults, int, 101);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlanEvaluationMaxResults, int, 101);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheSize, int, 5000);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheSize, int, 5000);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheFeedbacksStored, int, 20);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheFeedbacksStored, int, 20);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheEvictionRatio, double, 10.0);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheEvictionRatio, double, 10.0);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheWriteOpsBetweenFlush, int, 1000);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheWriteOpsBetweenFlush, int, 1000);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlannerMaxIndexedSolutions, int, 64);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlannerMaxIndexedSolutions, int, 64);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryEnumerationMaxOrSolutions, int, 10);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryEnumerationMaxOrSolutions, int, 10);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryEnumerationMaxIntersectPerAnd, int, 3);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryEnumerationMaxIntersectPerAnd, int, 3);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryForceIntersectionPlans, bool, false);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryForceIntersectionPlans, bool, false);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlannerEnableIndexIntersection, bool, true);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlannerEnableIndexIntersection, bool, true);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlannerEnableHashIntersection, bool, false);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlannerEnableHashIntersection, bool, false);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlanOrChildrenIndependently, bool, true);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlanOrChildrenIndependently, bool, true);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryMaxScansToExplode, int, 200);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryMaxScansToExplode, int, 200);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryExecMaxBlockingSortBytes, int, 32 * 1024 * 1024);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryExecMaxBlockingSortBytes, int, 32 * 1024 * 1024);
- // Yield every 128 cycles or 10ms.
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryExecYieldIterations, int, 128);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryExecYieldPeriodMS, int, 10);
+// Yield every 128 cycles or 10ms.
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryExecYieldIterations, int, 128);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryExecYieldPeriodMS, int, 10);
} // namespace mongo
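Each knob above is declared with MONGO_EXPORT_SERVER_PARAMETER(name, type, default), which both defines the variable and registers it as a server parameter that can be adjusted at runtime (for example via the setParameter admin command). As a hedged illustration, adding a new knob follows the same one-line pattern here plus a matching extern in query_knobs.h; the knob name below is hypothetical.

    // Hypothetical example knob, declared with the same macro shape as above.
    MONGO_EXPORT_SERVER_PARAMETER(internalQueryExampleBudget, int, 1000);

    // Consumer side (in some planner .cpp), assuming a matching
    // 'extern int internalQueryExampleBudget;' is added to query_knobs.h.
    int exampleBudget() {
        // Guard against a nonsensical runtime setting.
        return internalQueryExampleBudget > 0 ? internalQueryExampleBudget : 1;
    }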
diff --git a/src/mongo/db/query/query_knobs.h b/src/mongo/db/query/query_knobs.h
index f2b775593d0..c82d6828a29 100644
--- a/src/mongo/db/query/query_knobs.h
+++ b/src/mongo/db/query/query_knobs.h
@@ -30,78 +30,78 @@
namespace mongo {
- //
- // multi-plan ranking
- //
+//
+// multi-plan ranking
+//
- // Max number of times we call work() on plans before comparing them,
- // for small collections.
- extern int internalQueryPlanEvaluationWorks;
+// Max number of times we call work() on plans before comparing them,
+// for small collections.
+extern int internalQueryPlanEvaluationWorks;
- // For large collections, the number times we work() candidate plans is
- // taken as this fraction of the collection size.
- extern double internalQueryPlanEvaluationCollFraction;
+// For large collections, the number of times we work() candidate plans is
+// taken as this fraction of the collection size.
+extern double internalQueryPlanEvaluationCollFraction;
- // Stop working plans once a plan returns this many results.
- extern int internalQueryPlanEvaluationMaxResults;
+// Stop working plans once a plan returns this many results.
+extern int internalQueryPlanEvaluationMaxResults;
- // Do we give a big ranking bonus to intersection plans?
- extern bool internalQueryForceIntersectionPlans;
+// Do we give a big ranking bonus to intersection plans?
+extern bool internalQueryForceIntersectionPlans;
- // Do we have ixisect on at all?
- extern bool internalQueryPlannerEnableIndexIntersection;
+// Do we have ixisect on at all?
+extern bool internalQueryPlannerEnableIndexIntersection;
- // Do we use hash-based intersection for rooted $and queries?
- extern bool internalQueryPlannerEnableHashIntersection;
+// Do we use hash-based intersection for rooted $and queries?
+extern bool internalQueryPlannerEnableHashIntersection;
- //
- // plan cache
- //
+//
+// plan cache
+//
- // How many entries in the cache?
- extern int internalQueryCacheSize;
+// How many entries in the cache?
+extern int internalQueryCacheSize;
- // How many feedback entries do we collect before possibly evicting from the cache based on bad
- // performance?
- extern int internalQueryCacheFeedbacksStored;
+// How many feedback entries do we collect before possibly evicting from the cache based on bad
+// performance?
+extern int internalQueryCacheFeedbacksStored;
- // How many times more works must we perform in order to justify plan cache eviction
- // and replanning?
- extern double internalQueryCacheEvictionRatio;
+// How many times more works must we perform in order to justify plan cache eviction
+// and replanning?
+extern double internalQueryCacheEvictionRatio;
- // How many write ops should we allow in a collection before tossing all cache entries?
- extern int internalQueryCacheWriteOpsBetweenFlush;
+// How many write ops should we allow in a collection before tossing all cache entries?
+extern int internalQueryCacheWriteOpsBetweenFlush;
- //
- // Planning and enumeration.
- //
+//
+// Planning and enumeration.
+//
- // How many indexed solutions will QueryPlanner::plan output?
- extern int internalQueryPlannerMaxIndexedSolutions;
+// How many indexed solutions will QueryPlanner::plan output?
+extern int internalQueryPlannerMaxIndexedSolutions;
- // How many solutions will the enumerator consider at each OR?
- extern int internalQueryEnumerationMaxOrSolutions;
+// How many solutions will the enumerator consider at each OR?
+extern int internalQueryEnumerationMaxOrSolutions;
- // How many intersections will the enumerator consider at each AND?
- extern int internalQueryEnumerationMaxIntersectPerAnd;
+// How many intersections will the enumerator consider at each AND?
+extern int internalQueryEnumerationMaxIntersectPerAnd;
- // Do we want to plan each child of the OR independently?
- extern bool internalQueryPlanOrChildrenIndependently;
+// Do we want to plan each child of the OR independently?
+extern bool internalQueryPlanOrChildrenIndependently;
- // How many index scans are we willing to produce in order to obtain a sort order
- // during explodeForSort?
- extern int internalQueryMaxScansToExplode;
+// How many index scans are we willing to produce in order to obtain a sort order
+// during explodeForSort?
+extern int internalQueryMaxScansToExplode;
- //
- // Query execution.
- //
+//
+// Query execution.
+//
- extern int internalQueryExecMaxBlockingSortBytes;
+extern int internalQueryExecMaxBlockingSortBytes;
- // Yield after this many "should yield?" checks.
- extern int internalQueryExecYieldIterations;
+// Yield after this many "should yield?" checks.
+extern int internalQueryExecYieldIterations;
- // Yield if it's been at least this many milliseconds since we last yielded.
- extern int internalQueryExecYieldPeriodMS;
+// Yield if it's been at least this many milliseconds since we last yielded.
+extern int internalQueryExecYieldPeriodMS;
} // namespace mongo
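The first two ranking knobs interact: the flat works budget applies to small collections, while the collection-fraction knob takes over for large ones. The sketch below is consistent with the comments above but is an illustration, not the multi-plan stage's actual code.

    // Sketch only: how the trial-period budget described above could be derived.
    // Requires <algorithm> for std::max.
    size_t trialPeriodWorks(size_t collectionSize) {
        size_t flatBudget = static_cast<size_t>(internalQueryPlanEvaluationWorks);
        size_t fractionalBudget =
            static_cast<size_t>(internalQueryPlanEvaluationCollFraction * collectionSize);
        // Small collections get the flat budget; large collections scale with size.
        return std::max(flatBudget, fractionalBudget);
    }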
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index 852b705c532..6b98e0fae79 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -34,7 +34,7 @@
#include <vector>
-#include "mongo/client/dbclientinterface.h" // For QueryOption_foobar
+#include "mongo/client/dbclientinterface.h" // For QueryOption_foobar
#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/matcher/expression_text.h"
#include "mongo/db/query/canonical_query.h"
@@ -49,880 +49,855 @@
namespace mongo {
- using std::unique_ptr;
- using std::numeric_limits;
-
- // Copied verbatim from db/index.h
- static bool isIdIndex( const BSONObj &pattern ) {
- BSONObjIterator i(pattern);
- BSONElement e = i.next();
- //_id index must have form exactly {_id : 1} or {_id : -1}.
- //Allows an index of form {_id : "hashed"} to exist but
- //do not consider it to be the primary _id index
- if(! ( strcmp(e.fieldName(), "_id") == 0
- && (e.numberInt() == 1 || e.numberInt() == -1)))
- return false;
- return i.next().eoo();
- }
+using std::unique_ptr;
+using std::numeric_limits;
+
+// Copied verbatim from db/index.h
+static bool isIdIndex(const BSONObj& pattern) {
+ BSONObjIterator i(pattern);
+ BSONElement e = i.next();
+ //_id index must have form exactly {_id : 1} or {_id : -1}.
+ // Allows an index of form {_id : "hashed"} to exist but
+ // do not consider it to be the primary _id index
+ if (!(strcmp(e.fieldName(), "_id") == 0 && (e.numberInt() == 1 || e.numberInt() == -1)))
+ return false;
+ return i.next().eoo();
+}
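A few hedged examples (hypothetical calls, not part of the diff) of what this predicate accepts and rejects, following directly from the checks above:

    // Illustration only: which key patterns isIdIndex() treats as the primary _id index.
    static void isIdIndexExamples() {
        invariant(isIdIndex(fromjson("{_id: 1}")));
        invariant(isIdIndex(fromjson("{_id: -1}")));
        invariant(!isIdIndex(fromjson("{_id: 'hashed'}")));  // hashed _id exists, but is not primary
        invariant(!isIdIndex(fromjson("{_id: 1, a: 1}")));   // trailing fields disqualify it
    }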
- static bool is2DIndex(const BSONObj& pattern) {
- BSONObjIterator it(pattern);
- while (it.more()) {
- BSONElement e = it.next();
- if (String == e.type() && str::equals("2d", e.valuestr())) {
- return true;
- }
+static bool is2DIndex(const BSONObj& pattern) {
+ BSONObjIterator it(pattern);
+ while (it.more()) {
+ BSONElement e = it.next();
+ if (String == e.type() && str::equals("2d", e.valuestr())) {
+ return true;
}
- return false;
}
+ return false;
+}
- string optionString(size_t options) {
- mongoutils::str::stream ss;
+string optionString(size_t options) {
+ mongoutils::str::stream ss;
- // These options are all currently mutually exclusive.
- if (QueryPlannerParams::DEFAULT == options) {
- ss << "DEFAULT ";
- }
- if (options & QueryPlannerParams::NO_TABLE_SCAN) {
- ss << "NO_TABLE_SCAN ";
- }
- if (options & QueryPlannerParams::INCLUDE_COLLSCAN) {
- ss << "INCLUDE_COLLSCAN ";
- }
- if (options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
- ss << "INCLUDE_SHARD_FILTER ";
- }
- if (options & QueryPlannerParams::NO_BLOCKING_SORT) {
- ss << "NO_BLOCKING_SORT ";
- }
- if (options & QueryPlannerParams::INDEX_INTERSECTION) {
- ss << "INDEX_INTERSECTION ";
- }
- if (options & QueryPlannerParams::KEEP_MUTATIONS) {
- ss << "KEEP_MUTATIONS";
- }
-
- return ss;
+ // These options are all currently mutually exclusive.
+ if (QueryPlannerParams::DEFAULT == options) {
+ ss << "DEFAULT ";
}
-
- static BSONObj getKeyFromQuery(const BSONObj& keyPattern, const BSONObj& query) {
- return query.extractFieldsUnDotted(keyPattern);
+ if (options & QueryPlannerParams::NO_TABLE_SCAN) {
+ ss << "NO_TABLE_SCAN ";
+ }
+ if (options & QueryPlannerParams::INCLUDE_COLLSCAN) {
+ ss << "INCLUDE_COLLSCAN ";
+ }
+ if (options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
+ ss << "INCLUDE_SHARD_FILTER ";
+ }
+ if (options & QueryPlannerParams::NO_BLOCKING_SORT) {
+ ss << "NO_BLOCKING_SORT ";
+ }
+ if (options & QueryPlannerParams::INDEX_INTERSECTION) {
+ ss << "INDEX_INTERSECTION ";
+ }
+ if (options & QueryPlannerParams::KEEP_MUTATIONS) {
+ ss << "KEEP_MUTATIONS";
}
- static bool indexCompatibleMaxMin(const BSONObj& obj, const BSONObj& keyPattern) {
- BSONObjIterator kpIt(keyPattern);
- BSONObjIterator objIt(obj);
+ return ss;
+}
- for (;;) {
- // Every element up to this point has matched so the KP matches
- if (!kpIt.more() && !objIt.more()) {
- return true;
- }
+static BSONObj getKeyFromQuery(const BSONObj& keyPattern, const BSONObj& query) {
+ return query.extractFieldsUnDotted(keyPattern);
+}
- // If only one iterator is done, it's not a match.
- if (!kpIt.more() || !objIt.more()) {
- return false;
- }
+static bool indexCompatibleMaxMin(const BSONObj& obj, const BSONObj& keyPattern) {
+ BSONObjIterator kpIt(keyPattern);
+ BSONObjIterator objIt(obj);
- // Field names must match and be in the same order.
- BSONElement kpElt = kpIt.next();
- BSONElement objElt = objIt.next();
- if (!mongoutils::str::equals(kpElt.fieldName(), objElt.fieldName())) {
- return false;
- }
+ for (;;) {
+ // Every element up to this point has matched so the KP matches
+ if (!kpIt.more() && !objIt.more()) {
+ return true;
}
- }
- static BSONObj stripFieldNames(const BSONObj& obj) {
- BSONObjIterator it(obj);
- BSONObjBuilder bob;
- while (it.more()) {
- bob.appendAs(it.next(), "");
- }
- return bob.obj();
- }
-
- /**
- * "Finishes" the min object for the $min query option by filling in an empty object with
- * MinKey/MaxKey and stripping field names.
- *
- * In the case that 'minObj' is empty, we "finish" it by filling in either MinKey or MaxKey
- * instead. Choosing whether to use MinKey or MaxKey is done by comparing against 'maxObj'.
- * For instance, suppose 'minObj' is empty, 'maxObj' is { a: 3 }, and the key pattern is
- * { a: -1 }. According to the key pattern ordering, { a: 3 } < MinKey. This means that the
- * proper resulting bounds are
- *
- * start: { '': MaxKey }, end: { '': 3 }
- *
- * as opposed to
- *
- * start: { '': MinKey }, end: { '': 3 }
- *
- * Suppose instead that the key pattern is { a: 1 }, with the same 'minObj' and 'maxObj'
- * (that is, an empty object and { a: 3 } respectively). In this case, { a: 3 } > MinKey,
- * which means that we use range [{'': MinKey}, {'': 3}]. The proper 'minObj' in this case is
- * MinKey, whereas in the previous example it was MaxKey.
- *
- * If 'minObj' is non-empty, then all we do is strip its field names (because index keys always
- * have empty field names).
- */
- static BSONObj finishMinObj(const BSONObj& kp, const BSONObj& minObj, const BSONObj& maxObj) {
- BSONObjBuilder bob;
- bob.appendMinKey("");
- BSONObj minKey = bob.obj();
-
- if (minObj.isEmpty()) {
- if (0 > minKey.woCompare(maxObj, kp, false)) {
- BSONObjBuilder minKeyBuilder;
- minKeyBuilder.appendMinKey("");
- return minKeyBuilder.obj();
- }
- else {
- BSONObjBuilder maxKeyBuilder;
- maxKeyBuilder.appendMaxKey("");
- return maxKeyBuilder.obj();
- }
- }
- else {
- return stripFieldNames(minObj);
+ // If only one iterator is done, it's not a match.
+ if (!kpIt.more() || !objIt.more()) {
+ return false;
}
- }
- /**
- * "Finishes" the max object for the $max query option by filling in an empty object with
- * MinKey/MaxKey and stripping field names.
- *
- * See comment for finishMinObj() for why we need both 'minObj' and 'maxObj'.
- */
- static BSONObj finishMaxObj(const BSONObj& kp, const BSONObj& minObj, const BSONObj& maxObj) {
- BSONObjBuilder bob;
- bob.appendMaxKey("");
- BSONObj maxKey = bob.obj();
-
- if (maxObj.isEmpty()) {
- if (0 < maxKey.woCompare(minObj, kp, false)) {
- BSONObjBuilder maxKeyBuilder;
- maxKeyBuilder.appendMaxKey("");
- return maxKeyBuilder.obj();
- }
- else {
- BSONObjBuilder minKeyBuilder;
- minKeyBuilder.appendMinKey("");
- return minKeyBuilder.obj();
- }
- }
- else {
- return stripFieldNames(maxObj);
+ // Field names must match and be in the same order.
+ BSONElement kpElt = kpIt.next();
+ BSONElement objElt = objIt.next();
+ if (!mongoutils::str::equals(kpElt.fieldName(), objElt.fieldName())) {
+ return false;
}
}
+}
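A hedged worked example (values are hypothetical) of the rule enforced above: the min/max object must list exactly the key pattern's field names, in key-pattern order; the values themselves are irrelevant at this stage.

    // Illustration only.
    static void indexCompatibleMaxMinExamples() {
        BSONObj kp = fromjson("{a: 1, b: 1}");
        invariant(indexCompatibleMaxMin(fromjson("{a: 0, b: 10}"), kp));   // same names, same order
        invariant(!indexCompatibleMaxMin(fromjson("{b: 10, a: 0}"), kp));  // wrong order
        invariant(!indexCompatibleMaxMin(fromjson("{a: 0}"), kp));         // missing trailing field
    }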
- QuerySolution* buildCollscanSoln(const CanonicalQuery& query,
- bool tailable,
- const QueryPlannerParams& params) {
-
- QuerySolutionNode* solnRoot = QueryPlannerAccess::makeCollectionScan(query, tailable, params);
- return QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
+static BSONObj stripFieldNames(const BSONObj& obj) {
+ BSONObjIterator it(obj);
+ BSONObjBuilder bob;
+ while (it.more()) {
+ bob.appendAs(it.next(), "");
}
+ return bob.obj();
+}
- QuerySolution* buildWholeIXSoln(const IndexEntry& index,
- const CanonicalQuery& query,
- const QueryPlannerParams& params,
- int direction = 1) {
-
- QuerySolutionNode* solnRoot = QueryPlannerAccess::scanWholeIndex(index, query, params, direction);
- return QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
+/**
+ * "Finishes" the min object for the $min query option by filling in an empty object with
+ * MinKey/MaxKey and stripping field names.
+ *
+ * In the case that 'minObj' is empty, we "finish" it by filling in either MinKey or MaxKey
+ * instead. Choosing whether to use MinKey or MaxKey is done by comparing against 'maxObj'.
+ * For instance, suppose 'minObj' is empty, 'maxObj' is { a: 3 }, and the key pattern is
+ * { a: -1 }. According to the key pattern ordering, { a: 3 } < MinKey. This means that the
+ * proper resulting bounds are
+ *
+ * start: { '': MaxKey }, end: { '': 3 }
+ *
+ * as opposed to
+ *
+ * start: { '': MinKey }, end: { '': 3 }
+ *
+ * Suppose instead that the key pattern is { a: 1 }, with the same 'minObj' and 'maxObj'
+ * (that is, an empty object and { a: 3 } respectively). In this case, { a: 3 } > MinKey,
+ * which means that we use range [{'': MinKey}, {'': 3}]. The proper 'minObj' in this case is
+ * MinKey, whereas in the previous example it was MaxKey.
+ *
+ * If 'minObj' is non-empty, then all we do is strip its field names (because index keys always
+ * have empty field names).
+ */
+static BSONObj finishMinObj(const BSONObj& kp, const BSONObj& minObj, const BSONObj& maxObj) {
+ BSONObjBuilder bob;
+ bob.appendMinKey("");
+ BSONObj minKey = bob.obj();
+
+ if (minObj.isEmpty()) {
+ if (0 > minKey.woCompare(maxObj, kp, false)) {
+ BSONObjBuilder minKeyBuilder;
+ minKeyBuilder.appendMinKey("");
+ return minKeyBuilder.obj();
+ } else {
+ BSONObjBuilder maxKeyBuilder;
+ maxKeyBuilder.appendMaxKey("");
+ return maxKeyBuilder.obj();
+ }
+ } else {
+ return stripFieldNames(minObj);
}
+}
- bool providesSort(const CanonicalQuery& query, const BSONObj& kp) {
- return query.getParsed().getSort().isPrefixOf(kp);
+/**
+ * "Finishes" the max object for the $max query option by filling in an empty object with
+ * MinKey/MaxKey and stripping field names.
+ *
+ * See comment for finishMinObj() for why we need both 'minObj' and 'maxObj'.
+ */
+static BSONObj finishMaxObj(const BSONObj& kp, const BSONObj& minObj, const BSONObj& maxObj) {
+ BSONObjBuilder bob;
+ bob.appendMaxKey("");
+ BSONObj maxKey = bob.obj();
+
+ if (maxObj.isEmpty()) {
+ if (0 < maxKey.woCompare(minObj, kp, false)) {
+ BSONObjBuilder maxKeyBuilder;
+ maxKeyBuilder.appendMaxKey("");
+ return maxKeyBuilder.obj();
+ } else {
+ BSONObjBuilder minKeyBuilder;
+ minKeyBuilder.appendMinKey("");
+ return minKeyBuilder.obj();
+ }
+ } else {
+ return stripFieldNames(maxObj);
+ }
+}
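The descending-key case described in the finishMinObj() comment is easy to get backwards, so here is a hedged worked example (hypothetical assertions) for kp = {a: -1}, an empty $min, and $max = {a: 3}:

    // Illustration only: the finished bounds run from MaxKey down to 3.
    static void finishMinMaxExample() {
        BSONObj kp = fromjson("{a: -1}");
        BSONObj minObj;                       // empty: no $min given
        BSONObj maxObj = fromjson("{a: 3}");

        BSONObj start = finishMinObj(kp, minObj, maxObj);  // {'': MaxKey}
        BSONObj end = finishMaxObj(kp, minObj, maxObj);    // {'': 3}, field name stripped

        // Under the {a: -1} ordering the finished min precedes the finished max.
        invariant(0 > start.woCompare(end, kp, false));
    }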
+
+QuerySolution* buildCollscanSoln(const CanonicalQuery& query,
+ bool tailable,
+ const QueryPlannerParams& params) {
+ QuerySolutionNode* solnRoot = QueryPlannerAccess::makeCollectionScan(query, tailable, params);
+ return QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
+}
+
+QuerySolution* buildWholeIXSoln(const IndexEntry& index,
+ const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ int direction = 1) {
+ QuerySolutionNode* solnRoot =
+ QueryPlannerAccess::scanWholeIndex(index, query, params, direction);
+ return QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
+}
+
+bool providesSort(const CanonicalQuery& query, const BSONObj& kp) {
+ return query.getParsed().getSort().isPrefixOf(kp);
+}
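providesSort() reduces to a prefix test on the index key pattern. Two hedged examples (hypothetical values) of the underlying isPrefixOf() check:

    // Illustration only: the sort pattern must be a prefix of the index key pattern.
    static void providesSortExamples() {
        invariant(fromjson("{a: 1}").isPrefixOf(fromjson("{a: 1, b: 1}")));
        invariant(!fromjson("{b: 1}").isPrefixOf(fromjson("{a: 1, b: 1}")));
    }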
+
+// static
+const int QueryPlanner::kPlannerVersion = 1;
+
+Status QueryPlanner::cacheDataFromTaggedTree(const MatchExpression* const taggedTree,
+ const vector<IndexEntry>& relevantIndices,
+ PlanCacheIndexTree** out) {
+ // On any early return, the out-parameter must contain NULL.
+ *out = NULL;
+
+ if (NULL == taggedTree) {
+ return Status(ErrorCodes::BadValue, "Cannot produce cache data: tree is NULL.");
}
- // static
- const int QueryPlanner::kPlannerVersion = 1;
+ unique_ptr<PlanCacheIndexTree> indexTree(new PlanCacheIndexTree());
- Status QueryPlanner::cacheDataFromTaggedTree(const MatchExpression* const taggedTree,
- const vector<IndexEntry>& relevantIndices,
- PlanCacheIndexTree** out) {
- // On any early return, the out-parameter must contain NULL.
- *out = NULL;
+ if (NULL != taggedTree->getTag()) {
+ IndexTag* itag = static_cast<IndexTag*>(taggedTree->getTag());
+ if (itag->index >= relevantIndices.size()) {
+ mongoutils::str::stream ss;
+ ss << "Index number is " << itag->index << " but there are only "
+ << relevantIndices.size() << " relevant indices.";
+ return Status(ErrorCodes::BadValue, ss);
+ }
- if (NULL == taggedTree) {
- return Status(ErrorCodes::BadValue, "Cannot produce cache data: tree is NULL.");
+ // Make sure not to cache solutions which use '2d' indices.
+ // A 2d index that doesn't wrap on one query may wrap on another, so we have to
+ // check that the index is OK with the predicate. The only thing we have to do
+ // this for is 2d. For now it's easier to move ahead if we don't cache 2d.
+ //
+ // TODO: revisit with a post-cached-index-assignment compatibility check
+ if (is2DIndex(relevantIndices[itag->index].keyPattern)) {
+ return Status(ErrorCodes::BadValue, "can't cache '2d' index");
}
- unique_ptr<PlanCacheIndexTree> indexTree(new PlanCacheIndexTree());
+ IndexEntry* ientry = new IndexEntry(relevantIndices[itag->index]);
+ indexTree->entry.reset(ientry);
+ indexTree->index_pos = itag->pos;
+ }
- if (NULL != taggedTree->getTag()) {
- IndexTag* itag = static_cast<IndexTag*>(taggedTree->getTag());
- if (itag->index >= relevantIndices.size()) {
- mongoutils::str::stream ss;
- ss << "Index number is " << itag->index
- << " but there are only " << relevantIndices.size()
- << " relevant indices.";
- return Status(ErrorCodes::BadValue, ss);
- }
+ for (size_t i = 0; i < taggedTree->numChildren(); ++i) {
+ MatchExpression* taggedChild = taggedTree->getChild(i);
+ PlanCacheIndexTree* indexTreeChild;
+ Status s = cacheDataFromTaggedTree(taggedChild, relevantIndices, &indexTreeChild);
+ if (!s.isOK()) {
+ return s;
+ }
+ indexTree->children.push_back(indexTreeChild);
+ }
- // Make sure not to cache solutions which use '2d' indices.
- // A 2d index that doesn't wrap on one query may wrap on another, so we have to
- // check that the index is OK with the predicate. The only thing we have to do
- // this for is 2d. For now it's easier to move ahead if we don't cache 2d.
- //
- // TODO: revisit with a post-cached-index-assignment compatibility check
- if (is2DIndex(relevantIndices[itag->index].keyPattern)) {
- return Status(ErrorCodes::BadValue, "can't cache '2d' index");
- }
+ *out = indexTree.release();
+ return Status::OK();
+}
- IndexEntry* ientry = new IndexEntry(relevantIndices[itag->index]);
- indexTree->entry.reset(ientry);
- indexTree->index_pos = itag->pos;
- }
+// static
+Status QueryPlanner::tagAccordingToCache(MatchExpression* filter,
+ const PlanCacheIndexTree* const indexTree,
+ const map<BSONObj, size_t>& indexMap) {
+ if (NULL == filter) {
+ return Status(ErrorCodes::BadValue, "Cannot tag tree: filter is NULL.");
+ }
+ if (NULL == indexTree) {
+ return Status(ErrorCodes::BadValue, "Cannot tag tree: indexTree is NULL.");
+ }
- for (size_t i = 0; i < taggedTree->numChildren(); ++i) {
- MatchExpression* taggedChild = taggedTree->getChild(i);
- PlanCacheIndexTree* indexTreeChild;
- Status s = cacheDataFromTaggedTree(taggedChild, relevantIndices, &indexTreeChild);
- if (!s.isOK()) {
- return s;
- }
- indexTree->children.push_back(indexTreeChild);
- }
+ // We're tagging the tree here, so it shouldn't have
+ // any tags hanging off yet.
+ verify(NULL == filter->getTag());
- *out = indexTree.release();
- return Status::OK();
+ if (filter->numChildren() != indexTree->children.size()) {
+ mongoutils::str::stream ss;
+ ss << "Cache topology and query did not match: "
+ << "query has " << filter->numChildren() << " children "
+ << "and cache has " << indexTree->children.size() << " children.";
+ return Status(ErrorCodes::BadValue, ss);
}
- // static
- Status QueryPlanner::tagAccordingToCache(MatchExpression* filter,
- const PlanCacheIndexTree* const indexTree,
- const map<BSONObj, size_t>& indexMap) {
- if (NULL == filter) {
- return Status(ErrorCodes::BadValue, "Cannot tag tree: filter is NULL.");
- }
- if (NULL == indexTree) {
- return Status(ErrorCodes::BadValue, "Cannot tag tree: indexTree is NULL.");
+ // Continue the depth-first tree traversal.
+ for (size_t i = 0; i < filter->numChildren(); ++i) {
+ Status s = tagAccordingToCache(filter->getChild(i), indexTree->children[i], indexMap);
+ if (!s.isOK()) {
+ return s;
}
+ }
- // We're tagging the tree here, so it shouldn't have
- // any tags hanging off yet.
- verify(NULL == filter->getTag());
-
- if (filter->numChildren() != indexTree->children.size()) {
+ if (NULL != indexTree->entry.get()) {
+ map<BSONObj, size_t>::const_iterator got = indexMap.find(indexTree->entry->keyPattern);
+ if (got == indexMap.end()) {
mongoutils::str::stream ss;
- ss << "Cache topology and query did not match: "
- << "query has " << filter->numChildren() << " children "
- << "and cache has " << indexTree->children.size() << " children.";
+ ss << "Did not find index with keyPattern: " << indexTree->entry->keyPattern.toString();
return Status(ErrorCodes::BadValue, ss);
}
-
- // Continue the depth-first tree traversal.
- for (size_t i = 0; i < filter->numChildren(); ++i) {
- Status s = tagAccordingToCache(filter->getChild(i), indexTree->children[i], indexMap);
- if (!s.isOK()) {
- return s;
- }
- }
-
- if (NULL != indexTree->entry.get()) {
- map<BSONObj, size_t>::const_iterator got = indexMap.find(indexTree->entry->keyPattern);
- if (got == indexMap.end()) {
- mongoutils::str::stream ss;
- ss << "Did not find index with keyPattern: " << indexTree->entry->keyPattern.toString();
- return Status(ErrorCodes::BadValue, ss);
- }
- filter->setTag(new IndexTag(got->second, indexTree->index_pos));
- }
-
- return Status::OK();
+ filter->setTag(new IndexTag(got->second, indexTree->index_pos));
}
- // static
- Status QueryPlanner::planFromCache(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- const CachedSolution& cachedSoln,
- QuerySolution** out) {
- invariant(!cachedSoln.plannerData.empty());
- invariant(out);
+ return Status::OK();
+}
- // A query not suitable for caching should not have made its way into the cache.
- invariant(PlanCache::shouldCacheQuery(query));
+// static
+Status QueryPlanner::planFromCache(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ const CachedSolution& cachedSoln,
+ QuerySolution** out) {
+ invariant(!cachedSoln.plannerData.empty());
+ invariant(out);
- // Look up winning solution in cached solution's array.
- const SolutionCacheData& winnerCacheData = *cachedSoln.plannerData[0];
+ // A query not suitable for caching should not have made its way into the cache.
+ invariant(PlanCache::shouldCacheQuery(query));
- if (SolutionCacheData::WHOLE_IXSCAN_SOLN == winnerCacheData.solnType) {
- // The solution can be constructed by a scan over the entire index.
- QuerySolution* soln = buildWholeIXSoln(*winnerCacheData.tree->entry,
- query,
- params,
- winnerCacheData.wholeIXSolnDir);
- if (soln == NULL) {
- return Status(ErrorCodes::BadValue,
- "plan cache error: soln that uses index to provide sort");
- }
- else {
- *out = soln;
- return Status::OK();
- }
+ // Look up winning solution in cached solution's array.
+ const SolutionCacheData& winnerCacheData = *cachedSoln.plannerData[0];
+
+ if (SolutionCacheData::WHOLE_IXSCAN_SOLN == winnerCacheData.solnType) {
+ // The solution can be constructed by a scan over the entire index.
+ QuerySolution* soln = buildWholeIXSoln(
+ *winnerCacheData.tree->entry, query, params, winnerCacheData.wholeIXSolnDir);
+ if (soln == NULL) {
+ return Status(ErrorCodes::BadValue,
+ "plan cache error: soln that uses index to provide sort");
+ } else {
+ *out = soln;
+ return Status::OK();
}
- else if (SolutionCacheData::COLLSCAN_SOLN == winnerCacheData.solnType) {
- // The cached solution is a collection scan. We don't cache collscans
- // with tailable==true, hence the false below.
- QuerySolution* soln = buildCollscanSoln(query, false, params);
- if (soln == NULL) {
- return Status(ErrorCodes::BadValue, "plan cache error: collection scan soln");
- }
- else {
- *out = soln;
- return Status::OK();
- }
+ } else if (SolutionCacheData::COLLSCAN_SOLN == winnerCacheData.solnType) {
+ // The cached solution is a collection scan. We don't cache collscans
+ // with tailable==true, hence the false below.
+ QuerySolution* soln = buildCollscanSoln(query, false, params);
+ if (soln == NULL) {
+ return Status(ErrorCodes::BadValue, "plan cache error: collection scan soln");
+ } else {
+ *out = soln;
+ return Status::OK();
}
+ }
- // SolutionCacheData::USE_TAGS_SOLN == cacheData->solnType
- // If we're here then this is neither the whole index scan or collection scan
- // cases, and we proceed by using the PlanCacheIndexTree to tag the query tree.
+ // SolutionCacheData::USE_TAGS_SOLN == cacheData->solnType
+ // If we're here then this is neither the whole index scan or collection scan
+ // cases, and we proceed by using the PlanCacheIndexTree to tag the query tree.
+
+ // Create a copy of the expression tree. We use cachedSoln to annotate this with indices.
+ MatchExpression* clone = query.root()->shallowClone();
+
+ LOG(5) << "Tagging the match expression according to cache data: " << endl
+ << "Filter:" << endl
+ << clone->toString() << "Cache data:" << endl
+ << winnerCacheData.toString();
+
+ // Map from index name to index number.
+ // TODO: can we assume that the index numbering has the same lifetime
+ // as the cache state?
+ map<BSONObj, size_t> indexMap;
+ for (size_t i = 0; i < params.indices.size(); ++i) {
+ const IndexEntry& ie = params.indices[i];
+ indexMap[ie.keyPattern] = i;
+ LOG(5) << "Index " << i << ": " << ie.keyPattern.toString() << endl;
+ }
- // Create a copy of the expression tree. We use cachedSoln to annotate this with indices.
- MatchExpression* clone = query.root()->shallowClone();
+ Status s = tagAccordingToCache(clone, winnerCacheData.tree.get(), indexMap);
+ if (!s.isOK()) {
+ return s;
+ }
- LOG(5) << "Tagging the match expression according to cache data: " << endl
- << "Filter:" << endl << clone->toString()
- << "Cache data:" << endl << winnerCacheData.toString();
+ // The planner requires a defined sort order.
+ sortUsingTags(clone);
- // Map from index name to index number.
- // TODO: can we assume that the index numbering has the same lifetime
- // as the cache state?
- map<BSONObj, size_t> indexMap;
- for (size_t i = 0; i < params.indices.size(); ++i) {
- const IndexEntry& ie = params.indices[i];
- indexMap[ie.keyPattern] = i;
- LOG(5) << "Index " << i << ": " << ie.keyPattern.toString() << endl;
- }
+ LOG(5) << "Tagged tree:" << endl
+ << clone->toString();
- Status s = tagAccordingToCache(clone, winnerCacheData.tree.get(), indexMap);
- if (!s.isOK()) {
- return s;
- }
+ // Use the cached index assignments to build solnRoot. Takes ownership of clone.
+ QuerySolutionNode* solnRoot =
+ QueryPlannerAccess::buildIndexedDataAccess(query, clone, false, params.indices, params);
- // The planner requires a defined sort order.
- sortUsingTags(clone);
+ if (!solnRoot) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Failed to create data access plan from cache. Query: "
+ << query.toStringShort());
+ }
- LOG(5) << "Tagged tree:" << endl << clone->toString();
+ // Takes ownership of 'solnRoot'.
+ QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
+ if (!soln) {
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Failed to analyze plan from cache. Query: " << query.toStringShort());
+ }
- // Use the cached index assignments to build solnRoot. Takes ownership of clone.
- QuerySolutionNode* solnRoot =
- QueryPlannerAccess::buildIndexedDataAccess(query, clone, false, params.indices, params);
+ LOG(5) << "Planner: solution constructed from the cache:\n" << soln->toString();
+ *out = soln;
+ return Status::OK();
+}
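For context, a rough sketch of how the two entry points fit together on the caller side. The real wiring lives in get_executor.cpp, and the PlanCache::get() shape used here is an assumption; treat this as an outline, not the actual code.

    // Sketch only: replay a cached plan when one exists, otherwise run the full planner.
    Status planQueryWithCache(const CanonicalQuery& query,
                              const QueryPlannerParams& params,
                              PlanCache* cache,
                              std::vector<QuerySolution*>* out) {
        CachedSolution* rawCached = NULL;
        if (PlanCache::shouldCacheQuery(query) && cache->get(query, &rawCached).isOK()) {
            unique_ptr<CachedSolution> cached(rawCached);
            QuerySolution* soln = NULL;
            if (QueryPlanner::planFromCache(query, params, *cached, &soln).isOK()) {
                out->push_back(soln);
                return Status::OK();
            }
            // Cache replay failed; fall through to full planning.
        }
        return QueryPlanner::plan(query, params, out);
    }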
+
+// static
+Status QueryPlanner::plan(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ std::vector<QuerySolution*>* out) {
+ LOG(5) << "Beginning planning..." << endl
+ << "=============================" << endl
+ << "Options = " << optionString(params.options) << endl
+ << "Canonical query:" << endl
+ << query.toString() << "=============================" << endl;
+
+ for (size_t i = 0; i < params.indices.size(); ++i) {
+ LOG(5) << "Index " << i << " is " << params.indices[i].toString() << endl;
+ }
- if (!solnRoot) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "Failed to create data access plan from cache. Query: "
- << query.toStringShort());
- }
+ bool canTableScan = !(params.options & QueryPlannerParams::NO_TABLE_SCAN);
- // Takes ownership of 'solnRoot'.
- QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
- if (!soln) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "Failed to analyze plan from cache. Query: "
- << query.toStringShort());
+ // If the query requests a tailable cursor, the only solution is a collscan + filter with
+ // tailable set on the collscan. TODO: This is a policy departure. Previously I think you
+ // could ask for a tailable cursor and it just tried to give you one. Now, we fail if we
+ // can't provide one. Is this what we want?
+ if (query.getParsed().isTailable()) {
+ if (!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR) && canTableScan) {
+ QuerySolution* soln = buildCollscanSoln(query, true, params);
+ if (NULL != soln) {
+ out->push_back(soln);
+ }
}
-
- LOG(5) << "Planner: solution constructed from the cache:\n" << soln->toString();
- *out = soln;
return Status::OK();
}
- // static
- Status QueryPlanner::plan(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- std::vector<QuerySolution*>* out) {
-
- LOG(5) << "Beginning planning..." << endl
- << "=============================" << endl
- << "Options = " << optionString(params.options) << endl
- << "Canonical query:" << endl << query.toString()
- << "=============================" << endl;
-
- for (size_t i = 0; i < params.indices.size(); ++i) {
- LOG(5) << "Index " << i << " is " << params.indices[i].toString() << endl;
- }
-
- bool canTableScan = !(params.options & QueryPlannerParams::NO_TABLE_SCAN);
-
- // If the query requests a tailable cursor, the only solution is a collscan + filter with
- // tailable set on the collscan. TODO: This is a policy departure. Previously I think you
- // could ask for a tailable cursor and it just tried to give you one. Now, we fail if we
- // can't provide one. Is this what we want?
- if (query.getParsed().isTailable()) {
- if (!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)
- && canTableScan) {
- QuerySolution* soln = buildCollscanSoln(query, true, params);
+ // The hint or sort can be $natural: 1. If this happens, output a collscan. If both
+ // a $natural hint and a $natural sort are specified, then the direction of the collscan
+ // is determined by the sign of the sort (not the sign of the hint).
+ if (!query.getParsed().getHint().isEmpty() || !query.getParsed().getSort().isEmpty()) {
+ BSONObj hintObj = query.getParsed().getHint();
+ BSONObj sortObj = query.getParsed().getSort();
+ BSONElement naturalHint = hintObj.getFieldDotted("$natural");
+ BSONElement naturalSort = sortObj.getFieldDotted("$natural");
+
+ // A hint overrides a $natural sort. This means that we don't force a table
+ // scan if there is a $natural sort with a non-$natural hint.
+ if (!naturalHint.eoo() || (!naturalSort.eoo() && hintObj.isEmpty())) {
+ LOG(5) << "Forcing a table scan due to hinted $natural\n";
+ // min/max are incompatible with $natural.
+ if (canTableScan && query.getParsed().getMin().isEmpty() &&
+ query.getParsed().getMax().isEmpty()) {
+ QuerySolution* soln = buildCollscanSoln(query, false, params);
if (NULL != soln) {
out->push_back(soln);
}
}
return Status::OK();
}
+ }
- // The hint or sort can be $natural: 1. If this happens, output a collscan. If both
- // a $natural hint and a $natural sort are specified, then the direction of the collscan
- // is determined by the sign of the sort (not the sign of the hint).
- if (!query.getParsed().getHint().isEmpty() || !query.getParsed().getSort().isEmpty()) {
- BSONObj hintObj = query.getParsed().getHint();
- BSONObj sortObj = query.getParsed().getSort();
- BSONElement naturalHint = hintObj.getFieldDotted("$natural");
- BSONElement naturalSort = sortObj.getFieldDotted("$natural");
-
- // A hint overrides a $natural sort. This means that we don't force a table
- // scan if there is a $natural sort with a non-$natural hint.
- if (!naturalHint.eoo() || (!naturalSort.eoo() && hintObj.isEmpty())) {
- LOG(5) << "Forcing a table scan due to hinted $natural\n";
- // min/max are incompatible with $natural.
- if (canTableScan && query.getParsed().getMin().isEmpty()
- && query.getParsed().getMax().isEmpty()) {
- QuerySolution* soln = buildCollscanSoln(query, false, params);
- if (NULL != soln) {
- out->push_back(soln);
- }
- }
- return Status::OK();
- }
- }
+ // Figure out what fields we care about.
+ unordered_set<string> fields;
+ QueryPlannerIXSelect::getFields(query.root(), "", &fields);
- // Figure out what fields we care about.
- unordered_set<string> fields;
- QueryPlannerIXSelect::getFields(query.root(), "", &fields);
+ for (unordered_set<string>::const_iterator it = fields.begin(); it != fields.end(); ++it) {
+ LOG(5) << "Predicate over field '" << *it << "'" << endl;
+ }
- for (unordered_set<string>::const_iterator it = fields.begin(); it != fields.end(); ++it) {
- LOG(5) << "Predicate over field '" << *it << "'" << endl;
- }
+ // Filter our indices so we only look at indices that are over our predicates.
+ vector<IndexEntry> relevantIndices;
- // Filter our indices so we only look at indices that are over our predicates.
- vector<IndexEntry> relevantIndices;
+ // Hints require us to only consider the hinted index.
+ // If index filters in the query settings were used to override
+ // the allowed indices for planning, we should not use the hinted index
+ // requested in the query.
+ BSONObj hintIndex;
+ if (!params.indexFiltersApplied) {
+ hintIndex = query.getParsed().getHint();
+ }
- // Hints require us to only consider the hinted index.
- // If index filters in the query settings were used to override
- // the allowed indices for planning, we should not use the hinted index
- // requested in the query.
- BSONObj hintIndex;
- if (!params.indexFiltersApplied) {
- hintIndex = query.getParsed().getHint();
+ // Snapshot is a form of a hint. If snapshot is set, try to use _id index to make a real
+ // plan. If that fails, just scan the _id index.
+ if (query.getParsed().isSnapshot()) {
+ // Find the ID index in indexKeyPatterns. It's our hint.
+ for (size_t i = 0; i < params.indices.size(); ++i) {
+ if (isIdIndex(params.indices[i].keyPattern)) {
+ hintIndex = params.indices[i].keyPattern;
+ break;
+ }
}
+ }
- // Snapshot is a form of a hint. If snapshot is set, try to use _id index to make a real
- // plan. If that fails, just scan the _id index.
- if (query.getParsed().isSnapshot()) {
- // Find the ID index in indexKeyPatterns. It's our hint.
+ size_t hintIndexNumber = numeric_limits<size_t>::max();
+
+ if (hintIndex.isEmpty()) {
+ QueryPlannerIXSelect::findRelevantIndices(fields, params.indices, &relevantIndices);
+ } else {
+ // Sigh. If the hint is specified it might be using the index name.
+ BSONElement firstHintElt = hintIndex.firstElement();
+ if (str::equals("$hint", firstHintElt.fieldName()) && String == firstHintElt.type()) {
+ string hintName = firstHintElt.String();
for (size_t i = 0; i < params.indices.size(); ++i) {
- if (isIdIndex(params.indices[i].keyPattern)) {
+ if (params.indices[i].name == hintName) {
+ LOG(5) << "Hint by name specified, restricting indices to "
+ << params.indices[i].keyPattern.toString() << endl;
+ relevantIndices.clear();
+ relevantIndices.push_back(params.indices[i]);
+ hintIndexNumber = i;
hintIndex = params.indices[i].keyPattern;
break;
}
}
- }
-
- size_t hintIndexNumber = numeric_limits<size_t>::max();
-
- if (hintIndex.isEmpty()) {
- QueryPlannerIXSelect::findRelevantIndices(fields, params.indices, &relevantIndices);
- }
- else {
- // Sigh. If the hint is specified it might be using the index name.
- BSONElement firstHintElt = hintIndex.firstElement();
- if (str::equals("$hint", firstHintElt.fieldName()) && String == firstHintElt.type()) {
- string hintName = firstHintElt.String();
- for (size_t i = 0; i < params.indices.size(); ++i) {
- if (params.indices[i].name == hintName) {
- LOG(5) << "Hint by name specified, restricting indices to "
- << params.indices[i].keyPattern.toString() << endl;
- relevantIndices.clear();
- relevantIndices.push_back(params.indices[i]);
- hintIndexNumber = i;
- hintIndex = params.indices[i].keyPattern;
- break;
- }
- }
- }
- else {
- for (size_t i = 0; i < params.indices.size(); ++i) {
- if (0 == params.indices[i].keyPattern.woCompare(hintIndex)) {
- relevantIndices.clear();
- relevantIndices.push_back(params.indices[i]);
- LOG(5) << "Hint specified, restricting indices to " << hintIndex.toString()
- << endl;
- hintIndexNumber = i;
- break;
- }
+ } else {
+ for (size_t i = 0; i < params.indices.size(); ++i) {
+ if (0 == params.indices[i].keyPattern.woCompare(hintIndex)) {
+ relevantIndices.clear();
+ relevantIndices.push_back(params.indices[i]);
+ LOG(5) << "Hint specified, restricting indices to " << hintIndex.toString()
+ << endl;
+ hintIndexNumber = i;
+ break;
}
}
-
- if (hintIndexNumber == numeric_limits<size_t>::max()) {
- return Status(ErrorCodes::BadValue, "bad hint");
- }
}
- // Deal with the .min() and .max() query options. If either exist we can only use an index
- // that matches the object inside.
- if (!query.getParsed().getMin().isEmpty() || !query.getParsed().getMax().isEmpty()) {
- BSONObj minObj = query.getParsed().getMin();
- BSONObj maxObj = query.getParsed().getMax();
-
- // The unfinished siblings of these objects may not be proper index keys because they
- // may be empty objects or have field names. When an index is picked to use for the
- // min/max query, these "finished" objects will always be valid index keys for the
- // index's key pattern.
- BSONObj finishedMinObj;
- BSONObj finishedMaxObj;
-
- // This is the index into params.indices[...] that we use.
- size_t idxNo = numeric_limits<size_t>::max();
-
- // If there's an index hinted we need to be able to use it.
- if (!hintIndex.isEmpty()) {
- if (!minObj.isEmpty() && !indexCompatibleMaxMin(minObj, hintIndex)) {
- LOG(5) << "Minobj doesn't work with hint";
- return Status(ErrorCodes::BadValue,
- "hint provided does not work with min query");
- }
+ if (hintIndexNumber == numeric_limits<size_t>::max()) {
+ return Status(ErrorCodes::BadValue, "bad hint");
+ }
+ }
- if (!maxObj.isEmpty() && !indexCompatibleMaxMin(maxObj, hintIndex)) {
- LOG(5) << "Maxobj doesn't work with hint";
- return Status(ErrorCodes::BadValue,
- "hint provided does not work with max query");
- }
+ // Deal with the .min() and .max() query options. If either exist we can only use an index
+ // that matches the object inside.
+ if (!query.getParsed().getMin().isEmpty() || !query.getParsed().getMax().isEmpty()) {
+ BSONObj minObj = query.getParsed().getMin();
+ BSONObj maxObj = query.getParsed().getMax();
- const BSONObj& kp = params.indices[hintIndexNumber].keyPattern;
- finishedMinObj = finishMinObj(kp, minObj, maxObj);
- finishedMaxObj = finishMaxObj(kp, minObj, maxObj);
+ // The unfinished siblings of these objects may not be proper index keys because they
+ // may be empty objects or have field names. When an index is picked to use for the
+ // min/max query, these "finished" objects will always be valid index keys for the
+ // index's key pattern.
+ BSONObj finishedMinObj;
+ BSONObj finishedMaxObj;
- // The min must be less than the max for the hinted index ordering.
- if (0 <= finishedMinObj.woCompare(finishedMaxObj, kp, false)) {
- LOG(5) << "Minobj/Maxobj don't work with hint";
- return Status(ErrorCodes::BadValue,
- "hint provided does not work with min/max query");
- }
+ // This is the index into params.indices[...] that we use.
+ size_t idxNo = numeric_limits<size_t>::max();
- idxNo = hintIndexNumber;
+ // If there's an index hinted we need to be able to use it.
+ if (!hintIndex.isEmpty()) {
+ if (!minObj.isEmpty() && !indexCompatibleMaxMin(minObj, hintIndex)) {
+ LOG(5) << "Minobj doesn't work with hint";
+ return Status(ErrorCodes::BadValue, "hint provided does not work with min query");
}
- else {
- // No hinted index, look for one that is compatible (has same field names and
- // ordering thereof).
- for (size_t i = 0; i < params.indices.size(); ++i) {
- const BSONObj& kp = params.indices[i].keyPattern;
-
- BSONObj toUse = minObj.isEmpty() ? maxObj : minObj;
- if (indexCompatibleMaxMin(toUse, kp)) {
- // In order to be fully compatible, the min has to be less than the max
- // according to the index key pattern ordering. The first step in verifying
- // this is "finish" the min and max by replacing empty objects and stripping
- // field names.
- finishedMinObj = finishMinObj(kp, minObj, maxObj);
- finishedMaxObj = finishMaxObj(kp, minObj, maxObj);
-
- // Now we have the final min and max. This index is only relevant for
- // the min/max query if min < max.
- if (0 >= finishedMinObj.woCompare(finishedMaxObj, kp, false)) {
- // Found a relevant index.
- idxNo = i;
- break;
- }
-
- // This index is not relevant; move on to the next.
- }
- }
+
+ if (!maxObj.isEmpty() && !indexCompatibleMaxMin(maxObj, hintIndex)) {
+ LOG(5) << "Maxobj doesn't work with hint";
+ return Status(ErrorCodes::BadValue, "hint provided does not work with max query");
}
- if (idxNo == numeric_limits<size_t>::max()) {
- LOG(5) << "Can't find relevant index to use for max/min query";
- // Can't find an index to use, bail out.
+ const BSONObj& kp = params.indices[hintIndexNumber].keyPattern;
+ finishedMinObj = finishMinObj(kp, minObj, maxObj);
+ finishedMaxObj = finishMaxObj(kp, minObj, maxObj);
+
+ // The min must be less than the max for the hinted index ordering.
+ if (0 <= finishedMinObj.woCompare(finishedMaxObj, kp, false)) {
+ LOG(5) << "Minobj/Maxobj don't work with hint";
return Status(ErrorCodes::BadValue,
- "unable to find relevant index for max/min query");
+ "hint provided does not work with min/max query");
}
- LOG(5) << "Max/min query using index " << params.indices[idxNo].toString() << endl;
-
- // Make our scan and output.
- QuerySolutionNode* solnRoot = QueryPlannerAccess::makeIndexScan(params.indices[idxNo],
- query,
- params,
- finishedMinObj,
- finishedMaxObj);
+ idxNo = hintIndexNumber;
+ } else {
+ // No hinted index, look for one that is compatible (has same field names and
+ // ordering thereof).
+ for (size_t i = 0; i < params.indices.size(); ++i) {
+ const BSONObj& kp = params.indices[i].keyPattern;
+
+ BSONObj toUse = minObj.isEmpty() ? maxObj : minObj;
+ if (indexCompatibleMaxMin(toUse, kp)) {
+ // In order to be fully compatible, the min has to be less than the max
+ // according to the index key pattern ordering. The first step in verifying
+ // this is "finish" the min and max by replacing empty objects and stripping
+ // field names.
+ finishedMinObj = finishMinObj(kp, minObj, maxObj);
+ finishedMaxObj = finishMaxObj(kp, minObj, maxObj);
+
+ // Now we have the final min and max. This index is only relevant for
+ // the min/max query if min < max.
+ if (0 >= finishedMinObj.woCompare(finishedMaxObj, kp, false)) {
+ // Found a relevant index.
+ idxNo = i;
+ break;
+ }
- QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
- if (NULL != soln) {
- out->push_back(soln);
+ // This index is not relevant; move on to the next.
+ }
}
+ }
- return Status::OK();
+ if (idxNo == numeric_limits<size_t>::max()) {
+ LOG(5) << "Can't find relevant index to use for max/min query";
+ // Can't find an index to use, bail out.
+ return Status(ErrorCodes::BadValue, "unable to find relevant index for max/min query");
}
- for (size_t i = 0; i < relevantIndices.size(); ++i) {
- LOG(2) << "Relevant index " << i << " is " << relevantIndices[i].toString() << endl;
+ LOG(5) << "Max/min query using index " << params.indices[idxNo].toString() << endl;
+
+ // Make our scan and output.
+ QuerySolutionNode* solnRoot = QueryPlannerAccess::makeIndexScan(
+ params.indices[idxNo], query, params, finishedMinObj, finishedMaxObj);
+
+ QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
+ if (NULL != soln) {
+ out->push_back(soln);
}
- // Figure out how useful each index is to each predicate.
- QueryPlannerIXSelect::rateIndices(query.root(), "", relevantIndices);
- QueryPlannerIXSelect::stripInvalidAssignments(query.root(), relevantIndices);
+ return Status::OK();
+ }
+
+ for (size_t i = 0; i < relevantIndices.size(); ++i) {
+ LOG(2) << "Relevant index " << i << " is " << relevantIndices[i].toString() << endl;
+ }
- // Unless we have GEO_NEAR, TEXT, or a projection, we may be able to apply an optimization
- // in which we strip unnecessary index assignments.
- //
- // Disallowed with projection because assignment to a non-unique index can allow the plan
- // to be covered.
- //
- // TEXT and GEO_NEAR are special because they require the use of a text/geo index in order
- // to be evaluated correctly. Stripping these "mandatory assignments" is therefore invalid.
- if (query.getParsed().getProj().isEmpty()
- && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)
- && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)) {
- QueryPlannerIXSelect::stripUnneededAssignments(query.root(), relevantIndices);
+ // Figure out how useful each index is to each predicate.
+ QueryPlannerIXSelect::rateIndices(query.root(), "", relevantIndices);
+ QueryPlannerIXSelect::stripInvalidAssignments(query.root(), relevantIndices);
+
+ // Unless we have GEO_NEAR, TEXT, or a projection, we may be able to apply an optimization
+ // in which we strip unnecessary index assignments.
+ //
+ // Disallowed with projection because assignment to a non-unique index can allow the plan
+ // to be covered.
+ //
+ // TEXT and GEO_NEAR are special because they require the use of a text/geo index in order
+ // to be evaluated correctly. Stripping these "mandatory assignments" is therefore invalid.
+ if (query.getParsed().getProj().isEmpty() &&
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR) &&
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)) {
+ QueryPlannerIXSelect::stripUnneededAssignments(query.root(), relevantIndices);
+ }
+
+ // query.root() is now annotated with RelevantTag(s).
+ LOG(5) << "Rated tree:" << endl
+ << query.root()->toString();
+
+ // If there is a GEO_NEAR it must have an index it can use directly.
+ MatchExpression* gnNode = NULL;
+ if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR, &gnNode)) {
+ // No index for GEO_NEAR? No query.
+ RelevantTag* tag = static_cast<RelevantTag*>(gnNode->getTag());
+ if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
+ LOG(5) << "Unable to find index for $geoNear query." << endl;
+ // Don't leave tags on query tree.
+ query.root()->resetTag();
+ return Status(ErrorCodes::BadValue, "unable to find index for $geoNear query");
}
- // query.root() is now annotated with RelevantTag(s).
- LOG(5) << "Rated tree:" << endl << query.root()->toString();
-
- // If there is a GEO_NEAR it must have an index it can use directly.
- MatchExpression* gnNode = NULL;
- if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR, &gnNode)) {
- // No index for GEO_NEAR? No query.
- RelevantTag* tag = static_cast<RelevantTag*>(gnNode->getTag());
- if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
- LOG(5) << "Unable to find index for $geoNear query." << endl;
- // Don't leave tags on query tree.
- query.root()->resetTag();
- return Status(ErrorCodes::BadValue, "unable to find index for $geoNear query");
+ LOG(5) << "Rated tree after geonear processing:" << query.root()->toString();
+ }
+
+ // Likewise, if there is a TEXT it must have an index it can use directly.
+ MatchExpression* textNode = NULL;
+ if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT, &textNode)) {
+ RelevantTag* tag = static_cast<RelevantTag*>(textNode->getTag());
+
+ // Exactly one text index required for TEXT. We need to check this explicitly because
+ // the text stage can't be built if no text index exists or there is an ambiguity as to
+ // which one to use.
+ size_t textIndexCount = 0;
+ for (size_t i = 0; i < params.indices.size(); i++) {
+ if (INDEX_TEXT == params.indices[i].type) {
+ textIndexCount++;
}
+ }
+ if (textIndexCount != 1) {
+ // Don't leave tags on query tree.
+ query.root()->resetTag();
+ return Status(ErrorCodes::BadValue, "need exactly one text index for $text query");
+ }
- LOG(5) << "Rated tree after geonear processing:" << query.root()->toString();
+ // Error if the text node is tagged with zero indices.
+ if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
+ // Don't leave tags on query tree.
+ query.root()->resetTag();
+ return Status(ErrorCodes::BadValue,
+ "failed to use text index to satisfy $text query (if text index is "
+ "compound, are equality predicates given for all prefix fields?)");
}
- // Likewise, if there is a TEXT it must have an index it can use directly.
- MatchExpression* textNode = NULL;
- if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT, &textNode)) {
- RelevantTag* tag = static_cast<RelevantTag*>(textNode->getTag());
-
- // Exactly one text index required for TEXT. We need to check this explicitly because
- // the text stage can't be built if no text index exists or there is an ambiguity as to
- // which one to use.
- size_t textIndexCount = 0;
- for (size_t i = 0; i < params.indices.size(); i++) {
- if (INDEX_TEXT == params.indices[i].type) {
- textIndexCount++;
- }
- }
- if (textIndexCount != 1) {
- // Don't leave tags on query tree.
- query.root()->resetTag();
- return Status(ErrorCodes::BadValue, "need exactly one text index for $text query");
- }
+ // At this point, we know that there is only one text index and that the TEXT node is
+ // assigned to it.
+ invariant(1 == tag->first.size() + tag->notFirst.size());
- // Error if the text node is tagged with zero indices.
- if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
- // Don't leave tags on query tree.
- query.root()->resetTag();
- return Status(ErrorCodes::BadValue,
- "failed to use text index to satisfy $text query (if text index is "
- "compound, are equality predicates given for all prefix fields?)");
- }
+ LOG(5) << "Rated tree after text processing:" << query.root()->toString();
+ }
- // At this point, we know that there is only one text index and that the TEXT node is
- // assigned to it.
- invariant(1 == tag->first.size() + tag->notFirst.size());
+ // If we have any relevant indices, we try to create indexed plans.
+ if (0 < relevantIndices.size()) {
+ // The enumerator spits out trees tagged with IndexTag(s).
+ PlanEnumeratorParams enumParams;
+ enumParams.intersect = params.options & QueryPlannerParams::INDEX_INTERSECTION;
+ enumParams.root = query.root();
+ enumParams.indices = &relevantIndices;
- LOG(5) << "Rated tree after text processing:" << query.root()->toString();
- }
+ PlanEnumerator isp(enumParams);
+ isp.init();
- // If we have any relevant indices, we try to create indexed plans.
- if (0 < relevantIndices.size()) {
- // The enumerator spits out trees tagged with IndexTag(s).
- PlanEnumeratorParams enumParams;
- enumParams.intersect = params.options & QueryPlannerParams::INDEX_INTERSECTION;
- enumParams.root = query.root();
- enumParams.indices = &relevantIndices;
-
- PlanEnumerator isp(enumParams);
- isp.init();
-
- MatchExpression* rawTree;
- while (isp.getNext(&rawTree) && (out->size() < params.maxIndexedSolutions)) {
- LOG(5) << "About to build solntree from tagged tree:" << endl
- << rawTree->toString();
-
- // The tagged tree produced by the plan enumerator is not guaranteed
- // to be canonically sorted. In order to be compatible with the cached
- // data, sort the tagged tree according to CanonicalQuery ordering.
- std::unique_ptr<MatchExpression> clone(rawTree->shallowClone());
- CanonicalQuery::sortTree(clone.get());
-
- PlanCacheIndexTree* cacheData;
- Status indexTreeStatus = cacheDataFromTaggedTree(clone.get(), relevantIndices, &cacheData);
- if (!indexTreeStatus.isOK()) {
- LOG(5) << "Query is not cachable: " << indexTreeStatus.reason() << endl;
- }
- unique_ptr<PlanCacheIndexTree> autoData(cacheData);
+ MatchExpression* rawTree;
+ while (isp.getNext(&rawTree) && (out->size() < params.maxIndexedSolutions)) {
+ LOG(5) << "About to build solntree from tagged tree:" << endl
+ << rawTree->toString();
- // This can fail if enumeration makes a mistake.
- QuerySolutionNode* solnRoot =
- QueryPlannerAccess::buildIndexedDataAccess(query, rawTree, false,
- relevantIndices, params);
+ // The tagged tree produced by the plan enumerator is not guaranteed
+ // to be canonically sorted. In order to be compatible with the cached
+ // data, sort the tagged tree according to CanonicalQuery ordering.
+ std::unique_ptr<MatchExpression> clone(rawTree->shallowClone());
+ CanonicalQuery::sortTree(clone.get());
- if (NULL == solnRoot) { continue; }
+ PlanCacheIndexTree* cacheData;
+ Status indexTreeStatus =
+ cacheDataFromTaggedTree(clone.get(), relevantIndices, &cacheData);
+ if (!indexTreeStatus.isOK()) {
+ LOG(5) << "Query is not cachable: " << indexTreeStatus.reason() << endl;
+ }
+ unique_ptr<PlanCacheIndexTree> autoData(cacheData);
- QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query,
- params,
- solnRoot);
- if (NULL != soln) {
- LOG(5) << "Planner: adding solution:" << endl << soln->toString();
- if (indexTreeStatus.isOK()) {
- SolutionCacheData* scd = new SolutionCacheData();
- scd->tree.reset(autoData.release());
- soln->cacheData.reset(scd);
- }
- out->push_back(soln);
+ // This can fail if enumeration makes a mistake.
+ QuerySolutionNode* solnRoot = QueryPlannerAccess::buildIndexedDataAccess(
+ query, rawTree, false, relevantIndices, params);
+
+ if (NULL == solnRoot) {
+ continue;
+ }
+
+ QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
+ if (NULL != soln) {
+ LOG(5) << "Planner: adding solution:" << endl
+ << soln->toString();
+ if (indexTreeStatus.isOK()) {
+ SolutionCacheData* scd = new SolutionCacheData();
+ scd->tree.reset(autoData.release());
+ soln->cacheData.reset(scd);
}
+ out->push_back(soln);
}
}
+ }
- // Don't leave tags on query tree.
- query.root()->resetTag();
-
- LOG(5) << "Planner: outputted " << out->size() << " indexed solutions.\n";
-
- // Produce legible error message for failed OR planning with a TEXT child.
- // TODO: support collection scan for non-TEXT children of OR.
- if (out->size() == 0 && textNode != NULL &&
- MatchExpression::OR == query.root()->matchType()) {
- MatchExpression* root = query.root();
- for (size_t i = 0; i < root->numChildren(); ++i) {
- if (textNode == root->getChild(i)) {
- return Status(ErrorCodes::BadValue,
- "Failed to produce a solution for TEXT under OR - "
- "other non-TEXT clauses under OR have to be indexed as well.");
- }
+ // Don't leave tags on query tree.
+ query.root()->resetTag();
+
+ LOG(5) << "Planner: outputted " << out->size() << " indexed solutions.\n";
+
+ // Produce legible error message for failed OR planning with a TEXT child.
+ // TODO: support collection scan for non-TEXT children of OR.
+ if (out->size() == 0 && textNode != NULL && MatchExpression::OR == query.root()->matchType()) {
+ MatchExpression* root = query.root();
+ for (size_t i = 0; i < root->numChildren(); ++i) {
+ if (textNode == root->getChild(i)) {
+ return Status(ErrorCodes::BadValue,
+ "Failed to produce a solution for TEXT under OR - "
+ "other non-TEXT clauses under OR have to be indexed as well.");
}
}
+ }
- // An index was hinted. If there are any solutions, they use the hinted index. If not, we
- // scan the entire index to provide results and output that as our plan. This is the
- // desired behavior when an index is hinted that is not relevant to the query.
- if (!hintIndex.isEmpty()) {
- if (0 == out->size()) {
- QuerySolution* soln = buildWholeIXSoln(params.indices[hintIndexNumber],
- query, params);
- verify(NULL != soln);
- LOG(5) << "Planner: outputting soln that uses hinted index as scan." << endl;
- out->push_back(soln);
+ // An index was hinted. If there are any solutions, they use the hinted index. If not, we
+ // scan the entire index to provide results and output that as our plan. This is the
+ // desired behavior when an index is hinted that is not relevant to the query.
+ if (!hintIndex.isEmpty()) {
+ if (0 == out->size()) {
+ QuerySolution* soln = buildWholeIXSoln(params.indices[hintIndexNumber], query, params);
+ verify(NULL != soln);
+ LOG(5) << "Planner: outputting soln that uses hinted index as scan." << endl;
+ out->push_back(soln);
+ }
+ return Status::OK();
+ }
+
+ // If a sort order is requested, there may be an index that provides it, even if that
+ // index is not over any predicates in the query.
+ //
+ if (!query.getParsed().getSort().isEmpty() &&
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR) &&
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)) {
+ // See if we have a sort provided from an index already.
+ // This is implied by the presence of a non-blocking solution.
+ bool usingIndexToSort = false;
+ for (size_t i = 0; i < out->size(); ++i) {
+ QuerySolution* soln = (*out)[i];
+ if (!soln->hasBlockingStage) {
+ usingIndexToSort = true;
+ break;
}
- return Status::OK();
}
- // If a sort order is requested, there may be an index that provides it, even if that
- // index is not over any predicates in the query.
- //
- if (!query.getParsed().getSort().isEmpty()
- && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)
- && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)) {
-
- // See if we have a sort provided from an index already.
- // This is implied by the presence of a non-blocking solution.
- bool usingIndexToSort = false;
- for (size_t i = 0; i < out->size(); ++i) {
- QuerySolution* soln = (*out)[i];
- if (!soln->hasBlockingStage) {
- usingIndexToSort = true;
- break;
+ if (!usingIndexToSort) {
+ for (size_t i = 0; i < params.indices.size(); ++i) {
+ const IndexEntry& index = params.indices[i];
+ // Only regular (non-plugin) indexes can be used to provide a sort.
+ if (index.type != INDEX_BTREE) {
+ continue;
+ }
+ // Only non-sparse indexes can be used to provide a sort.
+ if (index.sparse) {
+ continue;
}
- }
- if (!usingIndexToSort) {
- for (size_t i = 0; i < params.indices.size(); ++i) {
- const IndexEntry& index = params.indices[i];
- // Only regular (non-plugin) indexes can be used to provide a sort.
- if (index.type != INDEX_BTREE) {
- continue;
- }
- // Only non-sparse indexes can be used to provide a sort.
- if (index.sparse) {
- continue;
- }
+ // TODO: Sparse indexes can't normally provide a sort, because non-indexed
+ // documents could potentially be missing from the result set. However, if the
+ // query predicate can be used to guarantee that all documents to be returned
+ // are indexed, then the index should be able to provide the sort.
+ //
+ // For example:
+ // - Sparse index {a: 1, b: 1} should be able to provide a sort for
+ // find({b: 1}).sort({a: 1}). SERVER-13908.
+ // - Index {a: 1, b: "2dsphere"} (which is "geo-sparse", if
+ // 2dsphereIndexVersion=2) should be able to provide a sort for
+ // find({b: GEO}).sort({a:1}). SERVER-10801.
+
+ const BSONObj kp = QueryPlannerAnalysis::getSortPattern(index.keyPattern);
+ if (providesSort(query, kp)) {
+ LOG(5) << "Planner: outputting soln that uses index to provide sort." << endl;
+ QuerySolution* soln = buildWholeIXSoln(params.indices[i], query, params);
+ if (NULL != soln) {
+ PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
+ indexTree->setIndexEntry(params.indices[i]);
+ SolutionCacheData* scd = new SolutionCacheData();
+ scd->tree.reset(indexTree);
+ scd->solnType = SolutionCacheData::WHOLE_IXSCAN_SOLN;
+ scd->wholeIXSolnDir = 1;
- // TODO: Sparse indexes can't normally provide a sort, because non-indexed
- // documents could potentially be missing from the result set. However, if the
- // query predicate can be used to guarantee that all documents to be returned
- // are indexed, then the index should be able to provide the sort.
- //
- // For example:
- // - Sparse index {a: 1, b: 1} should be able to provide a sort for
- // find({b: 1}).sort({a: 1}). SERVER-13908.
- // - Index {a: 1, b: "2dsphere"} (which is "geo-sparse", if
- // 2dsphereIndexVersion=2) should be able to provide a sort for
- // find({b: GEO}).sort({a:1}). SERVER-10801.
-
- const BSONObj kp = QueryPlannerAnalysis::getSortPattern(index.keyPattern);
- if (providesSort(query, kp)) {
- LOG(5) << "Planner: outputting soln that uses index to provide sort."
- << endl;
- QuerySolution* soln = buildWholeIXSoln(params.indices[i],
- query, params);
- if (NULL != soln) {
- PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
- indexTree->setIndexEntry(params.indices[i]);
- SolutionCacheData* scd = new SolutionCacheData();
- scd->tree.reset(indexTree);
- scd->solnType = SolutionCacheData::WHOLE_IXSCAN_SOLN;
- scd->wholeIXSolnDir = 1;
-
- soln->cacheData.reset(scd);
- out->push_back(soln);
- break;
- }
+ soln->cacheData.reset(scd);
+ out->push_back(soln);
+ break;
}
- if (providesSort(query, QueryPlannerCommon::reverseSortObj(kp))) {
- LOG(5) << "Planner: outputting soln that uses (reverse) index "
- << "to provide sort." << endl;
- QuerySolution* soln = buildWholeIXSoln(params.indices[i], query,
- params, -1);
- if (NULL != soln) {
- PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
- indexTree->setIndexEntry(params.indices[i]);
- SolutionCacheData* scd = new SolutionCacheData();
- scd->tree.reset(indexTree);
- scd->solnType = SolutionCacheData::WHOLE_IXSCAN_SOLN;
- scd->wholeIXSolnDir = -1;
-
- soln->cacheData.reset(scd);
- out->push_back(soln);
- break;
- }
+ }
+ if (providesSort(query, QueryPlannerCommon::reverseSortObj(kp))) {
+ LOG(5) << "Planner: outputting soln that uses (reverse) index "
+ << "to provide sort." << endl;
+ QuerySolution* soln = buildWholeIXSoln(params.indices[i], query, params, -1);
+ if (NULL != soln) {
+ PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
+ indexTree->setIndexEntry(params.indices[i]);
+ SolutionCacheData* scd = new SolutionCacheData();
+ scd->tree.reset(indexTree);
+ scd->solnType = SolutionCacheData::WHOLE_IXSCAN_SOLN;
+ scd->wholeIXSolnDir = -1;
+
+ soln->cacheData.reset(scd);
+ out->push_back(soln);
+ break;
}
}
}
}
+ }
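The reverse-direction branch exists because a btree index can provide a sort in either direction: scanning {a: 1, b: -1} backwards yields {a: -1, b: 1} order. That is what QueryPlannerCommon::reverseSortObj() appears to compute here and what wholeIXSolnDir = -1 records in the cached solution data. A small sketch (the shape of the returned object is inferred from its use above, not stated in this diff):

    BSONObj kp = BSON("a" << 1 << "b" << -1);
    BSONObj reversed = QueryPlannerCommon::reverseSortObj(kp);  // expected: {a: -1, b: 1}
    // providesSort(query, reversed) then asks whether a backward scan of this
    // index satisfies the requested sort; if so, the whole-index solution is
    // built with direction -1.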
- // geoNear and text queries *require* an index.
- // Also, if a hint is specified it indicates that we MUST use it.
- bool possibleToCollscan = !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)
- && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)
- && hintIndex.isEmpty();
-
- // The caller can explicitly ask for a collscan.
- bool collscanRequested = (params.options & QueryPlannerParams::INCLUDE_COLLSCAN);
-
- // No indexed plans? We must provide a collscan if possible or else we can't run the query.
- bool collscanNeeded = (0 == out->size() && canTableScan);
-
- if (possibleToCollscan && (collscanRequested || collscanNeeded)) {
- QuerySolution* collscan = buildCollscanSoln(query, false, params);
- if (NULL != collscan) {
- SolutionCacheData* scd = new SolutionCacheData();
- scd->solnType = SolutionCacheData::COLLSCAN_SOLN;
- collscan->cacheData.reset(scd);
- out->push_back(collscan);
- LOG(5) << "Planner: outputting a collscan:" << endl
- << collscan->toString();
- }
- }
+ // geoNear and text queries *require* an index.
+ // Also, if a hint is specified it indicates that we MUST use it.
+ bool possibleToCollscan =
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR) &&
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT) && hintIndex.isEmpty();
- return Status::OK();
+ // The caller can explicitly ask for a collscan.
+ bool collscanRequested = (params.options & QueryPlannerParams::INCLUDE_COLLSCAN);
+
+ // No indexed plans? We must provide a collscan if possible or else we can't run the query.
+ bool collscanNeeded = (0 == out->size() && canTableScan);
+
+ if (possibleToCollscan && (collscanRequested || collscanNeeded)) {
+ QuerySolution* collscan = buildCollscanSoln(query, false, params);
+ if (NULL != collscan) {
+ SolutionCacheData* scd = new SolutionCacheData();
+ scd->solnType = SolutionCacheData::COLLSCAN_SOLN;
+ collscan->cacheData.reset(scd);
+ out->push_back(collscan);
+ LOG(5) << "Planner: outputting a collscan:" << endl
+ << collscan->toString();
+ }
}
+ return Status::OK();
+}
+
} // namespace mongo
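As the header below documents, plan() fills the out-vector with raw QuerySolution pointers that the caller owns. A hypothetical call site under that contract (the 'cq' and 'plannerParams' locals are assumed for illustration, not part of this change):

    std::vector<QuerySolution*> solutions;
    Status status = QueryPlanner::plan(*cq, plannerParams, &solutions);
    if (status.isOK()) {
        for (size_t i = 0; i < solutions.size(); ++i) {
            LOG(5) << "candidate plan: " << solutions[i]->toString();
        }
    }
    // Caller owns the pointers in 'solutions' and must delete them when done.
    for (size_t i = 0; i < solutions.size(); ++i) {
        delete solutions[i];
    }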
diff --git a/src/mongo/db/query/query_planner.h b/src/mongo/db/query/query_planner.h
index 80d41bef1e8..0b33b109b4e 100644
--- a/src/mongo/db/query/query_planner.h
+++ b/src/mongo/db/query/query_planner.h
@@ -34,84 +34,84 @@
namespace mongo {
- class CachedSolution;
- class Collection;
+class CachedSolution;
+class Collection;
+
+/**
+ * QueryPlanner's job is to provide an entry point to the query planning and optimization
+ * process.
+ */
+class QueryPlanner {
+public:
+ // Identifies the version of the query planner module. Reported in explain.
+ static const int kPlannerVersion;
/**
- * QueryPlanner's job is to provide an entry point to the query planning and optimization
- * process.
+ * Outputs a series of possible solutions for the provided 'query' into 'out'. Uses the
+ * indices and other data in 'params' to plan with.
+ *
+ * Caller owns pointers in *out.
*/
- class QueryPlanner {
- public:
- // Identifies the version of the query planner module. Reported in explain.
- static const int kPlannerVersion;
+ static Status plan(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ std::vector<QuerySolution*>* out);
- /**
- * Outputs a series of possible solutions for the provided 'query' into 'out'. Uses the
- * indices and other data in 'params' to plan with.
- *
- * Caller owns pointers in *out.
- */
- static Status plan(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- std::vector<QuerySolution*>* out);
-
- /**
- * Attempt to generate a query solution, given data retrieved
- * from the plan cache.
- *
- * @param query -- query for which we are generating a plan
- * @param params -- planning parameters
- * @param cachedSoln -- the CachedSolution retrieved from the plan cache.
- * @param out -- an out-parameter which will be filled in with the solution
- * generated from the cache data
- *
- * On success, the caller is responsible for deleting *out.
- */
- static Status planFromCache(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- const CachedSolution& cachedSoln,
- QuerySolution** out);
+ /**
+ * Attempt to generate a query solution, given data retrieved
+ * from the plan cache.
+ *
+ * @param query -- query for which we are generating a plan
+ * @param params -- planning parameters
+ * @param cachedSoln -- the CachedSolution retrieved from the plan cache.
+ * @param out -- an out-parameter which will be filled in with the solution
+ * generated from the cache data
+ *
+ * On success, the caller is responsible for deleting *out.
+ */
+ static Status planFromCache(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ const CachedSolution& cachedSoln,
+ QuerySolution** out);
- /**
- * Used to generate the index tag tree that will be inserted
- * into the plan cache. This data gets stashed inside a QuerySolution
- * until it can be inserted into the cache proper.
- *
- * @param taggedTree -- a MatchExpression with index tags that has been
- * produced by the enumerator.
- * @param relevantIndices -- a list of the index entries used to tag
- * the tree (i.e. index numbers in the tags refer to entries in this vector)
- *
- * On success, a new tagged tree is returned through the out-parameter 'out'.
- * The caller has ownership of both taggedTree and *out.
- *
- * On failure, 'out' is set to NULL.
- */
- static Status cacheDataFromTaggedTree(const MatchExpression* const taggedTree,
- const std::vector<IndexEntry>& relevantIndices,
- PlanCacheIndexTree** out);
+ /**
+ * Used to generate the index tag tree that will be inserted
+ * into the plan cache. This data gets stashed inside a QuerySolution
+ * until it can be inserted into the cache proper.
+ *
+ * @param taggedTree -- a MatchExpression with index tags that has been
+ * produced by the enumerator.
+ * @param relevantIndices -- a list of the index entries used to tag
+ * the tree (i.e. index numbers in the tags refer to entries in this vector)
+ *
+ * On success, a new tagged tree is returned through the out-parameter 'out'.
+ * The caller has ownership of both taggedTree and *out.
+ *
+ * On failure, 'out' is set to NULL.
+ */
+ static Status cacheDataFromTaggedTree(const MatchExpression* const taggedTree,
+ const std::vector<IndexEntry>& relevantIndices,
+ PlanCacheIndexTree** out);
- /**
- * @param filter -- an untagged MatchExpression
- * @param indexTree -- a tree structure retrieved from the
- * cache with index tags that indicates how 'filter' should
- * be tagged.
- * @param indexMap -- needed in order to put the proper index
- * numbers inside the index tags
- *
- * On success, 'filter' is mutated so that it has all the
- * index tags needed in order for the access planner to recreate
- * the cached plan.
- *
- * On failure, the tag state attached to the nodes of 'filter'
- * is invalid. Planning from the cache should be aborted.
- *
- * Does not take ownership of either filter or indexTree.
- */
- static Status tagAccordingToCache(MatchExpression* filter,
- const PlanCacheIndexTree* const indexTree,
- const std::map<BSONObj, size_t>& indexMap);
- };
+ /**
+ * @param filter -- an untagged MatchExpression
+ * @param indexTree -- a tree structure retrieved from the
+ * cache with index tags that indicates how 'filter' should
+ * be tagged.
+ * @param indexMap -- needed in order to put the proper index
+ * numbers inside the index tags
+ *
+ * On success, 'filter' is mutated so that it has all the
+ * index tags needed in order for the access planner to recreate
+ * the cached plan.
+ *
+ * On failure, the tag state attached to the nodes of 'filter'
+ * is invalid. Planning from the cache should be aborted.
+ *
+ * Does not take ownership of either filter or indexTree.
+ */
+ static Status tagAccordingToCache(MatchExpression* filter,
+ const PlanCacheIndexTree* const indexTree,
+ const std::map<BSONObj, size_t>& indexMap);
+};
} // namespace mongo
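Taken together, cacheDataFromTaggedTree() and tagAccordingToCache() describe the plan cache round trip. A hedged sketch of that flow; the locals ('taggedTree', 'relevantIndices', 'filter') and the keying of 'indexMap' by key pattern are illustrative assumptions, not taken from this diff:

    // Write side: snapshot the enumerator's index tags for the plan cache.
    PlanCacheIndexTree* indexTree = NULL;
    Status toCache = QueryPlanner::cacheDataFromTaggedTree(taggedTree, relevantIndices, &indexTree);
    // On failure 'indexTree' is set to NULL; on success the caller owns it.

    // Read side: reapply cached tags to an untagged filter before access planning.
    std::map<BSONObj, size_t> indexMap;  // assumed: key pattern -> position in the index list
    Status fromCache = QueryPlanner::tagAccordingToCache(filter, indexTree, indexMap);
    // On failure the tag state on 'filter' is invalid and cache-based planning is aborted.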
diff --git a/src/mongo/db/query/query_planner_array_test.cpp b/src/mongo/db/query/query_planner_array_test.cpp
index 09d2506fae4..e4c3a69dca1 100644
--- a/src/mongo/db/query/query_planner_array_test.cpp
+++ b/src/mongo/db/query/query_planner_array_test.cpp
@@ -35,921 +35,991 @@
namespace {
- using namespace mongo;
-
- TEST_F(QueryPlannerTest, ElemMatchOneField) {
- addIndex(BSON("a.b" << 1));
- runQuery(fromjson("{a : {$elemMatch: {b:1}}}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a:{$elemMatch:{b:1}}}}}");
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{b:1}}}, node: "
- "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ElemMatchTwoFields) {
- addIndex(BSON("a.b" << 1));
- addIndex(BSON("a.c" << 1));
- runQuery(fromjson("{a : {$elemMatch: {b:1, c:1}}}"));
-
- ASSERT_EQUALS(getNumSolutions(), 3U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a:{$elemMatch:{b:1,c:1}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'a.c': 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, BasicAllElemMatch) {
- addIndex(BSON("foo.a" << 1));
- addIndex(BSON("foo.b" << 1));
- runQuery(fromjson("{foo: {$all: [ {$elemMatch: {a:1, b:1}}, {$elemMatch: {a:2, b:2}}]}}"));
-
- assertNumSolutions(3U);
- assertSolutionExists("{cscan: {dir: 1, filter: {foo:{$all:"
- "[{$elemMatch:{a:1,b:1}},{$elemMatch:{a:2,b:2}}]}}}}");
-
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'foo.a': 1}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'foo.b': 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, BasicAllElemMatch2) {
- // true means multikey
- addIndex(BSON("a.x" << 1), true);
-
- runQuery(fromjson("{a: {$all: [{$elemMatch: {x: 3}}, {$elemMatch: {y: 5}}]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$all:[{$elemMatch:{x:3}},{$elemMatch:{y:5}}]}},"
- "node: {ixscan: {pattern: {'a.x': 1},"
- "bounds: {'a.x': [[3,3,true,true]]}}}}}");
- }
-
- // SERVER-16256
- TEST_F(QueryPlannerTest, AllElemMatchCompound) {
- // true means multikey
- addIndex(BSON("d" << 1 << "a.b" << 1 << "a.c" << 1), true);
-
- runQuery(fromjson("{d: 1, a: {$all: [{$elemMatch: {b: 2, c: 2}},"
- "{$elemMatch: {b: 3, c: 3}}]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and: [{a: {$elemMatch: {b: 2, c: 2}}},"
- "{a: {$elemMatch: {b: 3, c: 3}}}]},"
- "node: {ixscan: {filter: null, pattern: {d:1,'a.b':1,'a.c':1},"
- "bounds: {d: [[1,1,true,true]],"
- "'a.b': [[2,2,true,true]],"
- "'a.c': [[2,2,true,true]]}}}}}");
- }
-
- // SERVER-13677
- TEST_F(QueryPlannerTest, ElemMatchWithAllElemMatchChild) {
- addIndex(BSON("a.b.c.d" << 1));
- runQuery(fromjson("{z: 1, 'a.b': {$elemMatch: {c: {$all: [{$elemMatch: {d: 0}}]}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c.d': 1}}}}}");
- }
-
- // SERVER-13677
- TEST_F(QueryPlannerTest, ElemMatchWithAllElemMatchChild2) {
- // true means multikey
- addIndex(BSON("a.b.c.d" << 1), true);
- runQuery(fromjson("{'a.b': {$elemMatch: {c: {$all: "
- "[{$elemMatch: {d: {$gt: 1, $lt: 3}}}]}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c.d': 1}, "
- "bounds: {'a.b.c.d': [[-Infinity,3,true,false]]}}}}}");
- }
-
- // SERVER-13677
- TEST_F(QueryPlannerTest, ElemMatchWithAllChild) {
- // true means multikey
- addIndex(BSON("a.b.c" << 1), true);
- runQuery(fromjson("{z: 1, 'a.b': {$elemMatch: {c: {$all: [4, 5, 6]}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, "
- "bounds: {'a.b.c': [[4,4,true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ElemMatchValueMatch) {
- addIndex(BSON("foo" << 1));
- addIndex(BSON("foo" << 1 << "bar" << 1));
- runQuery(fromjson("{foo: {$elemMatch: {$gt: 5, $lt: 10}}}"));
-
- ASSERT_EQUALS(getNumSolutions(), 3U);
- assertSolutionExists("{cscan: {dir: 1, filter: {foo:{$elemMatch:{$gt:5,$lt:10}}}}}");
- assertSolutionExists("{fetch: {filter: {foo: {$elemMatch: {$gt: 5, $lt: 10}}}, node: "
- "{ixscan: {filter: null, pattern: {foo: 1}}}}}");
- assertSolutionExists("{fetch: {filter: {foo: {$elemMatch: {$gt: 5, $lt: 10}}}, node: "
- "{ixscan: {filter: null, pattern: {foo: 1, bar: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ElemMatchValueIndexability) {
- addIndex(BSON("foo" << 1));
-
- // An ELEM_MATCH_VALUE can be indexed if all of its child predicates
- // are "index bounds generating".
- runQuery(fromjson("{foo: {$elemMatch: {$gt: 5, $lt: 10}}}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {foo:{$elemMatch:{$gt:5,$lt:10}}}}}");
- assertSolutionExists("{fetch: {filter: {foo: {$elemMatch: {$gt: 5, $lt: 10}}}, node: "
- "{ixscan: {filter: null, pattern: {foo: 1}}}}}");
-
- // We cannot build index bounds for the $size predicate. This means that the
- // ELEM_MATCH_VALUE is not indexable, and we get no indexed solutions.
- runQuery(fromjson("{foo: {$elemMatch: {$gt: 5, $size: 10}}}"));
-
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{cscan: {dir: 1, filter: {foo:{$elemMatch:{$gt:5,$size:10}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ElemMatchNested) {
- addIndex(BSON("a.b.c" << 1));
- runQuery(fromjson("{ a:{ $elemMatch:{ b:{ $elemMatch:{ c:{ $gte:1, $lte:1 } } } } }}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, TwoElemMatchNested) {
- addIndex(BSON("a.d.e" << 1));
- addIndex(BSON("a.b.c" << 1));
- runQuery(fromjson("{ a:{ $elemMatch:{ d:{ $elemMatch:{ e:{ $lte:1 } } },"
- "b:{ $elemMatch:{ c:{ $gte:1 } } } } } }"));
-
- ASSERT_EQUALS(getNumSolutions(), 3U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.d.e': 1}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ElemMatchCompoundTwoFields) {
- addIndex(BSON("a.b" << 1 << "a.c" << 1));
- runQuery(fromjson("{a : {$elemMatch: {b:1, c:1}}}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b': 1, 'a.c': 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ArrayEquality) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{a : [1, 2, 3]}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a:[1,2,3]}}}");
- assertSolutionExists("{fetch: {filter: {a:[1,2,3]}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
- // SERVER-13664
- TEST_F(QueryPlannerTest, ElemMatchEmbeddedAnd) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: {$gte: 2, $lt: 4}, c: 25}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{b:{$gte:2,$lt: 4},c:25}}}, node: "
- "{ixscan: {filter: null, pattern: {'a.b': 1, 'a.c': 1}, "
- "bounds: {'a.b': [[-Infinity,4,true,false]], "
- "'a.c': [[25,25,true,true]]}}}}}");
- }
-
- // SERVER-13664
- TEST_F(QueryPlannerTest, ElemMatchEmbeddedOr) {
- // true means multikey
- addIndex(BSON("a.b" << 1), true);
- // true means multikey
- addIndex(BSON("a.c" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {$or: [{b: 3}, {c: 4}]}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{$or:[{b:3},{c:4}]}}}, "
- "node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {'a.b': 1}}}, "
- "{ixscan: {filter: null, pattern: {'a.c': 1}}}]}}}}");
- }
-
- // SERVER-13664
- TEST_F(QueryPlannerTest, ElemMatchEmbeddedRegex) {
- addIndex(BSON("a.b" << 1));
- runQuery(fromjson("{a: {$elemMatch: {b: /foo/}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{b:/foo/}}}, node: "
- "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
- }
-
- // SERVER-14180
- TEST_F(QueryPlannerTest, ElemMatchEmbeddedRegexAnd) {
- addIndex(BSON("a.b" << 1));
- runQuery(fromjson("{a: {$elemMatch: {b: /foo/}}, z: 1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{b:/foo/}}, z:1}, node: "
- "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
- }
-
- // SERVER-14180
- TEST_F(QueryPlannerTest, ElemMatchEmbeddedRegexAnd2) {
- addIndex(BSON("a.b" << 1));
- runQuery(fromjson("{a: {$elemMatch: {b: /foo/, b: 3}}, z: 1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{b:/foo/,b:3}}, z:1}, node: "
- "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
- }
-
- // $not can appear as a value operator inside of an elemMatch (value). We shouldn't crash if we
- // see it.
- TEST_F(QueryPlannerTest, ElemMatchWithNotInside) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{a: {$elemMatch: {$not: {$gte: 6}}}}"));
- }
-
- // SERVER-14625: Make sure we construct bounds properly for $elemMatch object with a
- // negation inside.
- TEST_F(QueryPlannerTest, ElemMatchWithNotInside2) {
- addIndex(BSON("a.b" << 1 << "a.c" << 1));
- runQuery(fromjson("{d: 1, a: {$elemMatch: {c: {$ne: 3}, b: 4}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {d: 1, a: {$elemMatch: {c: {$ne: 3}, b: 4}}}, node:"
- "{ixscan: {filter: null, pattern: {'a.b': 1, 'a.c': 1}, bounds:"
- "{'a.b': [[4,4,true,true]],"
- " 'a.c': [['MinKey',3,true,false],"
- "[3,'MaxKey',false,true]]}}}}}");
- }
-
- // SERVER-13789
- TEST_F(QueryPlannerTest, ElemMatchIndexedNestedOr) {
- addIndex(BSON("bar.baz" << 1));
- runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {$or: [{baz: 2}]}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and: [{foo:1},"
- "{bar:{$elemMatch:{$or:[{baz:2}]}}}]}, "
- "node: {ixscan: {pattern: {'bar.baz': 1}, "
- "bounds: {'bar.baz': [[2,2,true,true]]}}}}}");
- }
-
- // SERVER-13789
- TEST_F(QueryPlannerTest, ElemMatchIndexedNestedOrMultiplePreds) {
- addIndex(BSON("bar.baz" << 1));
- addIndex(BSON("bar.z" << 1));
- runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {$or: [{baz: 2}, {z: 3}]}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and: [{foo:1},"
- "{bar:{$elemMatch:{$or:[{baz:2},{z:3}]}}}]}, "
- "node: {or: {nodes: ["
- "{ixscan: {pattern: {'bar.baz': 1}, "
- "bounds: {'bar.baz': [[2,2,true,true]]}}},"
- "{ixscan: {pattern: {'bar.z': 1}, "
- "bounds: {'bar.z': [[3,3,true,true]]}}}]}}}}");
- }
-
- // SERVER-13789: Ensure that we properly compound in the multikey case when an
- // $or is beneath an $elemMatch.
- TEST_F(QueryPlannerTest, ElemMatchIndexedNestedOrMultikey) {
- // true means multikey
- addIndex(BSON("bar.baz" << 1 << "bar.z" << 1), true);
- runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {$or: [{baz: 2, z: 3}]}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and: [{foo:1},"
- "{bar: {$elemMatch: {$or: [{$and: [{baz:2}, {z:3}]}]}}}]},"
- "node: {ixscan: {pattern: {'bar.baz': 1, 'bar.z': 1}, "
- "bounds: {'bar.baz': [[2,2,true,true]],"
- "'bar.z': [[3,3,true,true]]}}}}}");
- }
-
- // SERVER-13789: Right now we don't index $nor, but make sure that the planner
- // doesn't get confused by a $nor beneath an $elemMatch.
- TEST_F(QueryPlannerTest, ElemMatchIndexedNestedNor) {
- addIndex(BSON("bar.baz" << 1));
- runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {$nor: [{baz: 2}, {baz: 3}]}}}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // SERVER-13789
- TEST_F(QueryPlannerTest, ElemMatchIndexedNestedNE) {
- addIndex(BSON("bar.baz" << 1));
- runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {baz: {$ne: 2}}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and: [{foo:1},"
- "{bar:{$elemMatch:{baz:{$ne:2}}}}]}, "
- "node: {ixscan: {pattern: {'bar.baz': 1}, "
- "bounds: {'bar.baz': [['MinKey',2,true,false], "
- "[2,'MaxKey',false,true]]}}}}}");
- }
-
- // SERVER-13789: Make sure we properly handle an $or below $elemMatch that is not
- // tagged by the enumerator to use an index.
- TEST_F(QueryPlannerTest, ElemMatchNestedOrNotIndexed) {
- addIndex(BSON("a.b" << 1));
- runQuery(fromjson("{c: 1, a: {$elemMatch: {b: 3, $or: [{c: 4}, {c: 5}]}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b': 1}, bounds: "
- "{'a.b': [[3,3,true,true]]}}}}}");
- }
-
- // The index bounds can be compounded because the index is not multikey.
- TEST_F(QueryPlannerTest, CompoundBoundsElemMatchNotMultikey) {
- addIndex(BSON("a.x" << 1 << "a.b.c" << 1));
- runQuery(fromjson("{'a.x': 1, a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1}}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{b:{$elemMatch:{c:{$gte:1}}}}}}, "
- "node: {ixscan: {pattern: {'a.x':1, 'a.b.c':1}, bounds: "
- "{'a.x': [[1,1,true,true]], "
- " 'a.b.c': [[1,Infinity,true,true]]}}}}}");
- }
-
- // The index bounds cannot be compounded because the predicates over 'a.x' and
- // 'a.b.c' 1) share the prefix "a", and 2) are not conjoined by an $elemMatch
- // over the prefix "a".
- TEST_F(QueryPlannerTest, CompoundMultikeyBoundsElemMatch) {
- // true means multikey
- addIndex(BSON("a.x" << 1 << "a.b.c" << 1), true);
- runQuery(fromjson("{'a.x': 1, a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1}}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.x':1, 'a.b.c':1}, bounds: "
- "{'a.x': [[1,1,true,true]], "
- " 'a.b.c': [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // The index bounds cannot be intersected because the index is multikey.
- // The bounds could be intersected if there was an $elemMatch applied to path
- // "a.b.c". However, the $elemMatch is applied to the path "a.b" rather than
- // the full path of the indexed field.
- TEST_F(QueryPlannerTest, MultikeyNestedElemMatch) {
- // true means multikey
- addIndex(BSON("a.b.c" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1, $lte: 1}}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, bounds: "
- "{'a.b.c': [[-Infinity, 1, true, true]]}}}}}");
- }
-
- // The index bounds cannot be intersected because the index is multikey.
- // The bounds could be intersected if there was an $elemMatch applied to path
- // "a.b.c". However, the $elemMatch is applied to the path "a.b" rather than
- // the full path of the indexed field.
- TEST_F(QueryPlannerTest, MultikeyNestedElemMatchIn) {
- // true means multikey
- addIndex(BSON("a.b.c" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1, $in:[2]}}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, bounds: "
- "{'a.b.c': [[1, Infinity, true, true]]}}}}}");
- }
-
- // The bounds can be compounded because the index is not multikey.
- TEST_F(QueryPlannerTest, TwoNestedElemMatchBounds) {
- addIndex(BSON("a.d.e" << 1 << "a.b.c" << 1));
- runQuery(fromjson("{a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}},"
- "b: {$elemMatch: {c: {$gte: 1}}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.d.e': 1, 'a.b.c': 1}, bounds: "
- "{'a.d.e': [[-Infinity, 1, true, true]],"
- "'a.b.c': [[1, Infinity, true, true]]}}}}}");
- }
-
- // The bounds cannot be compounded. Although there is an $elemMatch over the
- // shared path prefix 'a', the predicates must be conjoined by the same $elemMatch,
- // without nested $elemMatch's intervening. The bounds could be compounded if
- // the query were rewritten as {a: {$elemMatch: {'d.e': {$lte: 1}, 'b.c': {$gte: 1}}}}.
- TEST_F(QueryPlannerTest, MultikeyTwoNestedElemMatchBounds) {
- // true means multikey
- addIndex(BSON("a.d.e" << 1 << "a.b.c" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}},"
- "b: {$elemMatch: {c: {$gte: 1}}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.d.e': 1, 'a.b.c': 1}, bounds: "
- "{'a.d.e': [[-Infinity, 1, true, true]],"
- "'a.b.c': [['MinKey', 'MaxKey', true, true]]}}}}}");
- }
-
- // Bounds can be intersected for a multikey index when the predicates are
- // joined by an $elemMatch over the full path of the index field.
- TEST_F(QueryPlannerTest, MultikeyElemMatchValue) {
- // true means multikey
- addIndex(BSON("a.b" << 1), true);
- runQuery(fromjson("{'a.b': {$elemMatch: {$gte: 1, $lte: 1}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b': 1}, bounds: "
- "{'a.b': [[1, 1, true, true]]}}}}}");
- }
-
- // We can intersect the bounds for all three predicates because
- // the index is not multikey.
- TEST_F(QueryPlannerTest, ElemMatchInterectBoundsNotMultikey) {
- addIndex(BSON("a.b" << 1));
- runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {$gte: 1, $lte: 4}}}},"
- "'a.b': {$in: [2,5]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b': 1}, bounds: "
- "{'a.b': [[2, 2, true, true]]}}}}}");
- }
-
- // Bounds can be intersected for a multikey index when the predicates are
- // joined by an $elemMatch over the full path of the index field. The bounds
- // from the $in predicate are not intersected with the bounds from the
- // remaining two predicates because the $in is not joined to the other
- // predicates with an $elemMatch.
- TEST_F(QueryPlannerTest, ElemMatchInterectBoundsMultikey) {
- // true means multikey
- addIndex(BSON("a.b" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {$gte: 1, $lte: 4}}}},"
- "'a.b': {$in: [2,5]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b': 1}, bounds: "
- "{'a.b': [[1, 4, true, true]]}}}}}");
- }
-
- // Bounds can be intersected because the predicates are joined by an
- // $elemMatch over the path "a.b.c", the full path of the multikey
- // index field.
- TEST_F(QueryPlannerTest, MultikeyNestedElemMatchValue) {
- // true means multikey
- addIndex(BSON("a.b.c" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {'b.c': {$elemMatch: {$gte: 1, $lte: 1}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, bounds: "
- "{'a.b.c': [[1, 1, true, true]]}}}}}");
- }
-
- // Bounds cannot be compounded for a multikey compound index when
- // the predicates share a prefix (and there is no $elemMatch).
- TEST_F(QueryPlannerTest, MultikeySharedPrefixNoElemMatch) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{'a.b': 1, 'a.c': 1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
- "{'a.b': [[1,1,true,true]], "
- " 'a.c': [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // Bounds can be compounded because there is an $elemMatch applied to the
- // shared prefix "a".
- TEST_F(QueryPlannerTest, MultikeySharedPrefixElemMatch) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: 1, c: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
- "{'a.b': [[1,1,true,true]], 'a.c': [[1,1,true,true]]}}}}}");
- }
-
- // Bounds cannot be compounded for the multikey index even though there is an
- // $elemMatch, because the $elemMatch does not join the two predicates. This
- // query is semantically identical to {'a.b': 1, 'a.c': 1}.
- TEST_F(QueryPlannerTest, MultikeySharedPrefixElemMatchNotShared) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{'a.b': 1, a: {$elemMatch: {c: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
- "{'a.b': [[1,1,true,true]], "
- " 'a.c': [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // Bounds cannot be compounded for the multikey index even though there are
- // $elemMatch's, because there is not an $elemMatch which joins the two
- // predicates. This query is semantically identical to {'a.b': 1, 'a.c': 1}.
- TEST_F(QueryPlannerTest, MultikeySharedPrefixTwoElemMatches) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{$and: [{a: {$elemMatch: {b: 1}}}, {a: {$elemMatch: {c: 1}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
- "{'a.b': [[1,1,true,true]], "
- " 'a.c': [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // Bounds for the predicates joined by the $elemMatch over the shared prefix
- // "a" can be combined. However, the predicate 'a.b'==1 cannot also be combined
- // given that it is outside of the $elemMatch.
- TEST_F(QueryPlannerTest, MultikeySharedPrefixNoIntersectOutsideElemMatch) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{'a.b': 1, a: {$elemMatch: {b: {$gt: 0}, c: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
- "{'a.b': [[0,Infinity,false,true]], "
- " 'a.c': [[1,1,true,true]]}}}}}");
- }
-
- // Bounds for the predicates joined by the $elemMatch over the shared prefix
- // "a" can be combined. However, the predicate outside the $elemMatch
- // cannot also be combined.
- TEST_F(QueryPlannerTest, MultikeySharedPrefixNoIntersectOutsideElemMatch2) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: 1, c: 1}}, 'a.b': 1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
- "{'a.b': [[1,1,true,true]], "
- " 'a.c': [[1,1,true,true]]}}}}}");
- }
-
- // Bounds for the predicates joined by the $elemMatch over the shared prefix
- // "a" can be combined. However, the predicate outside the $elemMatch
- // cannot also be combined.
- TEST_F(QueryPlannerTest, MultikeySharedPrefixNoIntersectOutsideElemMatch3) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{'a.c': 2, a: {$elemMatch: {b: 1, c: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
- "{'a.b': [[1,1,true,true]], "
- " 'a.c': [[1,1,true,true]]}}}}}");
- }
-
- // There are two sets of fields that share a prefix: {'a.b', 'a.c'} and
- // {'d.e', 'd.f'}. Since the index is multikey, we can only use the bounds from
- // one member of each of these sets.
- TEST_F(QueryPlannerTest, MultikeyTwoSharedPrefixesBasic) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1 << "d.e" << 1 << "d.f" << 1), true);
- runQuery(fromjson("{'a.b': 1, 'a.c': 1, 'd.e': 1, 'd.f': 1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1,'d.e':1,'d.f':1},"
- "bounds: {'a.b':[[1,1,true,true]], "
- " 'a.c':[['MinKey','MaxKey',true,true]], "
- " 'd.e':[[1,1,true,true]], "
- " 'd.f':[['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // All bounds can be combined. Although 'a.b' and 'a.c' share prefix 'a', the
- // relevant predicates are joined by an $elemMatch on 'a'. Similarly, predicates
- // over 'd.e' and 'd.f' are joined by an $elemMatch on 'd'.
- TEST_F(QueryPlannerTest, MultikeyTwoSharedPrefixesTwoElemMatch) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1 << "d.e" << 1 << "d.f" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: 1, c: 1}}, d: {$elemMatch: {e: 1, f: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and: [{a: {$elemMatch: {b: 1, c: 1}}},"
- "{d: {$elemMatch: {e: 1, f: 1}}}]},"
- "node: {ixscan: {pattern: {'a.b':1,'a.c':1,'d.e':1,'d.f':1},"
- "bounds: {'a.b':[[1,1,true,true]], "
- " 'a.c':[[1,1,true,true]], "
- " 'd.e':[[1,1,true,true]], "
- " 'd.f':[[1,1,true,true]]}}}}}");
- }
-
- // Bounds for 'a.b' and 'a.c' can be combined because of the $elemMatch on 'a'.
- // Since the predicates on 'd.e' and 'd.f' have no $elemMatch, we use the bounds
- // for only one of the two.
- TEST_F(QueryPlannerTest, MultikeyTwoSharedPrefixesOneElemMatch) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1 << "d.e" << 1 << "d.f" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: 1, c: 1}}, 'd.e': 1, 'd.f': 1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and:[{a:{$elemMatch:{b:1,c:1}}}, {'d.f':1}]},"
- "node: {ixscan: {pattern: {'a.b':1,'a.c':1,'d.e':1,'d.f':1},"
- "bounds: {'a.b':[[1,1,true,true]], "
- " 'a.c':[[1,1,true,true]], "
- " 'd.e':[[1,1,true,true]], "
- " 'd.f':[['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // Bounds for 'd.e' and 'd.f' can be combined because of the $elemMatch on 'd'.
- // Since the predicates on 'a.b' and 'a.c' have no $elemMatch, we use the bounds
- // for only one of the two.
- TEST_F(QueryPlannerTest, MultikeyTwoSharedPrefixesOneElemMatch2) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1 << "d.e" << 1 << "d.f" << 1), true);
- runQuery(fromjson("{'a.b': 1, 'a.c': 1, d: {$elemMatch: {e: 1, f: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and:[{d:{$elemMatch:{e:1,f:1}}}, {'a.c':1}]},"
- "node: {ixscan: {pattern: {'a.b':1,'a.c':1,'d.e':1,'d.f':1},"
- "bounds: {'a.b':[[1,1,true,true]], "
- " 'a.c':[['MinKey','MaxKey',true,true]], "
- " 'd.e':[[1,1,true,true]], "
- " 'd.f':[[1,1,true,true]]}}}}}");
- }
-
- // The bounds cannot be compounded because 'a.b.x' and 'a.b.y' share prefix
- // 'a.b' (and there is no $elemMatch).
- TEST_F(QueryPlannerTest, MultikeyDoubleDottedNoElemMatch) {
- // true means multikey
- addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);
- runQuery(fromjson("{'a.b.y': 1, 'a.b.x': 1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
- "{'a.b.x': [[1,1,true,true]], "
- " 'a.b.y': [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // The bounds can be compounded because the predicates are joined by an
- // $elemMatch on the shared prefix "a.b".
- TEST_F(QueryPlannerTest, MultikeyDoubleDottedElemMatch) {
- // true means multikey
- addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {x: 1, y: 1}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
- "{'a.b.x': [[1,1,true,true]], "
- " 'a.b.y': [[1,1,true,true]]}}}}}");
- }
-
- // The bounds cannot be compounded. Although there is an $elemMatch that appears
- // to join the predicates, the path to which the $elemMatch is applied is "a".
- // Therefore, the predicates contained in the $elemMatch are over "b.x" and "b.y".
- // They cannot be compounded due to shared prefix "b".
- TEST_F(QueryPlannerTest, MultikeyDoubleDottedUnhelpfulElemMatch) {
- // true means multikey
- addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {'b.x': 1, 'b.y': 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
- "{'a.b.x': [[1,1,true,true]], "
- " 'a.b.y': [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // The bounds can be compounded because the predicates are joined by an
- // $elemMatch on the shared prefix "a.b".
- TEST_F(QueryPlannerTest, MultikeyDoubleDottedElemMatchOnDotted) {
- // true means multikey
- addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);
- runQuery(fromjson("{'a.b': {$elemMatch: {x: 1, y: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
- "{'a.b.x': [[1,1,true,true]], "
- " 'a.b.y': [[1,1,true,true]]}}}}}");
- }
-
- // This one is subtle. Say we compound the bounds for predicates over "a.b.c" and
- // "a.b.d". This is okay because of the predicate over the shared prefix "a.b".
- // It might seem like we can do the same for the $elemMatch over shared prefix "a.e",
- // thus combining all bounds. But in fact, we can't combine any more bounds because
- // we have already used prefix "a". In other words, this query is like having predicates
- // over "a.b" and "a.e", so we can only use bounds from one of the two.
- TEST_F(QueryPlannerTest, MultikeyComplexDoubleDotted) {
- // true means multikey
- addIndex(BSON("a.b.c" << 1 << "a.e.f" << 1 << "a.b.d" << 1 << "a.e.g" << 1), true);
- runQuery(fromjson("{'a.b': {$elemMatch: {c: 1, d: 1}}, "
- "'a.e': {$elemMatch: {f: 1, g: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c':1,'a.e.f':1,'a.b.d':1,'a.e.g':1},"
- "bounds: {'a.b.c':[[1,1,true,true]], "
- " 'a.e.f':[['MinKey','MaxKey',true,true]], "
- " 'a.b.d':[[1,1,true,true]], "
- " 'a.e.g':[['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // Similar to MultikeyComplexDoubleDotted above.
- TEST_F(QueryPlannerTest, MultikeyComplexDoubleDotted2) {
- // true means multikey
- addIndex(BSON("a.b.c" << 1 << "a.e.c" << 1 << "a.b.d" << 1 << "a.e.d" << 1), true);
- runQuery(fromjson("{'a.b': {$elemMatch: {c: 1, d: 1}}, "
- "'a.e': {$elemMatch: {f: 1, g: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c':1,'a.e.c':1,'a.b.d':1,'a.e.d':1},"
- "bounds: {'a.b.c':[[1,1,true,true]], "
- " 'a.e.c':[['MinKey','MaxKey',true,true]], "
- " 'a.b.d':[[1,1,true,true]], "
- " 'a.e.d':[['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // SERVER-13422: check that we plan $elemMatch object correctly with
- // index intersection.
- TEST_F(QueryPlannerTest, ElemMatchIndexIntersection) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("shortId" << 1));
- // true means multikey
- addIndex(BSON("a.b.startDate" << 1), true);
- addIndex(BSON("a.b.endDate" << 1), true);
-
- runQuery(fromjson("{shortId: 3, 'a.b': {$elemMatch: {startDate: {$lte: 3},"
- "endDate: {$gt: 6}}}}"));
-
- assertNumSolutions(6U);
-
- // 3 single index solutions.
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {shortId: 1}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.startDate': 1}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.endDate': 1}}}}}");
-
- // 3 index intersection solutions. The last one has to intersect two
- // predicates within the $elemMatch object.
- assertSolutionExists("{fetch: {node: {andHash: {nodes: ["
- "{ixscan: {pattern: {shortId: 1}}},"
- "{ixscan: {pattern: {'a.b.startDate': 1}}}]}}}}");
- assertSolutionExists("{fetch: {node: {andHash: {nodes: ["
- "{ixscan: {pattern: {shortId: 1}}},"
- "{ixscan: {pattern: {'a.b.endDate': 1}}}]}}}}");
- assertSolutionExists("{fetch: {node: {andHash: {nodes: ["
- "{ixscan: {pattern: {'a.b.startDate': 1}}},"
- "{ixscan: {pattern: {'a.b.endDate': 1}}}]}}}}");
- }
-
- // SERVER-14718
- TEST_F(QueryPlannerTest, NegationBelowElemMatchValue) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- // true means multikey
- addIndex(BSON("a" << 1), true);
-
- runQuery(fromjson("{a: {$elemMatch: {$ne: 2}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{$ne:2}}}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}, bounds: "
- "{a: [['MinKey',2,true,false], [2,'MaxKey',false,true]]}}}}}");
- }
-
- // SERVER-14718
- TEST_F(QueryPlannerTest, AndWithNegationBelowElemMatchValue) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- // true means multikey
- addIndex(BSON("a" << 1), true);
- addIndex(BSON("b" << 1), true);
-
- runQuery(fromjson("{b: 10, a: {$elemMatch: {$not: {$gt: 4}}}}"));
-
- // One solution using index on 'b' and one using index on 'a'.
- assertNumSolutions(2U);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {b: 1}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {a: 1}, bounds: {a: "
- "[['MinKey',4,true,true],[Infinity,'MaxKey',false,true]]}}}}}");
- }
-
- // SERVER-14718
- TEST_F(QueryPlannerTest, AndWithNegationBelowElemMatchValue2) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- // true means multikey
- addIndex(BSON("a" << 1), true);
-
- runQuery(fromjson("{b: 10, a: {$elemMatch: {$not: {$gt: 4}, $gt: 2}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {a: 1}, bounds: "
- "{a: [[2, 4, false, true]]}}}}}");
- }
-
- // SERVER-14718
- TEST_F(QueryPlannerTest, NegationBelowElemMatchValueBelowElemMatchObject) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- // true means multikey
- addIndex(BSON("a.b" << 1), true);
-
- runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {$ne: 4}}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'a.b': 1}, bounds: "
- "{'a.b': [['MinKey',4,true,false],[4,'MaxKey',false,true]]}}}}}");
- }
-
- // SERVER-14718
- TEST_F(QueryPlannerTest, NegationBelowElemMatchValueBelowOrBelowAnd) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- // true means multikey
- addIndex(BSON("a" << 1), true);
- addIndex(BSON("b" << 1));
-
- runQuery(fromjson("{c: 3, $or: [{a: {$elemMatch: {$ne: 4, $ne: 3}}}, {b: 5}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {c:3}, node: {or: {nodes: ["
- "{fetch: {node: {ixscan: {filter: null, pattern: {a: 1}, bounds: "
- "{a: [['MinKey',3,true,false],"
- "[3,4,false,false],"
- "[4,'MaxKey',false,true]]}}}}}, "
- "{ixscan: {filter: null, pattern: {b: 1}, bounds: "
- "{b: [[5,5,true,true]]}}}]}}}}");
- }
-
- // SERVER-14718
- TEST_F(QueryPlannerTest, CantIndexNegationBelowElemMatchValue) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- // true means multikey
- addIndex(BSON("a" << 1), true);
-
- runQuery(fromjson("{a: {$elemMatch: {$not: {$mod: [2, 0]}}}}"));
-
- // There are no indexed solutions, because negations of $mod are not indexable.
- assertNumSolutions(0);
- }
-
- /**
- * Index bounds constraints on a field should not be intersected
- * if the index is multikey.
- */
- TEST_F(QueryPlannerTest, MultikeyTwoConstraintsSameField) {
- addIndex(BSON("a" << 1), true);
- runQuery(fromjson("{a: {$gt: 0, $lt: 5}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {filter: {$and: [{a: {$lt: 5}}, {a: {$gt: 0}}]}, dir: 1}}");
-
- std::vector<std::string> alternates;
- alternates.push_back("{fetch: {filter: {a: {$lt: 5}}, node: {ixscan: {filter: null, "
- "pattern: {a: 1}, bounds: {a: [[0, Infinity, false, true]]}}}}}");
- alternates.push_back("{fetch: {filter: {a: {$gt: 0}}, node: {ixscan: {filter: null, "
- "pattern: {a: 1}, bounds: {a: [[-Infinity, 5, true, false]]}}}}}");
- assertHasOneSolutionOf(alternates);
- }
-
- /**
- * Constraints on fields with a shared parent should not be intersected
- * if the index is multikey.
- */
- TEST_F(QueryPlannerTest, MultikeyTwoConstraintsDifferentFields) {
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{'a.b': 2, 'a.c': 3}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {filter: {$and: [{'a.b': 2}, {'a.c': 3}]}, dir: 1}}");
-
- std::vector<std::string> alternates;
- alternates.push_back("{fetch: {filter: {'a.c': 3}, node: {ixscan: {filter: null, "
- "pattern: {'a.b': 1, 'a.c': 1}, bounds: "
- "{'a.b': [[2,2,true,true]], "
- " 'a.c': [['MinKey','MaxKey',true,true]]}}}}}");
- alternates.push_back("{fetch: {filter: {'a.b': 2}, node: {ixscan: {filter: null, "
- "pattern: {'a.b': 1, 'a.c': 1}, bounds: "
- "{'a.b': [['MinKey','MaxKey',true,true]], "
- " 'a.c': [[3,3,true,true]]}}}}}");
- assertHasOneSolutionOf(alternates);
- }
+using namespace mongo;
+
+TEST_F(QueryPlannerTest, ElemMatchOneField) {
+ addIndex(BSON("a.b" << 1));
+ runQuery(fromjson("{a : {$elemMatch: {b:1}}}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a:{$elemMatch:{b:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{b:1}}}, node: "
+ "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ElemMatchTwoFields) {
+ addIndex(BSON("a.b" << 1));
+ addIndex(BSON("a.c" << 1));
+ runQuery(fromjson("{a : {$elemMatch: {b:1, c:1}}}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 3U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a:{$elemMatch:{b:1,c:1}}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'a.c': 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, BasicAllElemMatch) {
+ addIndex(BSON("foo.a" << 1));
+ addIndex(BSON("foo.b" << 1));
+ runQuery(fromjson("{foo: {$all: [ {$elemMatch: {a:1, b:1}}, {$elemMatch: {a:2, b:2}}]}}"));
+
+ assertNumSolutions(3U);
+ assertSolutionExists(
+ "{cscan: {dir: 1, filter: {foo:{$all:"
+ "[{$elemMatch:{a:1,b:1}},{$elemMatch:{a:2,b:2}}]}}}}");
+
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'foo.a': 1}}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'foo.b': 1}}}}}");
+}
+
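+// In the expected solution strings below, index bounds are written as
+// [start, end, startInclusive, endInclusive]; 'MinKey'/'MaxKey' mean that the
+// scan over that field is unconstrained.
+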
+TEST_F(QueryPlannerTest, BasicAllElemMatch2) {
+ // true means multikey
+ addIndex(BSON("a.x" << 1), true);
+
+ runQuery(fromjson("{a: {$all: [{$elemMatch: {x: 3}}, {$elemMatch: {y: 5}}]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$all:[{$elemMatch:{x:3}},{$elemMatch:{y:5}}]}},"
+ "node: {ixscan: {pattern: {'a.x': 1},"
+ "bounds: {'a.x': [[3,3,true,true]]}}}}}");
+}
+
+// SERVER-16256
+TEST_F(QueryPlannerTest, AllElemMatchCompound) {
+ // true means multikey
+ addIndex(BSON("d" << 1 << "a.b" << 1 << "a.c" << 1), true);
+
+ runQuery(fromjson(
+ "{d: 1, a: {$all: [{$elemMatch: {b: 2, c: 2}},"
+ "{$elemMatch: {b: 3, c: 3}}]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{a: {$elemMatch: {b: 2, c: 2}}},"
+ "{a: {$elemMatch: {b: 3, c: 3}}}]},"
+ "node: {ixscan: {filter: null, pattern: {d:1,'a.b':1,'a.c':1},"
+ "bounds: {d: [[1,1,true,true]],"
+ "'a.b': [[2,2,true,true]],"
+ "'a.c': [[2,2,true,true]]}}}}}");
+}
+
+// SERVER-13677
+TEST_F(QueryPlannerTest, ElemMatchWithAllElemMatchChild) {
+ addIndex(BSON("a.b.c.d" << 1));
+ runQuery(fromjson("{z: 1, 'a.b': {$elemMatch: {c: {$all: [{$elemMatch: {d: 0}}]}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c.d': 1}}}}}");
+}
+
+// SERVER-13677
+TEST_F(QueryPlannerTest, ElemMatchWithAllElemMatchChild2) {
+ // true means multikey
+ addIndex(BSON("a.b.c.d" << 1), true);
+ runQuery(fromjson(
+ "{'a.b': {$elemMatch: {c: {$all: "
+ "[{$elemMatch: {d: {$gt: 1, $lt: 3}}}]}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.c.d': 1}, "
+ "bounds: {'a.b.c.d': [[-Infinity,3,true,false]]}}}}}");
+}
+
+// SERVER-13677
+TEST_F(QueryPlannerTest, ElemMatchWithAllChild) {
+ // true means multikey
+ addIndex(BSON("a.b.c" << 1), true);
+ runQuery(fromjson("{z: 1, 'a.b': {$elemMatch: {c: {$all: [4, 5, 6]}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, "
+ "bounds: {'a.b.c': [[4,4,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ElemMatchValueMatch) {
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("foo" << 1 << "bar" << 1));
+ runQuery(fromjson("{foo: {$elemMatch: {$gt: 5, $lt: 10}}}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 3U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {foo:{$elemMatch:{$gt:5,$lt:10}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {foo: {$elemMatch: {$gt: 5, $lt: 10}}}, node: "
+ "{ixscan: {filter: null, pattern: {foo: 1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {foo: {$elemMatch: {$gt: 5, $lt: 10}}}, node: "
+ "{ixscan: {filter: null, pattern: {foo: 1, bar: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ElemMatchValueIndexability) {
+ addIndex(BSON("foo" << 1));
+
+ // An ELEM_MATCH_VALUE can be indexed if all of its child predicates
+ // are "index bounds generating".
+ runQuery(fromjson("{foo: {$elemMatch: {$gt: 5, $lt: 10}}}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {foo:{$elemMatch:{$gt:5,$lt:10}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {foo: {$elemMatch: {$gt: 5, $lt: 10}}}, node: "
+ "{ixscan: {filter: null, pattern: {foo: 1}}}}}");
+
+ // We cannot build index bounds for the $size predicate. This means that the
+ // ELEM_MATCH_VALUE is not indexable, and we get no indexed solutions.
+ runQuery(fromjson("{foo: {$elemMatch: {$gt: 5, $size: 10}}}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {foo:{$elemMatch:{$gt:5,$size:10}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ElemMatchNested) {
+ addIndex(BSON("a.b.c" << 1));
+ runQuery(fromjson("{ a:{ $elemMatch:{ b:{ $elemMatch:{ c:{ $gte:1, $lte:1 } } } } }}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, TwoElemMatchNested) {
+ addIndex(BSON("a.d.e" << 1));
+ addIndex(BSON("a.b.c" << 1));
+ runQuery(fromjson(
+ "{ a:{ $elemMatch:{ d:{ $elemMatch:{ e:{ $lte:1 } } },"
+ "b:{ $elemMatch:{ c:{ $gte:1 } } } } } }"));
+
+ ASSERT_EQUALS(getNumSolutions(), 3U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.d.e': 1}}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ElemMatchCompoundTwoFields) {
+ addIndex(BSON("a.b" << 1 << "a.c" << 1));
+ runQuery(fromjson("{a : {$elemMatch: {b:1, c:1}}}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b': 1, 'a.c': 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ArrayEquality) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{a : [1, 2, 3]}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a:[1,2,3]}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:[1,2,3]}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+// SERVER-13664
+TEST_F(QueryPlannerTest, ElemMatchEmbeddedAnd) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {b: {$gte: 2, $lt: 4}, c: 25}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{b:{$gte:2,$lt: 4},c:25}}}, node: "
+ "{ixscan: {filter: null, pattern: {'a.b': 1, 'a.c': 1}, "
+ "bounds: {'a.b': [[-Infinity,4,true,false]], "
+ "'a.c': [[25,25,true,true]]}}}}}");
+}
+
+// SERVER-13664
+TEST_F(QueryPlannerTest, ElemMatchEmbeddedOr) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1), true);
+ // true means multikey
+ addIndex(BSON("a.c" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {$or: [{b: 3}, {c: 4}]}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{$or:[{b:3},{c:4}]}}}, "
+ "node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {'a.b': 1}}}, "
+ "{ixscan: {filter: null, pattern: {'a.c': 1}}}]}}}}");
+}
+
+// SERVER-13664
+TEST_F(QueryPlannerTest, ElemMatchEmbeddedRegex) {
+ addIndex(BSON("a.b" << 1));
+ runQuery(fromjson("{a: {$elemMatch: {b: /foo/}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{b:/foo/}}}, node: "
+ "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
+}
+
+// SERVER-14180
+TEST_F(QueryPlannerTest, ElemMatchEmbeddedRegexAnd) {
+ addIndex(BSON("a.b" << 1));
+ runQuery(fromjson("{a: {$elemMatch: {b: /foo/}}, z: 1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{b:/foo/}}, z:1}, node: "
+ "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
+}
+
+// SERVER-14180
+TEST_F(QueryPlannerTest, ElemMatchEmbeddedRegexAnd2) {
+ addIndex(BSON("a.b" << 1));
+ runQuery(fromjson("{a: {$elemMatch: {b: /foo/, b: 3}}, z: 1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{b:/foo/,b:3}}, z:1}, node: "
+ "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
+}
+
+// $not can appear as a value operator inside an $elemMatch (value). We shouldn't crash if we
+// see it.
+TEST_F(QueryPlannerTest, ElemMatchWithNotInside) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{a: {$elemMatch: {$not: {$gte: 6}}}}"));
+}
+
+// SERVER-14625: Make sure we construct bounds properly for $elemMatch object with a
+// negation inside.
+TEST_F(QueryPlannerTest, ElemMatchWithNotInside2) {
+ addIndex(BSON("a.b" << 1 << "a.c" << 1));
+ runQuery(fromjson("{d: 1, a: {$elemMatch: {c: {$ne: 3}, b: 4}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {d: 1, a: {$elemMatch: {c: {$ne: 3}, b: 4}}}, node:"
+ "{ixscan: {filter: null, pattern: {'a.b': 1, 'a.c': 1}, bounds:"
+ "{'a.b': [[4,4,true,true]],"
+ " 'a.c': [['MinKey',3,true,false],"
+ "[3,'MaxKey',false,true]]}}}}}");
+}
+
+// SERVER-13789
+TEST_F(QueryPlannerTest, ElemMatchIndexedNestedOr) {
+ addIndex(BSON("bar.baz" << 1));
+ runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {$or: [{baz: 2}]}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{foo:1},"
+ "{bar:{$elemMatch:{$or:[{baz:2}]}}}]}, "
+ "node: {ixscan: {pattern: {'bar.baz': 1}, "
+ "bounds: {'bar.baz': [[2,2,true,true]]}}}}}");
+}
+
+// SERVER-13789
+TEST_F(QueryPlannerTest, ElemMatchIndexedNestedOrMultiplePreds) {
+ addIndex(BSON("bar.baz" << 1));
+ addIndex(BSON("bar.z" << 1));
+ runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {$or: [{baz: 2}, {z: 3}]}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{foo:1},"
+ "{bar:{$elemMatch:{$or:[{baz:2},{z:3}]}}}]}, "
+ "node: {or: {nodes: ["
+ "{ixscan: {pattern: {'bar.baz': 1}, "
+ "bounds: {'bar.baz': [[2,2,true,true]]}}},"
+ "{ixscan: {pattern: {'bar.z': 1}, "
+ "bounds: {'bar.z': [[3,3,true,true]]}}}]}}}}");
+}
+
+// SERVER-13789: Ensure that we properly compound in the multikey case when an
+// $or is beneath an $elemMatch.
+TEST_F(QueryPlannerTest, ElemMatchIndexedNestedOrMultikey) {
+ // true means multikey
+ addIndex(BSON("bar.baz" << 1 << "bar.z" << 1), true);
+ runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {$or: [{baz: 2, z: 3}]}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{foo:1},"
+ "{bar: {$elemMatch: {$or: [{$and: [{baz:2}, {z:3}]}]}}}]},"
+ "node: {ixscan: {pattern: {'bar.baz': 1, 'bar.z': 1}, "
+ "bounds: {'bar.baz': [[2,2,true,true]],"
+ "'bar.z': [[3,3,true,true]]}}}}}");
+}
+
+// SERVER-13789: Right now we don't index $nor, but make sure that the planner
+// doesn't get confused by a $nor beneath an $elemMatch.
+TEST_F(QueryPlannerTest, ElemMatchIndexedNestedNor) {
+ addIndex(BSON("bar.baz" << 1));
+ runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {$nor: [{baz: 2}, {baz: 3}]}}}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// SERVER-13789
+TEST_F(QueryPlannerTest, ElemMatchIndexedNestedNE) {
+ addIndex(BSON("bar.baz" << 1));
+ runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {baz: {$ne: 2}}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{foo:1},"
+ "{bar:{$elemMatch:{baz:{$ne:2}}}}]}, "
+ "node: {ixscan: {pattern: {'bar.baz': 1}, "
+ "bounds: {'bar.baz': [['MinKey',2,true,false], "
+ "[2,'MaxKey',false,true]]}}}}}");
+}
+
+// SERVER-13789: Make sure we properly handle an $or below $elemMatch that is not
+// tagged by the enumerator to use an index.
+TEST_F(QueryPlannerTest, ElemMatchNestedOrNotIndexed) {
+ addIndex(BSON("a.b" << 1));
+ runQuery(fromjson("{c: 1, a: {$elemMatch: {b: 3, $or: [{c: 4}, {c: 5}]}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b': 1}, bounds: "
+ "{'a.b': [[3,3,true,true]]}}}}}");
+}
+
+// The index bounds can be compounded because the index is not multikey.
+TEST_F(QueryPlannerTest, CompoundBoundsElemMatchNotMultikey) {
+ addIndex(BSON("a.x" << 1 << "a.b.c" << 1));
+ runQuery(fromjson("{'a.x': 1, a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1}}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{b:{$elemMatch:{c:{$gte:1}}}}}}, "
+ "node: {ixscan: {pattern: {'a.x':1, 'a.b.c':1}, bounds: "
+ "{'a.x': [[1,1,true,true]], "
+ " 'a.b.c': [[1,Infinity,true,true]]}}}}}");
+}
+
+// The index bounds cannot be compounded because the predicates over 'a.x' and
+// 'a.b.c' 1) share the prefix "a", and 2) are not conjoined by an $elemMatch
+// over the prefix "a".
+TEST_F(QueryPlannerTest, CompoundMultikeyBoundsElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.x" << 1 << "a.b.c" << 1), true);
+ runQuery(fromjson("{'a.x': 1, a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1}}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.x':1, 'a.b.c':1}, bounds: "
+ "{'a.x': [[1,1,true,true]], "
+ " 'a.b.c': [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// The index bounds cannot be intersected because the index is multikey.
+// The bounds could be intersected if there was an $elemMatch applied to path
+// "a.b.c". However, the $elemMatch is applied to the path "a.b" rather than
+// the full path of the indexed field.
+TEST_F(QueryPlannerTest, MultikeyNestedElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b.c" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1, $lte: 1}}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, bounds: "
+ "{'a.b.c': [[-Infinity, 1, true, true]]}}}}}");
+}
+
+// The index bounds cannot be intersected because the index is multikey.
+// The bounds could be intersected if there was an $elemMatch applied to path
+// "a.b.c". However, the $elemMatch is applied to the path "a.b" rather than
+// the full path of the indexed field.
+TEST_F(QueryPlannerTest, MultikeyNestedElemMatchIn) {
+ // true means multikey
+ addIndex(BSON("a.b.c" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1, $in:[2]}}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, bounds: "
+ "{'a.b.c': [[1, Infinity, true, true]]}}}}}");
+}
+
+// The bounds can be compounded because the index is not multikey.
+TEST_F(QueryPlannerTest, TwoNestedElemMatchBounds) {
+ addIndex(BSON("a.d.e" << 1 << "a.b.c" << 1));
+ runQuery(fromjson(
+ "{a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}},"
+ "b: {$elemMatch: {c: {$gte: 1}}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.d.e': 1, 'a.b.c': 1}, bounds: "
+ "{'a.d.e': [[-Infinity, 1, true, true]],"
+ "'a.b.c': [[1, Infinity, true, true]]}}}}}");
+}
+
+// The bounds cannot be compounded. Although there is an $elemMatch over the
+// shared path prefix 'a', the predicates must be conjoined by the same $elemMatch,
+// without intervening nested $elemMatch expressions. The bounds could be compounded if
+// the query were rewritten as {a: {$elemMatch: {'d.e': {$lte: 1}, 'b.c': {$gte: 1}}}}.
+TEST_F(QueryPlannerTest, MultikeyTwoNestedElemMatchBounds) {
+ // true means multikey
+ addIndex(BSON("a.d.e" << 1 << "a.b.c" << 1), true);
+ runQuery(fromjson(
+ "{a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}},"
+ "b: {$elemMatch: {c: {$gte: 1}}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.d.e': 1, 'a.b.c': 1}, bounds: "
+ "{'a.d.e': [[-Infinity, 1, true, true]],"
+ "'a.b.c': [['MinKey', 'MaxKey', true, true]]}}}}}");
+}
+
+// Bounds can be intersected for a multikey index when the predicates are
+// joined by an $elemMatch over the full path of the index field.
+TEST_F(QueryPlannerTest, MultikeyElemMatchValue) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1), true);
+ runQuery(fromjson("{'a.b': {$elemMatch: {$gte: 1, $lte: 1}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b': 1}, bounds: "
+ "{'a.b': [[1, 1, true, true]]}}}}}");
+}
+
+// We can intersect the bounds for all three predicates because
+// the index is not multikey.
+TEST_F(QueryPlannerTest, ElemMatchInterectBoundsNotMultikey) {
+ addIndex(BSON("a.b" << 1));
+ runQuery(fromjson(
+ "{a: {$elemMatch: {b: {$elemMatch: {$gte: 1, $lte: 4}}}},"
+ "'a.b': {$in: [2,5]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b': 1}, bounds: "
+ "{'a.b': [[2, 2, true, true]]}}}}}");
+}
+
+// Bounds can be intersected for a multikey index when the predicates are
+// joined by an $elemMatch over the full path of the index field. The bounds
+// from the $in predicate are not intersected with the bounds from the
+// remaining two predicates because the $in is not joined to the other
+// predicates with an $elemMatch.
+TEST_F(QueryPlannerTest, ElemMatchInterectBoundsMultikey) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1), true);
+ runQuery(fromjson(
+ "{a: {$elemMatch: {b: {$elemMatch: {$gte: 1, $lte: 4}}}},"
+ "'a.b': {$in: [2,5]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b': 1}, bounds: "
+ "{'a.b': [[1, 4, true, true]]}}}}}");
+}
+
+// Bounds can be intersected because the predicates are joined by an
+// $elemMatch over the path "a.b.c", the full path of the multikey
+// index field.
+TEST_F(QueryPlannerTest, MultikeyNestedElemMatchValue) {
+ // true means multikey
+ addIndex(BSON("a.b.c" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {'b.c': {$elemMatch: {$gte: 1, $lte: 1}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, bounds: "
+ "{'a.b.c': [[1, 1, true, true]]}}}}}");
+}
+
+// Bounds cannot be compounded for a multikey compound index when
+// the predicates share a prefix (and there is no $elemMatch).
+TEST_F(QueryPlannerTest, MultikeySharedPrefixNoElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{'a.b': 1, 'a.c': 1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
+ "{'a.b': [[1,1,true,true]], "
+ " 'a.c': [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// Bounds can be compounded because there is an $elemMatch applied to the
+// shared prefix "a".
+TEST_F(QueryPlannerTest, MultikeySharedPrefixElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {b: 1, c: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
+ "{'a.b': [[1,1,true,true]], 'a.c': [[1,1,true,true]]}}}}}");
+}
+
+// Bounds cannot be compounded for the multikey index even though there is an
+// $elemMatch, because the $elemMatch does not join the two predicates. This
+// query is semantically identical to {'a.b': 1, 'a.c': 1}.
+TEST_F(QueryPlannerTest, MultikeySharedPrefixElemMatchNotShared) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{'a.b': 1, a: {$elemMatch: {c: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
+ "{'a.b': [[1,1,true,true]], "
+ " 'a.c': [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// Bounds cannot be compounded for the multikey index even though there are
+// $elemMatch expressions, because there is no $elemMatch that joins the two
+// predicates. This query is semantically identical to {'a.b': 1, 'a.c': 1}.
+TEST_F(QueryPlannerTest, MultikeySharedPrefixTwoElemMatches) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{$and: [{a: {$elemMatch: {b: 1}}}, {a: {$elemMatch: {c: 1}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
+ "{'a.b': [[1,1,true,true]], "
+ " 'a.c': [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// Bounds for the predicates joined by the $elemMatch over the shared prefix
+// "a" can be combined. However, the predicate 'a.b'==1 cannot also be combined
+// given that it is outside of the $elemMatch.
+TEST_F(QueryPlannerTest, MultikeySharedPrefixNoIntersectOutsideElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{'a.b': 1, a: {$elemMatch: {b: {$gt: 0}, c: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
+ "{'a.b': [[0,Infinity,false,true]], "
+ " 'a.c': [[1,1,true,true]]}}}}}");
+}
+
+// Bounds for the predicates joined by the $elemMatch over the shared prefix
+// "a" can be combined. However, the predicate outside the $elemMatch
+// cannot also be combined.
+TEST_F(QueryPlannerTest, MultikeySharedPrefixNoIntersectOutsideElemMatch2) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {b: 1, c: 1}}, 'a.b': 1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
+ "{'a.b': [[1,1,true,true]], "
+ " 'a.c': [[1,1,true,true]]}}}}}");
+}
+
+// Bounds for the predicates joined by the $elemMatch over the shared prefix
+// "a" can be combined. However, the predicate outside the $elemMatch
+// cannot also be combined.
+TEST_F(QueryPlannerTest, MultikeySharedPrefixNoIntersectOutsideElemMatch3) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{'a.c': 2, a: {$elemMatch: {b: 1, c: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
+ "{'a.b': [[1,1,true,true]], "
+ " 'a.c': [[1,1,true,true]]}}}}}");
+}
+
+// There are two sets of fields that share a prefix: {'a.b', 'a.c'} and
+// {'d.e', 'd.f'}. Since the index is multikey, we can only use the bounds from
+// one member of each of these sets.
+TEST_F(QueryPlannerTest, MultikeyTwoSharedPrefixesBasic) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1 << "d.e" << 1 << "d.f" << 1), true);
+ runQuery(fromjson("{'a.b': 1, 'a.c': 1, 'd.e': 1, 'd.f': 1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1,'d.e':1,'d.f':1},"
+ "bounds: {'a.b':[[1,1,true,true]], "
+ " 'a.c':[['MinKey','MaxKey',true,true]], "
+ " 'd.e':[[1,1,true,true]], "
+ " 'd.f':[['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// All bounds can be combined. Although 'a.b' and 'a.c' share prefix 'a', the
+// relevant predicates are joined by an $elemMatch on 'a'. Similarly, predicates
+// over 'd.e' and 'd.f' are joined by an $elemMatch on 'd'.
+TEST_F(QueryPlannerTest, MultikeyTwoSharedPrefixesTwoElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1 << "d.e" << 1 << "d.f" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {b: 1, c: 1}}, d: {$elemMatch: {e: 1, f: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{a: {$elemMatch: {b: 1, c: 1}}},"
+ "{d: {$elemMatch: {e: 1, f: 1}}}]},"
+ "node: {ixscan: {pattern: {'a.b':1,'a.c':1,'d.e':1,'d.f':1},"
+ "bounds: {'a.b':[[1,1,true,true]], "
+ " 'a.c':[[1,1,true,true]], "
+ " 'd.e':[[1,1,true,true]], "
+ " 'd.f':[[1,1,true,true]]}}}}}");
+}
+
+// Bounds for 'a.b' and 'a.c' can be combined because of the $elemMatch on 'a'.
+// Since the predicates on 'd.e' and 'd.f' have no $elemMatch, we use the bounds
+// for only one of the two.
+TEST_F(QueryPlannerTest, MultikeyTwoSharedPrefixesOneElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1 << "d.e" << 1 << "d.f" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {b: 1, c: 1}}, 'd.e': 1, 'd.f': 1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and:[{a:{$elemMatch:{b:1,c:1}}}, {'d.f':1}]},"
+ "node: {ixscan: {pattern: {'a.b':1,'a.c':1,'d.e':1,'d.f':1},"
+ "bounds: {'a.b':[[1,1,true,true]], "
+ " 'a.c':[[1,1,true,true]], "
+ " 'd.e':[[1,1,true,true]], "
+ " 'd.f':[['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// Bounds for 'd.e' and 'd.f' can be combined because of the $elemMatch on 'd'.
+// Since the predicates on 'a.b' and 'a.c' have no $elemMatch, we use the bounds
+// for only one of the two.
+TEST_F(QueryPlannerTest, MultikeyTwoSharedPrefixesOneElemMatch2) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1 << "d.e" << 1 << "d.f" << 1), true);
+ runQuery(fromjson("{'a.b': 1, 'a.c': 1, d: {$elemMatch: {e: 1, f: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and:[{d:{$elemMatch:{e:1,f:1}}}, {'a.c':1}]},"
+ "node: {ixscan: {pattern: {'a.b':1,'a.c':1,'d.e':1,'d.f':1},"
+ "bounds: {'a.b':[[1,1,true,true]], "
+ " 'a.c':[['MinKey','MaxKey',true,true]], "
+ " 'd.e':[[1,1,true,true]], "
+ " 'd.f':[[1,1,true,true]]}}}}}");
+}
+
+// The bounds cannot be compounded because 'a.b.x' and 'a.b.y' share prefix
+// 'a.b' (and there is no $elemMatch).
+TEST_F(QueryPlannerTest, MultikeyDoubleDottedNoElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);
+ runQuery(fromjson("{'a.b.y': 1, 'a.b.x': 1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
+ "{'a.b.x': [[1,1,true,true]], "
+ " 'a.b.y': [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// The bounds can be compounded because the predicates are joined by an
+// $elemMatch on the shared prefix "a.b".
+TEST_F(QueryPlannerTest, MultikeyDoubleDottedElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {x: 1, y: 1}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
+ "{'a.b.x': [[1,1,true,true]], "
+ " 'a.b.y': [[1,1,true,true]]}}}}}");
+}
+
+// The bounds cannot be compounded. Although there is an $elemMatch that appears
+// to join the predicates, the path to which the $elemMatch is applied is "a".
+// Therefore, the predicates contained in the $elemMatch are over "b.x" and "b.y".
+// They cannot be compounded due to shared prefix "b".
+TEST_F(QueryPlannerTest, MultikeyDoubleDottedUnhelpfulElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {'b.x': 1, 'b.y': 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
+ "{'a.b.x': [[1,1,true,true]], "
+ " 'a.b.y': [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// The bounds can be compounded because the predicates are joined by an
+// $elemMatch on the shared prefix "a.b".
+TEST_F(QueryPlannerTest, MultikeyDoubleDottedElemMatchOnDotted) {
+ // true means multikey
+ addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);
+ runQuery(fromjson("{'a.b': {$elemMatch: {x: 1, y: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
+ "{'a.b.x': [[1,1,true,true]], "
+ " 'a.b.y': [[1,1,true,true]]}}}}}");
+}
+
+// This one is subtle. Say we compound the bounds for predicates over "a.b.c" and
+// "a.b.d". This is okay because of the predicate over the shared prefix "a.b".
+// It might seem like we can do the same for the $elemMatch over shared prefix "a.e",
+// thus combining all bounds. But in fact, we can't combine any more bounds because
+// we have already used prefix "a". In other words, this query is like having predicates
+// over "a.b" and "a.e", so we can only use bounds from one of the two.
+TEST_F(QueryPlannerTest, MultikeyComplexDoubleDotted) {
+ // true means multikey
+ addIndex(BSON("a.b.c" << 1 << "a.e.f" << 1 << "a.b.d" << 1 << "a.e.g" << 1), true);
+ runQuery(fromjson(
+ "{'a.b': {$elemMatch: {c: 1, d: 1}}, "
+ "'a.e': {$elemMatch: {f: 1, g: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.c':1,'a.e.f':1,'a.b.d':1,'a.e.g':1},"
+ "bounds: {'a.b.c':[[1,1,true,true]], "
+ " 'a.e.f':[['MinKey','MaxKey',true,true]], "
+ " 'a.b.d':[[1,1,true,true]], "
+ " 'a.e.g':[['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// Similar to MultikeyComplexDoubleDotted above.
+TEST_F(QueryPlannerTest, MultikeyComplexDoubleDotted2) {
+ // true means multikey
+ addIndex(BSON("a.b.c" << 1 << "a.e.c" << 1 << "a.b.d" << 1 << "a.e.d" << 1), true);
+ runQuery(fromjson(
+ "{'a.b': {$elemMatch: {c: 1, d: 1}}, "
+ "'a.e': {$elemMatch: {f: 1, g: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.c':1,'a.e.c':1,'a.b.d':1,'a.e.d':1},"
+ "bounds: {'a.b.c':[[1,1,true,true]], "
+ " 'a.e.c':[['MinKey','MaxKey',true,true]], "
+ " 'a.b.d':[[1,1,true,true]], "
+ " 'a.e.d':[['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// SERVER-13422: check that we plan $elemMatch object correctly with
+// index intersection.
+TEST_F(QueryPlannerTest, ElemMatchIndexIntersection) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("shortId" << 1));
+ // true means multikey
+ addIndex(BSON("a.b.startDate" << 1), true);
+ addIndex(BSON("a.b.endDate" << 1), true);
+
+ runQuery(fromjson(
+ "{shortId: 3, 'a.b': {$elemMatch: {startDate: {$lte: 3},"
+ "endDate: {$gt: 6}}}}"));
+
+ assertNumSolutions(6U);
+
+ // 3 single index solutions.
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {shortId: 1}}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.startDate': 1}}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.endDate': 1}}}}}");
+
+ // 3 index intersection solutions. The last one has to intersect two
+ // predicates within the $elemMatch object.
+ assertSolutionExists(
+ "{fetch: {node: {andHash: {nodes: ["
+ "{ixscan: {pattern: {shortId: 1}}},"
+ "{ixscan: {pattern: {'a.b.startDate': 1}}}]}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {andHash: {nodes: ["
+ "{ixscan: {pattern: {shortId: 1}}},"
+ "{ixscan: {pattern: {'a.b.endDate': 1}}}]}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {andHash: {nodes: ["
+ "{ixscan: {pattern: {'a.b.startDate': 1}}},"
+ "{ixscan: {pattern: {'a.b.endDate': 1}}}]}}}}");
+}
+
+// SERVER-14718
+TEST_F(QueryPlannerTest, NegationBelowElemMatchValue) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+
+ runQuery(fromjson("{a: {$elemMatch: {$ne: 2}}}"));
+
+ assertNumSolutions(1U);
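+    // The $ne: 2 predicate is indexed as the complement of the point 2:
+    // [MinKey, 2) and (2, MaxKey].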
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{$ne:2}}}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}, bounds: "
+ "{a: [['MinKey',2,true,false], [2,'MaxKey',false,true]]}}}}}");
+}
+
+// SERVER-14718
+TEST_F(QueryPlannerTest, AndWithNegationBelowElemMatchValue) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+ addIndex(BSON("b" << 1), true);
+
+ runQuery(fromjson("{b: 10, a: {$elemMatch: {$not: {$gt: 4}}}}"));
+
+ // One solution using index on 'b' and one using index on 'a'.
+ assertNumSolutions(2U);
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {b: 1}}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {filter: null, pattern: {a: 1}, bounds: {a: "
+ "[['MinKey',4,true,true],[Infinity,'MaxKey',false,true]]}}}}}");
+}
+
+// SERVER-14718
+TEST_F(QueryPlannerTest, AndWithNegationBelowElemMatchValue2) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+
+ runQuery(fromjson("{b: 10, a: {$elemMatch: {$not: {$gt: 4}, $gt: 2}}}"));
+
+ assertNumSolutions(1U);
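+    // The $gt: 2 bound intersects with the negated $gt: 4 (effectively $lte: 4),
+    // giving the half-open interval (2, 4].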
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {filter: null, pattern: {a: 1}, bounds: "
+ "{a: [[2, 4, false, true]]}}}}}");
+}
+
+// SERVER-14718
+TEST_F(QueryPlannerTest, NegationBelowElemMatchValueBelowElemMatchObject) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ // true means multikey
+ addIndex(BSON("a.b" << 1), true);
+
+ runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {$ne: 4}}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {filter: null, pattern: {'a.b': 1}, bounds: "
+ "{'a.b': [['MinKey',4,true,false],[4,'MaxKey',false,true]]}}}}}");
+}
+
+// SERVER-14718
+TEST_F(QueryPlannerTest, NegationBelowElemMatchValueBelowOrBelowAnd) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+ addIndex(BSON("b" << 1));
+
+ runQuery(fromjson("{c: 3, $or: [{a: {$elemMatch: {$ne: 4, $ne: 3}}}, {b: 5}]}"));
+
+ assertNumSolutions(1U);
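+    // The two $ne predicates combine into bounds covering everything except the
+    // points 3 and 4.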
+ assertSolutionExists(
+ "{fetch: {filter: {c:3}, node: {or: {nodes: ["
+ "{fetch: {node: {ixscan: {filter: null, pattern: {a: 1}, bounds: "
+ "{a: [['MinKey',3,true,false],"
+ "[3,4,false,false],"
+ "[4,'MaxKey',false,true]]}}}}}, "
+ "{ixscan: {filter: null, pattern: {b: 1}, bounds: "
+ "{b: [[5,5,true,true]]}}}]}}}}");
+}
+
+// SERVER-14718
+TEST_F(QueryPlannerTest, CantIndexNegationBelowElemMatchValue) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+
+ runQuery(fromjson("{a: {$elemMatch: {$not: {$mod: [2, 0]}}}}"));
+
+ // There are no indexed solutions, because negations of $mod are not indexable.
+ assertNumSolutions(0);
+}
+
+/**
+ * Index bounds constraints on a field should not be intersected
+ * if the index is multikey.
+ */
+TEST_F(QueryPlannerTest, MultikeyTwoConstraintsSameField) {
+ addIndex(BSON("a" << 1), true);
+ runQuery(fromjson("{a: {$gt: 0, $lt: 5}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {filter: {$and: [{a: {$lt: 5}}, {a: {$gt: 0}}]}, dir: 1}}");
+
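+    // Either range predicate may supply the index bounds, with the other applied as the
+    // fetch filter, so either of the following plans is acceptable.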
+ std::vector<std::string> alternates;
+ alternates.push_back(
+ "{fetch: {filter: {a: {$lt: 5}}, node: {ixscan: {filter: null, "
+ "pattern: {a: 1}, bounds: {a: [[0, Infinity, false, true]]}}}}}");
+ alternates.push_back(
+ "{fetch: {filter: {a: {$gt: 0}}, node: {ixscan: {filter: null, "
+ "pattern: {a: 1}, bounds: {a: [[-Infinity, 5, true, false]]}}}}}");
+ assertHasOneSolutionOf(alternates);
+}
+
+/**
+ * Constraints on fields with a shared parent should not be intersected
+ * if the index is multikey.
+ */
+TEST_F(QueryPlannerTest, MultikeyTwoConstraintsDifferentFields) {
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{'a.b': 2, 'a.c': 3}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {filter: {$and: [{'a.b': 2}, {'a.c': 3}]}, dir: 1}}");
+
+ std::vector<std::string> alternates;
+ alternates.push_back(
+ "{fetch: {filter: {'a.c': 3}, node: {ixscan: {filter: null, "
+ "pattern: {'a.b': 1, 'a.c': 1}, bounds: "
+ "{'a.b': [[2,2,true,true]], "
+ " 'a.c': [['MinKey','MaxKey',true,true]]}}}}}");
+ alternates.push_back(
+ "{fetch: {filter: {'a.b': 2}, node: {ixscan: {filter: null, "
+ "pattern: {'a.b': 1, 'a.c': 1}, bounds: "
+ "{'a.b': [['MinKey','MaxKey',true,true]], "
+ " 'a.c': [[3,3,true,true]]}}}}}");
+ assertHasOneSolutionOf(alternates);
+}
} // namespace
diff --git a/src/mongo/db/query/query_planner_common.cpp b/src/mongo/db/query/query_planner_common.cpp
index e7d5e207959..2ae977a825e 100644
--- a/src/mongo/db/query/query_planner_common.cpp
+++ b/src/mongo/db/query/query_planner_common.cpp
@@ -36,52 +36,49 @@
namespace mongo {
- void QueryPlannerCommon::reverseScans(QuerySolutionNode* node) {
- StageType type = node->getType();
+void QueryPlannerCommon::reverseScans(QuerySolutionNode* node) {
+ StageType type = node->getType();
- if (STAGE_IXSCAN == type) {
- IndexScanNode* isn = static_cast<IndexScanNode*>(node);
- isn->direction *= -1;
+ if (STAGE_IXSCAN == type) {
+ IndexScanNode* isn = static_cast<IndexScanNode*>(node);
+ isn->direction *= -1;
- if (isn->bounds.isSimpleRange) {
- std::swap(isn->bounds.startKey, isn->bounds.endKey);
- // XXX: Not having a startKeyInclusive means that if we reverse a max/min query
- // we have different results with and without the reverse...
- isn->bounds.endKeyInclusive = true;
- }
- else {
- for (size_t i = 0; i < isn->bounds.fields.size(); ++i) {
- std::vector<Interval>& iv = isn->bounds.fields[i].intervals;
- // Step 1: reverse the list.
- std::reverse(iv.begin(), iv.end());
- // Step 2: reverse each interval.
- for (size_t j = 0; j < iv.size(); ++j) {
- iv[j].reverse();
- }
+ if (isn->bounds.isSimpleRange) {
+ std::swap(isn->bounds.startKey, isn->bounds.endKey);
+ // XXX: Not having a startKeyInclusive means that if we reverse a max/min query
+ // we have different results with and without the reverse...
+ isn->bounds.endKeyInclusive = true;
+ } else {
+ for (size_t i = 0; i < isn->bounds.fields.size(); ++i) {
+ std::vector<Interval>& iv = isn->bounds.fields[i].intervals;
+ // Step 1: reverse the list.
+ std::reverse(iv.begin(), iv.end());
+ // Step 2: reverse each interval.
+ for (size_t j = 0; j < iv.size(); ++j) {
+ iv[j].reverse();
}
}
-
- if (!isn->bounds.isValidFor(isn->indexKeyPattern, isn->direction)) {
- LOG(5) << "Invalid bounds: " << isn->bounds.toString() << std::endl;
- invariant(0);
- }
-
- // TODO: we can just negate every value in the already computed properties.
- isn->computeProperties();
- }
- else if (STAGE_SORT_MERGE == type) {
- // reverse direction of comparison for merge
- MergeSortNode* msn = static_cast<MergeSortNode*>(node);
- msn->sort = reverseSortObj(msn->sort);
- }
- else {
- invariant(STAGE_SORT != type);
- // This shouldn't be here...
}
- for (size_t i = 0; i < node->children.size(); ++i) {
- reverseScans(node->children[i]);
+ if (!isn->bounds.isValidFor(isn->indexKeyPattern, isn->direction)) {
+ LOG(5) << "Invalid bounds: " << isn->bounds.toString() << std::endl;
+ invariant(0);
}
+
+ // TODO: we can just negate every value in the already computed properties.
+ isn->computeProperties();
+ } else if (STAGE_SORT_MERGE == type) {
+ // reverse direction of comparison for merge
+ MergeSortNode* msn = static_cast<MergeSortNode*>(node);
+ msn->sort = reverseSortObj(msn->sort);
+ } else {
+ invariant(STAGE_SORT != type);
+ // This shouldn't be here...
+ }
+
+ for (size_t i = 0; i < node->children.size(); ++i) {
+ reverseScans(node->children[i]);
}
+}
} // namespace mongo
diff --git a/src/mongo/db/query/query_planner_common.h b/src/mongo/db/query/query_planner_common.h
index 848e0f00ba1..4fdf8f30815 100644
--- a/src/mongo/db/query/query_planner_common.h
+++ b/src/mongo/db/query/query_planner_common.h
@@ -34,53 +34,53 @@
namespace mongo {
+/**
+ * Methods used by several parts of the planning process.
+ */
+class QueryPlannerCommon {
+public:
/**
- * Methods used by several parts of the planning process.
+ * Does the tree rooted at 'root' have a node with matchType 'type'?
+ *
+ * If 'out' is not NULL, sets 'out' to the first node of type 'type' encountered.
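+     *
+     * For example, hasNode(root, MatchExpression::GEO_NEAR) reports whether the
+     * query tree contains a $near/$nearSphere predicate.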
*/
- class QueryPlannerCommon {
- public:
- /**
- * Does the tree rooted at 'root' have a node with matchType 'type'?
- *
- * If 'out' is not NULL, sets 'out' to the first node of type 'type' encountered.
- */
- static bool hasNode(MatchExpression* root, MatchExpression::MatchType type,
- MatchExpression** out = NULL) {
- if (type == root->matchType()) {
- if (NULL != out) {
- *out = root;
- }
- return true;
+ static bool hasNode(MatchExpression* root,
+ MatchExpression::MatchType type,
+ MatchExpression** out = NULL) {
+ if (type == root->matchType()) {
+ if (NULL != out) {
+ *out = root;
}
-
- for (size_t i = 0; i < root->numChildren(); ++i) {
- if (hasNode(root->getChild(i), type, out)) {
- return true;
- }
- }
- return false;
+ return true;
}
- /**
- * Assumes the provided BSONObj is of the form {field1: -+1, ..., field2: -+1}
- * Returns a BSONObj with the values negated.
- */
- static BSONObj reverseSortObj(const BSONObj& sortObj) {
- BSONObjBuilder reverseBob;
- BSONObjIterator it(sortObj);
- while (it.more()) {
- BSONElement elt = it.next();
- reverseBob.append(elt.fieldName(), elt.numberInt() * -1);
+ for (size_t i = 0; i < root->numChildren(); ++i) {
+ if (hasNode(root->getChild(i), type, out)) {
+ return true;
}
- return reverseBob.obj();
}
+ return false;
+ }
- /**
- * Traverses the tree rooted at 'node'. For every STAGE_IXSCAN encountered, reverse
- * the scan direction and index bounds.
- */
- static void reverseScans(QuerySolutionNode* node);
+ /**
+ * Assumes the provided BSONObj is of the form {field1: -+1, ..., field2: -+1}
+ * Returns a BSONObj with the values negated.
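+     * For example, {a: 1, b: -1} becomes {a: -1, b: 1}.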
+ */
+ static BSONObj reverseSortObj(const BSONObj& sortObj) {
+ BSONObjBuilder reverseBob;
+ BSONObjIterator it(sortObj);
+ while (it.more()) {
+ BSONElement elt = it.next();
+ reverseBob.append(elt.fieldName(), elt.numberInt() * -1);
+ }
+ return reverseBob.obj();
+ }
- };
+ /**
+ * Traverses the tree rooted at 'node'. For every STAGE_IXSCAN encountered, reverse
+ * the scan direction and index bounds.
+ */
+ static void reverseScans(QuerySolutionNode* node);
+};
} // namespace mongo
diff --git a/src/mongo/db/query/query_planner_geo_test.cpp b/src/mongo/db/query/query_planner_geo_test.cpp
index ca405bad41d..11fc175d2ac 100644
--- a/src/mongo/db/query/query_planner_geo_test.cpp
+++ b/src/mongo/db/query/query_planner_geo_test.cpp
@@ -35,703 +35,845 @@
namespace {
- using namespace mongo;
-
- TEST_F(QueryPlannerTest, Basic2DNonNear) {
- // 2d can answer: within poly, within center, within centersphere, within box.
- // And it can use an index (or not) for each of them. As such, 2 solns expected.
- addIndex(BSON("a" << "2d"));
-
- // Polygon
- runQuery(fromjson("{a : { $within: { $polygon : [[0,0], [2,0], [4,0]] } }}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
-
- // Center
- runQuery(fromjson("{a : { $within : { $center : [[ 5, 5 ], 7 ] } }}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
-
- // Centersphere
- runQuery(fromjson("{a : { $within : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
-
- // Within box.
- runQuery(fromjson("{a : {$within: {$box : [[0,0],[9,9]]}}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
-
- // TODO: test that we *don't* annotate for things we shouldn't.
- }
-
- TEST_F(QueryPlannerTest, Basic2DSphereCompound) {
- addIndex(BSON("a" << 1 << "b" << 1));
- addIndex(BSON("loc" << "2dsphere"));
-
- runQuery(fromjson("{loc:{$near:{$geometry:{type:'Point',"
- "coordinates : [-81.513743,28.369947] },"
- " $maxDistance :100}},a: 'mouse'}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {loc: '2dsphere'}}}}");
- }
-
- TEST_F(QueryPlannerTest, Basic2DCompound) {
- addIndex(BSON("loc" << "2d" << "a" << 1));
-
- runQuery(fromjson("{ loc: { $geoWithin: { $box : [[0, 0],[10, 10]] } },"
- "a: 'mouse' }"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {loc : '2d', a: 1},"
- "filter: {a: 'mouse'},"
- "bounds: {loc: []," // Ignored since complex
- " a: [['MinKey','MaxKey',true,true]]}"
- "}}}}");
- }
-
- TEST_F(QueryPlannerTest, Multikey2DSphereCompound) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << 1), true);
- addIndex(BSON("loc" << "2dsphere"), true);
-
- runQuery(fromjson("{loc:{$near:{$geometry:{type:'Point',"
- "coordinates : [-81.513743,28.369947] },"
- " $maxDistance :100}},a: 'mouse'}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {loc: '2dsphere'}}}}");
- }
-
- TEST_F(QueryPlannerTest, Basic2DSphereNonNear) {
- // 2dsphere can do: within+geometry, intersects+geometry
- addIndex(BSON("a" << "2dsphere"));
-
- runQuery(fromjson("{a: {$geoIntersects: {$geometry: {type: 'Point',"
- "coordinates: [10.0, 10.0]}}}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
-
- runQuery(fromjson("{a : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
-
- // TODO: test that we *don't* annotate for things we shouldn't.
- }
-
- TEST_F(QueryPlannerTest, Multikey2DSphereNonNear) {
- // 2dsphere can do: within+geometry, intersects+geometry
- // true means multikey
- addIndex(BSON("a" << "2dsphere"), true);
-
- runQuery(fromjson("{a: {$geoIntersects: {$geometry: {type: 'Point',"
- "coordinates: [10.0, 10.0]}}}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
-
- runQuery(fromjson("{a : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
-
- // TODO: test that we *don't* annotate for things we shouldn't.
- }
-
- TEST_F(QueryPlannerTest, Basic2DGeoNear) {
- // Can only do near + old point.
- addIndex(BSON("a" << "2d"));
- runQuery(fromjson("{a: {$near: [0,0], $maxDistance:0.3 }}"));
- assertNumSolutions(1U);
- assertSolutionExists("{geoNear2d: {a: '2d'}}");
- }
-
- TEST_F(QueryPlannerTest, Basic2DSphereGeoNear) {
- // Can do nearSphere + old point, near + new point.
- addIndex(BSON("a" << "2dsphere"));
-
- runQuery(fromjson("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{geoNear2dsphere: {a: '2dsphere'}}");
-
- runQuery(fromjson("{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
- "$maxDistance:100}}}"));
- assertNumSolutions(1U);
- assertSolutionExists("{geoNear2dsphere: {a: '2dsphere'}}");
- }
-
- TEST_F(QueryPlannerTest, Multikey2DSphereGeoNear) {
- // Can do nearSphere + old point, near + new point.
- // true means multikey
- addIndex(BSON("a" << "2dsphere"), true);
-
- runQuery(fromjson("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{geoNear2dsphere: {a: '2dsphere'}}");
-
- runQuery(fromjson("{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
- "$maxDistance:100}}}"));
- assertNumSolutions(1U);
- assertSolutionExists("{geoNear2dsphere: {a: '2dsphere'}}");
- }
-
- TEST_F(QueryPlannerTest, Basic2DSphereGeoNearReverseCompound) {
- addIndex(BSON("x" << 1));
- addIndex(BSON("x" << 1 << "a" << "2dsphere"));
- runQuery(fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{geoNear2dsphere: {x: 1, a: '2dsphere'}}");
- }
-
- TEST_F(QueryPlannerTest, Multikey2DSphereGeoNearReverseCompound) {
- addIndex(BSON("x" << 1), true);
- addIndex(BSON("x" << 1 << "a" << "2dsphere"), true);
- runQuery(fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{geoNear2dsphere: {x: 1, a: '2dsphere'}}");
- }
-
- TEST_F(QueryPlannerTest, NearNoIndex) {
- addIndex(BSON("x" << 1));
- runInvalidQuery(fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
- }
-
- TEST_F(QueryPlannerTest, TwoDSphereNoGeoPred) {
- addIndex(BSON("x" << 1 << "a" << "2dsphere"));
- runQuery(fromjson("{x:1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1, a: '2dsphere'}}}}}");
- }
-
- TEST_F(QueryPlannerTest, TwoDSphereNoGeoPredMultikey) {
- addIndex(BSON("x" << 1 << "a" << "2dsphere"), true);
- runQuery(fromjson("{x:1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1, a: '2dsphere'}}}}}");
- }
-
- // SERVER-14723
- TEST_F(QueryPlannerTest, GeoNearMultipleRelevantIndicesButOnlyOneCompatible) {
- addIndex(BSON("a" << "2dsphere"));
- addIndex(BSON("b" << 1 << "a" << "2dsphere"));
-
- runQuery(fromjson("{a: {$nearSphere: {$geometry: {type: 'Point', coordinates: [0,0]}}},"
- " b: {$exists: false}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {b: {$exists: false}}, node: "
- "{geoNear2dsphere: {a: '2dsphere'}}}}");
- }
-
- // SERVER-3984, $or 2d index
- TEST_F(QueryPlannerTest, Or2DNonNear) {
- addIndex(BSON("a" << "2d"));
- addIndex(BSON("b" << "2d"));
- runQuery(fromjson("{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {b : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}},"
- "{fetch: {node: {ixscan: {pattern: {b: '2d'}}}}}]}}");
- }
-
- // SERVER-3984, $or 2d index
- TEST_F(QueryPlannerTest, Or2DSameFieldNonNear) {
- addIndex(BSON("a" << "2d"));
- runQuery(fromjson("{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {a : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
- }
-
- // SERVER-3984, $or 2dsphere index
- TEST_F(QueryPlannerTest, Or2DSphereNonNear) {
- addIndex(BSON("a" << "2dsphere"));
- addIndex(BSON("b" << "2dsphere"));
- runQuery(fromjson("{$or: [ {a: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [10.0, 10.0]}}}},"
- " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}},"
- "{fetch: {node: {ixscan: {pattern: {b: '2dsphere'}}}}}]}}");
- }
-
- // SERVER-3984, $or 2dsphere index
- TEST_F(QueryPlannerTest, Or2DSphereNonNearMultikey) {
- // true means multikey
- addIndex(BSON("a" << "2dsphere"), true);
- addIndex(BSON("b" << "2dsphere"), true);
- runQuery(fromjson("{$or: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [10.0, 10.0]}}}},"
- " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{or: {nodes: "
- "[{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}},"
- "{fetch: {node: {ixscan: {pattern: {b: '2dsphere'}}}}}]}}");
- }
-
- TEST_F(QueryPlannerTest, And2DSameFieldNonNear) {
- addIndex(BSON("a" << "2d"));
- runQuery(fromjson("{$and: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {a : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- // Bounds of the two 2d geo predicates are combined into
- // a single index scan.
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
- }
-
- TEST_F(QueryPlannerTest, And2DWith2DNearSameField) {
- addIndex(BSON("a" << "2d"));
- runQuery(fromjson("{$and: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {a : { $near : [ 5, 5 ] } } ]}"));
-
- // GEO_NEAR must use the index, and GEO predicate becomes a filter.
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: { node : { geoNear2d: {a: '2d'} } } }");
- }
-
- TEST_F(QueryPlannerTest, And2DSphereSameFieldNonNear) {
- addIndex(BSON("a" << "2dsphere"));
- runQuery(fromjson("{$and: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- " {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- // Bounds of the two 2dsphere geo predicates are combined into
- // a single index scan.
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
- }
-
- TEST_F(QueryPlannerTest, And2DSphereSameFieldNonNearMultikey) {
- // true means multikey
- addIndex(BSON("a" << "2dsphere"), true);
- runQuery(fromjson("{$and: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- " {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- // Bounds of the two 2dsphere geo predicates are combined into
- // a single index scan.
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
- }
-
- TEST_F(QueryPlannerTest, And2DSphereWithNearSameField) {
- addIndex(BSON("a" << "2dsphere"));
- runQuery(fromjson("{$and: [{a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- "{a: {$near: {$geometry: "
- "{type: 'Point', coordinates: [10.0, 10.0]}}}}]}"));
-
- // GEO_NEAR must use the index, and GEO predicate becomes a filter.
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
- }
-
- TEST_F(QueryPlannerTest, And2DSphereWithNearSameFieldMultikey) {
- // true means multikey
- addIndex(BSON("a" << "2dsphere"), true);
- runQuery(fromjson("{$and: [{a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- "{a: {$near: {$geometry: "
- "{type: 'Point', coordinates: [10.0, 10.0]}}}}]}"));
-
- // GEO_NEAR must use the index, and GEO predicate becomes a filter.
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
- }
-
- TEST_F(QueryPlannerTest, Or2DSphereSameFieldNonNear) {
- addIndex(BSON("a" << "2dsphere"));
- runQuery(fromjson("{$or: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- " {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
- }
-
- TEST_F(QueryPlannerTest, Or2DSphereSameFieldNonNearMultikey) {
- // true means multikey
- addIndex(BSON("a" << "2dsphere"), true);
- runQuery(fromjson("{$or: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- " {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNear) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << "2dsphere"), true);
- runQuery(fromjson("{a: {$gte: 0}, b: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{geoNear2dsphere: {a: 1, b: '2dsphere'}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearFetchRequired) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << "2dsphere"), true);
- runQuery(fromjson("{a: {$gte: 0, $lt: 5}, b: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a:{$gte:0}}, node: "
- "{geoNear2dsphere: {a: 1, b: '2dsphere'}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleIndices) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << "2dsphere"), true);
- addIndex(BSON("c" << 1 << "b" << "2dsphere"), true);
- runQuery(fromjson("{a: {$gte: 0}, c: 3, b: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{fetch: {filter: {c:3}, node: "
- "{geoNear2dsphere: {a: 1, b: '2dsphere'}}}}");
- assertSolutionExists("{fetch: {filter: {a:{$gte:0}}, node: "
- "{geoNear2dsphere: {c: 1, b: '2dsphere'}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleLeadingFields) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << "2dsphere"), true);
- runQuery(fromjson("{a: {$lt: 5, $gt: 1}, b: 6, c: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a:{$gt:1}}, node: "
- "{geoNear2dsphere: {a: 1, b: 1, c: '2dsphere'}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleGeoPreds) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << "2dsphere"), true);
- runQuery(fromjson("{a: 1, b: 6, $and: ["
- "{c: {$near: {$geometry: {type: 'Point', coordinates: [2, 2]}}}},"
- "{c: {$geoWithin: {$box: [ [1, 1], [3, 3] ] } } } ] }"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a:1, b:1, c:'2dsphere'}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearCompoundTest) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << "2dsphere" << "c" << 1 << "d" << 1), true);
- runQuery(fromjson("{a: {$gte: 0}, c: {$gte: 0, $lt: 4}, d: {$gt: 1, $lt: 5},"
- "b: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {d:{$gt:1},c:{$gte:0}}, node: "
- "{geoNear2dsphere: {a: 1, b: '2dsphere', c: 1, d: 1}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundMultikey2DNear) {
- // true means multikey
- addIndex(BSON("a" << "2d" << "b" << 1), true);
- runQuery(fromjson("{a: {$near: [0, 0]}, b: {$gte: 0}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: { filter : {b:{$gte: 0}}, node: "
- "{geoNear2d: {a: '2d', b: 1} } } }");
- }
-
- // SERVER-9257
- TEST_F(QueryPlannerTest, CompoundGeoNoGeoPredicate) {
- addIndex(BSON("creationDate" << 1 << "foo.bar" << "2dsphere"));
- runQuerySortProj(fromjson("{creationDate: { $gt: 7}}"),
- fromjson("{creationDate: 1}"), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {creationDate: 1}, limit: 0, "
- "node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {creationDate: 1, 'foo.bar': '2dsphere'}}}}}");
- }
-
- // SERVER-9257
- TEST_F(QueryPlannerTest, CompoundGeoNoGeoPredicateMultikey) {
- // true means multikey
- addIndex(BSON("creationDate" << 1 << "foo.bar" << "2dsphere"), true);
- runQuerySortProj(fromjson("{creationDate: { $gt: 7}}"),
- fromjson("{creationDate: 1}"), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {creationDate: 1}, limit: 0, "
- "node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {creationDate: 1, 'foo.bar': '2dsphere'}}}}}");
- }
-
- // Test that a 2dsphere index can satisfy a whole index scan solution if the query has a GEO
- // predicate on at least one of the indexed geo fields.
- // Currently fails. Tracked by SERVER-10801.
- /*
- TEST_F(QueryPlannerTest, SortOnGeoQuery) {
- addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"));
- BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", coordinates: [[[1, 1], [1, 90], [180, 90], [180, 1], [1, 1]]]}}}}");
- BSONObj sort = fromjson("{timestamp: -1}");
- runQuerySortProj(query, sort, BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {timestamp: -1}, limit: 0, "
- "node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {timestamp: -1, position: '2dsphere'}}}}}");
- }
-
- TEST_F(QueryPlannerTest, SortOnGeoQueryMultikey) {
- // true means multikey
- addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"), true);
- BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", "
- "coordinates: [[[1, 1], [1, 90], [180, 90], [180, 1], [1, 1]]]}}}}");
- BSONObj sort = fromjson("{timestamp: -1}");
- runQuerySortProj(query, sort, BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {timestamp: -1}, limit: 0, "
- "node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: "
- "{timestamp: -1, position: '2dsphere'}}}}}");
- }
- */
-
-
- //
- // Sort
- //
-
- TEST_F(QueryPlannerTest, CantUseNonCompoundGeoIndexToProvideSort) {
- addIndex(BSON("x" << "2dsphere"));
- runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CantUseNonCompoundGeoIndexToProvideSortWithIndexablePred) {
- addIndex(BSON("x" << "2dsphere"));
- runQuerySortProj(fromjson("{x: {$geoIntersects: {$geometry: {type: 'Point',"
- " coordinates: [0, 0]}}}}"),
- BSON("x" << 1),
- BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, node: "
- "{fetch: {node: "
- "{ixscan: {pattern: {x: '2dsphere'}}}}}}}");
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, node: "
- "{cscan: {dir: 1}}}}");
- }
-
- TEST_F(QueryPlannerTest, CantUseCompoundGeoIndexToProvideSortIfNoGeoPred) {
- addIndex(BSON("x" << 1 << "y" << "2dsphere"));
- runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CanUseCompoundGeoIndexToProvideSortWithGeoPred) {
- addIndex(BSON("x" << 1 << "y" << "2dsphere"));
- runQuerySortProj(fromjson("{x: 1, y: {$geoIntersects: {$geometry: {type: 'Point',"
- " coordinates: [0, 0]}}}}"),
- BSON("x" << 1),
- BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{fetch: {node: "
- "{ixscan: {pattern: {x: 1, y: '2dsphere'}}}}}");
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, node: "
- "{cscan: {dir: 1}}}}");
- }
-
- //
- // Negation
- //
-
- //
- // 2D geo negation
- // The filter b != 1 is embedded in the geoNear2d node.
+using namespace mongo;
+
+TEST_F(QueryPlannerTest, Basic2DNonNear) {
+ // 2d can answer: within poly, within center, within centersphere, within box.
+ // And it can use an index (or not) for each of them. As such, 2 solns expected.
+ addIndex(BSON("a"
+ << "2d"));
+
+ // Polygon
+ runQuery(fromjson("{a : { $within: { $polygon : [[0,0], [2,0], [4,0]] } }}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+
+ // Center
+ runQuery(fromjson("{a : { $within : { $center : [[ 5, 5 ], 7 ] } }}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+
+ // Centersphere
+ runQuery(fromjson("{a : { $within : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+
+ // Within box.
+ runQuery(fromjson("{a : {$within: {$box : [[0,0],[9,9]]}}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+
+ // TODO: test that we *don't* annotate for things we shouldn't.
+}
+
+TEST_F(QueryPlannerTest, Basic2DSphereCompound) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ addIndex(BSON("loc"
+ << "2dsphere"));
+
+ runQuery(fromjson(
+ "{loc:{$near:{$geometry:{type:'Point',"
+ "coordinates : [-81.513743,28.369947] },"
+ " $maxDistance :100}},a: 'mouse'}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {loc: '2dsphere'}}}}");
+}
+
+TEST_F(QueryPlannerTest, Basic2DCompound) {
+ addIndex(BSON("loc"
+ << "2d"
+ << "a" << 1));
+
+ runQuery(fromjson(
+ "{ loc: { $geoWithin: { $box : [[0, 0],[10, 10]] } },"
+ "a: 'mouse' }"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {loc : '2d', a: 1},"
+ "filter: {a: 'mouse'},"
+ "bounds: {loc: []," // Ignored since complex
+ " a: [['MinKey','MaxKey',true,true]]}"
+ "}}}}");
+}
+
+TEST_F(QueryPlannerTest, Multikey2DSphereCompound) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b" << 1), true);
+ addIndex(BSON("loc"
+ << "2dsphere"),
+ true);
+
+ runQuery(fromjson(
+ "{loc:{$near:{$geometry:{type:'Point',"
+ "coordinates : [-81.513743,28.369947] },"
+ " $maxDistance :100}},a: 'mouse'}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {loc: '2dsphere'}}}}");
+}
+
+TEST_F(QueryPlannerTest, Basic2DSphereNonNear) {
+ // 2dsphere can do: within+geometry, intersects+geometry
+ addIndex(BSON("a"
+ << "2dsphere"));
+
+ runQuery(fromjson(
+ "{a: {$geoIntersects: {$geometry: {type: 'Point',"
+ "coordinates: [10.0, 10.0]}}}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+
+ runQuery(fromjson("{a : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+
+ // TODO: test that we *don't* annotate for things we shouldn't.
+}
+
+TEST_F(QueryPlannerTest, Multikey2DSphereNonNear) {
+ // 2dsphere can do: within+geometry, intersects+geometry
+ // true means multikey
+ addIndex(BSON("a"
+ << "2dsphere"),
+ true);
+
+ runQuery(fromjson(
+ "{a: {$geoIntersects: {$geometry: {type: 'Point',"
+ "coordinates: [10.0, 10.0]}}}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+
+ runQuery(fromjson("{a : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+
+ // TODO: test that we *don't* annotate for things we shouldn't.
+}
+
+TEST_F(QueryPlannerTest, Basic2DGeoNear) {
// Can only do near + old point.
- //
- TEST_F(QueryPlannerTest, Negation2DGeoNear) {
- addIndex(BSON("a" << "2d"));
- runQuery(fromjson("{$and: [{a: {$near: [0, 0], $maxDistance: 0.3}}, {b: {$ne: 1}}]}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: { geoNear2d: {a: '2d'} } } }");
- }
-
- //
- // 2DSphere geo negation
- // Filter is embedded in a separate fetch node.
- //
- TEST_F(QueryPlannerTest, Negation2DSphereGeoNear) {
- // Can do nearSphere + old point, near + new point.
- addIndex(BSON("a" << "2dsphere"));
-
- runQuery(fromjson("{$and: [{a: {$nearSphere: [0,0], $maxDistance: 0.31}}, "
- "{b: {$ne: 1}}]}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
-
- runQuery(fromjson("{$and: [{a: {$geoNear: {$geometry: {type: 'Point', "
- "coordinates: [0, 0]},"
- "$maxDistance: 100}}},"
- "{b: {$ne: 1}}]}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
- }
-
- //
- // 2DSphere geo negation
- // Filter is embedded in a separate fetch node.
- //
- TEST_F(QueryPlannerTest, Negation2DSphereGeoNearMultikey) {
- // Can do nearSphere + old point, near + new point.
- // true means multikey
- addIndex(BSON("a" << "2dsphere"), true);
-
- runQuery(fromjson("{$and: [{a: {$nearSphere: [0,0], $maxDistance: 0.31}}, "
- "{b: {$ne: 1}}]}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
-
- runQuery(fromjson("{$and: [{a: {$geoNear: {$geometry: {type: 'Point', "
- "coordinates: [0, 0]},"
- "$maxDistance: 100}}},"
- "{b: {$ne: 1}}]}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
- }
-
- //
- // 2dsphere V2 sparse indices, SERVER-9639
- //
-
- // Basic usage of a sparse 2dsphere index. V1 ignores the sparse field. We can use any prefix
- // of the index as every document is indexed.
- TEST_F(QueryPlannerTest, TwoDSphereSparseV1) {
- // Create a V1 index.
- addIndex(BSON("nonGeo" << 1 << "geo" << "2dsphere"),
- BSON("2dsphereIndexVersion" << 1));
-
- // Can use the index for this.
- runQuery(fromjson("{nonGeo: 7}"));
- assertNumSolutions(2);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {nonGeo: 1, geo: '2dsphere'}}}}}");
- }
-
- // V2 is "geo sparse" and removes the nonGeo assignment.
- TEST_F(QueryPlannerTest, TwoDSphereSparseV2CantUse) {
- // Create a V2 index.
- addIndex(BSON("nonGeo" << 1 << "geo" << "2dsphere"),
- BSON("2dsphereIndexVersion" << 2));
-
- // Can't use the index prefix here as it's a V2 index and we have no geo pred.
- runQuery(fromjson("{nonGeo: 7}"));
- assertNumSolutions(1);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- TEST_F(QueryPlannerTest, TwoDSphereSparseOnePred) {
- // Create a V2 index.
- addIndex(BSON("geo" << "2dsphere"),
- BSON("2dsphereIndexVersion" << 2));
-
- // We can use the index here as we have a geo pred.
- runQuery(fromjson("{geo : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}}"));
- assertNumSolutions(2);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // V2 is geo-sparse and the planner removes the nonGeo assignment when there's no geo pred
- TEST_F(QueryPlannerTest, TwoDSphereSparseV2TwoPreds) {
- addIndex(BSON("nonGeo" << 1 << "geo" << "2dsphere" << "geo2" << "2dsphere"),
- BSON("2dsphereIndexVersion" << 2));
-
- // Non-geo preds can only use a collscan.
- runQuery(fromjson("{nonGeo: 7}"));
- assertNumSolutions(1);
- assertSolutionExists("{cscan: {dir: 1}}");
-
- // One geo pred so we can use the index.
- runQuery(fromjson("{nonGeo: 7, geo : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}}"));
- ASSERT_EQUALS(getNumSolutions(), 2U);
-
- // Two geo preds, so we can use the index still.
- runQuery(fromjson("{nonGeo: 7, geo : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] }},"
- " geo2 : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] }}}"));
- ASSERT_EQUALS(getNumSolutions(), 2U);
- }
-
- TEST_F(QueryPlannerTest, TwoDNearCompound) {
- addIndex(BSON("geo" << "2dsphere" << "nongeo" << 1),
- BSON("2dsphereIndexVersion" << 2));
- runQuery(fromjson("{geo: {$nearSphere: [-71.34895, 42.46037]}}"));
- ASSERT_EQUALS(getNumSolutions(), 1U);
- }
-
- TEST_F(QueryPlannerTest, TwoDSphereSparseV2BelowOr) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
-
- addIndex(BSON("geo1" << "2dsphere" << "a" << 1 << "b" << 1),
- BSON("2dsphereIndexVersion" << 2));
- addIndex(BSON("geo2" << "2dsphere" << "a" << 1 << "b" << 1),
- BSON("2dsphereIndexVersion" << 2));
-
- runQuery(fromjson("{a: 4, b: 5, $or: ["
- "{geo1: {$geoWithin: {$centerSphere: [[10, 20], 0.01]}}},"
- "{geo2: {$geoWithin: {$centerSphere: [[10, 20], 0.01]}}}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a: 4, b: 5}, node: {or: {nodes: ["
- "{fetch: {node: {ixscan: {pattern: {geo1:'2dsphere',a:1,b:1}}}}},"
- "{fetch: {node: {ixscan: {pattern: {geo2:'2dsphere',a:1,b:1}}}}}"
- "]}}}}");
- }
-
- TEST_F(QueryPlannerTest, TwoDSphereSparseV2BelowElemMatch) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a.b" << "2dsphere" << "a.c" << 1),
- BSON("2dsphereIndexVersion" << 2));
-
- runQuery(fromjson("{a: {$elemMatch: {b: {$geoWithin: {$centerSphere: [[10,20], 0.01]}},"
- "c: {$gt: 3}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b': '2dsphere', 'a.c': 1}}}}}");
- }
+ addIndex(BSON("a"
+ << "2d"));
+ runQuery(fromjson("{a: {$near: [0,0], $maxDistance:0.3 }}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{geoNear2d: {a: '2d'}}");
+}
+
+TEST_F(QueryPlannerTest, Basic2DSphereGeoNear) {
+ // Can do nearSphere + old point, near + new point.
+ addIndex(BSON("a"
+ << "2dsphere"));
+
+ runQuery(fromjson("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{geoNear2dsphere: {a: '2dsphere'}}");
+
+ runQuery(fromjson(
+ "{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
+ "$maxDistance:100}}}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{geoNear2dsphere: {a: '2dsphere'}}");
+}
+
+TEST_F(QueryPlannerTest, Multikey2DSphereGeoNear) {
+ // Can do nearSphere + old point, near + new point.
+ // true means multikey
+ addIndex(BSON("a"
+ << "2dsphere"),
+ true);
+
+ runQuery(fromjson("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{geoNear2dsphere: {a: '2dsphere'}}");
+
+ runQuery(fromjson(
+ "{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
+ "$maxDistance:100}}}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{geoNear2dsphere: {a: '2dsphere'}}");
+}
+
+TEST_F(QueryPlannerTest, Basic2DSphereGeoNearReverseCompound) {
+ addIndex(BSON("x" << 1));
+ addIndex(BSON("x" << 1 << "a"
+ << "2dsphere"));
+ runQuery(fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{geoNear2dsphere: {x: 1, a: '2dsphere'}}");
+}
+
+TEST_F(QueryPlannerTest, Multikey2DSphereGeoNearReverseCompound) {
+ addIndex(BSON("x" << 1), true);
+ addIndex(BSON("x" << 1 << "a"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{geoNear2dsphere: {x: 1, a: '2dsphere'}}");
+}
+
+TEST_F(QueryPlannerTest, NearNoIndex) {
+ addIndex(BSON("x" << 1));
+ runInvalidQuery(fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
+}
+
+TEST_F(QueryPlannerTest, TwoDSphereNoGeoPred) {
+ addIndex(BSON("x" << 1 << "a"
+ << "2dsphere"));
+ runQuery(fromjson("{x:1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1, a: '2dsphere'}}}}}");
+}
+
+TEST_F(QueryPlannerTest, TwoDSphereNoGeoPredMultikey) {
+ addIndex(BSON("x" << 1 << "a"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson("{x:1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1, a: '2dsphere'}}}}}");
+}
+
+// SERVER-14723
+TEST_F(QueryPlannerTest, GeoNearMultipleRelevantIndicesButOnlyOneCompatible) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ addIndex(BSON("b" << 1 << "a"
+ << "2dsphere"));
+
+ runQuery(fromjson(
+ "{a: {$nearSphere: {$geometry: {type: 'Point', coordinates: [0,0]}}},"
+ " b: {$exists: false}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {b: {$exists: false}}, node: "
+ "{geoNear2dsphere: {a: '2dsphere'}}}}");
+}
+
+// SERVER-3984, $or 2d index
+TEST_F(QueryPlannerTest, Or2DNonNear) {
+ addIndex(BSON("a"
+ << "2d"));
+ addIndex(BSON("b"
+ << "2d"));
+ runQuery(fromjson(
+ "{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {b : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {b: '2d'}}}}}]}}");
+}
+
+// SERVER-3984, $or 2d index
+TEST_F(QueryPlannerTest, Or2DSameFieldNonNear) {
+ addIndex(BSON("a"
+ << "2d"));
+ runQuery(fromjson(
+ "{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {a : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+}
+
+// SERVER-3984, $or 2dsphere index
+TEST_F(QueryPlannerTest, Or2DSphereNonNear) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ addIndex(BSON("b"
+ << "2dsphere"));
+ runQuery(fromjson(
+ "{$or: [ {a: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [10.0, 10.0]}}}},"
+ " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {b: '2dsphere'}}}}}]}}");
+}
+
+// SERVER-3984, $or 2dsphere index
+TEST_F(QueryPlannerTest, Or2DSphereNonNearMultikey) {
+ // true means multikey
+ addIndex(BSON("a"
+ << "2dsphere"),
+ true);
+ addIndex(BSON("b"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{$or: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [10.0, 10.0]}}}},"
+ " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{or: {nodes: "
+ "[{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {b: '2dsphere'}}}}}]}}");
+}
+
+TEST_F(QueryPlannerTest, And2DSameFieldNonNear) {
+ addIndex(BSON("a"
+ << "2d"));
+ runQuery(fromjson(
+ "{$and: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {a : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ // Bounds of the two 2d geo predicates are combined into
+ // a single index scan.
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+}
+
+TEST_F(QueryPlannerTest, And2DWith2DNearSameField) {
+ addIndex(BSON("a"
+ << "2d"));
+ runQuery(fromjson(
+ "{$and: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {a : { $near : [ 5, 5 ] } } ]}"));
+
+ // GEO_NEAR must use the index, and GEO predicate becomes a filter.
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: { node : { geoNear2d: {a: '2d'} } } }");
+}
+
+TEST_F(QueryPlannerTest, And2DSphereSameFieldNonNear) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ runQuery(fromjson(
+ "{$and: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ " {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ // Bounds of the two 2dsphere geo predicates are combined into
+ // a single index scan.
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+}
+
+TEST_F(QueryPlannerTest, And2DSphereSameFieldNonNearMultikey) {
+ // true means multikey
+ addIndex(BSON("a"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{$and: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ " {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ // Bounds of the two 2dsphere geo predicates are combined into
+ // a single index scan.
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+}
+
+TEST_F(QueryPlannerTest, And2DSphereWithNearSameField) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ runQuery(fromjson(
+ "{$and: [{a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ "{a: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [10.0, 10.0]}}}}]}"));
+
+ // GEO_NEAR must use the index, and GEO predicate becomes a filter.
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
+}
+
+TEST_F(QueryPlannerTest, And2DSphereWithNearSameFieldMultikey) {
+ // true means multikey
+ addIndex(BSON("a"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{$and: [{a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ "{a: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [10.0, 10.0]}}}}]}"));
+
+ // GEO_NEAR must use the index, and GEO predicate becomes a filter.
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
+}
+
+TEST_F(QueryPlannerTest, Or2DSphereSameFieldNonNear) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ runQuery(fromjson(
+ "{$or: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ " {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+}
+
+TEST_F(QueryPlannerTest, Or2DSphereSameFieldNonNearMultikey) {
+ // true means multikey
+ addIndex(BSON("a"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{$or: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ " {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNear) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{a: {$gte: 0}, b: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{geoNear2dsphere: {a: 1, b: '2dsphere'}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearFetchRequired) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{a: {$gte: 0, $lt: 5}, b: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$gte:0}}, node: "
+ "{geoNear2dsphere: {a: 1, b: '2dsphere'}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleIndices) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b"
+ << "2dsphere"),
+ true);
+ addIndex(BSON("c" << 1 << "b"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{a: {$gte: 0}, c: 3, b: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{fetch: {filter: {c:3}, node: "
+ "{geoNear2dsphere: {a: 1, b: '2dsphere'}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$gte:0}}, node: "
+ "{geoNear2dsphere: {c: 1, b: '2dsphere'}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleLeadingFields) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b" << 1 << "c"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{a: {$lt: 5, $gt: 1}, b: 6, c: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$gt:1}}, node: "
+ "{geoNear2dsphere: {a: 1, b: 1, c: '2dsphere'}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleGeoPreds) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b" << 1 << "c"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{a: 1, b: 6, $and: ["
+ "{c: {$near: {$geometry: {type: 'Point', coordinates: [2, 2]}}}},"
+ "{c: {$geoWithin: {$box: [ [1, 1], [3, 3] ] } } } ] }"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a:1, b:1, c:'2dsphere'}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearCompoundTest) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b"
+ << "2dsphere"
+ << "c" << 1 << "d" << 1),
+ true);
+ runQuery(fromjson(
+ "{a: {$gte: 0}, c: {$gte: 0, $lt: 4}, d: {$gt: 1, $lt: 5},"
+ "b: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {d:{$gt:1},c:{$gte:0}}, node: "
+ "{geoNear2dsphere: {a: 1, b: '2dsphere', c: 1, d: 1}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundMultikey2DNear) {
+ // true means multikey
+ addIndex(BSON("a"
+ << "2d"
+ << "b" << 1),
+ true);
+ runQuery(fromjson("{a: {$near: [0, 0]}, b: {$gte: 0}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: { filter : {b:{$gte: 0}}, node: "
+ "{geoNear2d: {a: '2d', b: 1} } } }");
+}
+
+// SERVER-9257
+TEST_F(QueryPlannerTest, CompoundGeoNoGeoPredicate) {
+ addIndex(BSON("creationDate" << 1 << "foo.bar"
+ << "2dsphere"));
+ runQuerySortProj(
+ fromjson("{creationDate: { $gt: 7}}"), fromjson("{creationDate: 1}"), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {creationDate: 1}, limit: 0, "
+ "node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {creationDate: 1, 'foo.bar': '2dsphere'}}}}}");
+}
+
+// SERVER-9257
+TEST_F(QueryPlannerTest, CompoundGeoNoGeoPredicateMultikey) {
+ // true means multikey
+ addIndex(BSON("creationDate" << 1 << "foo.bar"
+ << "2dsphere"),
+ true);
+ runQuerySortProj(
+ fromjson("{creationDate: { $gt: 7}}"), fromjson("{creationDate: 1}"), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {creationDate: 1}, limit: 0, "
+ "node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {creationDate: 1, 'foo.bar': '2dsphere'}}}}}");
+}
+
+// Test that a 2dsphere index can satisfy a whole index scan solution if the query has a GEO
+// predicate on at least one of the indexed geo fields.
+// Currently fails. Tracked by SERVER-10801.
+/*
+TEST_F(QueryPlannerTest, SortOnGeoQuery) {
+ addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"));
+ BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", coordinates: [[[1, 1], [1, 90], [180, 90], [180, 1], [1, 1]]]}}}}");
+ BSONObj sort = fromjson("{timestamp: -1}");
+ runQuerySortProj(query, sort, BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{sort: {pattern: {timestamp: -1}, limit: 0, "
+ "node: {cscan: {dir: 1}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {timestamp: -1, position: '2dsphere'}}}}}");
+}
+
+TEST_F(QueryPlannerTest, SortOnGeoQueryMultikey) {
+ // true means multikey
+ addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"), true);
+ BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", "
+ "coordinates: [[[1, 1], [1, 90], [180, 90], [180, 1], [1, 1]]]}}}}");
+ BSONObj sort = fromjson("{timestamp: -1}");
+ runQuerySortProj(query, sort, BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{sort: {pattern: {timestamp: -1}, limit: 0, "
+ "node: {cscan: {dir: 1}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: "
+ "{timestamp: -1, position: '2dsphere'}}}}}");
+}
+*/
+
+
+//
+// Sort
+//
+
+TEST_F(QueryPlannerTest, CantUseNonCompoundGeoIndexToProvideSort) {
+ addIndex(BSON("x"
+ << "2dsphere"));
+ runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CantUseNonCompoundGeoIndexToProvideSortWithIndexablePred) {
+ addIndex(BSON("x"
+ << "2dsphere"));
+ runQuerySortProj(fromjson(
+ "{x: {$geoIntersects: {$geometry: {type: 'Point',"
+ " coordinates: [0, 0]}}}}"),
+ BSON("x" << 1),
+ BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, node: "
+ "{fetch: {node: "
+ "{ixscan: {pattern: {x: '2dsphere'}}}}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, node: "
+ "{cscan: {dir: 1}}}}");
+}
+
+TEST_F(QueryPlannerTest, CantUseCompoundGeoIndexToProvideSortIfNoGeoPred) {
+ addIndex(BSON("x" << 1 << "y"
+ << "2dsphere"));
+ runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CanUseCompoundGeoIndexToProvideSortWithGeoPred) {
+ addIndex(BSON("x" << 1 << "y"
+ << "2dsphere"));
+ runQuerySortProj(fromjson(
+ "{x: 1, y: {$geoIntersects: {$geometry: {type: 'Point',"
+ " coordinates: [0, 0]}}}}"),
+ BSON("x" << 1),
+ BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{fetch: {node: "
+ "{ixscan: {pattern: {x: 1, y: '2dsphere'}}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, node: "
+ "{cscan: {dir: 1}}}}");
+}
+
+//
+// Negation
+//
+
+//
+// 2D geo negation
+// The filter b != 1 is embedded in the geoNear2d node.
+// Can only do near + old point.
+//
+TEST_F(QueryPlannerTest, Negation2DGeoNear) {
+ addIndex(BSON("a"
+ << "2d"));
+ runQuery(fromjson("{$and: [{a: {$near: [0, 0], $maxDistance: 0.3}}, {b: {$ne: 1}}]}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: { geoNear2d: {a: '2d'} } } }");
+}
+
+//
+// 2DSphere geo negation
+// Filter is embedded in a separate fetch node.
+//
+TEST_F(QueryPlannerTest, Negation2DSphereGeoNear) {
+ // Can do nearSphere + old point, near + new point.
+ addIndex(BSON("a"
+ << "2dsphere"));
+
+ runQuery(fromjson(
+ "{$and: [{a: {$nearSphere: [0,0], $maxDistance: 0.31}}, "
+ "{b: {$ne: 1}}]}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
+
+ runQuery(fromjson(
+ "{$and: [{a: {$geoNear: {$geometry: {type: 'Point', "
+ "coordinates: [0, 0]},"
+ "$maxDistance: 100}}},"
+ "{b: {$ne: 1}}]}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
+}
+
+//
+// 2DSphere geo negation
+// Filter is embedded in a separate fetch node.
+//
+TEST_F(QueryPlannerTest, Negation2DSphereGeoNearMultikey) {
+ // Can do nearSphere + old point, near + new point.
+ // true means multikey
+ addIndex(BSON("a"
+ << "2dsphere"),
+ true);
+
+ runQuery(fromjson(
+ "{$and: [{a: {$nearSphere: [0,0], $maxDistance: 0.31}}, "
+ "{b: {$ne: 1}}]}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
+
+ runQuery(fromjson(
+ "{$and: [{a: {$geoNear: {$geometry: {type: 'Point', "
+ "coordinates: [0, 0]},"
+ "$maxDistance: 100}}},"
+ "{b: {$ne: 1}}]}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
+}
+
+//
+// 2dsphere V2 sparse indices, SERVER-9639
+//
+
+// Basic usage of a sparse 2dsphere index. V1 ignores the sparse field. We can use any prefix
+// of the index as every document is indexed.
+TEST_F(QueryPlannerTest, TwoDSphereSparseV1) {
+ // Create a V1 index.
+ addIndex(BSON("nonGeo" << 1 << "geo"
+ << "2dsphere"),
+ BSON("2dsphereIndexVersion" << 1));
+
+ // Can use the index for this.
+ runQuery(fromjson("{nonGeo: 7}"));
+ assertNumSolutions(2);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {nonGeo: 1, geo: '2dsphere'}}}}}");
+}
+
+// V2 is "geo sparse" and removes the nonGeo assignment.
+TEST_F(QueryPlannerTest, TwoDSphereSparseV2CantUse) {
+ // Create a V2 index.
+ addIndex(BSON("nonGeo" << 1 << "geo"
+ << "2dsphere"),
+ BSON("2dsphereIndexVersion" << 2));
+
+ // Can't use the index prefix here as it's a V2 index and we have no geo pred.
+ runQuery(fromjson("{nonGeo: 7}"));
+ assertNumSolutions(1);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+TEST_F(QueryPlannerTest, TwoDSphereSparseOnePred) {
+ // Create a V2 index.
+ addIndex(BSON("geo"
+ << "2dsphere"),
+ BSON("2dsphereIndexVersion" << 2));
+
+ // We can use the index here as we have a geo pred.
+ runQuery(fromjson("{geo : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}}"));
+ assertNumSolutions(2);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// V2 is geo-sparse and the planner removes the nonGeo assignment when there's no geo pred
+TEST_F(QueryPlannerTest, TwoDSphereSparseV2TwoPreds) {
+ addIndex(BSON("nonGeo" << 1 << "geo"
+ << "2dsphere"
+ << "geo2"
+ << "2dsphere"),
+ BSON("2dsphereIndexVersion" << 2));
+
+ // Non-geo preds can only use a collscan.
+ runQuery(fromjson("{nonGeo: 7}"));
+ assertNumSolutions(1);
+ assertSolutionExists("{cscan: {dir: 1}}");
+
+ // One geo pred so we can use the index.
+    runQuery(
+        fromjson("{nonGeo: 7, geo : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}"));
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+
+ // Two geo preds, so we can use the index still.
+ runQuery(fromjson(
+ "{nonGeo: 7, geo : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] }},"
+ " geo2 : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] }}}"));
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+}
+
+TEST_F(QueryPlannerTest, TwoDNearCompound) {
+ addIndex(BSON("geo"
+ << "2dsphere"
+ << "nongeo" << 1),
+ BSON("2dsphereIndexVersion" << 2));
+ runQuery(fromjson("{geo: {$nearSphere: [-71.34895, 42.46037]}}"));
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+}
+
+TEST_F(QueryPlannerTest, TwoDSphereSparseV2BelowOr) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+
+ addIndex(BSON("geo1"
+ << "2dsphere"
+ << "a" << 1 << "b" << 1),
+ BSON("2dsphereIndexVersion" << 2));
+ addIndex(BSON("geo2"
+ << "2dsphere"
+ << "a" << 1 << "b" << 1),
+ BSON("2dsphereIndexVersion" << 2));
+
+ runQuery(fromjson(
+ "{a: 4, b: 5, $or: ["
+ "{geo1: {$geoWithin: {$centerSphere: [[10, 20], 0.01]}}},"
+ "{geo2: {$geoWithin: {$centerSphere: [[10, 20], 0.01]}}}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {a: 4, b: 5}, node: {or: {nodes: ["
+ "{fetch: {node: {ixscan: {pattern: {geo1:'2dsphere',a:1,b:1}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {geo2:'2dsphere',a:1,b:1}}}}}"
+ "]}}}}");
+}
+
+TEST_F(QueryPlannerTest, TwoDSphereSparseV2BelowElemMatch) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a.b"
+ << "2dsphere"
+ << "a.c" << 1),
+ BSON("2dsphereIndexVersion" << 2));
+
+ runQuery(fromjson(
+ "{a: {$elemMatch: {b: {$geoWithin: {$centerSphere: [[10,20], 0.01]}},"
+ "c: {$gt: 3}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b': '2dsphere', 'a.c': 1}}}}}");
+}
} // namespace
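
The geo planner tests above all share one fixture shape. The sketch below distills it using only the QueryPlannerTest helpers visible in this diff (addIndex, runQuery, assertNumSolutions, assertSolutionExists); the test name and the exact predicate are illustrative and not part of the patch.

    // Hedged sketch of the recurring test pattern -- not part of the patch itself.
    TEST_F(QueryPlannerTest, ExampleGeoPlanShape) {
        // Register an index with the planner. A trailing 'true' would mark it
        // multikey; an extra BSONObj (e.g. {2dsphereIndexVersion: 2}) carries
        // index options, as in the sparse-index tests above.
        addIndex(BSON("a" << "2dsphere"));

        // Plan a query against the registered indexes.
        runQuery(fromjson("{a: {$geoWithin: {$centerSphere: [[10, 20], 0.01]}}}"));

        // Non-$near geo predicates typically produce both a collection scan and an
        // indexed solution; $near forces the geo index and leaves a single solution.
        assertNumSolutions(2U);
        assertSolutionExists("{cscan: {dir: 1}}");
        assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
    }
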
diff --git a/src/mongo/db/query/query_planner_params.h b/src/mongo/db/query/query_planner_params.h
index aef16655184..9e0b1a32fb3 100644
--- a/src/mongo/db/query/query_planner_params.h
+++ b/src/mongo/db/query/query_planner_params.h
@@ -36,76 +36,76 @@
namespace mongo {
- struct QueryPlannerParams {
-
- QueryPlannerParams() : options(DEFAULT),
- indexFiltersApplied(false),
- maxIndexedSolutions(internalQueryPlannerMaxIndexedSolutions) { }
-
- enum Options {
- // You probably want to set this.
- DEFAULT = 0,
-
- // Set this if you don't want a table scan.
- // See http://docs.mongodb.org/manual/reference/parameters/
- NO_TABLE_SCAN = 1,
-
- // Set this if you *always* want a collscan outputted, even if there's an ixscan. This
- // makes ranking less accurate, especially in the presence of blocking stages.
- INCLUDE_COLLSCAN = 1 << 1,
-
- // Set this if you're running on a sharded cluster. We'll add a "drop all docs that
- // shouldn't be on this shard" stage before projection.
- //
- // In order to set this, you must check
- // shardingState.needCollectionMetadata(current_namespace) in the same lock that you use
- // to build the query executor. You must also wrap the PlanExecutor in a ClientCursor
- // within the same lock. See the comment on ShardFilterStage for details.
- INCLUDE_SHARD_FILTER = 1 << 2,
-
- // Set this if you don't want any plans with a blocking sort stage. All sorts must be
- // provided by an index.
- NO_BLOCKING_SORT = 1 << 3,
-
- // Set this if you want to turn on index intersection.
- INDEX_INTERSECTION = 1 << 4,
-
- // Set this if you want to try to keep documents deleted or mutated during the execution
- // of the query in the query results.
- KEEP_MUTATIONS = 1 << 5,
-
- // Nobody should set this above the getExecutor interface. Internal flag set as a hint
- // to the planner that the caller is actually the count command.
- PRIVATE_IS_COUNT = 1 << 6,
-
- // Set this if you want to handle batchSize properly with sort(). If limits on SORT
- // stages are always actually limits, then this should be left off. If they are
- // sometimes to be interpreted as batchSize, then this should be turned on.
- SPLIT_LIMITED_SORT = 1 << 7,
-
- // Set this to prevent the planner from generating plans which answer a predicate
- // implicitly via exact index bounds for index intersection solutions.
- CANNOT_TRIM_IXISECT = 1 << 8,
- };
-
- // See Options enum above.
- size_t options;
-
- // What indices are available for planning?
- std::vector<IndexEntry> indices;
-
- // What's our shard key? If INCLUDE_SHARD_FILTER is set we will create a shard filtering
- // stage. If we know the shard key, we can perform covering analysis instead of always
- // forcing a fetch.
- BSONObj shardKey;
-
- // Were index filters applied to indices?
- bool indexFiltersApplied;
-
- // What's the max number of indexed solutions we want to output? It's expensive to compare
- // plans via the MultiPlanStage, and the set of possible plans is very large for certain
- // index+query combinations.
- size_t maxIndexedSolutions;
+struct QueryPlannerParams {
+ QueryPlannerParams()
+ : options(DEFAULT),
+ indexFiltersApplied(false),
+ maxIndexedSolutions(internalQueryPlannerMaxIndexedSolutions) {}
+
+ enum Options {
+ // You probably want to set this.
+ DEFAULT = 0,
+
+ // Set this if you don't want a table scan.
+ // See http://docs.mongodb.org/manual/reference/parameters/
+ NO_TABLE_SCAN = 1,
+
+ // Set this if you *always* want a collscan outputted, even if there's an ixscan. This
+ // makes ranking less accurate, especially in the presence of blocking stages.
+ INCLUDE_COLLSCAN = 1 << 1,
+
+ // Set this if you're running on a sharded cluster. We'll add a "drop all docs that
+ // shouldn't be on this shard" stage before projection.
+ //
+ // In order to set this, you must check
+ // shardingState.needCollectionMetadata(current_namespace) in the same lock that you use
+ // to build the query executor. You must also wrap the PlanExecutor in a ClientCursor
+ // within the same lock. See the comment on ShardFilterStage for details.
+ INCLUDE_SHARD_FILTER = 1 << 2,
+
+ // Set this if you don't want any plans with a blocking sort stage. All sorts must be
+ // provided by an index.
+ NO_BLOCKING_SORT = 1 << 3,
+
+ // Set this if you want to turn on index intersection.
+ INDEX_INTERSECTION = 1 << 4,
+
+ // Set this if you want to try to keep documents deleted or mutated during the execution
+ // of the query in the query results.
+ KEEP_MUTATIONS = 1 << 5,
+
+ // Nobody should set this above the getExecutor interface. Internal flag set as a hint
+ // to the planner that the caller is actually the count command.
+ PRIVATE_IS_COUNT = 1 << 6,
+
+ // Set this if you want to handle batchSize properly with sort(). If limits on SORT
+ // stages are always actually limits, then this should be left off. If they are
+ // sometimes to be interpreted as batchSize, then this should be turned on.
+ SPLIT_LIMITED_SORT = 1 << 7,
+
+ // Set this to prevent the planner from generating plans which answer a predicate
+ // implicitly via exact index bounds for index intersection solutions.
+ CANNOT_TRIM_IXISECT = 1 << 8,
};
+ // See Options enum above.
+ size_t options;
+
+ // What indices are available for planning?
+ std::vector<IndexEntry> indices;
+
+ // What's our shard key? If INCLUDE_SHARD_FILTER is set we will create a shard filtering
+ // stage. If we know the shard key, we can perform covering analysis instead of always
+ // forcing a fetch.
+ BSONObj shardKey;
+
+ // Were index filters applied to indices?
+ bool indexFiltersApplied;
+
+ // What's the max number of indexed solutions we want to output? It's expensive to compare
+ // plans via the MultiPlanStage, and the set of possible plans is very large for certain
+ // index+query combinations.
+ size_t maxIndexedSolutions;
+};
+
} // namespace mongo
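
For orientation, here is a minimal sketch of how a caller might populate this struct, using only the fields and flags declared above; the surrounding variable name (relevantIndices) is an illustrative assumption, not something introduced by this patch.

    // Hedged sketch -- assumes a std::vector<IndexEntry> named 'relevantIndices'
    // was gathered elsewhere.
    QueryPlannerParams params;

    // The Options values are bit flags (1, 1 << 1, ...), so combine them with bitwise OR.
    params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::NO_BLOCKING_SORT;

    // Indexes the planner is allowed to consider.
    params.indices = relevantIndices;

    // Whether index filters shaped that list; echoed into each QuerySolution's
    // indexFilterApplied flag, as the planner tests check.
    params.indexFiltersApplied = false;

    // maxIndexedSolutions already defaults to internalQueryPlannerMaxIndexedSolutions
    // in the constructor, so it only needs to be set here to override that cap.
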
diff --git a/src/mongo/db/query/query_planner_test.cpp b/src/mongo/db/query/query_planner_test.cpp
index e1abb290262..2c0ca9167a5 100644
--- a/src/mongo/db/query/query_planner_test.cpp
+++ b/src/mongo/db/query/query_planner_test.cpp
@@ -39,3443 +39,3778 @@
namespace {
- using namespace mongo;
+using namespace mongo;
- //
- // Equality
- //
+//
+// Equality
+//
- TEST_F(QueryPlannerTest, EqualityIndexScan) {
- addIndex(BSON("x" << 1));
+TEST_F(QueryPlannerTest, EqualityIndexScan) {
+ addIndex(BSON("x" << 1));
- runQuery(BSON("x" << 5));
+ runQuery(BSON("x" << 5));
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, EqualityIndexScanWithTrailingFields) {
- addIndex(BSON("x" << 1 << "y" << 1));
-
- runQuery(BSON("x" << 5));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1, y: 1}}}}}");
- }
-
- //
- // indexFilterApplied
- // Check that index filter flag is passed from planner params
- // to generated query solution.
- //
-
- TEST_F(QueryPlannerTest, IndexFilterAppliedDefault) {
- addIndex(BSON("x" << 1));
-
- runQuery(BSON("x" << 5));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
-
- // Check indexFilterApplied in query solutions;
- for (std::vector<QuerySolution*>::const_iterator it = solns.begin();
- it != solns.end();
- ++it) {
- QuerySolution* soln = *it;
- ASSERT_FALSE(soln->indexFilterApplied);
- }
- }
-
- TEST_F(QueryPlannerTest, IndexFilterAppliedTrue) {
- params.indexFiltersApplied = true;
-
- addIndex(BSON("x" << 1));
-
- runQuery(BSON("x" << 5));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
-
- // Check indexFilterApplied in query solutions;
- for (std::vector<QuerySolution*>::const_iterator it = solns.begin();
- it != solns.end();
- ++it) {
- QuerySolution* soln = *it;
- ASSERT_EQUALS(params.indexFiltersApplied, soln->indexFilterApplied);
- }
- }
-
- //
- // <
- //
-
- TEST_F(QueryPlannerTest, LessThan) {
- addIndex(BSON("x" << 1));
-
- runQuery(BSON("x" << BSON("$lt" << 5)));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: {$lt: 5}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- //
- // <=
- //
-
- TEST_F(QueryPlannerTest, LessThanEqual) {
- addIndex(BSON("x" << 1));
-
- runQuery(BSON("x" << BSON("$lte" << 5)));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: {$lte: 5}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- }
-
- //
- // >
- //
-
- TEST_F(QueryPlannerTest, GreaterThan) {
- addIndex(BSON("x" << 1));
-
- runQuery(BSON("x" << BSON("$gt" << 5)));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: {$gt: 5}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- }
-
- //
- // >=
- //
-
- TEST_F(QueryPlannerTest, GreaterThanEqual) {
- addIndex(BSON("x" << 1));
-
- runQuery(BSON("x" << BSON("$gte" << 5)));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: {$gte: 5}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- }
-
- //
- // Mod
- //
-
- TEST_F(QueryPlannerTest, Mod) {
- addIndex(BSON("a" << 1));
-
- runQuery(fromjson("{a: {$mod: [2, 0]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a: {$mod: [2, 0]}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: {a: {$mod: [2, 0]}}, pattern: {a: 1}}}}}");
- }
-
- //
- // Exists
- //
-
- TEST_F(QueryPlannerTest, ExistsTrue) {
- addIndex(BSON("x" << 1));
-
- runQuery(fromjson("{x: {$exists: true}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ExistsFalse) {
- addIndex(BSON("x" << 1));
-
- runQuery(fromjson("{x: {$exists: false}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ExistsTrueSparseIndex) {
- addIndex(BSON("x" << 1), false, true);
-
- runQuery(fromjson("{x: {$exists: true}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ExistsFalseSparseIndex) {
- addIndex(BSON("x" << 1), false, true);
-
- runQuery(fromjson("{x: {$exists: false}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- TEST_F(QueryPlannerTest, ExistsTrueOnUnindexedField) {
- addIndex(BSON("x" << 1));
-
- runQuery(fromjson("{x: 1, y: {$exists: true}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ExistsFalseOnUnindexedField) {
- addIndex(BSON("x" << 1));
-
- runQuery(fromjson("{x: 1, y: {$exists: false}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ExistsTrueSparseIndexOnOtherField) {
- addIndex(BSON("x" << 1), false, true);
-
- runQuery(fromjson("{x: 1, y: {$exists: true}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ExistsFalseSparseIndexOnOtherField) {
- addIndex(BSON("x" << 1), false, true);
-
- runQuery(fromjson("{x: 1, y: {$exists: false}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ExistsBounds) {
- addIndex(BSON("b" << 1));
-
- runQuery(fromjson("{b: {$exists: true}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {b: {$exists: true}}, node: "
- "{ixscan: {pattern: {b: 1}, bounds: "
- "{b: [['MinKey', 'MaxKey', true, true]]}}}}}");
-
- // This ends up being a double negation, which we currently don't index.
- runQuery(fromjson("{b: {$not: {$exists: false}}}"));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
-
- runQuery(fromjson("{b: {$exists: false}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {b: {$exists: false}}, node: "
- "{ixscan: {pattern: {b: 1}, bounds: "
- "{b: [[null, null, true, true]]}}}}}");
-
- runQuery(fromjson("{b: {$not: {$exists: true}}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {b: {$exists: false}}, node: "
- "{ixscan: {pattern: {b: 1}, bounds: "
- "{b: [[null, null, true, true]]}}}}}");
- }
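-
- // The bounds above follow from how missing fields are indexed: a non-sparse index stores a
- // null key for a document with no 'b', so {$exists: false} can use the tight [null, null]
- // interval (the fetch filter then separates missing from explicit null), while
- // {$exists: true} must scan [MinKey, MaxKey] and re-check the predicate during the fetch.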
-
- TEST_F(QueryPlannerTest, ExistsBoundsCompound) {
- addIndex(BSON("a" << 1 << "b" << 1));
-
- runQuery(fromjson("{a: 1, b: {$exists: true}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {b: {$exists: true}}, node: "
- "{ixscan: {pattern: {a: 1, b: 1}, bounds: "
- "{a: [[1,1,true,true]], b: [['MinKey','MaxKey',true,true]]}}}}}");
-
- // This ends up being a double negation, which we currently don't index.
- runQuery(fromjson("{a: 1, b: {$not: {$exists: false}}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}, bounds: "
- "{a: [[1,1,true,true]], b: [['MinKey','MaxKey',true,true]]}}}}}");
-
- runQuery(fromjson("{a: 1, b: {$exists: false}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {b: {$exists: false}}, node: "
- "{ixscan: {pattern: {a: 1, b: 1}, bounds: "
- "{a: [[1,1,true,true]], b: [[null,null,true,true]]}}}}}");
-
- runQuery(fromjson("{a: 1, b: {$not: {$exists: true}}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {b: {$exists: false}}, node: "
- "{ixscan: {pattern: {a: 1, b: 1}, bounds: "
- "{a: [[1,1,true,true]], b: [[null,null,true,true]]}}}}}");
- }
-
- //
- // skip and limit
- //
-
- TEST_F(QueryPlannerTest, BasicSkipNoIndex) {
- addIndex(BSON("a" << 1));
-
- runQuerySkipLimit(BSON("x" << 5), 3, 0);
-
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{skip: {n: 3, node: {cscan: {dir: 1, filter: {x: 5}}}}}");
- }
-
- TEST_F(QueryPlannerTest, BasicSkipWithIndex) {
- addIndex(BSON("a" << 1 << "b" << 1));
-
- runQuerySkipLimit(BSON("a" << 5), 8, 0);
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{skip: {n: 8, node: {cscan: {dir: 1, filter: {a: 5}}}}}");
- assertSolutionExists("{skip: {n: 8, node: {fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, BasicLimitNoIndex) {
- addIndex(BSON("a" << 1));
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
+ assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
+}
- runQuerySkipLimit(BSON("x" << 5), 0, -3);
+TEST_F(QueryPlannerTest, EqualityIndexScanWithTrailingFields) {
+ addIndex(BSON("x" << 1 << "y" << 1));
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{limit: {n: 3, node: {cscan: {dir: 1, filter: {x: 5}}}}}");
- }
+ runQuery(BSON("x" << 5));
- TEST_F(QueryPlannerTest, BasicSoftLimitNoIndex) {
- addIndex(BSON("a" << 1));
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
+ assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1, y: 1}}}}}");
+}
- runQuerySkipLimit(BSON("x" << 5), 0, 3);
+//
+// indexFilterApplied
+// Check that index filter flag is passed from planner params
+// to generated query solution.
+//
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
- }
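-
- // Contrast with BasicLimitNoIndex above: a negative limit is a hard limit and always
- // produces an explicit LIMIT stage, whereas a positive (soft) limit may be dropped from
- // the plan entirely.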
+TEST_F(QueryPlannerTest, IndexFilterAppliedDefault) {
+ addIndex(BSON("x" << 1));
- TEST_F(QueryPlannerTest, BasicLimitWithIndex) {
- addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(BSON("x" << 5));
- runQuerySkipLimit(BSON("a" << 5), 0, -5);
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
+ assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{limit: {n: 5, node: {cscan: {dir: 1, filter: {a: 5}}}}}");
- assertSolutionExists("{limit: {n: 5, node: {fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}}}");
+ // Check indexFilterApplied in query solutions.
+ for (std::vector<QuerySolution*>::const_iterator it = solns.begin(); it != solns.end(); ++it) {
+ QuerySolution* soln = *it;
+ ASSERT_FALSE(soln->indexFilterApplied);
}
+}
- TEST_F(QueryPlannerTest, BasicSoftLimitWithIndex) {
- addIndex(BSON("a" << 1 << "b" << 1));
-
- runQuerySkipLimit(BSON("a" << 5), 0, 5);
+TEST_F(QueryPlannerTest, IndexFilterAppliedTrue) {
+ params.indexFiltersApplied = true;
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a: 5}}}}");
- assertSolutionExists("{fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}");
- }
+ addIndex(BSON("x" << 1));
- TEST_F(QueryPlannerTest, SkipAndLimit) {
- addIndex(BSON("x" << 1));
+ runQuery(BSON("x" << 5));
- runQuerySkipLimit(BSON("x" << BSON("$lte" << 4)), 7, -2);
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
+ assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{limit: {n: 2, node: {skip: {n: 7, node: "
- "{cscan: {dir: 1, filter: {x: {$lte: 4}}}}}}}}");
- assertSolutionExists("{limit: {n: 2, node: {skip: {n: 7, node: {fetch: "
- "{filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}}}}}");
+ // Check indexFilterApplied in query solutions.
+ for (std::vector<QuerySolution*>::const_iterator it = solns.begin(); it != solns.end(); ++it) {
+ QuerySolution* soln = *it;
+ ASSERT_EQUALS(params.indexFiltersApplied, soln->indexFilterApplied);
}
+}
- TEST_F(QueryPlannerTest, SkipAndSoftLimit) {
- addIndex(BSON("x" << 1));
+//
+// <
+//
- runQuerySkipLimit(BSON("x" << BSON("$lte" << 4)), 7, 2);
+TEST_F(QueryPlannerTest, LessThan) {
+ addIndex(BSON("x" << 1));
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{skip: {n: 7, node: "
- "{cscan: {dir: 1, filter: {x: {$lte: 4}}}}}}");
- assertSolutionExists("{skip: {n: 7, node: {fetch: "
- "{filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}}}");
- }
+ runQuery(BSON("x" << BSON("$lt" << 5)));
- //
- // tree operations
- //
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: {$lt: 5}}}}");
+ assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
+}
- TEST_F(QueryPlannerTest, TwoPredicatesAnding) {
- addIndex(BSON("x" << 1));
+//
+// <=
+//
- runQuery(fromjson("{$and: [ {x: {$gt: 1}}, {x: {$lt: 3}} ] }"));
+TEST_F(QueryPlannerTest, LessThanEqual) {
+ addIndex(BSON("x" << 1));
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- }
+ runQuery(BSON("x" << BSON("$lte" << 5)));
- TEST_F(QueryPlannerTest, SimpleOr) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a: 20}, {a: 21}]}"));
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: {$lte: 5}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+}
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$or: [{a: 20}, {a: 21}]}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a:1}}}}}");
- }
+//
+// >
+//
- TEST_F(QueryPlannerTest, OrWithoutEnoughIndices) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a: 20}, {b: 21}]}"));
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$or: [{a: 20}, {b: 21}]}}}");
- }
+TEST_F(QueryPlannerTest, GreaterThan) {
+ addIndex(BSON("x" << 1));
- TEST_F(QueryPlannerTest, OrWithAndChild) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a: 20}, {$and: [{a:1}, {b:7}]}]}"));
+ runQuery(BSON("x" << BSON("$gt" << 5)));
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a: 1}}}, "
- "{fetch: {filter: {b: 7}, node: {ixscan: "
- "{filter: null, pattern: {a: 1}}}}}]}}}}");
- }
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: {$gt: 5}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+}
- TEST_F(QueryPlannerTest, AndWithUnindexedOrChild) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{a:20, $or: [{b:1}, {c:7}]}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
-
- // Logical rewrite means we could get one of these two outcomes:
- size_t matches = 0;
- matches += numSolutionMatches("{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}");
- matches += numSolutionMatches("{or: {filter: null, nodes: ["
- "{fetch: {filter: {b:1}, node: {"
- "ixscan: {filter: null, pattern: {a:1}}}}},"
- "{fetch: {filter: {c:7}, node: {"
- "ixscan: {filter: null, pattern: {a:1}}}}}]}}");
- ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
- }
+//
+// >=
+//
+TEST_F(QueryPlannerTest, GreaterThanEqual) {
+ addIndex(BSON("x" << 1));
- TEST_F(QueryPlannerTest, AndWithOrWithOneIndex) {
- addIndex(BSON("b" << 1));
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{b:1}, {c:7}], a:20}"));
-
- // Logical rewrite gives us at least one of these:
- assertSolutionExists("{cscan: {dir: 1}}");
- size_t matches = 0;
- matches += numSolutionMatches("{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
- matches += numSolutionMatches("{or: {filter: null, nodes: ["
- "{fetch: {filter: {b:1}, node: {"
- "ixscan: {filter: null, pattern: {a:1}}}}},"
- "{fetch: {filter: {c:7}, node: {"
- "ixscan: {filter: null, pattern: {a:1}}}}}]}}");
- ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
- }
+ runQuery(BSON("x" << BSON("$gte" << 5)));
- //
- // Additional $or tests
- //
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: {$gte: 5}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+}
- TEST_F(QueryPlannerTest, OrCollapsesToSingleScan) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a:{$gt:2}}, {a:{$gt:0}}]}"));
+//
+// Mod
+//
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [[0,Infinity,false,true]]}}}}}");
- }
+TEST_F(QueryPlannerTest, Mod) {
+ addIndex(BSON("a" << 1));
- TEST_F(QueryPlannerTest, OrCollapsesToSingleScan2) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a:{$lt:2}}, {a:{$lt:4}}]}"));
+ runQuery(fromjson("{a: {$mod: [2, 0]}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [[-Infinity,4,true,false]]}}}}}");
- }
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a: {$mod: [2, 0]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: {a: {$mod: [2, 0]}}, pattern: {a: 1}}}}}");
+}
- TEST_F(QueryPlannerTest, OrCollapsesToSingleScan3) {
- addIndex(BSON("a" << 1));
- runQueryHint(fromjson("{$or: [{a:1},{a:3}]}"), fromjson("{a:1}"));
+//
+// Exists
+//
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [[1,1,true,true], [3,3,true,true]]}}}}}");
- }
+TEST_F(QueryPlannerTest, ExistsTrue) {
+ addIndex(BSON("x" << 1));
- TEST_F(QueryPlannerTest, OrOnlyOneBranchCanUseIndex) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a:1}, {b:2}]}"));
+ runQuery(fromjson("{x: {$exists: true}}"));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
+}
- TEST_F(QueryPlannerTest, OrOnlyOneBranchCanUseIndexHinted) {
- addIndex(BSON("a" << 1));
- runQueryHint(fromjson("{$or: [{a:1}, {b:2}]}"), fromjson("{a:1}"));
+TEST_F(QueryPlannerTest, ExistsFalse) {
+ addIndex(BSON("x" << 1));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {$or:[{a:1},{b:2}]}, node: {ixscan: "
- "{pattern: {a:1}, bounds: "
- "{a: [['MinKey','MaxKey',true,true]]}}}}}");
- }
+ runQuery(fromjson("{x: {$exists: false}}"));
- TEST_F(QueryPlannerTest, OrNaturalHint) {
- addIndex(BSON("a" << 1));
- runQueryHint(fromjson("{$or: [{a:1}, {a:3}]}"), fromjson("{$natural:1}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
+}
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
+TEST_F(QueryPlannerTest, ExistsTrueSparseIndex) {
+ addIndex(BSON("x" << 1), false, true);
- // SERVER-13714. A non-top-level indexable negation exposed a bug in plan enumeration.
- TEST_F(QueryPlannerTest, NonTopLevelIndexedNegation) {
- addIndex(BSON("state" << 1));
- addIndex(BSON("is_draft" << 1));
- addIndex(BSON("published_date" << 1));
- addIndex(BSON("newsroom_id" << 1));
-
- BSONObj queryObj = fromjson("{$and:[{$or:[{is_draft:false},{creator_id:1}]},"
- "{$or:[{state:3,is_draft:false},"
- "{published_date:{$ne:null}}]},"
- "{newsroom_id:{$in:[1]}}]}");
- runQuery(queryObj);
- }
-
- TEST_F(QueryPlannerTest, NonTopLevelIndexedNegationMinQuery) {
- addIndex(BSON("state" << 1));
- addIndex(BSON("is_draft" << 1));
- addIndex(BSON("published_date" << 1));
-
- // This is the minimal query needed to reproduce SERVER-13714.
- BSONObj queryObj = fromjson("{$or:[{state:1, is_draft:1}, {published_date:{$ne: 1}}]}");
- runQuery(queryObj);
- }
-
- // SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
- TEST_F(QueryPlannerTest, OrOfAnd) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a:{$gt:2,$lt:10}}, {a:{$gt:0,$lt:5}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {pattern: {a:1}, bounds: {a: [[2,10,false,false]]}}}, "
- "{ixscan: {pattern: {a:1}, bounds: "
- "{a: [[0,5,false,false]]}}}]}}}}");
- }
-
- // SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
- TEST_F(QueryPlannerTest, OrOfAnd2) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a:{$gt:2,$lt:10}}, {a:{$gt:0,$lt:15}}, {a:{$gt:20}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {pattern: {a:1}, bounds: {a: [[2,10,false,false]]}}}, "
- "{ixscan: {pattern: {a:1}, bounds: {a: [[0,15,false,false]]}}}, "
- "{ixscan: {pattern: {a:1}, bounds: "
- "{a: [[20,Infinity,false,true]]}}}]}}}}");
- }
-
- // SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
- TEST_F(QueryPlannerTest, OrOfAnd3) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a:{$gt:1,$lt:5},b:6}, {a:3,b:{$gt:0,$lt:10}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{or: {nodes: ["
- "{fetch: {filter: {b:6}, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [[1,5,false,false]]}}}}}, "
- "{fetch: {filter: {$and:[{b:{$lt:10}},{b:{$gt:0}}]}, node: "
- "{ixscan: {pattern: {a:1}, bounds: {a:[[3,3,true,true]]}}}}}]}}");
- }
-
- // SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
- TEST_F(QueryPlannerTest, OrOfAnd4) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{$or: [{a:{$gt:1,$lt:5}, b:{$gt:0,$lt:3}, c:6}, "
- "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{or: {nodes: ["
- "{fetch: {filter: {c:6}, node: {ixscan: {pattern: {a:1,b:1}, "
- "bounds: {a: [[1,5,false,false]], b: [[0,3,false,false]]}}}}}, "
- "{fetch: {filter: {$and:[{c:{$lt:10}},{c:{$gt:0}}]}, node: "
- "{ixscan: {pattern: {a:1,b:1}, "
- " bounds: {a:[[3,3,true,true]], b:[[1,2,false,false]]}}}}}]}}");
- }
-
- // SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
- TEST_F(QueryPlannerTest, OrOfAnd5) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{$or: [{a:{$gt:1,$lt:5}, c:6}, "
- "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{or: {nodes: ["
- "{fetch: {filter: {c:6}, node: {ixscan: {pattern: {a:1,b:1}, "
- "bounds: {a: [[1,5,false,false]], "
- "b: [['MinKey','MaxKey',true,true]]}}}}}, "
- "{fetch: {filter: {$and:[{c:{$lt:10}},{c:{$gt:0}}]}, node: "
- "{ixscan: {pattern: {a:1,b:1}, "
- " bounds: {a:[[3,3,true,true]], b:[[1,2,false,false]]}}}}}]}}");
- }
-
- // SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
- TEST_F(QueryPlannerTest, OrOfAnd6) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{$or: [{a:{$in:[1]},b:{$in:[1]}}, {a:{$in:[1,5]},b:{$in:[1,5]}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {pattern: {a:1,b:1}, bounds: "
- "{a: [[1,1,true,true]], b: [[1,1,true,true]]}}}, "
- "{ixscan: {pattern: {a:1,b:1}, bounds: "
- "{a: [[1,1,true,true], [5,5,true,true]], "
- " b: [[1,1,true,true], [5,5,true,true]]}}}]}}}}");
- }
-
- // SERVER-13960: properly handle $or with a mix of exact and inexact predicates.
- TEST_F(QueryPlannerTest, OrInexactWithExact) {
- addIndex(BSON("name" << 1));
- runQuery(fromjson("{$or: [{name: 'thomas'}, {name: /^alexand(er|ra)/}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {filter:"
- "{$or: [{name: 'thomas'}, {name: /^alexand(er|ra)/}]},"
- "pattern: {name: 1}}}}}");
- }
-
- // SERVER-13960: multiple indices, each with an inexact covered predicate.
- TEST_F(QueryPlannerTest, OrInexactWithExact2) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuery(fromjson("{$or: [{a: 'foo'}, {a: /bar/}, {b: 'foo'}, {b: /bar/}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {or: {nodes: ["
- "{ixscan: {filter: {$or:[{a:'foo'},{a:/bar/}]},"
- "pattern: {a: 1}}},"
- "{ixscan: {filter: {$or:[{b:'foo'},{b:/bar/}]},"
- "pattern: {b: 1}}}]}}}}");
- }
-
- // SERVER-13960: an exact, inexact covered, and inexact fetch predicate.
- TEST_F(QueryPlannerTest, OrAllThreeTightnesses) {
- addIndex(BSON("names" << 1));
- runQuery(fromjson("{$or: [{names: 'frank'}, {names: /^al(ice)|(ex)/},"
- "{names: {$elemMatch: {$eq: 'thomas'}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: "
- "{$or: [{names: 'frank'}, {names: /^al(ice)|(ex)/},"
- "{names: {$elemMatch: {$eq: 'thomas'}}}]}, "
- "node: {ixscan: {filter: null, pattern: {names: 1}}}}}");
- }
-
- // SERVER-13960: two inexact fetch predicates.
- TEST_F(QueryPlannerTest, OrTwoInexactFetch) {
- // true means multikey
- addIndex(BSON("names" << 1), true);
- runQuery(fromjson("{$or: [{names: {$elemMatch: {$eq: 'alexandra'}}},"
- "{names: {$elemMatch: {$eq: 'thomas'}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: "
- "{$or: [{names: {$elemMatch: {$eq: 'alexandra'}}},"
- "{names: {$elemMatch: {$eq: 'thomas'}}}]}, "
- "node: {ixscan: {filter: null, pattern: {names: 1}}}}}");
- }
-
- // SERVER-13960: multikey with exact and inexact covered predicates.
- TEST_F(QueryPlannerTest, OrInexactCoveredMultikey) {
- // true means multikey
- addIndex(BSON("names" << 1), true);
- runQuery(fromjson("{$or: [{names: 'dave'}, {names: /joe/}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$or: [{names: 'dave'}, {names: /joe/}]}, "
- "node: {ixscan: {filter: null, pattern: {names: 1}}}}}");
- }
-
- // SERVER-13960: $elemMatch object with $or.
- TEST_F(QueryPlannerTest, OrElemMatchObject) {
- // true means multikey
- addIndex(BSON("a.b" << 1), true);
- runQuery(fromjson("{$or: [{a: {$elemMatch: {b: {$lte: 1}}}},"
- "{a: {$elemMatch: {b: {$gte: 4}}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{or: {nodes: ["
- "{fetch: {filter: {a:{$elemMatch:{b:{$gte:4}}}}, node: "
- "{ixscan: {filter: null, pattern: {'a.b': 1}}}}},"
- "{fetch: {filter: {a:{$elemMatch:{b:{$lte:1}}}}, node: "
- "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}]}}");
- }
-
- // SERVER-13960: $elemMatch object inside an $or, below an AND.
- TEST_F(QueryPlannerTest, OrElemMatchObjectBeneathAnd) {
- // true means multikey
- addIndex(BSON("a.b" << 1), true);
- runQuery(fromjson("{$or: [{'a.b': 0, a: {$elemMatch: {b: {$lte: 1}}}},"
- "{a: {$elemMatch: {b: {$gte: 4}}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{or: {nodes: ["
- "{fetch: {filter: {$and:[{a:{$elemMatch:{b:{$lte:1}}}},{'a.b':0}]},"
- "node: {ixscan: {filter: null, pattern: {'a.b': 1}, "
- "bounds: {'a.b': [[-Infinity,1,true,true]]}}}}},"
- "{fetch: {filter: {a:{$elemMatch:{b:{$gte:4}}}}, node: "
- "{ixscan: {filter: null, pattern: {'a.b': 1},"
- "bounds: {'a.b': [[4,Infinity,true,true]]}}}}}]}}");
- }
-
- // SERVER-13960: $or below $elemMatch with an inexact covered predicate.
- TEST_F(QueryPlannerTest, OrBelowElemMatchInexactCovered) {
- // true means multikey
- addIndex(BSON("a.b" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {$or: [{b: 'x'}, {b: /z/}]}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a: {$elemMatch: {$or: [{b: 'x'}, {b: /z/}]}}},"
- "node: {ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
- }
-
- // SERVER-13960: $in with exact and inexact covered predicates.
- TEST_F(QueryPlannerTest, OrWithExactAndInexact) {
- addIndex(BSON("name" << 1));
- runQuery(fromjson("{name: {$in: ['thomas', /^alexand(er|ra)/]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: {name: {$in: ['thomas', /^alexand(er|ra)/]}}, "
- "pattern: {name: 1}}}}}");
- }
-
- // SERVER-13960: $in with exact, inexact covered, and inexact fetch predicates.
- TEST_F(QueryPlannerTest, OrWithExactAndInexact2) {
- addIndex(BSON("name" << 1));
- runQuery(fromjson("{$or: [{name: {$in: ['thomas', /^alexand(er|ra)/]}},"
- "{name: {$exists: false}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$or: [{name: {$in: ['thomas', /^alexand(er|ra)/]}},"
- "{name: {$exists: false}}]}, "
- "node: {ixscan: {filter: null, pattern: {name: 1}}}}}");
- }
-
- // SERVER-13960: $in with exact, inexact covered, and inexact fetch predicates
- // over two indices.
- TEST_F(QueryPlannerTest, OrWithExactAndInexact3) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuery(fromjson("{$or: [{a: {$in: [/z/, /x/]}}, {a: 'w'},"
- "{b: {$exists: false}}, {b: {$in: ['p']}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {filter: {$or:[{a:{$in:[/z/, /x/]}}, {a:'w'}]}, "
- "pattern: {a: 1}}}, "
- "{fetch: {filter: {$or:[{b:{$exists:false}}, {b:{$in:['p']}}]},"
- "node: {ixscan: {filter: null, pattern: {b: 1}}}}}]}}}}");
- }
-
- //
- // Min/Max
- //
-
- TEST_F(QueryPlannerTest, MinValid) {
- addIndex(BSON("a" << 1));
- runQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), BSONObj());
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, MinWithoutIndex) {
- runInvalidQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), BSONObj());
- }
-
- TEST_F(QueryPlannerTest, MinBadHint) {
- addIndex(BSON("b" << 1));
- runInvalidQueryHintMinMax(BSONObj(), fromjson("{b: 1}"), fromjson("{a: 1}"), BSONObj());
- }
-
- TEST_F(QueryPlannerTest, MaxValid) {
- addIndex(BSON("a" << 1));
- runQueryHintMinMax(BSONObj(), BSONObj(), BSONObj(), fromjson("{a: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, MinMaxSameValue) {
- addIndex(BSON("a" << 1));
- runQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), fromjson("{a: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, MaxWithoutIndex) {
- runInvalidQueryHintMinMax(BSONObj(), BSONObj(), BSONObj(), fromjson("{a: 1}"));
- }
-
- TEST_F(QueryPlannerTest, MaxBadHint) {
- addIndex(BSON("b" << 1));
- runInvalidQueryHintMinMax(BSONObj(), fromjson("{b: 1}"), BSONObj(), fromjson("{a: 1}"));
- }
-
- TEST_F(QueryPlannerTest, MaxMinSort) {
- addIndex(BSON("a" << 1));
-
- // Run an empty query, sort {a: 1}, max/min arguments.
- runQueryFull(BSONObj(), fromjson("{a: 1}"), BSONObj(), 0, 0, BSONObj(),
- fromjson("{a: 2}"), fromjson("{a: 8}"), false);
-
- assertNumSolutions(1);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, MaxMinReverseSort) {
- addIndex(BSON("a" << 1));
-
- // Run an empty query, sort {a: -1}, max/min arguments.
- runQueryFull(BSONObj(), fromjson("{a: -1}"), BSONObj(), 0, 0, BSONObj(),
- fromjson("{a: 2}"), fromjson("{a: 8}"), false);
-
- assertNumSolutions(1);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: -1, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, MaxMinReverseIndexDir) {
- addIndex(BSON("a" << -1));
-
- // Because the index is descending, the min is numerically larger than the max.
- runQueryFull(BSONObj(), fromjson("{a: -1}"), BSONObj(), 0, 0, BSONObj(),
- fromjson("{a: 8}"), fromjson("{a: 2}"), false);
-
- assertNumSolutions(1);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: 1, pattern: {a: -1}}}}}");
- }
+ runQuery(fromjson("{x: {$exists: true}}"));
- TEST_F(QueryPlannerTest, MaxMinReverseIndexDirSort) {
- addIndex(BSON("a" << -1));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ExistsFalseSparseIndex) {
+ addIndex(BSON("x" << 1), false, true);
+
+ runQuery(fromjson("{x: {$exists: false}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
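+
+ // A sparse index on {x: 1} has no entry for a document that is missing 'x', so it cannot
+ // be used to answer {x: {$exists: false}}; the collection scan is the only solution.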
+
+TEST_F(QueryPlannerTest, ExistsTrueOnUnindexedField) {
+ addIndex(BSON("x" << 1));
+
+ runQuery(fromjson("{x: 1, y: {$exists: true}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ExistsFalseOnUnindexedField) {
+ addIndex(BSON("x" << 1));
+
+ runQuery(fromjson("{x: 1, y: {$exists: false}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ExistsTrueSparseIndexOnOtherField) {
+ addIndex(BSON("x" << 1), false, true);
+
+ runQuery(fromjson("{x: 1, y: {$exists: true}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ExistsFalseSparseIndexOnOtherField) {
+ addIndex(BSON("x" << 1), false, true);
+
+ runQuery(fromjson("{x: 1, y: {$exists: false}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ExistsBounds) {
+ addIndex(BSON("b" << 1));
+
+ runQuery(fromjson("{b: {$exists: true}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {b: {$exists: true}}, node: "
+ "{ixscan: {pattern: {b: 1}, bounds: "
+ "{b: [['MinKey', 'MaxKey', true, true]]}}}}}");
+
+ // This ends up being a double negation, which we currently don't index.
+ runQuery(fromjson("{b: {$not: {$exists: false}}}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
- // Min/max specifies a forward scan with bounds [{a: 8}, {a: 2}]. Asking for
- // an ascending sort reverses the direction of the scan to [{a: 2}, {a: 8}].
- runQueryFull(BSONObj(), fromjson("{a: 1}"), BSONObj(), 0, 0, BSONObj(),
- fromjson("{a: 8}"), fromjson("{a: 2}"), false);
+ runQuery(fromjson("{b: {$exists: false}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {b: {$exists: false}}, node: "
+ "{ixscan: {pattern: {b: 1}, bounds: "
+ "{b: [[null, null, true, true]]}}}}}");
- assertNumSolutions(1);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: -1,"
- "pattern: {a: -1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, MaxMinNoMatchingIndexDir) {
- addIndex(BSON("a" << -1));
- runInvalidQueryHintMinMax(BSONObj(), fromjson("{a: 2}"), BSONObj(), fromjson("{a: 8}"));
- }
-
- TEST_F(QueryPlannerTest, MaxMinSelectCorrectlyOrderedIndex) {
- // There are both ascending and descending indices on 'a'.
- addIndex(BSON("a" << 1));
- addIndex(BSON("a" << -1));
-
- // The ordering of min and max means that we *must* use the descending index.
- runQueryFull(BSONObj(), BSONObj(), BSONObj(), 0, 0, BSONObj(),
- fromjson("{a: 8}"), fromjson("{a: 2}"), false);
-
- assertNumSolutions(1);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: 1, pattern: {a: -1}}}}}");
-
- // If we switch the ordering of min and max, then we use the ascending index.
- runQueryFull(BSONObj(), BSONObj(), BSONObj(), 0, 0, BSONObj(),
- fromjson("{a: 2}"), fromjson("{a: 8}"), false);
-
- assertNumSolutions(1);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: 1, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, MaxMinBadHintSelectsReverseIndex) {
- // There are both ascending and descending indices on 'a'.
- addIndex(BSON("a" << 1));
- addIndex(BSON("a" << -1));
-
- // A query hinting on {a: 1} is bad if min is {a: 8} and max is {a: 2}, because this
- // min/max pairing requires a descending index.
- runInvalidQueryFull(BSONObj(), BSONObj(), BSONObj(), 0, 0, fromjson("{a: 1}"),
- fromjson("{a: 8}"), fromjson("{a: 2}"), false);
- }
-
-
- //
- // $snapshot
- //
-
- TEST_F(QueryPlannerTest, Snapshot) {
- addIndex(BSON("a" << 1));
- runQuerySnapshot(fromjson("{a: {$gt: 0}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a:{$gt:0}}, node: "
- "{ixscan: {filter: null, pattern: {_id: 1}}}}}");
- }
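-
- // The {_id: 1} scan above is expected: $snapshot is answered by traversing the _id
- // index, which is how a document is kept from being returned twice if it moves during
- // the scan.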
-
- //
- // Tree operations that require simple tree rewriting.
- //
-
- TEST_F(QueryPlannerTest, AndOfAnd) {
- addIndex(BSON("x" << 1));
- runQuery(fromjson("{$and: [ {$and: [ {x: 2.5}]}, {x: {$gt: 1}}, {x: {$lt: 3}} ] }"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- }
-
- //
- // Logically equivalent queries
- //
-
- TEST_F(QueryPlannerTest, EquivalentAndsOne) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{$and: [{a: 1}, {b: {$all: [10, 20]}}]}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$and:[{a:1},{b:10},{b:20}]}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1, b: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, EquivalentAndsTwo) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{$and: [{a: 1, b: 10}, {a: 1, b: 20}]}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$and:[{a:1},{a:1},{b:10},{b:20}]}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1, b: 1}}}}}");
- }
-
- //
- // Covering
- //
-
- TEST_F(QueryPlannerTest, BasicCovering) {
- addIndex(BSON("x" << 1));
- // query, sort, proj
- runQuerySortProj(fromjson("{ x : {$gt: 1}}"), BSONObj(), fromjson("{_id: 0, x: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, x: 1}, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, x: 1}, node: "
- "{cscan: {dir: 1, filter: {x:{$gt:1}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, DottedFieldCovering) {
- addIndex(BSON("a.b" << 1));
- runQuerySortProj(fromjson("{'a.b': 5}"), BSONObj(), fromjson("{_id: 0, 'a.b': 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, 'a.b': 1}, node: "
- "{cscan: {dir: 1, filter: {'a.b': 5}}}}}");
- // SERVER-2104
- //assertSolutionExists("{proj: {spec: {_id: 0, 'a.b': 1}, node: {'a.b': 1}}}");
- }
-
- TEST_F(QueryPlannerTest, IdCovering) {
- runQuerySortProj(fromjson("{_id: {$gt: 10}}"), BSONObj(), fromjson("{_id: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 1}, node: "
- "{cscan: {dir: 1, filter: {_id: {$gt: 10}}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 1}, node: {ixscan: "
- "{filter: null, pattern: {_id: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ProjNonCovering) {
- addIndex(BSON("x" << 1));
- runQuerySortProj(fromjson("{ x : {$gt: 1}}"), BSONObj(), fromjson("{x: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {x: 1}, node: {cscan: "
- "{dir: 1, filter: {x: {$gt: 1}}}}}}");
- assertSolutionExists("{proj: {spec: {x: 1}, node: {fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {x: 1}}}}}}}");
- }
-
- //
- // Basic sort
- //
-
- TEST_F(QueryPlannerTest, BasicSort) {
- addIndex(BSON("x" << 1));
- runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CantUseHashedIndexToProvideSort) {
- addIndex(BSON("x" << "hashed"));
- runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CantUseHashedIndexToProvideSortWithIndexablePred) {
- addIndex(BSON("x" << "hashed"));
- runQuerySortProj(BSON("x" << BSON("$in" << BSON_ARRAY(0 << 1))), BSON("x" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, node: "
- "{fetch: {node: "
- "{ixscan: {pattern: {x: 'hashed'}}}}}}}");
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, node: "
- "{cscan: {dir: 1, filter: {x: {$in: [0, 1]}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CantUseTextIndexToProvideSort) {
- addIndex(BSON("x" << 1 << "_fts" << "text" << "_ftsx" << 1));
- runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {}}}}}");
- }
-
- TEST_F(QueryPlannerTest, BasicSortWithIndexablePred) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuerySortProj(fromjson("{ a : 5 }"), BSON("b" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 3U);
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {a: 5}}}}}");
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, "
- "node: {fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}}}");
- assertSolutionExists("{fetch: {filter: {a: 5}, node: {ixscan: "
- "{filter: null, pattern: {b: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, BasicSortBooleanIndexKeyPattern) {
- addIndex(BSON("a" << true));
- runQuerySortProj(fromjson("{ a : 5 }"), BSON("a" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {a: 5}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: true}}}}}");
- }
-
- // SERVER-14070
- TEST_F(QueryPlannerTest, CompoundIndexWithEqualityPredicatesProvidesSort) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProj(fromjson("{a: 1, b: 1}"), fromjson("{b: 1}"), BSONObj());
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {filter: null,"
- "pattern: {a: 1, b: 1}, "
- "bounds: {a:[[1,1,true,true]], b:[[1,1,true,true]]}}}}}");
- }
-
- //
- // Sort with limit and/or skip
- //
-
- TEST_F(QueryPlannerTest, SortLimit) {
- // Negative limit indicates hard limit - see lite_parsed_query.cpp
- runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 0, -3);
- assertNumSolutions(1U);
- assertSolutionExists("{sort: {pattern: {a: 1}, limit: 3, "
- "node: {cscan: {dir: 1}}}}");
- }
-
- TEST_F(QueryPlannerTest, SortSkip) {
- runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, 0);
- assertNumSolutions(1U);
- // If only skip is provided, do not limit sort.
- assertSolutionExists("{skip: {n: 2, node: "
- "{sort: {pattern: {a: 1}, limit: 0, "
- "node: {cscan: {dir: 1}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, SortSkipLimit) {
- runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, -3);
- assertNumSolutions(1U);
- // Limit in sort node should be adjusted by skip count
- assertSolutionExists("{skip: {n: 2, node: "
- "{sort: {pattern: {a: 1}, limit: 5, "
- "node: {cscan: {dir: 1}}}}}}");
- }
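-
- // With skip=2 and a hard limit of 3, the sort stage must produce the first 2 + 3 = 5
- // results so that the skip stage can discard two and still return three.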
-
- TEST_F(QueryPlannerTest, SortSoftLimit) {
- runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 0, 3);
- assertNumSolutions(1U);
- assertSolutionExists("{sort: {pattern: {a: 1}, limit: 3, "
- "node: {cscan: {dir: 1}}}}");
- }
-
- TEST_F(QueryPlannerTest, SortSkipSoftLimit) {
- runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, 3);
- assertNumSolutions(1U);
- assertSolutionExists("{skip: {n: 2, node: "
- "{sort: {pattern: {a: 1}, limit: 5, "
- "node: {cscan: {dir: 1}}}}}}");
- }
-
- //
- // Sort elimination
- //
-
- TEST_F(QueryPlannerTest, BasicSortElim) {
- addIndex(BSON("x" << 1));
- // query, sort, proj
- runQuerySortProj(fromjson("{ x : {$gt: 1}}"), fromjson("{x: 1}"), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {x: {$gt: 1}}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, SortElimCompound) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProj(fromjson("{ a : 5 }"), BSON("b" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {a: 5}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1, b: 1}}}}}");
- }
-
- // SERVER-13611: test that sort elimination still works if there are
- // trailing fields in the index.
- TEST_F(QueryPlannerTest, SortElimTrailingFields) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- runQuerySortProj(fromjson("{a: 5}"), BSON("b" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {a: 5}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1, b: 1, c: 1}}}}}");
- }
-
- // Sort elimination with trailing fields where the sort direction is descending.
- TEST_F(QueryPlannerTest, SortElimTrailingFieldsReverse) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
- runQuerySortProj(fromjson("{a: 5, b: 6}"), BSON("c" << -1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {c: -1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {a: 5, b: 6}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, dir: -1, pattern: {a: 1, b: 1, c: 1, d: 1}}}}}");
- }
-
- //
- // Basic compound
- //
-
- TEST_F(QueryPlannerTest, BasicCompound) {
- addIndex(BSON("x" << 1 << "y" << 1));
- runQuery(fromjson("{ x : 5, y: 10}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1, y: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundMissingField) {
- addIndex(BSON("x" << 1 << "y" << 1 << "z" << 1));
- runQuery(fromjson("{ x : 5, z: 10}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {x: 1, y: 1, z: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundFieldsOrder) {
- addIndex(BSON("x" << 1 << "y" << 1 << "z" << 1));
- runQuery(fromjson("{ x : 5, z: 10, y:1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1, y: 1, z: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CantUseCompound) {
- addIndex(BSON("x" << 1 << "y" << 1));
- runQuery(fromjson("{ y: 10}"));
-
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{cscan: {dir: 1, filter: {y: 10}}}");
- }
-
- //
- // $in
- //
-
- TEST_F(QueryPlannerTest, InBasic) {
- addIndex(fromjson("{a: 1}"));
- runQuery(fromjson("{a: {$in: [1, 2]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a: {$in: [1, 2]}}}}");
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {pattern: {a: 1}}}}}");
- }
-
- // Logically equivalent to the preceding $in query.
- // Indexed solution should be the same.
- TEST_F(QueryPlannerTest, InBasicOrEquivalent) {
- addIndex(fromjson("{a: 1}"));
- runQuery(fromjson("{$or: [{a: 1}, {a: 2}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$or: [{a: 1}, {a: 2}]}}}");
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, InSparseIndex) {
- addIndex(fromjson("{a: 1}"),
- false, // multikey
- true); // sparse
- runQuery(fromjson("{a: {$in: [null]}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a: {$in: [null]}}}}");
- }
-
- TEST_F(QueryPlannerTest, InCompoundIndexFirst) {
- addIndex(fromjson("{a: 1, b: 1}"));
- runQuery(fromjson("{a: {$in: [1, 2]}, b: 3}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {b: 3, a: {$in: [1, 2]}}}}");
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
- }
-
- // Logically equivalent to the preceding $in query.
- // Indexed solution should be the same.
- // Currently fails - pre-requisite to SERVER-12024
- /*
- TEST_F(QueryPlannerTest, InCompoundIndexFirstOrEquivalent) {
- addIndex(fromjson("{a: 1, b: 1}"));
- runQuery(fromjson("{$and: [{$or: [{a: 1}, {a: 2}]}, {b: 3}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$and: [{$or: [{a: 1}, {a: 2}]}, {b: 3}]}}}");
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
- }
- */
-
- TEST_F(QueryPlannerTest, InCompoundIndexLast) {
- addIndex(fromjson("{a: 1, b: 1}"));
- runQuery(fromjson("{a: 3, b: {$in: [1, 2]}}"));
-
- assertNumSolutions(2U);
- // TODO: update filter in cscan solution when SERVER-12024 is implemented
- assertSolutionExists("{cscan: {dir: 1, filter: {a: 3, b: {$in: [1, 2]}}}}");
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
- }
-
- // Logically equivalent to the preceding $in query.
- // Indexed solution should be the same.
- // Currently fails - pre-requisite to SERVER-12024
- /*
- TEST_F(QueryPlannerTest, InCompoundIndexLastOrEquivalent) {
- addIndex(fromjson("{a: 1, b: 1}"));
- runQuery(fromjson("{$and: [{a: 3}, {$or: [{b: 1}, {b: 2}]}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$and: [{a: 3}, {$or: [{b: 1}, {b: 2}]}]}}}");
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
- }
- */
-
- // SERVER-1205
- TEST_F(QueryPlannerTest, InWithSort) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}}"),
- BSON("b" << 1), BSONObj(), 0, 1);
-
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 1, "
- "node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a: 1, b: 1}}}, {ixscan: {pattern: {a: 1, b: 1}}}]}}}}");
- }
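-
- // The $in above is "exploded" for the sort: each point interval on 'a' gets its own
- // ixscan, each of which is ordered by 'b', and a MERGE_SORT combines them to satisfy
- // the requested sort without a blocking SORT stage.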
-
- // SERVER-1205
- TEST_F(QueryPlannerTest, InWithoutSort) {
- addIndex(BSON("a" << 1 << "b" << 1));
- // No sort means we don't bother to blow up the bounds.
- runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}}"), BSONObj(), BSONObj(), 0, 1);
-
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
- }
-
- // SERVER-1205
- TEST_F(QueryPlannerTest, ManyInWithSort) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
- runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}, b:{$in:[1,2]}, c:{$in:[1,2]}}"),
- BSON("d" << 1), BSONObj(), 0, 1);
-
- assertSolutionExists("{sort: {pattern: {d: 1}, limit: 1, "
- "node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
- "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
- "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
- "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
- "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
- "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}}]}}}}");
- }
-
- // SERVER-1205
- TEST_F(QueryPlannerTest, TooManyToExplode) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
- runQuerySortProjSkipLimit(fromjson("{a: {$in: [1,2,3,4,5,6]},"
- "b:{$in:[1,2,3,4,5,6,7,8]},"
- "c:{$in:[1,2,3,4,5,6,7,8]}}"),
- BSON("d" << 1), BSONObj(), 0, 1);
-
- // We cap the # of ixscans we're willing to create.
- assertNumSolutions(2);
- assertSolutionExists("{sort: {pattern: {d: 1}, limit: 1, "
- "node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{sort: {pattern: {d: 1}, limit: 1, node: "
- "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}}}}}}");
- }
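-
- // Exploding here would require one ixscan per combination of $in values, i.e.
- // 6 * 8 * 8 = 384 scans, which is more than the planner is willing to create, so both
- // solutions fall back to a blocking sort.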
-
- TEST_F(QueryPlannerTest, CantExplodeMetaSort) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << "text"));
- runQuerySortProj(fromjson("{a: {$in: [1, 2]}, b: {$in: [3, 4]}}"),
- fromjson("{c: {$meta: 'textScore'}}"),
- fromjson("{c: {$meta: 'textScore'}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{proj: {spec: {c:{$meta:'textScore'}}, node: "
- "{sort: {pattern: {c:{$meta:'textScore'}}, limit: 0, node: "
- "{cscan: {filter: {a:{$in:[1,2]},b:{$in:[3,4]}}, dir: 1}}}}}}");
- }
-
- // SERVER-13618: test that exploding scans for sort works even
- // if we must reverse the scan direction.
- TEST_F(QueryPlannerTest, ExplodeMustReverseScans) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
- runQuerySortProj(fromjson("{a: {$in: [1, 2]}, b: {$in: [3, 4]}}"),
- BSON("c" << -1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {c: -1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a:1, b:1, c:1, d:1}}},"
- "{ixscan: {pattern: {a:1, b:1, c:1, d:1}}},"
- "{ixscan: {pattern: {a:1, b:1, c:1, d:1}}},"
- "{ixscan: {pattern: {a:1, b:1, c:1, d:1}}}]}}}}");
- }
-
- // SERVER-13618
- TEST_F(QueryPlannerTest, ExplodeMustReverseScans2) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << -1));
- runQuerySortProj(fromjson("{a: {$in: [1, 2]}, b: {$in: [3, 4]}}"),
- BSON("c" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a:1, b:1, c:-1}}},"
- "{ixscan: {pattern: {a:1, b:1, c:-1}}},"
- "{ixscan: {pattern: {a:1, b:1, c:-1}}},"
- "{ixscan: {pattern: {a:1, b:1, c:-1}}}]}}}}");
- }
-
- // SERVER-13752: don't try to explode if the ordered interval list for
- // the leading field of the compound index is empty.
- TEST_F(QueryPlannerTest, CantExplodeWithEmptyBounds) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProj(fromjson("{a: {$in: []}}"), BSON("b" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: "
- "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}}}}}}}");
- }
-
- // SERVER-13752
- TEST_F(QueryPlannerTest, CantExplodeWithEmptyBounds2) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- runQuerySortProj(fromjson("{a: {$gt: 3, $lt: 0}}"), BSON("b" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: "
- "{fetch: {node: {ixscan: {pattern: {a:1,b:1,c:1}}}}}}}");
- }
-
- // SERVER-13754: exploding an $or
- TEST_F(QueryPlannerTest, ExplodeOrForSort) {
- addIndex(BSON("a" << 1 << "c" << 1));
- addIndex(BSON("b" << 1 << "c" << 1));
-
- runQuerySortProj(fromjson("{$or: [{a: 1}, {a: 2}, {b: 2}]}"),
- BSON("c" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {bounds: {a: [[1,1,true,true]], "
- "c: [['MinKey','MaxKey',true,true]]},"
- "pattern: {a:1, c:1}}},"
- "{ixscan: {bounds: {a: [[2,2,true,true]], "
- "c: [['MinKey','MaxKey',true,true]]},"
- "pattern: {a:1, c:1}}},"
- "{ixscan: {bounds: {b: [[2,2,true,true]], "
- "c: [['MinKey','MaxKey',true,true]]},"
- "pattern: {b:1, c:1}}}]}}}}");
- }
-
- // SERVER-13754: exploding an $or
- TEST_F(QueryPlannerTest, ExplodeOrForSort2) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- addIndex(BSON("d" << 1 << "c" << 1));
-
- runQuerySortProj(fromjson("{$or: [{a: 1, b: {$in: [1, 2]}}, {d: 3}]}"),
- BSON("c" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {bounds: {a: [[1,1,true,true]], b: [[1,1,true,true]],"
- "c: [['MinKey','MaxKey',true,true]]},"
- "pattern: {a:1, b:1, c:1}}},"
- "{ixscan: {bounds: {a: [[1,1,true,true]], b: [[2,2,true,true]],"
- "c: [['MinKey','MaxKey',true,true]]},"
- "pattern: {a:1, b:1, c:1}}},"
- "{ixscan: {bounds: {d: [[3,3,true,true]], "
- "c: [['MinKey','MaxKey',true,true]]},"
- "pattern: {d:1, c:1}}}]}}}}");
- }
-
- // SERVER-13754: an $or that can't be exploded, because one clause of the
- // $or does not provide the sort, even after explosion.
- TEST_F(QueryPlannerTest, CantExplodeOrForSort) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- addIndex(BSON("d" << 1 << "c" << 1));
-
- runQuerySortProj(fromjson("{$or: [{a: {$in: [1, 2]}}, {d: 3}]}"),
- BSON("c" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: "
- "{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {pattern: {a: 1, b: 1, c: 1}}},"
- "{ixscan: {pattern: {d: 1, c: 1}}}]}}}}}}");
- }
-
- // SERVER-15286: Make sure that at least the explodeForSort() path bails out
- // when it finds no union of point intervals to explode.
- // We could convert this into a MERGE_SORT plan, but we don't yet do this
- // optimization.
- TEST_F(QueryPlannerTest, CantExplodeOrForSort2) {
- addIndex(BSON("a" << 1));
-
- runQuerySortProj(fromjson("{$or: [{a: {$gt: 1, $lt: 3}}, {a: {$gt: 6, $lt: 10}}]}"),
- BSON("a" << -1),
- BSONObj());
-
- assertNumSolutions(3U);
- assertSolutionExists("{sort: {pattern: {a: -1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}}}}}");
- assertSolutionExists("{sort: {pattern: {a: -1}, limit: 0, node: "
- "{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {pattern: {a: 1}, bounds: "
- "{a: [[1,3,false,false]]}}},"
- "{ixscan: {pattern: {a: 1}, bounds: "
- "{a: [[6,10,false,false]]}}}]}}}}}}");
- }
-
- // SERVER-13754: too many scans in an $or explosion.
- TEST_F(QueryPlannerTest, TooManyToExplodeOr) {
- addIndex(BSON("a" << 1 << "e" << 1));
- addIndex(BSON("b" << 1 << "e" << 1));
- addIndex(BSON("c" << 1 << "e" << 1));
- addIndex(BSON("d" << 1 << "e" << 1));
- runQuerySortProj(fromjson("{$or: [{a: {$in: [1,2,3,4,5,6]},"
- "b: {$in: [1,2,3,4,5,6]}},"
- "{c: {$in: [1,2,3,4,5,6]},"
- "d: {$in: [1,2,3,4,5,6]}}]}"),
- BSON("e" << 1), BSONObj());
-
- // We cap the # of ixscans we're willing to create, so we don't get explosion. Instead
- // we get 5 different solutions which all use a blocking sort.
- assertNumSolutions(5U);
- assertSolutionExists("{sort: {pattern: {e: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{sort: {pattern: {e: 1}, limit: 0, node: "
- "{or: {nodes: ["
- "{fetch: {node: {ixscan: {pattern: {a: 1, e: 1}}}}},"
- "{fetch: {node: {ixscan: {pattern: {c: 1, e: 1}}}}}]}}}}");
- assertSolutionExists("{sort: {pattern: {e: 1}, limit: 0, node: "
- "{or: {nodes: ["
- "{fetch: {node: {ixscan: {pattern: {b: 1, e: 1}}}}},"
- "{fetch: {node: {ixscan: {pattern: {c: 1, e: 1}}}}}]}}}}");
- assertSolutionExists("{sort: {pattern: {e: 1}, limit: 0, node: "
- "{or: {nodes: ["
- "{fetch: {node: {ixscan: {pattern: {a: 1, e: 1}}}}},"
- "{fetch: {node: {ixscan: {pattern: {d: 1, e: 1}}}}}]}}}}");
- assertSolutionExists("{sort: {pattern: {e: 1}, limit: 0, node: "
- "{or: {nodes: ["
- "{fetch: {node: {ixscan: {pattern: {b: 1, e: 1}}}}},"
- "{fetch: {node: {ixscan: {pattern: {d: 1, e: 1}}}}}]}}}}");
- }
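-
- // The four indexed solutions above are the cross product of index choices: the first $or
- // clause can use {a: 1, e: 1} or {b: 1, e: 1} and the second can use {c: 1, e: 1} or
- // {d: 1, e: 1}; together with the collection scan that gives the five solutions asserted.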
-
- // SERVER-15696: Make sure explodeForSort copies filters on IXSCAN stages to all of the
- // scans resulting from the explode. Regex is the easiest way to have the planner create
- // an index scan which filters using the index key.
- TEST_F(QueryPlannerTest, ExplodeIxscanWithFilter) {
- addIndex(BSON("a" << 1 << "b" << 1));
-
- runQuerySortProj(fromjson("{$and: [{b: {$regex: 'foo', $options: 'i'}},"
- "{a: {$in: [1, 2]}}]}"),
- BSON("b" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a:1, b:1},"
- "filter: {b: {$regex: 'foo', $options: 'i'}}}},"
- "{ixscan: {pattern: {a:1, b:1},"
- "filter: {b: {$regex: 'foo', $options: 'i'}}}}]}}}}");
- }
-
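- // The $in on the leading index field explodes into point index scans that can be
- // merge-sorted to provide the {b: -1} order; the limit is then applied as a LIMIT
- // stage above the fetch/merge-sort.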
- TEST_F(QueryPlannerTest, InWithSortAndLimitTrailingField) {
- addIndex(BSON("a" << 1 << "b" << -1 << "c" << 1));
- runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}, b: {$gte: 0}}"),
- fromjson("{b: -1}"),
- BSONObj(), // no projection
- 0, // no skip
- -1); // .limit(1)
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {b:-1}, limit: 1, "
- "node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{limit: {n: 1, node: {fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a:1,b:-1,c:1}}}, "
- " {ixscan: {pattern: {a:1,b:-1,c:1}}}]}}}}}}");
- }
-
- //
- // Multiple solutions
- //
-
- TEST_F(QueryPlannerTest, TwoPlans) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("a" << 1 << "b" << 1));
-
- runQuery(fromjson("{a:1, b:{$gt:2,$lt:2}}"));
-
- // 2 indexed solns and one non-indexed
- ASSERT_EQUALS(getNumSolutions(), 3U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$and:[{b:{$lt:2}},{a:1},{b:{$gt:2}}]}}}");
- assertSolutionExists("{fetch: {filter: {$and:[{b:{$lt:2}},{b:{$gt:2}}]}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1, b: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, TwoPlansElemMatch) {
- addIndex(BSON("a" << 1 << "b" << 1));
- addIndex(BSON("arr.x" << 1 << "a" << 1));
-
- runQuery(fromjson("{arr: { $elemMatch : { x : 5 , y : 5 } },"
- " a : 55 , b : { $in : [ 1 , 5 , 8 ] } }"));
-
- // 2 indexed solns and one non-indexed
- ASSERT_EQUALS(getNumSolutions(), 3U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}, bounds: "
- "{a: [[55,55,true,true]], b: [[1,1,true,true], "
- "[5,5,true,true], [8,8,true,true]]}}}}}");
- assertSolutionExists("{fetch: {filter: {$and: [{arr:{$elemMatch:{x:5,y:5}}},"
- "{b:{$in:[1,5,8]}}]}, "
- "node: {ixscan: {pattern: {'arr.x':1,a:1}, bounds: "
- "{'arr.x': [[5,5,true,true]], 'a':[[55,55,true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundAndNonCompoundIndices) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("a" << 1 << "b" << 1), true);
- runQuery(fromjson("{a: 1, b: {$gt: 2, $lt: 2}}"));
-
- ASSERT_EQUALS(getNumSolutions(), 3U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and:[{b:{$lt:2}},{b:{$gt:2}}]}, node: "
- "{ixscan: {pattern: {a:1}, bounds: {a: [[1,1,true,true]]}}}}}");
- assertSolutionExists("{fetch: {filter: {b:{$gt:2}}, node: "
- "{ixscan: {pattern: {a:1,b:1}, bounds: "
- "{a: [[1,1,true,true]], b: [[-Infinity,2,true,false]]}}}}}");
- }
-
- //
- // Sort orders
- //
-
- // SERVER-1205.
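- // Both indexes end in the sort field 'c', so each $or branch can be scanned in 'c'
- // order and the results combined with a MERGE_SORT instead of a blocking sort.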
- TEST_F(QueryPlannerTest, MergeSort) {
- addIndex(BSON("a" << 1 << "c" << 1));
- addIndex(BSON("b" << 1 << "c" << 1));
- runQuerySortProj(fromjson("{$or: [{a:1}, {b:1}]}"), fromjson("{c:1}"), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a: 1, c: 1}}}, {ixscan: {pattern: {b: 1, c: 1}}}]}}}}");
- }
-
- // SERVER-1205 as well.
- TEST_F(QueryPlannerTest, NoMergeSortIfNoSortWanted) {
- addIndex(BSON("a" << 1 << "c" << 1));
- addIndex(BSON("b" << 1 << "c" << 1));
- runQuerySortProj(fromjson("{$or: [{a:1}, {b:1}]}"), BSONObj(), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$or: [{a:1}, {b:1}]}}}");
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a: 1, c: 1}}}, "
- "{ixscan: {filter: null, pattern: {b: 1, c: 1}}}]}}}}");
- }
-
- // Basic "keep sort in mind with an OR"
- TEST_F(QueryPlannerTest, MergeSortEvenIfSameIndex) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProj(fromjson("{$or: [{a:1}, {a:7}]}"), fromjson("{b:1}"), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- // TODO the second solution should be mergeSort rather than just sort
- }
-
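- // The {_id: -1} sort can be provided by scanning the {_id: 1} index in reverse,
- // so the indexed solution needs no blocking sort stage.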
- TEST_F(QueryPlannerTest, ReverseScanForSort) {
- addIndex(BSON("_id" << 1));
- runQuerySortProj(BSONObj(), fromjson("{_id: -1}"), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {_id: -1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {_id: 1}}}}}");
- }
-
- //
- // Hint tests
- //
-
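- // A {$natural: 1} hint forces a collection scan, so the requested sort on 'b' has to
- // be satisfied with a blocking SORT stage on top of it.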
- TEST_F(QueryPlannerTest, NaturalHint) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuerySortHint(BSON("a" << 1), BSON("b" << 1), BSON("$natural" << 1));
-
- assertNumSolutions(1U);
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, node: "
- "{cscan: {filter: {a: 1}, dir: 1}}}}");
- }
-
- // Test $natural sort and its interaction with $natural hint.
- TEST_F(QueryPlannerTest, NaturalSortAndHint) {
- addIndex(BSON("x" << 1));
-
- // Non-empty query, -1 sort, no hint.
- runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << -1), BSONObj());
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: -1}}");
-
- // Non-empty query, 1 sort, no hint.
- runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << 1), BSONObj());
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
-
- // Non-empty query, -1 sort, -1 hint.
- runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << -1),
- BSON("$natural" << -1));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: -1}}");
-
- // Non-empty query, 1 sort, -1 hint.
- runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << 1),
- BSON("$natural" << -1));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
-
- // Non-empty query, -1 sort, 1 hint.
- runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << -1),
- BSON("$natural" << 1));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: -1}}");
-
- // Non-empty query, 1 sort, 1 hint.
- runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << 1),
- BSON("$natural" << 1));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
-
- // Empty query, -1 sort, no hint.
- runQuerySortHint(BSONObj(), BSON("$natural" << -1), BSONObj());
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: -1}}");
-
- // Empty query, 1 sort, no hint.
- runQuerySortHint(BSONObj(), BSON("$natural" << 1), BSONObj());
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
-
- // Empty query, -1 sort, -1 hint.
- runQuerySortHint(BSONObj(), BSON("$natural" << -1), BSON("$natural" << -1));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: -1}}");
-
- // Empty query, 1 sort, -1 hint.
- runQuerySortHint(BSONObj(), BSON("$natural" << 1), BSON("$natural" << -1));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
-
- // Empty query, -1 sort, 1 hint.
- runQuerySortHint(BSONObj(), BSON("$natural" << -1), BSON("$natural" << 1));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: -1}}");
-
- // Empty query, 1 sort, 1 hint.
- runQuerySortHint(BSONObj(), BSON("$natural" << 1), BSON("$natural" << 1));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- TEST_F(QueryPlannerTest, HintOverridesNaturalSort) {
- addIndex(BSON("x" << 1));
- runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << -1), BSON("x" << 1));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {x:{$exists:true}}, node: "
- "{ixscan: {filter: null, pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, HintValid) {
- addIndex(BSON("a" << 1));
- runQueryHint(BSONObj(), fromjson("{a: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, HintValidWithPredicate) {
- addIndex(BSON("a" << 1));
- runQueryHint(fromjson("{a: {$gt: 1}}"), fromjson("{a: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, HintValidWithSort) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuerySortHint(fromjson("{a: 100, b: 200}"), fromjson("{b: 1}"), fromjson("{a: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, node: "
- "{fetch: {filter: {b: 200}, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, HintElemMatch) {
- // true means multikey
- addIndex(fromjson("{'a.b': 1}"), true);
- runQueryHint(fromjson("{'a.b': 1, a: {$elemMatch: {b: 2}}}"), fromjson("{'a.b': 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {$and: [{a:{$elemMatch:{b:2}}}, {'a.b': 1}]}, "
- "node: {ixscan: {filter: null, pattern: {'a.b': 1}, bounds: "
- "{'a.b': [[2, 2, true, true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, HintInvalid) {
- addIndex(BSON("a" << 1));
- runInvalidQueryHint(BSONObj(), fromjson("{b: 1}"));
- }
-
- //
- // Sparse indices, SERVER-8067
- // Each index in this block of tests is sparse.
- //
-
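- // A sparse index omits documents that are missing the indexed field, so the planner
- // won't use it to provide a sort (unless it is explicitly hinted, as in the next test).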
- TEST_F(QueryPlannerTest, SparseIndexIgnoreForSort) {
- addIndex(fromjson("{a: 1}"), false, true);
- runQuerySortProj(BSONObj(), fromjson("{a: 1}"), BSONObj());
-
- assertNumSolutions(1U);
- assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- }
-
- TEST_F(QueryPlannerTest, SparseIndexHintForSort) {
- addIndex(fromjson("{a: 1}"), false, true);
- runQuerySortHint(BSONObj(), fromjson("{a: 1}"), fromjson("{a: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, SparseIndexPreferCompoundIndexForSort) {
- addIndex(fromjson("{a: 1}"), false, true);
- addIndex(fromjson("{a: 1, b: 1}"));
- runQuerySortProj(BSONObj(), fromjson("{a: 1}"), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1, b: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, SparseIndexForQuery) {
- addIndex(fromjson("{a: 1}"), false, true);
- runQuerySortProj(fromjson("{a: 1}"), BSONObj(), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a: 1}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1}}}}}");
- }
-
- //
- // Regex
- //
-
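- // An anchored (prefix) regex translates into tight index bounds, so the index scan
- // needs no residual filter.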
- TEST_F(QueryPlannerTest, PrefixRegex) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{a: /^foo/}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a: /^foo/}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, PrefixRegexCovering) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{a: /^foo/}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{cscan: {dir: 1, filter: {a: /^foo/}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
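- // A non-anchored regex can't be turned into tight bounds; the regex is instead kept
- // as a filter on the index scan.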
- TEST_F(QueryPlannerTest, NonPrefixRegex) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{a: /foo/}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a: /foo/}}}");
- assertSolutionExists("{fetch: {filter: null, node: "
- "{ixscan: {filter: {a: /foo/}, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NonPrefixRegexCovering) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{a: /foo/}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{cscan: {dir: 1, filter: {a: /foo/}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{ixscan: {filter: {a: /foo/}, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NonPrefixRegexAnd) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{a: /foo/, b: 2}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$and: [{b: 2}, {a: /foo/}]}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: {a: /foo/}, pattern: {a: 1, b: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NonPrefixRegexAndCovering) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProj(fromjson("{a: /foo/, b: 2}"), BSONObj(),
- fromjson("{_id: 0, a: 1, b: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1, b: 1}, node: "
- "{cscan: {dir: 1, filter: {$and: [{b: 2}, {a: /foo/}]}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1, b: 1}, node: "
- "{ixscan: {filter: {a: /foo/}, pattern: {a: 1, b: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NonPrefixRegexOrCovering) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{$or: [{a: /0/}, {a: /1/}]}"), BSONObj(),
- fromjson("{_id: 0, a: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{cscan: {dir: 1, filter: {$or: [{a: /0/}, {a: /1/}]}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{ixscan: {filter: {$or: [{a: /0/}, {a: /1/}]}, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NonPrefixRegexInCovering) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{a: {$in: [/foo/, /bar/]}}"), BSONObj(),
- fromjson("{_id: 0, a: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{cscan: {dir: 1, filter: {a:{$in:[/foo/,/bar/]}}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{ixscan: {filter: {a:{$in:[/foo/,/bar/]}}, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, TwoRegexCompoundIndexCovering) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProj(fromjson("{a: /0/, b: /1/}"), BSONObj(),
- fromjson("{_id: 0, a: 1, b: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1, b: 1}, node: "
- "{cscan: {dir: 1, filter: {$and:[{a:/0/},{b:/1/}]}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1, b: 1}, node: "
- "{ixscan: {filter: {$and:[{a:/0/},{b:/1/}]}, pattern: {a: 1, b: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, TwoRegexSameFieldCovering) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{$and: [{a: /0/}, {a: /1/}]}"), BSONObj(),
- fromjson("{_id: 0, a: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{cscan: {dir: 1, filter: {$and:[{a:/0/},{a:/1/}]}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{ixscan: {filter: {$and:[{a:/0/},{a:/1/}]}, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ThreeRegexSameFieldCovering) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{$and: [{a: /0/}, {a: /1/}, {a: /2/}]}"), BSONObj(),
- fromjson("{_id: 0, a: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{cscan: {dir: 1, filter: {$and:[{a:/0/},{a:/1/},{a:/2/}]}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{ixscan: {filter: {$and:[{a:/0/},{a:/1/},{a:/2/}]}, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NonPrefixRegexMultikey) {
- // true means multikey
- addIndex(BSON("a" << 1), true);
- runQuery(fromjson("{a: /foo/}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {filter: {a: /foo/}, dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a: /foo/}, node: {ixscan: "
- "{pattern: {a: 1}, filter: null}}}}");
- }
-
- TEST_F(QueryPlannerTest, ThreeRegexSameFieldMultikey) {
- // true means multikey
- addIndex(BSON("a" << 1), true);
- runQuery(fromjson("{$and: [{a: /0/}, {a: /1/}, {a: /2/}]}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {filter: {$and:[{a:/0/},{a:/1/},{a:/2/}]}, dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and:[{a:/0/},{a:/1/},{a:/2/}]}, node: {ixscan: "
- "{pattern: {a: 1}, filter: null}}}}");
- }
-
- //
- // Negation
- //
-
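- // {$ne: 1} is indexable by complementing the point interval, giving bounds
- // [MinKey, 1) and (1, MaxKey].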
- TEST_F(QueryPlannerTest, NegationIndexForSort) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{a: {$ne: 1}}"), fromjson("{a: 1}"), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}, "
- "bounds: {a: [['MinKey',1,true,false], "
- "[1,'MaxKey',false,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegationTopLevel) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{a: {$ne: 1}}"), BSONObj(), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [['MinKey',1,true,false], "
- "[1,'MaxKey',false,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegationOr) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{$or: [{a: 1}, {b: {$ne: 1}}]}"), BSONObj(), BSONObj());
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- TEST_F(QueryPlannerTest, NegationOrNotIn) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{$or: [{a: 1}, {b: {$nin: [1]}}]}"), BSONObj(), BSONObj());
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- TEST_F(QueryPlannerTest, NegationAndIndexOnEquality) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{$and: [{a: 1}, {b: {$ne: 1}}]}"), BSONObj(), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1},"
- "bounds: {a: [[1,1,true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegationAndIndexOnEqualityAndNegationBranches) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuerySortProj(fromjson("{$and: [{a: 1}, {b: 2}, {b: {$ne: 1}}]}"), BSONObj(), BSONObj());
-
- assertNumSolutions(3U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}, "
- "bounds: {a: [[1,1,true,true]]}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {b: 1}, "
- "bounds: {b: [[2,2,true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegationAndIndexOnInequality) {
- addIndex(BSON("b" << 1));
- runQuerySortProj(fromjson("{$and: [{a: 1}, {b: {$ne: 1}}]}"), BSONObj(), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:1}, node: {ixscan: {pattern: {b:1}, "
- "bounds: {b: [['MinKey',1,true,false], "
- "[1,'MaxKey',false,true]]}}}}}");
- }
-
- // Negated regexes don't use the index.
- TEST_F(QueryPlannerTest, NegationRegexPrefix) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{i: {$not: /^a/}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // Negated mods don't use the index
- TEST_F(QueryPlannerTest, NegationMod) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{i: {$not: {$mod: [2, 1]}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // Negated $type doesn't use the index
- TEST_F(QueryPlannerTest, NegationTypeOperator) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{i: {$not: {$type: 16}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // Negated $elemMatch value won't use the index
- TEST_F(QueryPlannerTest, NegationElemMatchValue) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{i: {$not: {$elemMatch: {$gt: 3, $lt: 10}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // Negated $elemMatch object won't use the index
- TEST_F(QueryPlannerTest, NegationElemMatchObject) {
- addIndex(BSON("i.j" << 1));
- runQuery(fromjson("{i: {$not: {$elemMatch: {j: 1}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // Negated $elemMatch object won't use the index
- TEST_F(QueryPlannerTest, NegationElemMatchObject2) {
- addIndex(BSON("i.j" << 1));
- runQuery(fromjson("{i: {$not: {$elemMatch: {j: {$ne: 1}}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // If there is a negation that can't use the index,
- // ANDed with a predicate that can use the index, then
- // we can still use the index for the latter predicate.
- TEST_F(QueryPlannerTest, NegationRegexWithIndexablePred) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{$and: [{i: {$not: /o/}}, {i: 2}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {i:1}, "
- "bounds: {i: [[2,2,true,true]]}}}}}");
- }
-
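- // Documents missing 'i' satisfy {i: {$ne: 4}} but are absent from a sparse index,
- // so the negation can't be answered from the sparse index.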
- TEST_F(QueryPlannerTest, NegationCantUseSparseIndex) {
- // false means not multikey, true means sparse
- addIndex(BSON("i" << 1), false, true);
- runQuery(fromjson("{i: {$ne: 4}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- TEST_F(QueryPlannerTest, NegationCantUseSparseIndex2) {
- // false means not multikey, true means sparse
- addIndex(BSON("i" << 1 << "j" << 1), false, true);
- runQuery(fromjson("{i: 4, j: {$ne: 5}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {i:1,j:1}, bounds: "
- "{i: [[4,4,true,true]], j: [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegatedRangeStrGT) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{i: {$not: {$gt: 'a'}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
- "bounds: {i: [['MinKey','a',true,true], "
- "[{},'MaxKey',true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegatedRangeStrGTE) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{i: {$not: {$gte: 'a'}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
- "bounds: {i: [['MinKey','a',true,false], "
- "[{},'MaxKey',true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegatedRangeIntGT) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{i: {$not: {$gt: 5}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
- "bounds: {i: [['MinKey',5,true,true], "
- "[Infinity,'MaxKey',false,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegatedRangeIntGTE) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{i: {$not: {$gte: 5}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
- "bounds: {i: [['MinKey',5,true,false], "
- "[Infinity,'MaxKey',false,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, TwoNegatedRanges) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{$and: [{i: {$not: {$lte: 'b'}}}, "
- "{i: {$not: {$gte: 'f'}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
- "bounds: {i: [['MinKey','',true,false], "
- "['b','f',false,false], "
- "[{},'MaxKey',true,true]]}}}}}");
- }
-
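- // The $ne carves the point 0 out of the (-1, 1) range, leaving two open intervals.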
- TEST_F(QueryPlannerTest, AndWithNestedNE) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{a: {$gt: -1, $lt: 1, $ne: 0}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [[-1,0,false,false], "
- "[0,1,false,false]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegatePredOnCompoundIndex) {
- addIndex(BSON("x" << 1 << "a" << 1));
- runQuery(fromjson("{x: 1, a: {$ne: 1}, b: {$ne: 2}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x:1,a:1}, bounds: "
- "{x: [[1,1,true,true]], "
- "a: [['MinKey',1,true,false], [1,'MaxKey',false,true]]}}}}}");
- }
-
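- // For a multikey index the complemented bounds are inexact (an array containing 3 may
- // also have keys inside the bounds), so the $ne predicate is re-checked in the fetch filter.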
- TEST_F(QueryPlannerTest, NEOnMultikeyIndex) {
- // true means multikey
- addIndex(BSON("a" << 1), true);
- runQuery(fromjson("{a: {$ne: 3}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$ne:3}}, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [['MinKey',3,true,false],"
- "[3,'MaxKey',false,true]]}}}}}");
- }
-
- // In general, a negated $nin can make use of an index.
- TEST_F(QueryPlannerTest, NinUsesMultikeyIndex) {
- // true means multikey
- addIndex(BSON("a" << 1), true);
- runQuery(fromjson("{a: {$nin: [4, 10]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$nin:[4,10]}}, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [['MinKey',4,true,false],"
- "[4,10,false,false],"
- "[10,'MaxKey',false,true]]}}}}}");
- }
-
- // But it can't if the $nin contains a regex because regex bounds can't
- // be complemented.
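- // (A regex matches an unbounded set of strings, so its complement can't be expressed
- // as a finite list of index intervals.)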
- TEST_F(QueryPlannerTest, NinCantUseMultikeyIndex) {
- // true means multikey
- addIndex(BSON("a" << 1), true);
- runQuery(fromjson("{a: {$nin: [4, /foobar/]}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- //
- // Multikey indices
- //
-
- //
- // Index bounds related tests
- //
-
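- // Trailing compound-index fields with no predicate on them get the full
- // [MinKey, MaxKey] interval.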
- TEST_F(QueryPlannerTest, CompoundIndexBoundsLastFieldMissing) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- runQuery(fromjson("{a: 5, b: {$gt: 7}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1, b: 1, c: 1}, bounds: "
- "{a: [[5,5,true,true]], b: [[7,Infinity,false,true]], "
- " c: [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundIndexBoundsMiddleFieldMissing) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- runQuery(fromjson("{a: 1, c: {$lt: 3}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1, b: 1, c: 1}, bounds: "
- "{a: [[1,1,true,true]], b: [['MinKey','MaxKey',true,true]], "
- " c: [[-Infinity,3,true,false]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundIndexBoundsRangeAndEquality) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{a: {$gt: 8}, b: 6}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}, bounds: "
- "{a: [[8,Infinity,false,true]], b:[[6,6,true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundIndexBoundsEqualityThenIn) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{a: 5, b: {$in: [2,6,11]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {filter: null, pattern: "
- "{a: 1, b: 1}, bounds: {a: [[5,5,true,true]], "
- "b:[[2,2,true,true],[6,6,true,true],[11,11,true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundIndexBoundsStringBounds) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{a: {$gt: 'foo'}, b: {$gte: 'bar'}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {filter: null, pattern: "
- "{a: 1, b: 1}, bounds: {a: [['foo',{},false,false]], "
- "b:[['bar',{},true,false]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, IndexBoundsAndWithNestedOr) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$and: [{a: 1, $or: [{a: 2}, {a: 3}]}]}"));
-
- // Given that the index over 'a' isn't multikey, we ideally won't generate any solutions
- // since we know the query describes an empty set if 'a' isn't multikey. Any solutions
- // below are "this is how it currently works" instead of "this is how it should work."
-
- // It's kind of iffy to look for indexed solutions so we don't...
- size_t matches = 0;
- matches += numSolutionMatches("{cscan: {dir: 1, filter: "
- "{$or: [{a: 2, a:1}, {a: 3, a:1}]}}}");
- matches += numSolutionMatches("{cscan: {dir: 1, filter: "
- "{$and: [{$or: [{a: 2}, {a: 3}]}, {a: 1}]}}}");
- ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
- }
-
- TEST_F(QueryPlannerTest, IndexBoundsIndexedSort) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{$or: [{a: 1}, {a: 2}]}"), BSON("a" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {a:1}, limit: 0, node: "
- "{cscan: {filter: {$or:[{a:1},{a:2}]}, dir: 1}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {filter: null, "
- "pattern: {a:1}, bounds: {a: [[1,1,true,true], [2,2,true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, IndexBoundsUnindexedSort) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{$or: [{a: 1}, {a: 2}]}"), BSON("b" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: "
- "{cscan: {filter: {$or:[{a:1},{a:2}]}, dir: 1}}}}");
- assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: {fetch: "
- "{filter: null, node: {ixscan: {filter: null, "
- "pattern: {a:1}, bounds: {a: [[1,1,true,true], [2,2,true,true]]}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, IndexBoundsUnindexedSortHint) {
- addIndex(BSON("a" << 1));
- runQuerySortHint(fromjson("{$or: [{a: 1}, {a: 2}]}"), BSON("b" << 1), BSON("a" << 1));
-
- assertNumSolutions(1U);
- assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: {fetch: "
- "{filter: null, node: {ixscan: {filter: null, "
- "pattern: {a:1}, bounds: {a: [[1,1,true,true], [2,2,true,true]]}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundIndexBoundsIntersectRanges) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- addIndex(BSON("a" << 1 << "c" << 1));
- runQuery(fromjson("{a: {$gt: 1, $lt: 10}, c: {$gt: 1, $lt: 10}}"));
-
- assertNumSolutions(3U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {a:1,b:1,c:1}, "
- "bounds: {a: [[1,10,false,false]], "
- "b: [['MinKey','MaxKey',true,true]], "
- "c: [[1,10,false,false]]}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {a:1,c:1}, "
- "bounds: {a: [[1,10,false,false]], "
- "c: [[1,10,false,false]]}}}}}");
- }
-
- // Test that the planner properly takes the union of the index bounds for two
- // negation predicates (SERVER-13890).
- TEST_F(QueryPlannerTest, IndexBoundsOrOfNegations) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a: {$ne: 3}}, {a: {$ne: 4}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
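- // Type codes -1 and 127 are the BSON MinKey and MaxKey types; each matches a single
- // point interval in the index.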
- TEST_F(QueryPlannerTest, BoundsTypeMinKeyMaxKey) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
-
- runQuery(fromjson("{a: {$type: -1}}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}, bounds:"
- "{a: [['MinKey','MinKey',true,true]]}}}}}");
-
- runQuery(fromjson("{a: {$type: 127}}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}, bounds:"
- "{a: [['MaxKey','MaxKey',true,true]]}}}}}");
- }
-
- //
- // Tests related to building index bounds for multikey
- // indices, combined with compound and $elemMatch
- //
-
- // SERVER-12475: make sure that we compound bounds, even
- // for a multikey index.
- TEST_F(QueryPlannerTest, CompoundMultikeyBounds) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << 1), true);
- runQuery(fromjson("{a: 1, b: 3}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {filter: {$and:[{a:1},{b:3}]}, dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {filter: null, "
- "pattern: {a:1,b:1}, bounds: "
- "{a: [[1,1,true,true]], b: [[3,3,true,true]]}}}}}");
- }
-
- // Make sure that we compound bounds but do not intersect bounds
- // for a compound multikey index.
- TEST_F(QueryPlannerTest, CompoundMultikeyBoundsNoIntersect) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << 1), true);
- runQuery(fromjson("{a: 1, b: {$gt: 3, $lte: 5}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {b:{$gt:3}}, node: {ixscan: {filter: null, "
- "pattern: {a:1,b:1}, bounds: "
- "{a: [[1,1,true,true]], b: [[-Infinity,5,true,true]]}}}}}");
- }
-
- //
- // QueryPlannerParams option tests
- //
-
- TEST_F(QueryPlannerTest, NoBlockingSortsAllowedTest) {
- params.options = QueryPlannerParams::NO_BLOCKING_SORT;
- runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
- assertNumSolutions(0U);
-
- addIndex(BSON("x" << 1));
-
- runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NoTableScanBasic) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- runQuery(BSONObj());
- assertNumSolutions(0U);
-
- addIndex(BSON("x" << 1));
-
- runQuery(BSONObj());
- assertNumSolutions(0U);
-
- runQuery(fromjson("{x: {$gte: 0}}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NoTableScanOrWithAndChild) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a: 20}, {$and: [{a:1}, {b:7}]}]}"));
-
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a: 1}}}, "
- "{fetch: {filter: {b: 7}, node: {ixscan: "
- "{filter: null, pattern: {a: 1}}}}}]}}}}");
- }
-
- //
- // Index Intersection.
- //
- // We don't exhaustively check all plans here. Instead we check that there exists an
- // intersection plan. The blending of >1 index plans and ==1 index plans is under development
- // but we want to make sure that we create an >1 index plan when we should.
- //
-
- TEST_F(QueryPlannerTest, IntersectBasicTwoPred) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuery(fromjson("{a:1, b:{$gt: 1}}"));
-
- assertSolutionExists("{fetch: {filter: null, node: {andHash: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, IntersectBasicTwoPredCompound) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1 << "c" << 1));
- addIndex(BSON("b" << 1));
- runQuery(fromjson("{a:1, b:1, c:1}"));
-
- // There's an andSorted not andHash because the two seeks are point intervals.
- assertSolutionExists("{fetch: {filter: null, node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1, c:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
- }
-
- // SERVER-12196
- TEST_F(QueryPlannerTest, IntersectBasicTwoPredCompoundMatchesIdxOrder1) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuery(fromjson("{a:1, b:1}"));
-
- assertNumSolutions(3U);
-
- assertSolutionExists("{fetch: {filter: {b:1}, node: "
- "{ixscan: {filter: null, pattern: {a:1}}}}}");
- assertSolutionExists("{fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {b:1}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
- }
-
- // SERVER-12196
- TEST_F(QueryPlannerTest, IntersectBasicTwoPredCompoundMatchesIdxOrder2) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("b" << 1));
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{a:1, b:1}"));
-
- assertNumSolutions(3U);
-
- assertSolutionExists("{fetch: {filter: {b:1}, node: "
- "{ixscan: {filter: null, pattern: {a:1}}}}}");
- assertSolutionExists("{fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {b:1}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, IntersectManySelfIntersections) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- // True means multikey.
- addIndex(BSON("a" << 1), true);
-
- // This one goes to 11.
- runQuery(fromjson("{a:1, a:2, a:3, a:4, a:5, a:6, a:7, a:8, a:9, a:10, a:11}"));
-
- // But this one only goes to 10.
- assertSolutionExists("{fetch: {filter: {a:11}, node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}}," // 1
- "{ixscan: {filter: null, pattern: {a:1}}}," // 2
- "{ixscan: {filter: null, pattern: {a:1}}}," // 3
- "{ixscan: {filter: null, pattern: {a:1}}}," // 4
- "{ixscan: {filter: null, pattern: {a:1}}}," // 5
- "{ixscan: {filter: null, pattern: {a:1}}}," // 6
- "{ixscan: {filter: null, pattern: {a:1}}}," // 7
- "{ixscan: {filter: null, pattern: {a:1}}}," // 8
- "{ixscan: {filter: null, pattern: {a:1}}}," // 9
- "{ixscan: {filter: null, pattern: {a:1}}}]}}}}"); // 10
- }
-
- TEST_F(QueryPlannerTest, IntersectSubtreeNodes) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
- addIndex(BSON("d" << 1));
-
- runQuery(fromjson("{$or: [{a: 1}, {b: 1}], $or: [{c:1}, {d:1}]}"));
- assertSolutionExists("{fetch: {filter: null, node: {andHash: {nodes: ["
- "{or: {nodes: [{ixscan:{filter:null, pattern:{a:1}}},"
- "{ixscan:{filter:null, pattern:{b:1}}}]}},"
- "{or: {nodes: [{ixscan:{filter:null, pattern:{c:1}}},"
- "{ixscan:{filter:null, pattern:{d:1}}}]}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, IntersectSubtreeAndPred) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
- runQuery(fromjson("{a: 1, $or: [{b:1}, {c:1}]}"));
-
- // This (can be) rewritten to $or: [{a:1, b:1}, {a:1, c:1}]. We don't look for the various
- // single $or solutions as that's tested elsewhere. We look for the intersect solution,
- // where each AND inside of the root OR is an and_sorted.
- size_t matches = 0;
- matches += numSolutionMatches("{fetch: {filter: null, node: {or: {nodes: ["
- "{andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {'a':1}}},"
- "{ixscan: {filter: null, pattern: {'b':1}}}]}},"
- "{andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {'a':1}}},"
- "{ixscan: {filter: null, pattern: {'c':1}}}]}}]}}}}");
- matches += numSolutionMatches("{fetch: {filter: null, node: {andHash: {nodes:["
- "{or: {nodes: [{ixscan:{filter:null, pattern:{b:1}}},"
- "{ixscan:{filter:null, pattern:{c:1}}}]}},"
- "{ixscan:{filter: null, pattern:{a:1}}}]}}}}");
- ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
- }
-
- TEST_F(QueryPlannerTest, IntersectElemMatch) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a.b" << 1));
- addIndex(BSON("a.c" << 1));
- runQuery(fromjson("{a : {$elemMatch: {b:1, c:1}}}"));
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{b:1, c:1}}},"
- "node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {'a.b':1}}},"
- "{ixscan: {filter: null, pattern: {'a.c':1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, IntersectSortFromAndHash) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuerySortProj(fromjson("{a: 1, b:{$gt: 1}}"), fromjson("{b:1}"), BSONObj());
-
- // This provides the sort.
- assertSolutionExists("{fetch: {filter: null, node: {andHash: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
-
- // Rearrange the preds, shouldn't matter.
- runQuerySortProj(fromjson("{b: 1, a:{$lt: 7}}"), fromjson("{b:1}"), BSONObj());
- assertSolutionExists("{fetch: {filter: null, node: {andHash: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, IntersectCanBeVeryBig) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
- addIndex(BSON("d" << 1));
- runQuery(fromjson("{$or: [{ 'a' : null, 'b' : 94, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 98, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 1, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 2, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 7, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 9, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 16, 'c' : null, 'd' : null }]}"));
-
- assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
- }
-
- // Ensure that disabling AND_HASH intersection works properly.
- TEST_F(QueryPlannerTest, IntersectDisableAndHash) {
- bool oldEnableHashIntersection = internalQueryPlannerEnableHashIntersection;
-
- // Turn index intersection on but disable hash-based intersection.
- internalQueryPlannerEnableHashIntersection = false;
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
-
- runQuery(fromjson("{a: {$gt: 1}, b: 1, c: 1}"));
-
- // We should do an AND_SORT intersection of {b: 1} and {c: 1}, but no AND_HASH plans.
- assertNumSolutions(4U);
- assertSolutionExists("{fetch: {filter: {b: 1, c: 1}, node: {ixscan: "
- "{pattern: {a: 1}, bounds: {a: [[1,Infinity,false,true]]}}}}}");
- assertSolutionExists("{fetch: {filter: {a:{$gt:1},c:1}, node: {ixscan: "
- "{pattern: {b: 1}, bounds: {b: [[1,1,true,true]]}}}}}");
- assertSolutionExists("{fetch: {filter: {a:{$gt:1},b:1}, node: {ixscan: "
- "{pattern: {c: 1}, bounds: {c: [[1,1,true,true]]}}}}}");
- assertSolutionExists("{fetch: {filter: {a:{$gt:1}}, node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {b:1}}},"
- "{ixscan: {filter: null, pattern: {c:1}}}]}}}}");
-
- // Restore the old value of the hash intersection switch.
- internalQueryPlannerEnableHashIntersection = oldEnableHashIntersection;
- }
-
- //
- // Index intersection cases for SERVER-12825: make sure that
- // we don't generate an ixisect plan if a compound index is
- // available instead.
- //
-
- // SERVER-12825
- TEST_F(QueryPlannerTest, IntersectCompoundInsteadBasic) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{a: 1, b: 1}"));
-
- assertNumSolutions(3U);
- assertSolutionExists("{fetch: {filter: {b:1}, node: "
- "{ixscan: {filter: null, pattern: {a:1}}}}}");
- assertSolutionExists("{fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {b:1}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {a:1,b:1}}}}}");
- }
-
- // SERVER-12825
- TEST_F(QueryPlannerTest, IntersectCompoundInsteadThreeCompoundIndices) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1 << "b" << 1));
- addIndex(BSON("c" << 1 << "d" << 1));
- addIndex(BSON("a" << 1 << "c" << -1 << "b" << -1 << "d" << 1));
- runQuery(fromjson("{a: 1, b: 1, c: 1, d: 1}"));
-
- assertNumSolutions(3U);
- assertSolutionExists("{fetch: {filter: {$and: [{c:1},{d:1}]}, node: "
- "{ixscan: {filter: null, pattern: {a:1,b:1}}}}}");
- assertSolutionExists("{fetch: {filter: {$and:[{a:1},{b:1}]}, node: "
- "{ixscan: {filter: null, pattern: {c:1,d:1}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {a:1,c:-1,b:-1,d:1}}}}}");
- }
-
- // SERVER-12825
- TEST_F(QueryPlannerTest, IntersectCompoundInsteadUnusedField) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- runQuery(fromjson("{a: 1, b: 1}"));
-
- assertNumSolutions(3U);
- assertSolutionExists("{fetch: {filter: {b:1}, node: "
- "{ixscan: {filter: null, pattern: {a:1}}}}}");
- assertSolutionExists("{fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {b:1}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {a:1,b:1,c:1}}}}}");
- }
-
- // SERVER-12825
- TEST_F(QueryPlannerTest, IntersectCompoundInsteadUnusedField2) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1 << "b" << 1));
- addIndex(BSON("c" << 1 << "d" << 1));
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- runQuery(fromjson("{a: 1, c: 1}"));
-
- assertNumSolutions(3U);
- assertSolutionExists("{fetch: {filter: {c:1}, node: "
- "{ixscan: {filter: null, pattern: {a:1,b:1}}}}}");
- assertSolutionExists("{fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {c:1,d:1}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {a:1,b:1,c:1}}}}}");
- }
-
- //
- // Test that we add a KeepMutations stage when we should and that we don't add one when we shouldn't.
- //
-
- // Collection scan doesn't keep any state, so it can't produce flagged data.
- TEST_F(QueryPlannerTest, NoMutationsForCollscan) {
- params.options = QueryPlannerParams::KEEP_MUTATIONS;
- runQuery(fromjson(""));
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // Collscan + sort doesn't produce flagged data either.
- TEST_F(QueryPlannerTest, NoMutationsForSort) {
- params.options = QueryPlannerParams::KEEP_MUTATIONS;
- runQuerySortProj(fromjson(""), fromjson("{a:1}"), BSONObj());
- assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- }
-
- // An index scan + fetch requires a keep node as it can flag data. Also make sure we put it in
- // the right place, under the sort.
- TEST_F(QueryPlannerTest, MutationsFromFetch) {
- params.options = QueryPlannerParams::KEEP_MUTATIONS;
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{a: 5}"), fromjson("{b:1}"), BSONObj());
- assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: {keep: {node: "
- "{fetch: {node: {ixscan: {pattern: {a:1}}}}}}}}}");
- }
-
- // Index scan w/covering doesn't require a keep node as there's no fetch.
- TEST_F(QueryPlannerTest, NoFetchNoKeep) {
- params.options = QueryPlannerParams::KEEP_MUTATIONS;
- addIndex(BSON("x" << 1));
- // query, sort, proj
- runQuerySortProj(fromjson("{ x : {$gt: 1}}"), BSONObj(), fromjson("{_id: 0, x: 1}"));
-
- // A collscan solution is possible, but we didn't turn on the "always include a collscan" option.
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{proj: {spec: {_id: 0, x: 1}, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- }
-
- // No keep with geoNear.
- TEST_F(QueryPlannerTest, NoKeepWithGeoNear) {
- params.options = QueryPlannerParams::KEEP_MUTATIONS;
- addIndex(BSON("a" << "2d"));
- runQuery(fromjson("{a: {$near: [0,0], $maxDistance:0.3 }}"));
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{geoNear2d: {a: '2d'}}");
- }
-
- // No keep when we have an indexed sort.
- TEST_F(QueryPlannerTest, NoKeepWithIndexedSort) {
- params.options = QueryPlannerParams::KEEP_MUTATIONS;
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}}"),
- BSON("b" << 1), BSONObj(), 0, 1);
-
- // cscan solution exists but we didn't turn on the "always include a collscan."
- assertNumSolutions(1);
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a: 1, b: 1}}}, {ixscan: {pattern: {a: 1, b: 1}}}]}}}}");
- }
-
- // Make sure a top-level $or hits the limiting number
- // of solutions that we are willing to consider.
- TEST_F(QueryPlannerTest, OrEnumerationLimit) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- // 6 $or clauses, each with 2 indexed predicates
- // means 2^6 = 64 possibilities. We should hit the limit.
- runQuery(fromjson("{$or: [{a: 1, b: 1},"
- "{a: 2, b: 2},"
- "{a: 3, b: 3},"
- "{a: 4, b: 4},"
- "{a: 5, b: 5},"
- "{a: 6, b: 6}]}"));
-
- assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
- }
-
- TEST_F(QueryPlannerTest, OrEnumerationLimit2) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
- addIndex(BSON("d" << 1));
-
- // 3 $or clauses, and a few other preds. Each $or clause can
- // generate up to the max number of allowed $or enumerations.
- runQuery(fromjson("{$or: [{a: 1, b: 1, c: 1, d: 1},"
- "{a: 2, b: 2, c: 2, d: 2},"
- "{a: 3, b: 3, c: 3, d: 3}]}"));
-
- assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
- }
-
- // SERVER-13104: test that we properly enumerate all solutions for nested $or.
- TEST_F(QueryPlannerTest, EnumerateNestedOr) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
-
- runQuery(fromjson("{d: 1, $or: [{a: 1, b: 1}, {c: 1}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{fetch: {filter: {d: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {b: 1}, node: {ixscan: {pattern: {a: 1}}}}},"
- "{ixscan: {pattern: {c: 1}}}]}}}}");
- assertSolutionExists("{fetch: {filter: {d: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {a: 1}, node: {ixscan: {pattern: {b: 1}}}}},"
- "{ixscan: {pattern: {c: 1}}}]}}}}");
- }
-
- // SERVER-13104: test that we properly enumerate all solutions for nested $or.
- TEST_F(QueryPlannerTest, EnumerateNestedOr2) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
- addIndex(BSON("d" << 1));
- addIndex(BSON("e" << 1));
- addIndex(BSON("f" << 1));
-
- runQuery(fromjson("{a: 1, b: 1, $or: [{c: 1, d: 1}, {e: 1, f: 1}]}"));
-
- assertNumSolutions(6U);
-
- // Four possibilities from indexing the $or.
- assertSolutionExists("{fetch: {filter: {a: 1, b: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {d: 1}, node: {ixscan: {pattern: {c: 1}}}}},"
- "{fetch: {filter: {f: 1}, node: {ixscan: {pattern: {e: 1}}}}}"
- "]}}}}");
- assertSolutionExists("{fetch: {filter: {a: 1, b: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}},"
- "{fetch: {filter: {f: 1}, node: {ixscan: {pattern: {e: 1}}}}}"
- "]}}}}");
- assertSolutionExists("{fetch: {filter: {a: 1, b: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {d: 1}, node: {ixscan: {pattern: {c: 1}}}}},"
- "{fetch: {filter: {e: 1}, node: {ixscan: {pattern: {f: 1}}}}}"
- "]}}}}");
- assertSolutionExists("{fetch: {filter: {a: 1, b: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}},"
- "{fetch: {filter: {e: 1}, node: {ixscan: {pattern: {f: 1}}}}}"
- "]}}}}");
-
- // Two possibilities from outside the $or.
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {b: 1}}}}}");
- }
-
- //
- // Test the "split limited sort stages" hack.
- //
-
- TEST_F(QueryPlannerTest, SplitLimitedSort) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- runQuerySortProjSkipLimit(fromjson("{a: 1}"), fromjson("{b: 1}"),
- BSONObj(), 0, 3);
-
- assertNumSolutions(2U);
- // First solution has no blocking stage; no need to split.
- assertSolutionExists("{fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {b: 1}}}}}");
- // Second solution has a blocking sort with a limit: it gets split and
- // joined with an OR stage.
- assertSolutionExists("{or: {nodes: ["
- "{sort: {pattern: {b: 1}, limit: 3, node: "
- "{fetch: {node: {ixscan: {pattern: {a: 1}}}}}}}, "
- "{sort: {pattern: {b: 1}, limit: 0, node: "
- "{fetch: {node: {ixscan: {pattern: {a: 1}}}}}}}]}}");
- }
-
- // The same query run as a find command with a limit should not require the "split limited sort"
- // hack.
- TEST_F(QueryPlannerTest, NoSplitLimitedSortAsCommand) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- runQueryAsCommand(fromjson("{find: 'testns', filter: {a: 1}, sort: {b: 1}, limit: 3}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{limit: {n: 3, node: {fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {b: 1}}}}}}}");
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 3, node: {fetch: {filter: null,"
- "node: {ixscan: {pattern: {a: 1}}}}}}}");
- }
-
- // Same query run as a find command with a batchSize rather than a limit should not require
- // the "split limited sort" hack, and should not have any limit represented inside the plan.
- TEST_F(QueryPlannerTest, NoSplitLimitedSortAsCommandBatchSize) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- runQueryAsCommand(fromjson("{find: 'testns', filter: {a: 1}, sort: {b: 1}, batchSize: 3}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{fetch: {filter: {a: 1}, node: {ixscan: "
- "{filter: null, pattern: {b: 1}}}}}");
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, node: {fetch: {filter: null,"
- "node: {ixscan: {pattern: {a: 1}}}}}}}");
- }
-
- //
- // Test shard filter query planning
- //
-
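- // The query on 'b' has no usable index here, so the shard filter sits directly above
- // a collection scan.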
- TEST_F(QueryPlannerTest, ShardFilterCollScan) {
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << 1);
- addIndex(BSON("a" << 1));
-
- runQuery(fromjson("{b: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{sharding_filter: {node: "
- "{cscan: {dir: 1}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ShardFilterBasicIndex) {
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << 1);
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- runQuery(fromjson("{b: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{sharding_filter: {node: "
- "{fetch: {node: "
- "{ixscan: {pattern: {b: 1}}}}}}}");
- }
-
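- // The {a: 1} index contains the entire shard key, so the shard filter can be evaluated
- // on index keys beneath the fetch.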
- TEST_F(QueryPlannerTest, ShardFilterBasicCovered) {
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << 1);
- addIndex(BSON("a" << 1));
-
- runQuery(fromjson("{a: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: "
- "{sharding_filter: {node: "
- "{ixscan: {pattern: {a: 1}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ShardFilterBasicProjCovered) {
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << 1);
- addIndex(BSON("a" << 1));
-
- runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{_id : 0, a : 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, type: 'coveredIndex', node: "
- "{sharding_filter: {node: "
- "{ixscan: {pattern: {a: 1}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ShardFilterCompoundProjCovered) {
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << 1 << "b" << 1);
- addIndex(BSON("a" << 1 << "b" << 1));
-
- runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{_id: 0, a: 1, b: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1, b: 1 }, type: 'coveredIndex', node: "
- "{sharding_filter: {node: "
- "{ixscan: {pattern: {a: 1, b: 1}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ShardFilterNestedProjNotCovered) {
- // Projections on dotted fields can't currently be covered, so the projection requires a
- // fetch; the shard key filter itself runs off the index keys and doesn't need the fetch.
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << 1 << "b.c" << 1);
- addIndex(BSON("a" << 1 << "b.c" << 1));
-
- runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{_id: 0, a: 1, 'b.c': 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1, 'b.c': 1 }, type: 'default', node: "
- "{fetch: {node: "
- "{sharding_filter: {node: "
- "{ixscan: {pattern: {a: 1, 'b.c': 1}}}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ShardFilterHashProjNotCovered) {
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << "hashed");
- addIndex(BSON("a" << "hashed"));
-
- runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{_id : 0, a : 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{proj: {spec: {_id: 0,a: 1}, type: 'simple', node: "
- "{sharding_filter : {node: "
- "{fetch: {node: "
- "{ixscan: {pattern: {a: 'hashed'}}}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ShardFilterKeyPrefixIndexCovered) {
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << 1);
- addIndex(BSON("a" << 1 << "b" << 1 << "_id" << 1));
-
- runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{a : 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{proj: {spec: {a: 1}, type: 'coveredIndex', node: "
- "{sharding_filter : {node: "
- "{ixscan: {pattern: {a: 1, b: 1, _id: 1}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ShardFilterNoIndexNotCovered) {
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << "hashed");
- addIndex(BSON("b" << 1));
-
- runQuerySortProj(fromjson("{b: 1}"), BSONObj(), fromjson("{_id : 0, a : 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{proj: {spec: {_id: 0,a: 1}, type: 'simple', node: "
- "{sharding_filter : {node: "
- "{fetch: {node: "
- "{ixscan: {pattern: {b: 1}}}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CannotTrimIxisectParam) {
- params.options = QueryPlannerParams::CANNOT_TRIM_IXISECT;
- params.options |= QueryPlannerParams::INDEX_INTERSECTION;
- params.options |= QueryPlannerParams::NO_TABLE_SCAN;
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- runQuery(fromjson("{a: 1, b: 1, c: 1}"));
-
- assertNumSolutions(3U);
- assertSolutionExists("{fetch: {filter: {b: 1, c: 1}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}");
- assertSolutionExists("{fetch: {filter: {a: 1, c: 1}, node: "
- "{ixscan: {filter: null, pattern: {b: 1}}}}}");
- assertSolutionExists("{fetch: {filter: {a:1,b:1,c:1}, node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, CannotTrimIxisectParamBeneathOr) {
- params.options = QueryPlannerParams::CANNOT_TRIM_IXISECT;
- params.options |= QueryPlannerParams::INDEX_INTERSECTION;
- params.options |= QueryPlannerParams::NO_TABLE_SCAN;
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
-
- runQuery(fromjson("{d: 1, $or: [{a: 1}, {b: 1, c: 1}]}"));
-
- assertNumSolutions(3U);
-
- assertSolutionExists("{fetch: {filter: {d: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {c: 1}, node: {ixscan: {filter: null,"
- "pattern: {b: 1}, bounds: {b: [[1,1,true,true]]}}}}},"
- "{ixscan: {filter: null, pattern: {a: 1},"
- "bounds: {a: [[1,1,true,true]]}}}]}}}}");
-
- assertSolutionExists("{fetch: {filter: {d: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {b: 1}, node: {ixscan: {filter: null,"
- "pattern: {c: 1}, bounds: {c: [[1,1,true,true]]}}}}},"
- "{ixscan: {filter: null, pattern: {a: 1},"
- "bounds: {a: [[1,1,true,true]]}}}]}}}}");
-
- assertSolutionExists("{fetch: {filter: {d: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {b: 1, c: 1}, node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {b: 1}}},"
- "{ixscan: {filter: null, pattern: {c: 1}}}]}}}},"
- "{ixscan: {filter: null, pattern: {a: 1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, CannotTrimIxisectAndHashWithOrChild) {
- params.options = QueryPlannerParams::CANNOT_TRIM_IXISECT;
- params.options |= QueryPlannerParams::INDEX_INTERSECTION;
- params.options |= QueryPlannerParams::NO_TABLE_SCAN;
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
-
- runQuery(fromjson("{c: 1, $or: [{a: 1}, {b: 1, d: 1}]}"));
-
- assertNumSolutions(3U);
-
- assertSolutionExists("{fetch: {filter: {c: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {d: 1}, node: {ixscan: {filter: null,"
- "pattern: {b: 1}, bounds: {b: [[1,1,true,true]]}}}}},"
- "{ixscan: {filter: null, pattern: {a: 1},"
- "bounds: {a: [[1,1,true,true]]}}}]}}}}");
-
- assertSolutionExists("{fetch: {filter: {$or:[{b:1,d:1},{a:1}]}, node:"
- "{ixscan: {filter: null, pattern: {c: 1}}}}}");
-
- assertSolutionExists("{fetch: {filter: {c:1,$or:[{a:1},{b:1,d:1}]}, node:{andHash:{nodes:["
- "{or: {nodes: ["
- "{fetch: {filter: {d:1}, node: {ixscan: {pattern: {b: 1}}}}},"
- "{ixscan: {filter: null, pattern: {a: 1}}}]}},"
- "{ixscan: {filter: null, pattern: {c: 1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, CannotTrimIxisectParamSelfIntersection) {
- params.options = QueryPlannerParams::CANNOT_TRIM_IXISECT;
- params.options = QueryPlannerParams::INDEX_INTERSECTION;
- params.options |= QueryPlannerParams::NO_TABLE_SCAN;
-
- // true means multikey
- addIndex(BSON("a" << 1), true);
-
- runQuery(fromjson("{a: {$all: [1, 2, 3]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{fetch: {filter: {$and: [{a:2}, {a:3}]}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1},"
- "bounds: {a: [[1,1,true,true]]}}},"
- "{ixscan: {filter: null, pattern: {a:1},"
- "bounds: {a: [[2,2,true,true]]}}},"
- "{ixscan: {filter: null, pattern: {a:1},"
- "bounds: {a: [[3,3,true,true]]}}}]}}}}");
- }
-
-
- // If a lookup against a unique index is available as a possible plan, then the planner
- // should not generate other possibilities.
- TEST_F(QueryPlannerTest, UniqueIndexLookup) {
- params.options = QueryPlannerParams::INDEX_INTERSECTION;
- params.options |= QueryPlannerParams::NO_TABLE_SCAN;
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1),
- false, // multikey
- false, // sparse,
- true); // unique
-
- runQuery(fromjson("{a: 1, b: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a: 1}, node: "
- "{ixscan: {filter: null, pattern: {b: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, HintOnNonUniqueIndex) {
- params.options = QueryPlannerParams::INDEX_INTERSECTION;
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1),
- false, // multikey
- false, // sparse,
- true); // unique
-
- runQueryHint(fromjson("{a: 1, b: 1}"), BSON("a" << 1));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {b: 1}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, UniqueIndexLookupBelowOr) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
- addIndex(BSON("d" << 1),
- false, // multikey
- false, // sparse,
- true); // unique
-
- runQuery(fromjson("{$or: [{a: 1, b: 1}, {c: 1, d: 1}]}"));
-
- // Only two plans because we throw out plans for the right branch of the $or that do not
- // use equality over the unique index.
- assertNumSolutions(2U);
- assertSolutionExists("{or: {nodes: ["
- "{fetch: {filter: {a: 1}, node: {ixscan: {pattern: {b: 1}}}}},"
- "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}}]}}");
- assertSolutionExists("{or: {nodes: ["
- "{fetch: {filter: {b: 1}, node: {ixscan: {pattern: {a: 1}}}}},"
- "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}}]}}");
- }
-
- TEST_F(QueryPlannerTest, UniqueIndexLookupBelowOrBelowAnd) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
- addIndex(BSON("d" << 1),
- false, // multikey
- false, // sparse,
- true); // unique
-
- runQuery(fromjson("{e: 1, $or: [{a: 1, b: 1}, {c: 1, d: 1}]}"));
-
- // Only two plans because we throw out plans for the right branch of the $or that do not
- // use equality over the unique index.
- assertNumSolutions(2U);
- assertSolutionExists("{fetch: {filter: {e: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {a: 1}, node: {ixscan: {pattern: {b: 1}}}}},"
- "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}}"
- "]}}}}");
- assertSolutionExists("{fetch: {filter: {e: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {b: 1}, node: {ixscan: {pattern: {a: 1}}}}},"
- "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}}"
- "]}}}}");
- }
-
- TEST_F(QueryPlannerTest, CoveredOrUniqueIndexLookup) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
-
- addIndex(BSON("a" << 1 << "b" << 1));
- addIndex(BSON("a" << 1),
- false, // multikey
- false, // sparse,
- true); // unique
-
- runQuerySortProj(fromjson("{a: 1, b: 1}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{fetch: {filter: {b: 1}, node: {ixscan: {pattern: {a: 1}}}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}");
- }
-
- //
- // Test bad input to query planner helpers.
- //
-
- TEST(BadInputTest, CacheDataFromTaggedTree) {
- PlanCacheIndexTree* indexTree;
-
- // Null match expression.
- std::vector<IndexEntry> relevantIndices;
- Status s = QueryPlanner::cacheDataFromTaggedTree(NULL, relevantIndices, &indexTree);
- ASSERT_NOT_OK(s);
- ASSERT(NULL == indexTree);
-
- // No relevant index matching the index tag.
- relevantIndices.push_back(IndexEntry(BSON("a" << 1)));
-
- CanonicalQuery *cq;
- Status cqStatus = CanonicalQuery::canonicalize("ns", BSON("a" << 3), &cq);
- ASSERT_OK(cqStatus);
- std::unique_ptr<CanonicalQuery> scopedCq(cq);
- scopedCq->root()->setTag(new IndexTag(1));
-
- s = QueryPlanner::cacheDataFromTaggedTree(scopedCq->root(), relevantIndices, &indexTree);
- ASSERT_NOT_OK(s);
- ASSERT(NULL == indexTree);
- }
-
- TEST(BadInputTest, TagAccordingToCache) {
- CanonicalQuery *cq;
- Status cqStatus = CanonicalQuery::canonicalize("ns", BSON("a" << 3), &cq);
- ASSERT_OK(cqStatus);
- std::unique_ptr<CanonicalQuery> scopedCq(cq);
-
- std::unique_ptr<PlanCacheIndexTree> indexTree(new PlanCacheIndexTree());
- indexTree->setIndexEntry(IndexEntry(BSON("a" << 1)));
-
- std::map<BSONObj, size_t> indexMap;
-
- // Null filter.
- Status s = QueryPlanner::tagAccordingToCache(NULL, indexTree.get(), indexMap);
- ASSERT_NOT_OK(s);
-
- // Null indexTree.
- s = QueryPlanner::tagAccordingToCache(scopedCq->root(), NULL, indexMap);
- ASSERT_NOT_OK(s);
-
- // Index not found.
- s = QueryPlanner::tagAccordingToCache(scopedCq->root(), indexTree.get(), indexMap);
- ASSERT_NOT_OK(s);
-
- // Index found once added to the map.
- indexMap[BSON("a" << 1)] = 0;
- s = QueryPlanner::tagAccordingToCache(scopedCq->root(), indexTree.get(), indexMap);
- ASSERT_OK(s);
-
- // Regenerate canonical query in order to clear tags.
- cqStatus = CanonicalQuery::canonicalize("ns", BSON("a" << 3), &cq);
- ASSERT_OK(cqStatus);
- scopedCq.reset(cq);
-
- // Mismatched tree topology.
- PlanCacheIndexTree* child = new PlanCacheIndexTree();
- child->setIndexEntry(IndexEntry(BSON("a" << 1)));
- indexTree->children.push_back(child);
- s = QueryPlanner::tagAccordingToCache(scopedCq->root(), indexTree.get(), indexMap);
- ASSERT_NOT_OK(s);
- }
+ runQuery(fromjson("{b: {$not: {$exists: true}}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {b: {$exists: false}}, node: "
+ "{ixscan: {pattern: {b: 1}, bounds: "
+ "{b: [[null, null, true, true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ExistsBoundsCompound) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+
+ runQuery(fromjson("{a: 1, b: {$exists: true}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {b: {$exists: true}}, node: "
+ "{ixscan: {pattern: {a: 1, b: 1}, bounds: "
+ "{a: [[1,1,true,true]], b: [['MinKey','MaxKey',true,true]]}}}}}");
+
+ // This ends up being a double negation, which we currently don't index.
+ runQuery(fromjson("{a: 1, b: {$not: {$exists: false}}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}, bounds: "
+ "{a: [[1,1,true,true]], b: [['MinKey','MaxKey',true,true]]}}}}}");
+
+ runQuery(fromjson("{a: 1, b: {$exists: false}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {b: {$exists: false}}, node: "
+ "{ixscan: {pattern: {a: 1, b: 1}, bounds: "
+ "{a: [[1,1,true,true]], b: [[null,null,true,true]]}}}}}");
+
+ runQuery(fromjson("{a: 1, b: {$not: {$exists: true}}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {b: {$exists: false}}, node: "
+ "{ixscan: {pattern: {a: 1, b: 1}, bounds: "
+ "{a: [[1,1,true,true]], b: [[null,null,true,true]]}}}}}");
+}
+
+//
+// skip and limit
+//
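+// Note on the runQuerySkipLimit() calls below: a negative limit is a hard limit and
+// appears as an explicit LIMIT node in the plan, whereas a positive value is a soft
+// limit (ntoreturn) and does not change the plan shape. See lite_parsed_query.cpp.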
+
+TEST_F(QueryPlannerTest, BasicSkipNoIndex) {
+ addIndex(BSON("a" << 1));
+
+ runQuerySkipLimit(BSON("x" << 5), 3, 0);
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{skip: {n: 3, node: {cscan: {dir: 1, filter: {x: 5}}}}}");
+}
+
+TEST_F(QueryPlannerTest, BasicSkipWithIndex) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+
+ runQuerySkipLimit(BSON("a" << 5), 8, 0);
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{skip: {n: 8, node: {cscan: {dir: 1, filter: {a: 5}}}}}");
+ assertSolutionExists(
+ "{skip: {n: 8, node: {fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, BasicLimitNoIndex) {
+ addIndex(BSON("a" << 1));
+
+ runQuerySkipLimit(BSON("x" << 5), 0, -3);
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{limit: {n: 3, node: {cscan: {dir: 1, filter: {x: 5}}}}}");
+}
+
+TEST_F(QueryPlannerTest, BasicSoftLimitNoIndex) {
+ addIndex(BSON("a" << 1));
+
+ runQuerySkipLimit(BSON("x" << 5), 0, 3);
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
+}
+
+TEST_F(QueryPlannerTest, BasicLimitWithIndex) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+
+ runQuerySkipLimit(BSON("a" << 5), 0, -5);
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{limit: {n: 5, node: {cscan: {dir: 1, filter: {a: 5}}}}}");
+ assertSolutionExists(
+ "{limit: {n: 5, node: {fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, BasicSoftLimitWithIndex) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+
+ runQuerySkipLimit(BSON("a" << 5), 0, 5);
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a: 5}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, SkipAndLimit) {
+ addIndex(BSON("x" << 1));
+
+ runQuerySkipLimit(BSON("x" << BSON("$lte" << 4)), 7, -2);
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{limit: {n: 2, node: {skip: {n: 7, node: "
+ "{cscan: {dir: 1, filter: {x: {$lte: 4}}}}}}}}");
+ assertSolutionExists(
+ "{limit: {n: 2, node: {skip: {n: 7, node: {fetch: "
+ "{filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, SkipAndSoftLimit) {
+ addIndex(BSON("x" << 1));
+
+ runQuerySkipLimit(BSON("x" << BSON("$lte" << 4)), 7, 2);
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{skip: {n: 7, node: "
+ "{cscan: {dir: 1, filter: {x: {$lte: 4}}}}}}");
+ assertSolutionExists(
+ "{skip: {n: 7, node: {fetch: "
+ "{filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}}}");
+}
+
+//
+// tree operations
+//
+
+TEST_F(QueryPlannerTest, TwoPredicatesAnding) {
+ addIndex(BSON("x" << 1));
+
+ runQuery(fromjson("{$and: [ {x: {$gt: 1}}, {x: {$lt: 3}} ] }"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, SimpleOr) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a: 20}, {a: 21}]}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$or: [{a: 20}, {a: 21}]}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a:1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, OrWithoutEnoughIndices) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a: 20}, {b: 21}]}"));
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$or: [{a: 20}, {b: 21}]}}}");
+}
+
+TEST_F(QueryPlannerTest, OrWithAndChild) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a: 20}, {$and: [{a:1}, {b:7}]}]}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a: 1}}}, "
+ "{fetch: {filter: {b: 7}, node: {ixscan: "
+ "{filter: null, pattern: {a: 1}}}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, AndWithUnindexedOrChild) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{a:20, $or: [{b:1}, {c:7}]}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+
+ // Logical rewrite means we could get one of these two outcomes:
+ size_t matches = 0;
+ matches += numSolutionMatches(
+ "{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+ matches += numSolutionMatches(
+ "{or: {filter: null, nodes: ["
+ "{fetch: {filter: {b:1}, node: {"
+ "ixscan: {filter: null, pattern: {a:1}}}}},"
+ "{fetch: {filter: {c:7}, node: {"
+ "ixscan: {filter: null, pattern: {a:1}}}}}]}}");
+ ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
+}
+
+
+TEST_F(QueryPlannerTest, AndWithOrWithOneIndex) {
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{b:1}, {c:7}], a:20}"));
+
+ // Logical rewrite gives us at least one of these:
+ assertSolutionExists("{cscan: {dir: 1}}");
+ size_t matches = 0;
+ matches += numSolutionMatches(
+ "{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+ matches += numSolutionMatches(
+ "{or: {filter: null, nodes: ["
+ "{fetch: {filter: {b:1}, node: {"
+ "ixscan: {filter: null, pattern: {a:1}}}}},"
+ "{fetch: {filter: {c:7}, node: {"
+ "ixscan: {filter: null, pattern: {a:1}}}}}]}}");
+ ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
+}
+
+//
+// Additional $or tests
+//
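+// The first few tests below verify that overlapping predicates on the same field
+// inside an $or collapse into a single index scan whose bounds are the union of the
+// individual intervals.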
+
+TEST_F(QueryPlannerTest, OrCollapsesToSingleScan) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a:{$gt:2}}, {a:{$gt:0}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [[0,Infinity,false,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, OrCollapsesToSingleScan2) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a:{$lt:2}}, {a:{$lt:4}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [[-Infinity,4,true,false]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, OrCollapsesToSingleScan3) {
+ addIndex(BSON("a" << 1));
+ runQueryHint(fromjson("{$or: [{a:1},{a:3}]}"), fromjson("{a:1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [[1,1,true,true], [3,3,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, OrOnlyOneBranchCanUseIndex) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a:1}, {b:2}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+TEST_F(QueryPlannerTest, OrOnlyOneBranchCanUseIndexHinted) {
+ addIndex(BSON("a" << 1));
+ runQueryHint(fromjson("{$or: [{a:1}, {b:2}]}"), fromjson("{a:1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {$or:[{a:1},{b:2}]}, node: {ixscan: "
+ "{pattern: {a:1}, bounds: "
+ "{a: [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, OrNaturalHint) {
+ addIndex(BSON("a" << 1));
+ runQueryHint(fromjson("{$or: [{a:1}, {a:3}]}"), fromjson("{$natural:1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// SERVER-13714. A non-top-level indexable negation exposed a bug in plan enumeration.
+TEST_F(QueryPlannerTest, NonTopLevelIndexedNegation) {
+ addIndex(BSON("state" << 1));
+ addIndex(BSON("is_draft" << 1));
+ addIndex(BSON("published_date" << 1));
+ addIndex(BSON("newsroom_id" << 1));
+
+ BSONObj queryObj = fromjson(
+ "{$and:[{$or:[{is_draft:false},{creator_id:1}]},"
+ "{$or:[{state:3,is_draft:false},"
+ "{published_date:{$ne:null}}]},"
+ "{newsroom_id:{$in:[1]}}]}");
+ runQuery(queryObj);
+}
+
+TEST_F(QueryPlannerTest, NonTopLevelIndexedNegationMinQuery) {
+ addIndex(BSON("state" << 1));
+ addIndex(BSON("is_draft" << 1));
+ addIndex(BSON("published_date" << 1));
+
+ // This is the minimal query needed to reproduce SERVER-13714.
+ BSONObj queryObj = fromjson("{$or:[{state:1, is_draft:1}, {published_date:{$ne: 1}}]}");
+ runQuery(queryObj);
+}
+
+// SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
+TEST_F(QueryPlannerTest, OrOfAnd) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a:{$gt:2,$lt:10}}, {a:{$gt:0,$lt:5}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {pattern: {a:1}, bounds: {a: [[2,10,false,false]]}}}, "
+ "{ixscan: {pattern: {a:1}, bounds: "
+ "{a: [[0,5,false,false]]}}}]}}}}");
+}
+
+// SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
+TEST_F(QueryPlannerTest, OrOfAnd2) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a:{$gt:2,$lt:10}}, {a:{$gt:0,$lt:15}}, {a:{$gt:20}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {pattern: {a:1}, bounds: {a: [[2,10,false,false]]}}}, "
+ "{ixscan: {pattern: {a:1}, bounds: {a: [[0,15,false,false]]}}}, "
+ "{ixscan: {pattern: {a:1}, bounds: "
+ "{a: [[20,Infinity,false,true]]}}}]}}}}");
+}
+
+// SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
+TEST_F(QueryPlannerTest, OrOfAnd3) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a:{$gt:1,$lt:5},b:6}, {a:3,b:{$gt:0,$lt:10}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{or: {nodes: ["
+ "{fetch: {filter: {b:6}, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [[1,5,false,false]]}}}}}, "
+ "{fetch: {filter: {$and:[{b:{$lt:10}},{b:{$gt:0}}]}, node: "
+ "{ixscan: {pattern: {a:1}, bounds: {a:[[3,3,true,true]]}}}}}]}}");
+}
+
+// SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
+TEST_F(QueryPlannerTest, OrOfAnd4) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson(
+ "{$or: [{a:{$gt:1,$lt:5}, b:{$gt:0,$lt:3}, c:6}, "
+ "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{or: {nodes: ["
+ "{fetch: {filter: {c:6}, node: {ixscan: {pattern: {a:1,b:1}, "
+ "bounds: {a: [[1,5,false,false]], b: [[0,3,false,false]]}}}}}, "
+ "{fetch: {filter: {$and:[{c:{$lt:10}},{c:{$gt:0}}]}, node: "
+ "{ixscan: {pattern: {a:1,b:1}, "
+ " bounds: {a:[[3,3,true,true]], b:[[1,2,false,false]]}}}}}]}}");
+}
+
+// SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
+TEST_F(QueryPlannerTest, OrOfAnd5) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson(
+ "{$or: [{a:{$gt:1,$lt:5}, c:6}, "
+ "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{or: {nodes: ["
+ "{fetch: {filter: {c:6}, node: {ixscan: {pattern: {a:1,b:1}, "
+ "bounds: {a: [[1,5,false,false]], "
+ "b: [['MinKey','MaxKey',true,true]]}}}}}, "
+ "{fetch: {filter: {$and:[{c:{$lt:10}},{c:{$gt:0}}]}, node: "
+ "{ixscan: {pattern: {a:1,b:1}, "
+ " bounds: {a:[[3,3,true,true]], b:[[1,2,false,false]]}}}}}]}}");
+}
+
+// SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
+TEST_F(QueryPlannerTest, OrOfAnd6) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson("{$or: [{a:{$in:[1]},b:{$in:[1]}}, {a:{$in:[1,5]},b:{$in:[1,5]}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {pattern: {a:1,b:1}, bounds: "
+ "{a: [[1,1,true,true]], b: [[1,1,true,true]]}}}, "
+ "{ixscan: {pattern: {a:1,b:1}, bounds: "
+ "{a: [[1,1,true,true], [5,5,true,true]], "
+ " b: [[1,1,true,true], [5,5,true,true]]}}}]}}}}");
+}
+
+// SERVER-13960: properly handle $or with a mix of exact and inexact predicates.
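+// Roughly, an equality such as {name: 'thomas'} is an "exact" index predicate, a
+// regex is "inexact covered" (it can be checked against the index key alone), and
+// predicates such as $elemMatch or {$exists: false} are "inexact fetch" (they need
+// the full document). The tests below mix these tightness classes within one $or.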
+TEST_F(QueryPlannerTest, OrInexactWithExact) {
+ addIndex(BSON("name" << 1));
+ runQuery(fromjson("{$or: [{name: 'thomas'}, {name: /^alexand(er|ra)/}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {filter:"
+ "{$or: [{name: 'thomas'}, {name: /^alexand(er|ra)/}]},"
+ "pattern: {name: 1}}}}}");
+}
+
+// SERVER-13960: multiple indices, each with an inexact covered predicate.
+TEST_F(QueryPlannerTest, OrInexactWithExact2) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuery(fromjson("{$or: [{a: 'foo'}, {a: /bar/}, {b: 'foo'}, {b: /bar/}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {or: {nodes: ["
+ "{ixscan: {filter: {$or:[{a:'foo'},{a:/bar/}]},"
+ "pattern: {a: 1}}},"
+ "{ixscan: {filter: {$or:[{b:'foo'},{b:/bar/}]},"
+ "pattern: {b: 1}}}]}}}}");
+}
+
+// SERVER-13960: an exact, inexact covered, and inexact fetch predicate.
+TEST_F(QueryPlannerTest, OrAllThreeTightnesses) {
+ addIndex(BSON("names" << 1));
+ runQuery(fromjson(
+ "{$or: [{names: 'frank'}, {names: /^al(ice)|(ex)/},"
+ "{names: {$elemMatch: {$eq: 'thomas'}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: "
+ "{$or: [{names: 'frank'}, {names: /^al(ice)|(ex)/},"
+ "{names: {$elemMatch: {$eq: 'thomas'}}}]}, "
+ "node: {ixscan: {filter: null, pattern: {names: 1}}}}}");
+}
+
+// SERVER-13960: two inexact fetch predicates.
+TEST_F(QueryPlannerTest, OrTwoInexactFetch) {
+ // true means multikey
+ addIndex(BSON("names" << 1), true);
+ runQuery(fromjson(
+ "{$or: [{names: {$elemMatch: {$eq: 'alexandra'}}},"
+ "{names: {$elemMatch: {$eq: 'thomas'}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: "
+ "{$or: [{names: {$elemMatch: {$eq: 'alexandra'}}},"
+ "{names: {$elemMatch: {$eq: 'thomas'}}}]}, "
+ "node: {ixscan: {filter: null, pattern: {names: 1}}}}}");
+}
+
+// SERVER-13960: multikey with exact and inexact covered predicates.
+TEST_F(QueryPlannerTest, OrInexactCoveredMultikey) {
+ // true means multikey
+ addIndex(BSON("names" << 1), true);
+ runQuery(fromjson("{$or: [{names: 'dave'}, {names: /joe/}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$or: [{names: 'dave'}, {names: /joe/}]}, "
+ "node: {ixscan: {filter: null, pattern: {names: 1}}}}}");
+}
+
+// SERVER-13960: $elemMatch object with $or.
+TEST_F(QueryPlannerTest, OrElemMatchObject) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1), true);
+ runQuery(fromjson(
+ "{$or: [{a: {$elemMatch: {b: {$lte: 1}}}},"
+ "{a: {$elemMatch: {b: {$gte: 4}}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{or: {nodes: ["
+ "{fetch: {filter: {a:{$elemMatch:{b:{$gte:4}}}}, node: "
+ "{ixscan: {filter: null, pattern: {'a.b': 1}}}}},"
+ "{fetch: {filter: {a:{$elemMatch:{b:{$lte:1}}}}, node: "
+ "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}]}}");
+}
+
+// SERVER-13960: $elemMatch object inside an $or, below an AND.
+TEST_F(QueryPlannerTest, OrElemMatchObjectBeneathAnd) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1), true);
+ runQuery(fromjson(
+ "{$or: [{'a.b': 0, a: {$elemMatch: {b: {$lte: 1}}}},"
+ "{a: {$elemMatch: {b: {$gte: 4}}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{or: {nodes: ["
+ "{fetch: {filter: {$and:[{a:{$elemMatch:{b:{$lte:1}}}},{'a.b':0}]},"
+ "node: {ixscan: {filter: null, pattern: {'a.b': 1}, "
+ "bounds: {'a.b': [[-Infinity,1,true,true]]}}}}},"
+ "{fetch: {filter: {a:{$elemMatch:{b:{$gte:4}}}}, node: "
+ "{ixscan: {filter: null, pattern: {'a.b': 1},"
+ "bounds: {'a.b': [[4,Infinity,true,true]]}}}}}]}}");
+}
+
+// SERVER-13960: $or below $elemMatch with an inexact covered predicate.
+TEST_F(QueryPlannerTest, OrBelowElemMatchInexactCovered) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {$or: [{b: 'x'}, {b: /z/}]}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a: {$elemMatch: {$or: [{b: 'x'}, {b: /z/}]}}},"
+ "node: {ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
+}
+
+// SERVER-13960: $in with exact and inexact covered predicates.
+TEST_F(QueryPlannerTest, OrWithExactAndInexact) {
+ addIndex(BSON("name" << 1));
+ runQuery(fromjson("{name: {$in: ['thomas', /^alexand(er|ra)/]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: {name: {$in: ['thomas', /^alexand(er|ra)/]}}, "
+ "pattern: {name: 1}}}}}");
+}
+
+// SERVER-13960: $in with exact, inexact covered, and inexact fetch predicates.
+TEST_F(QueryPlannerTest, OrWithExactAndInexact2) {
+ addIndex(BSON("name" << 1));
+ runQuery(fromjson(
+ "{$or: [{name: {$in: ['thomas', /^alexand(er|ra)/]}},"
+ "{name: {$exists: false}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$or: [{name: {$in: ['thomas', /^alexand(er|ra)/]}},"
+ "{name: {$exists: false}}]}, "
+ "node: {ixscan: {filter: null, pattern: {name: 1}}}}}");
+}
+
+// SERVER-13960: $in with exact, inexact covered, and inexact fetch predicates
+// over two indices.
+TEST_F(QueryPlannerTest, OrWithExactAndInexact3) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuery(fromjson(
+ "{$or: [{a: {$in: [/z/, /x/]}}, {a: 'w'},"
+ "{b: {$exists: false}}, {b: {$in: ['p']}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {filter: {$or:[{a:{$in:[/z/, /x/]}}, {a:'w'}]}, "
+ "pattern: {a: 1}}}, "
+ "{fetch: {filter: {$or:[{b:{$exists:false}}, {b:{$in:['p']}}]},"
+ "node: {ixscan: {filter: null, pattern: {b: 1}}}}}]}}}}");
+}
+
+//
+// Min/Max
+//
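+// The min/max options set the index bounds directly, so planning succeeds only when
+// there is an index whose key pattern (and, in the later tests, direction) is
+// compatible with the supplied min/max documents; otherwise the query is invalid.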
+
+TEST_F(QueryPlannerTest, MinValid) {
+ addIndex(BSON("a" << 1));
+ runQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), BSONObj());
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, MinWithoutIndex) {
+ runInvalidQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), BSONObj());
+}
+
+TEST_F(QueryPlannerTest, MinBadHint) {
+ addIndex(BSON("b" << 1));
+ runInvalidQueryHintMinMax(BSONObj(), fromjson("{b: 1}"), fromjson("{a: 1}"), BSONObj());
+}
+
+TEST_F(QueryPlannerTest, MaxValid) {
+ addIndex(BSON("a" << 1));
+ runQueryHintMinMax(BSONObj(), BSONObj(), BSONObj(), fromjson("{a: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, MinMaxSameValue) {
+ addIndex(BSON("a" << 1));
+ runQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), fromjson("{a: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, MaxWithoutIndex) {
+ runInvalidQueryHintMinMax(BSONObj(), BSONObj(), BSONObj(), fromjson("{a: 1}"));
+}
+
+TEST_F(QueryPlannerTest, MaxBadHint) {
+ addIndex(BSON("b" << 1));
+ runInvalidQueryHintMinMax(BSONObj(), fromjson("{b: 1}"), BSONObj(), fromjson("{a: 1}"));
+}
+
+TEST_F(QueryPlannerTest, MaxMinSort) {
+ addIndex(BSON("a" << 1));
+
+ // Run an empty query, sort {a: 1}, max/min arguments.
+ runQueryFull(BSONObj(),
+ fromjson("{a: 1}"),
+ BSONObj(),
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{a: 2}"),
+ fromjson("{a: 8}"),
+ false);
+
+ assertNumSolutions(1);
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, MaxMinReverseSort) {
+ addIndex(BSON("a" << 1));
+
+ // Run an empty query, sort {a: -1}, max/min arguments.
+ runQueryFull(BSONObj(),
+ fromjson("{a: -1}"),
+ BSONObj(),
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{a: 2}"),
+ fromjson("{a: 8}"),
+ false);
+
+ assertNumSolutions(1);
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: -1, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, MaxMinReverseIndexDir) {
+ addIndex(BSON("a" << -1));
+
+ // Because the index is descending, the min is numerically larger than the max.
+ runQueryFull(BSONObj(),
+ fromjson("{a: -1}"),
+ BSONObj(),
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{a: 8}"),
+ fromjson("{a: 2}"),
+ false);
+
+ assertNumSolutions(1);
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: 1, pattern: {a: -1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, MaxMinReverseIndexDirSort) {
+ addIndex(BSON("a" << -1));
+
+ // Min/max specifies a forward scan with bounds [{a: 8}, {a: 2}]. Asking for
+ // an ascending sort reverses the direction of the scan to [{a: 2}, {a: 8}].
+ runQueryFull(BSONObj(),
+ fromjson("{a: 1}"),
+ BSONObj(),
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{a: 8}"),
+ fromjson("{a: 2}"),
+ false);
+
+ assertNumSolutions(1);
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {filter: null, dir: -1,"
+ "pattern: {a: -1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, MaxMinNoMatchingIndexDir) {
+ addIndex(BSON("a" << -1));
+ runInvalidQueryHintMinMax(BSONObj(), fromjson("{a: 2}"), BSONObj(), fromjson("{a: 8}"));
+}
+
+TEST_F(QueryPlannerTest, MaxMinSelectCorrectlyOrderedIndex) {
+ // There are both ascending and descending indices on 'a'.
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("a" << -1));
+
+ // The ordering of min and max means that we *must* use the descending index.
+ runQueryFull(BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{a: 8}"),
+ fromjson("{a: 2}"),
+ false);
+
+ assertNumSolutions(1);
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: 1, pattern: {a: -1}}}}}");
+
+ // If we switch the ordering, then the min/max pairing requires the
+ // ascending index instead.
+ runQueryFull(BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{a: 2}"),
+ fromjson("{a: 8}"),
+ false);
+
+ assertNumSolutions(1);
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: 1, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, MaxMinBadHintSelectsReverseIndex) {
+ // There are both ascending and descending indices on 'a'.
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("a" << -1));
+
+ // A query hinting on {a: 1} is invalid when min is {a: 8} and max is {a: 2}, because
+ // this min/max pairing requires a descending index.
+ runInvalidQueryFull(BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ 0,
+ 0,
+ fromjson("{a: 1}"),
+ fromjson("{a: 8}"),
+ fromjson("{a: 2}"),
+ false);
+}
+
+
+//
+// $snapshot
+//
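+// A $snapshot query is answered by scanning the _id index, even though an index on
+// the predicate field is available.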
+
+TEST_F(QueryPlannerTest, Snapshot) {
+ addIndex(BSON("a" << 1));
+ runQuerySnapshot(fromjson("{a: {$gt: 0}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$gt:0}}, node: "
+ "{ixscan: {filter: null, pattern: {_id: 1}}}}}");
+}
+
+//
+// Tree operations that require simple tree rewriting.
+//
+
+TEST_F(QueryPlannerTest, AndOfAnd) {
+ addIndex(BSON("x" << 1));
+ runQuery(fromjson("{$and: [ {$and: [ {x: 2.5}]}, {x: {$gt: 1}}, {x: {$lt: 3}} ] }"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+}
+
+//
+// Logically equivalent queries
+//
+
+TEST_F(QueryPlannerTest, EquivalentAndsOne) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson("{$and: [{a: 1}, {b: {$all: [10, 20]}}]}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$and:[{a:1},{b:10},{b:20}]}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1, b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, EquivalentAndsTwo) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson("{$and: [{a: 1, b: 10}, {a: 1, b: 20}]}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$and:[{a:1},{a:1},{b:10},{b:20}]}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1, b: 1}}}}}");
+}
+
+//
+// Covering
+//
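+// A plan is "covered" when the requested projection can be computed from the index
+// keys alone, so no FETCH stage is needed; these tests check which projections
+// qualify.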
+
+TEST_F(QueryPlannerTest, BasicCovering) {
+ addIndex(BSON("x" << 1));
+ // query, sort, proj
+ runQuerySortProj(fromjson("{ x : {$gt: 1}}"), BSONObj(), fromjson("{_id: 0, x: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, x: 1}, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, x: 1}, node: "
+ "{cscan: {dir: 1, filter: {x:{$gt:1}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, DottedFieldCovering) {
+ addIndex(BSON("a.b" << 1));
+ runQuerySortProj(fromjson("{'a.b': 5}"), BSONObj(), fromjson("{_id: 0, 'a.b': 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, 'a.b': 1}, node: "
+ "{cscan: {dir: 1, filter: {'a.b': 5}}}}}");
+ // SERVER-2104
+ // assertSolutionExists("{proj: {spec: {_id: 0, 'a.b': 1}, node: {'a.b': 1}}}");
+}
+
+TEST_F(QueryPlannerTest, IdCovering) {
+ runQuerySortProj(fromjson("{_id: {$gt: 10}}"), BSONObj(), fromjson("{_id: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 1}, node: "
+ "{cscan: {dir: 1, filter: {_id: {$gt: 10}}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 1}, node: {ixscan: "
+ "{filter: null, pattern: {_id: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ProjNonCovering) {
+ addIndex(BSON("x" << 1));
+ runQuerySortProj(fromjson("{ x : {$gt: 1}}"), BSONObj(), fromjson("{x: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {x: 1}, node: {cscan: "
+ "{dir: 1, filter: {x: {$gt: 1}}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {x: 1}, node: {fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {x: 1}}}}}}}");
+}
+
+//
+// Basic sort
+//
+
+TEST_F(QueryPlannerTest, BasicSort) {
+ addIndex(BSON("x" << 1));
+ runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CantUseHashedIndexToProvideSort) {
+ addIndex(BSON("x"
+ << "hashed"));
+ runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CantUseHashedIndexToProvideSortWithIndexablePred) {
+ addIndex(BSON("x"
+ << "hashed"));
+ runQuerySortProj(BSON("x" << BSON("$in" << BSON_ARRAY(0 << 1))), BSON("x" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, node: "
+ "{fetch: {node: "
+ "{ixscan: {pattern: {x: 'hashed'}}}}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, node: "
+ "{cscan: {dir: 1, filter: {x: {$in: [0, 1]}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CantUseTextIndexToProvideSort) {
+ addIndex(BSON("x" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {}}}}}");
+}
+
+TEST_F(QueryPlannerTest, BasicSortWithIndexablePred) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuerySortProj(fromjson("{ a : 5 }"), BSON("b" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 3U);
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {a: 5}}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 0, "
+ "node: {fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a: 5}, node: {ixscan: "
+ "{filter: null, pattern: {b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, BasicSortBooleanIndexKeyPattern) {
+ addIndex(BSON("a" << true));
+ runQuerySortProj(fromjson("{ a : 5 }"), BSON("a" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {a: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {a: 5}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: true}}}}}");
+}
+
+// SERVER-14070
+TEST_F(QueryPlannerTest, CompoundIndexWithEqualityPredicatesProvidesSort) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuerySortProj(fromjson("{a: 1, b: 1}"), fromjson("{b: 1}"), BSONObj());
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {filter: null,"
+ "pattern: {a: 1, b: 1}, "
+ "bounds: {a:[[1,1,true,true]], b:[[1,1,true,true]]}}}}}");
+}
+
+//
+// Sort with limit and/or skip
+//
+
+TEST_F(QueryPlannerTest, SortLimit) {
+ // Negative limit indicates hard limit - see lite_parsed_query.cpp
+ runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 0, -3);
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{sort: {pattern: {a: 1}, limit: 3, "
+ "node: {cscan: {dir: 1}}}}");
+}
+
+TEST_F(QueryPlannerTest, SortSkip) {
+ runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, 0);
+ assertNumSolutions(1U);
+ // If only skip is provided, do not limit sort.
+ assertSolutionExists(
+ "{skip: {n: 2, node: "
+ "{sort: {pattern: {a: 1}, limit: 0, "
+ "node: {cscan: {dir: 1}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, SortSkipLimit) {
+ runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, -3);
+ assertNumSolutions(1U);
+ // Limit in sort node should be adjusted by skip count
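+ // Here skip is 2 and the hard limit is 3, so the sort must produce 2 + 3 = 5 results.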
+ assertSolutionExists(
+ "{skip: {n: 2, node: "
+ "{sort: {pattern: {a: 1}, limit: 5, "
+ "node: {cscan: {dir: 1}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, SortSoftLimit) {
+ runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 0, 3);
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{sort: {pattern: {a: 1}, limit: 3, "
+ "node: {cscan: {dir: 1}}}}");
+}
+
+TEST_F(QueryPlannerTest, SortSkipSoftLimit) {
+ runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, 3);
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{skip: {n: 2, node: "
+ "{sort: {pattern: {a: 1}, limit: 5, "
+ "node: {cscan: {dir: 1}}}}}}");
+}
+
+//
+// Sort elimination
+//
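+// "Sort elimination" means the SORT stage is dropped because an index scan already
+// returns documents in the requested order, as in the indexed solutions below.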
+
+TEST_F(QueryPlannerTest, BasicSortElim) {
+ addIndex(BSON("x" << 1));
+ // query, sort, proj
+ runQuerySortProj(fromjson("{ x : {$gt: 1}}"), fromjson("{x: 1}"), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {x: {$gt: 1}}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, SortElimCompound) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuerySortProj(fromjson("{ a : 5 }"), BSON("b" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {a: 5}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1, b: 1}}}}}");
+}
+
+// SERVER-13611: test that sort elimination still works if there are
+// trailing fields in the index.
+TEST_F(QueryPlannerTest, SortElimTrailingFields) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ runQuerySortProj(fromjson("{a: 5}"), BSON("b" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {a: 5}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1, b: 1, c: 1}}}}}");
+}
+
+// Sort elimination with trailing fields where the sort direction is descending.
+TEST_F(QueryPlannerTest, SortElimTrailingFieldsReverse) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
+ runQuerySortProj(fromjson("{a: 5, b: 6}"), BSON("c" << -1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {c: -1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {a: 5, b: 6}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, dir: -1, pattern: {a: 1, b: 1, c: 1, d: 1}}}}}");
+}
+
+//
+// Basic compound
+//
+
+TEST_F(QueryPlannerTest, BasicCompound) {
+ addIndex(BSON("x" << 1 << "y" << 1));
+ runQuery(fromjson("{ x : 5, y: 10}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1, y: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundMissingField) {
+ addIndex(BSON("x" << 1 << "y" << 1 << "z" << 1));
+ runQuery(fromjson("{ x : 5, z: 10}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {x: 1, y: 1, z: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundFieldsOrder) {
+ addIndex(BSON("x" << 1 << "y" << 1 << "z" << 1));
+ runQuery(fromjson("{ x : 5, z: 10, y:1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1, y: 1, z: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CantUseCompound) {
+ addIndex(BSON("x" << 1 << "y" << 1));
+ runQuery(fromjson("{ y: 10}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {y: 10}}}");
+}
+
+//
+// $in
+//
+
+TEST_F(QueryPlannerTest, InBasic) {
+ addIndex(fromjson("{a: 1}"));
+ runQuery(fromjson("{a: {$in: [1, 2]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a: {$in: [1, 2]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {pattern: {a: 1}}}}}");
+}
+
+// Logically equivalent to the preceding $in query.
+// Indexed solution should be the same.
+TEST_F(QueryPlannerTest, InBasicOrEquivalent) {
+ addIndex(fromjson("{a: 1}"));
+ runQuery(fromjson("{$or: [{a: 1}, {a: 2}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$or: [{a: 1}, {a: 2}]}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, InSparseIndex) {
+ addIndex(fromjson("{a: 1}"),
+ false, // multikey
+ true); // sparse
+ runQuery(fromjson("{a: {$in: [null]}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a: {$in: [null]}}}}");
+}
+
+TEST_F(QueryPlannerTest, InCompoundIndexFirst) {
+ addIndex(fromjson("{a: 1, b: 1}"));
+ runQuery(fromjson("{a: {$in: [1, 2]}, b: 3}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {b: 3, a: {$in: [1, 2]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
+}
+
+// Logically equivalent to the preceding $in query.
+// Indexed solution should be the same.
+// Currently fails - pre-requisite to SERVER-12024
+/*
+TEST_F(QueryPlannerTest, InCompoundIndexFirstOrEquivalent) {
+ addIndex(fromjson("{a: 1, b: 1}"));
+ runQuery(fromjson("{$and: [{$or: [{a: 1}, {a: 2}]}, {b: 3}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$and: [{$or: [{a: 1}, {a: 2}]}, {b: 3}]}}}");
+ assertSolutionExists("{fetch: {filter: null, "
+ "node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
+}
+*/
+
+TEST_F(QueryPlannerTest, InCompoundIndexLast) {
+ addIndex(fromjson("{a: 1, b: 1}"));
+ runQuery(fromjson("{a: 3, b: {$in: [1, 2]}}"));
+
+ assertNumSolutions(2U);
+ // TODO: update filter in cscan solution when SERVER-12024 is implemented
+ assertSolutionExists("{cscan: {dir: 1, filter: {a: 3, b: {$in: [1, 2]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
+}
+
+// Logically equivalent to the preceding $in query.
+// Indexed solution should be the same.
+// Currently fails - pre-requisite to SERVER-12024
+/*
+TEST_F(QueryPlannerTest, InCompoundIndexLastOrEquivalent) {
+ addIndex(fromjson("{a: 1, b: 1}"));
+ runQuery(fromjson("{$and: [{a: 3}, {$or: [{b: 1}, {b: 2}]}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$and: [{a: 3}, {$or: [{b: 1}, {b: 2}]}]}}}");
+ assertSolutionExists("{fetch: {filter: null, "
+ "node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
+}
+*/
+
+// SERVER-1205
+TEST_F(QueryPlannerTest, InWithSort) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}}"), BSON("b" << 1), BSONObj(), 0, 1);
+
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 1, "
+ "node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a: 1, b: 1}}}, {ixscan: {pattern: {a: 1, b: 1}}}]}}}}");
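+ // The two ixscans above come from "exploding" the $in into one point-interval scan
+ // per value ({a: 1} and {a: 2}); each scan is already ordered by b, so MERGE_SORT
+ // can provide the sort without a blocking SORT stage.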
+}
+
+// SERVER-1205
+TEST_F(QueryPlannerTest, InWithoutSort) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ // No sort means we don't bother to blow up the bounds.
+ runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}}"), BSONObj(), BSONObj(), 0, 1);
+
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
+}
+
+// SERVER-1205
+TEST_F(QueryPlannerTest, ManyInWithSort) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
+ runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}, b:{$in:[1,2]}, c:{$in:[1,2]}}"),
+ BSON("d" << 1),
+ BSONObj(),
+ 0,
+ 1);
+
+ assertSolutionExists(
+ "{sort: {pattern: {d: 1}, limit: 1, "
+ "node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
+ "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
+ "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
+ "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
+ "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
+ "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}}]}}}}");
+}
+
+// SERVER-1205
+TEST_F(QueryPlannerTest, TooManyToExplode) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
+ runQuerySortProjSkipLimit(fromjson(
+ "{a: {$in: [1,2,3,4,5,6]},"
+ "b:{$in:[1,2,3,4,5,6,7,8]},"
+ "c:{$in:[1,2,3,4,5,6,7,8]}}"),
+ BSON("d" << 1),
+ BSONObj(),
+ 0,
+ 1);
+
+ // We cap the # of ixscans we're willing to create.
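+ // Exploding here would require 6 * 8 * 8 = 384 point-interval scans (one per
+ // combination of $in values), which exceeds that cap, so the planner falls back to
+ // the blocking-sort plans below.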
+ assertNumSolutions(2);
+ assertSolutionExists(
+ "{sort: {pattern: {d: 1}, limit: 1, "
+ "node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {d: 1}, limit: 1, node: "
+ "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CantExplodeMetaSort) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c"
+ << "text"));
+ runQuerySortProj(fromjson("{a: {$in: [1, 2]}, b: {$in: [3, 4]}}"),
+ fromjson("{c: {$meta: 'textScore'}}"),
+ fromjson("{c: {$meta: 'textScore'}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{proj: {spec: {c:{$meta:'textScore'}}, node: "
+ "{sort: {pattern: {c:{$meta:'textScore'}}, limit: 0, node: "
+ "{cscan: {filter: {a:{$in:[1,2]},b:{$in:[3,4]}}, dir: 1}}}}}}");
+}
+
+// SERVER-13618: test that exploding scans for sort works even
+// if we must reverse the scan direction.
+TEST_F(QueryPlannerTest, ExplodeMustReverseScans) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
+ runQuerySortProj(fromjson("{a: {$in: [1, 2]}, b: {$in: [3, 4]}}"), BSON("c" << -1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {c: -1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a:1, b:1, c:1, d:1}}},"
+ "{ixscan: {pattern: {a:1, b:1, c:1, d:1}}},"
+ "{ixscan: {pattern: {a:1, b:1, c:1, d:1}}},"
+ "{ixscan: {pattern: {a:1, b:1, c:1, d:1}}}]}}}}");
+}
+
+// SERVER-13618
+TEST_F(QueryPlannerTest, ExplodeMustReverseScans2) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << -1));
+ runQuerySortProj(fromjson("{a: {$in: [1, 2]}, b: {$in: [3, 4]}}"), BSON("c" << 1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a:1, b:1, c:-1}}},"
+ "{ixscan: {pattern: {a:1, b:1, c:-1}}},"
+ "{ixscan: {pattern: {a:1, b:1, c:-1}}},"
+ "{ixscan: {pattern: {a:1, b:1, c:-1}}}]}}}}");
+}
+
+// SERVER-13752: don't try to explode if the ordered interval list for
+// the leading field of the compound index is empty.
+TEST_F(QueryPlannerTest, CantExplodeWithEmptyBounds) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuerySortProj(fromjson("{a: {$in: []}}"), BSON("b" << 1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {b:1}, limit: 0, node: "
+ "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}}}}}}}");
+}
+
+// SERVER-13752
+TEST_F(QueryPlannerTest, CantExplodeWithEmptyBounds2) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ runQuerySortProj(fromjson("{a: {$gt: 3, $lt: 0}}"), BSON("b" << 1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {b:1}, limit: 0, node: "
+ "{fetch: {node: {ixscan: {pattern: {a:1,b:1,c:1}}}}}}}");
+}
+
+// SERVER-13754: exploding an $or
+TEST_F(QueryPlannerTest, ExplodeOrForSort) {
+ addIndex(BSON("a" << 1 << "c" << 1));
+ addIndex(BSON("b" << 1 << "c" << 1));
+
+ runQuerySortProj(fromjson("{$or: [{a: 1}, {a: 2}, {b: 2}]}"), BSON("c" << 1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {bounds: {a: [[1,1,true,true]], "
+ "c: [['MinKey','MaxKey',true,true]]},"
+ "pattern: {a:1, c:1}}},"
+ "{ixscan: {bounds: {a: [[2,2,true,true]], "
+ "c: [['MinKey','MaxKey',true,true]]},"
+ "pattern: {a:1, c:1}}},"
+ "{ixscan: {bounds: {b: [[2,2,true,true]], "
+ "c: [['MinKey','MaxKey',true,true]]},"
+ "pattern: {b:1, c:1}}}]}}}}");
+}
+
+// SERVER-13754: exploding an $or
+TEST_F(QueryPlannerTest, ExplodeOrForSort2) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ addIndex(BSON("d" << 1 << "c" << 1));
+
+ runQuerySortProj(
+ fromjson("{$or: [{a: 1, b: {$in: [1, 2]}}, {d: 3}]}"), BSON("c" << 1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {bounds: {a: [[1,1,true,true]], b: [[1,1,true,true]],"
+ "c: [['MinKey','MaxKey',true,true]]},"
+ "pattern: {a:1, b:1, c:1}}},"
+ "{ixscan: {bounds: {a: [[1,1,true,true]], b: [[2,2,true,true]],"
+ "c: [['MinKey','MaxKey',true,true]]},"
+ "pattern: {a:1, b:1, c:1}}},"
+ "{ixscan: {bounds: {d: [[3,3,true,true]], "
+ "c: [['MinKey','MaxKey',true,true]]},"
+ "pattern: {d:1, c:1}}}]}}}}");
+}
+
+// SERVER-13754: an $or that can't be exploded, because one clause of the
+// $or does not provide the sort, even after explosion.
+TEST_F(QueryPlannerTest, CantExplodeOrForSort) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ addIndex(BSON("d" << 1 << "c" << 1));
+
+ runQuerySortProj(fromjson("{$or: [{a: {$in: [1, 2]}}, {d: 3}]}"), BSON("c" << 1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {c: 1}, limit: 0, node: "
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {pattern: {a: 1, b: 1, c: 1}}},"
+ "{ixscan: {pattern: {d: 1, c: 1}}}]}}}}}}");
+}
+
+// SERVER-15286: Make sure that at least the explodeForSort() path bails out
+// when it finds that there is no field with a union of point intervals to explode.
+// We could convert this into a MERGE_SORT plan, but we don't yet do this
+// optimization.
+TEST_F(QueryPlannerTest, CantExplodeOrForSort2) {
+ addIndex(BSON("a" << 1));
+
+ runQuerySortProj(fromjson("{$or: [{a: {$gt: 1, $lt: 3}}, {a: {$gt: 6, $lt: 10}}]}"),
+ BSON("a" << -1),
+ BSONObj());
+
+ assertNumSolutions(3U);
+ assertSolutionExists("{sort: {pattern: {a: -1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {a: -1}, limit: 0, node: "
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {pattern: {a: 1}, bounds: "
+ "{a: [[1,3,false,false]]}}},"
+ "{ixscan: {pattern: {a: 1}, bounds: "
+ "{a: [[6,10,false,false]]}}}]}}}}}}");
+}
+
+// SERVER-13754: too many scans in an $or explosion.
+TEST_F(QueryPlannerTest, TooManyToExplodeOr) {
+ addIndex(BSON("a" << 1 << "e" << 1));
+ addIndex(BSON("b" << 1 << "e" << 1));
+ addIndex(BSON("c" << 1 << "e" << 1));
+ addIndex(BSON("d" << 1 << "e" << 1));
+ runQuerySortProj(fromjson(
+ "{$or: [{a: {$in: [1,2,3,4,5,6]},"
+ "b: {$in: [1,2,3,4,5,6]}},"
+ "{c: {$in: [1,2,3,4,5,6]},"
+ "d: {$in: [1,2,3,4,5,6]}}]}"),
+ BSON("e" << 1),
+ BSONObj());
+
+ // We cap the # of ixscans we're willing to create, so we don't get explosion. Instead
+ // we get 5 different solutions which all use a blocking sort.
+ assertNumSolutions(5U);
+ assertSolutionExists("{sort: {pattern: {e: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {e: 1}, limit: 0, node: "
+ "{or: {nodes: ["
+ "{fetch: {node: {ixscan: {pattern: {a: 1, e: 1}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {c: 1, e: 1}}}}}]}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {e: 1}, limit: 0, node: "
+ "{or: {nodes: ["
+ "{fetch: {node: {ixscan: {pattern: {b: 1, e: 1}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {c: 1, e: 1}}}}}]}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {e: 1}, limit: 0, node: "
+ "{or: {nodes: ["
+ "{fetch: {node: {ixscan: {pattern: {a: 1, e: 1}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {d: 1, e: 1}}}}}]}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {e: 1}, limit: 0, node: "
+ "{or: {nodes: ["
+ "{fetch: {node: {ixscan: {pattern: {b: 1, e: 1}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {d: 1, e: 1}}}}}]}}}}");
+}
+
+// SERVER-15696: Make sure explodeForSort copies filters on IXSCAN stages to all of the
+// scans resulting from the explode. Regex is the easiest way to have the planner create
+// an index scan which filters using the index key.
+TEST_F(QueryPlannerTest, ExplodeIxscanWithFilter) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+
+ runQuerySortProj(fromjson(
+ "{$and: [{b: {$regex: 'foo', $options: 'i'}},"
+ "{a: {$in: [1, 2]}}]}"),
+ BSON("b" << 1),
+ BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a:1, b:1},"
+ "filter: {b: {$regex: 'foo', $options: 'i'}}}},"
+ "{ixscan: {pattern: {a:1, b:1},"
+ "filter: {b: {$regex: 'foo', $options: 'i'}}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, InWithSortAndLimitTrailingField) {
+ addIndex(BSON("a" << 1 << "b" << -1 << "c" << 1));
+ runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}, b: {$gte: 0}}"),
+ fromjson("{b: -1}"),
+ BSONObj(), // no projection
+ 0, // no skip
+ -1); // .limit(1)
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{sort: {pattern: {b:-1}, limit: 1, "
+ "node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{limit: {n: 1, node: {fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a:1,b:-1,c:1}}}, "
+ " {ixscan: {pattern: {a:1,b:-1,c:1}}}]}}}}}}");
+}
+
+//
+// Multiple solutions
+//
+
+TEST_F(QueryPlannerTest, TwoPlans) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("a" << 1 << "b" << 1));
+
+ runQuery(fromjson("{a:1, b:{$gt:2,$lt:2}}"));
+
+ // 2 indexed solns and one non-indexed
+ ASSERT_EQUALS(getNumSolutions(), 3U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$and:[{b:{$lt:2}},{a:1},{b:{$gt:2}}]}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and:[{b:{$lt:2}},{b:{$gt:2}}]}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1, b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, TwoPlansElemMatch) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ addIndex(BSON("arr.x" << 1 << "a" << 1));
+
+ runQuery(fromjson(
+ "{arr: { $elemMatch : { x : 5 , y : 5 } },"
+ " a : 55 , b : { $in : [ 1 , 5 , 8 ] } }"));
+
+ // 2 indexed solns and one non-indexed
+ ASSERT_EQUALS(getNumSolutions(), 3U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}, bounds: "
+ "{a: [[55,55,true,true]], b: [[1,1,true,true], "
+ "[5,5,true,true], [8,8,true,true]]}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{arr:{$elemMatch:{x:5,y:5}}},"
+ "{b:{$in:[1,5,8]}}]}, "
+ "node: {ixscan: {pattern: {'arr.x':1,a:1}, bounds: "
+ "{'arr.x': [[5,5,true,true]], 'a':[[55,55,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundAndNonCompoundIndices) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("a" << 1 << "b" << 1), true);
+ runQuery(fromjson("{a: 1, b: {$gt: 2, $lt: 2}}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 3U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and:[{b:{$lt:2}},{b:{$gt:2}}]}, node: "
+ "{ixscan: {pattern: {a:1}, bounds: {a: [[1,1,true,true]]}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {b:{$gt:2}}, node: "
+ "{ixscan: {pattern: {a:1,b:1}, bounds: "
+ "{a: [[1,1,true,true]], b: [[-Infinity,2,true,false]]}}}}}");
+}
+
+//
+// Sort orders
+//
+
+// SERVER-1205.
+TEST_F(QueryPlannerTest, MergeSort) {
+ addIndex(BSON("a" << 1 << "c" << 1));
+ addIndex(BSON("b" << 1 << "c" << 1));
+ runQuerySortProj(fromjson("{$or: [{a:1}, {b:1}]}"), fromjson("{c:1}"), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a: 1, c: 1}}}, {ixscan: {pattern: {b: 1, c: 1}}}]}}}}");
+}
+
+// SERVER-1205 as well.
+TEST_F(QueryPlannerTest, NoMergeSortIfNoSortWanted) {
+ addIndex(BSON("a" << 1 << "c" << 1));
+ addIndex(BSON("b" << 1 << "c" << 1));
+ runQuerySortProj(fromjson("{$or: [{a:1}, {b:1}]}"), BSONObj(), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$or: [{a:1}, {b:1}]}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a: 1, c: 1}}}, "
+ "{ixscan: {filter: null, pattern: {b: 1, c: 1}}}]}}}}");
+}
+
+// Basic "keep sort in mind with an OR"
+TEST_F(QueryPlannerTest, MergeSortEvenIfSameIndex) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuerySortProj(fromjson("{$or: [{a:1}, {a:7}]}"), fromjson("{b:1}"), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ // TODO the second solution should be mergeSort rather than just sort
+}
+
+TEST_F(QueryPlannerTest, ReverseScanForSort) {
+ addIndex(BSON("_id" << 1));
+ runQuerySortProj(BSONObj(), fromjson("{_id: -1}"), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{sort: {pattern: {_id: -1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {_id: 1}}}}}");
+}
+
+//
+// Hint tests
+//
+
+TEST_F(QueryPlannerTest, NaturalHint) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuerySortHint(BSON("a" << 1), BSON("b" << 1), BSON("$natural" << 1));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 0, node: "
+ "{cscan: {filter: {a: 1}, dir: 1}}}}");
+}
+
+// Test $natural sort and its interaction with $natural hint.
+TEST_F(QueryPlannerTest, NaturalSortAndHint) {
+ addIndex(BSON("x" << 1));
+
+ // Non-empty query, -1 sort, no hint.
+ runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << -1), BSONObj());
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: -1}}");
+
+ // Non-empty query, 1 sort, no hint.
+ runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << 1), BSONObj());
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+
+ // Non-empty query, -1 sort, -1 hint.
+ runQuerySortHint(
+ fromjson("{x: {$exists: true}}"), BSON("$natural" << -1), BSON("$natural" << -1));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: -1}}");
+
+ // Non-empty query, 1 sort, -1 hint.
+ runQuerySortHint(
+ fromjson("{x: {$exists: true}}"), BSON("$natural" << 1), BSON("$natural" << -1));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+
+ // Non-empty query, -1 sort, 1 hint.
+ runQuerySortHint(
+ fromjson("{x: {$exists: true}}"), BSON("$natural" << -1), BSON("$natural" << 1));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: -1}}");
+
+ // Non-empty query, 1 sort, 1 hint.
+ runQuerySortHint(
+ fromjson("{x: {$exists: true}}"), BSON("$natural" << 1), BSON("$natural" << 1));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+
+ // Empty query, -1 sort, no hint.
+ runQuerySortHint(BSONObj(), BSON("$natural" << -1), BSONObj());
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: -1}}");
+
+ // Empty query, 1 sort, no hint.
+ runQuerySortHint(BSONObj(), BSON("$natural" << 1), BSONObj());
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+
+ // Empty query, -1 sort, -1 hint.
+ runQuerySortHint(BSONObj(), BSON("$natural" << -1), BSON("$natural" << -1));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: -1}}");
+
+ // Empty query, 1 sort, -1 hint.
+ runQuerySortHint(BSONObj(), BSON("$natural" << 1), BSON("$natural" << -1));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+
+ // Empty query, -1 sort, 1 hint.
+ runQuerySortHint(BSONObj(), BSON("$natural" << -1), BSON("$natural" << 1));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: -1}}");
+
+ // Empty query, 1 sort, 1 hint.
+ runQuerySortHint(BSONObj(), BSON("$natural" << 1), BSON("$natural" << 1));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+TEST_F(QueryPlannerTest, HintOverridesNaturalSort) {
+ addIndex(BSON("x" << 1));
+ runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << -1), BSON("x" << 1));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {x:{$exists:true}}, node: "
+ "{ixscan: {filter: null, pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, HintValid) {
+ addIndex(BSON("a" << 1));
+ runQueryHint(BSONObj(), fromjson("{a: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, HintValidWithPredicate) {
+ addIndex(BSON("a" << 1));
+ runQueryHint(fromjson("{a: {$gt: 1}}"), fromjson("{a: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, HintValidWithSort) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuerySortHint(fromjson("{a: 100, b: 200}"), fromjson("{b: 1}"), fromjson("{a: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 0, node: "
+ "{fetch: {filter: {b: 200}, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, HintElemMatch) {
+ // true means multikey
+ addIndex(fromjson("{'a.b': 1}"), true);
+ runQueryHint(fromjson("{'a.b': 1, a: {$elemMatch: {b: 2}}}"), fromjson("{'a.b': 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{a:{$elemMatch:{b:2}}}, {'a.b': 1}]}, "
+ "node: {ixscan: {filter: null, pattern: {'a.b': 1}, bounds: "
+ "{'a.b': [[2, 2, true, true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, HintInvalid) {
+ addIndex(BSON("a" << 1));
+ runInvalidQueryHint(BSONObj(), fromjson("{b: 1}"));
+}
+
+//
+// Sparse indices, SERVER-8067
+// Each index in this block of tests is sparse.
+//
+
+TEST_F(QueryPlannerTest, SparseIndexIgnoreForSort) {
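+    // false means not multikey, true means sparse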
+ addIndex(fromjson("{a: 1}"), false, true);
+ runQuerySortProj(BSONObj(), fromjson("{a: 1}"), BSONObj());
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+}
+
+TEST_F(QueryPlannerTest, SparseIndexHintForSort) {
+ addIndex(fromjson("{a: 1}"), false, true);
+ runQuerySortHint(BSONObj(), fromjson("{a: 1}"), fromjson("{a: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, SparseIndexPreferCompoundIndexForSort) {
+ addIndex(fromjson("{a: 1}"), false, true);
+ addIndex(fromjson("{a: 1, b: 1}"));
+ runQuerySortProj(BSONObj(), fromjson("{a: 1}"), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1, b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, SparseIndexForQuery) {
+ addIndex(fromjson("{a: 1}"), false, true);
+ runQuerySortProj(fromjson("{a: 1}"), BSONObj(), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a: 1}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1}}}}}");
+}
+
+//
+// Regex
+//
+
+TEST_F(QueryPlannerTest, PrefixRegex) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{a: /^foo/}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a: /^foo/}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, PrefixRegexCovering) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{a: /^foo/}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{cscan: {dir: 1, filter: {a: /^foo/}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NonPrefixRegex) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{a: /foo/}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a: /foo/}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: "
+ "{ixscan: {filter: {a: /foo/}, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NonPrefixRegexCovering) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{a: /foo/}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{cscan: {dir: 1, filter: {a: /foo/}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{ixscan: {filter: {a: /foo/}, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NonPrefixRegexAnd) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson("{a: /foo/, b: 2}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$and: [{b: 2}, {a: /foo/}]}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: {a: /foo/}, pattern: {a: 1, b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NonPrefixRegexAndCovering) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuerySortProj(fromjson("{a: /foo/, b: 2}"), BSONObj(), fromjson("{_id: 0, a: 1, b: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1, b: 1}, node: "
+ "{cscan: {dir: 1, filter: {$and: [{b: 2}, {a: /foo/}]}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1, b: 1}, node: "
+ "{ixscan: {filter: {a: /foo/}, pattern: {a: 1, b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NonPrefixRegexOrCovering) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(
+ fromjson("{$or: [{a: /0/}, {a: /1/}]}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{cscan: {dir: 1, filter: {$or: [{a: /0/}, {a: /1/}]}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{ixscan: {filter: {$or: [{a: /0/}, {a: /1/}]}, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NonPrefixRegexInCovering) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{a: {$in: [/foo/, /bar/]}}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{cscan: {dir: 1, filter: {a:{$in:[/foo/,/bar/]}}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{ixscan: {filter: {a:{$in:[/foo/,/bar/]}}, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, TwoRegexCompoundIndexCovering) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuerySortProj(fromjson("{a: /0/, b: /1/}"), BSONObj(), fromjson("{_id: 0, a: 1, b: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1, b: 1}, node: "
+ "{cscan: {dir: 1, filter: {$and:[{a:/0/},{b:/1/}]}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1, b: 1}, node: "
+ "{ixscan: {filter: {$and:[{a:/0/},{b:/1/}]}, pattern: {a: 1, b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, TwoRegexSameFieldCovering) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(
+ fromjson("{$and: [{a: /0/}, {a: /1/}]}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{cscan: {dir: 1, filter: {$and:[{a:/0/},{a:/1/}]}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{ixscan: {filter: {$and:[{a:/0/},{a:/1/}]}, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ThreeRegexSameFieldCovering) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(
+ fromjson("{$and: [{a: /0/}, {a: /1/}, {a: /2/}]}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{cscan: {dir: 1, filter: {$and:[{a:/0/},{a:/1/},{a:/2/}]}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{ixscan: {filter: {$and:[{a:/0/},{a:/1/},{a:/2/}]}, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NonPrefixRegexMultikey) {
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+ runQuery(fromjson("{a: /foo/}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {filter: {a: /foo/}, dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a: /foo/}, node: {ixscan: "
+ "{pattern: {a: 1}, filter: null}}}}");
+}
+
+TEST_F(QueryPlannerTest, ThreeRegexSameFieldMultikey) {
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+ runQuery(fromjson("{$and: [{a: /0/}, {a: /1/}, {a: /2/}]}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {filter: {$and:[{a:/0/},{a:/1/},{a:/2/}]}, dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and:[{a:/0/},{a:/1/},{a:/2/}]}, node: {ixscan: "
+ "{pattern: {a: 1}, filter: null}}}}");
+}
+
+//
+// Negation
+//
+
+TEST_F(QueryPlannerTest, NegationIndexForSort) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{a: {$ne: 1}}"), fromjson("{a: 1}"), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1}, "
+ "bounds: {a: [['MinKey',1,true,false], "
+ "[1,'MaxKey',false,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegationTopLevel) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{a: {$ne: 1}}"), BSONObj(), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [['MinKey',1,true,false], "
+ "[1,'MaxKey',false,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegationOr) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{$or: [{a: 1}, {b: {$ne: 1}}]}"), BSONObj(), BSONObj());
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+TEST_F(QueryPlannerTest, NegationOrNotIn) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{$or: [{a: 1}, {b: {$nin: [1]}}]}"), BSONObj(), BSONObj());
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+TEST_F(QueryPlannerTest, NegationAndIndexOnEquality) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{$and: [{a: 1}, {b: {$ne: 1}}]}"), BSONObj(), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1},"
+ "bounds: {a: [[1,1,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegationAndIndexOnEqualityAndNegationBranches) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuerySortProj(fromjson("{$and: [{a: 1}, {b: 2}, {b: {$ne: 1}}]}"), BSONObj(), BSONObj());
+
+ assertNumSolutions(3U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1}, "
+ "bounds: {a: [[1,1,true,true]]}}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {b: 1}, "
+ "bounds: {b: [[2,2,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegationAndIndexOnInequality) {
+ addIndex(BSON("b" << 1));
+ runQuerySortProj(fromjson("{$and: [{a: 1}, {b: {$ne: 1}}]}"), BSONObj(), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:1}, node: {ixscan: {pattern: {b:1}, "
+ "bounds: {b: [['MinKey',1,true,false], "
+ "[1,'MaxKey',false,true]]}}}}}");
+}
+
+// Negated regexes don't use the index.
+TEST_F(QueryPlannerTest, NegationRegexPrefix) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{i: {$not: /^a/}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// Negated mods don't use the index
+TEST_F(QueryPlannerTest, NegationMod) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{i: {$not: {$mod: [2, 1]}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// Negated $type doesn't use the index
+TEST_F(QueryPlannerTest, NegationTypeOperator) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{i: {$not: {$type: 16}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// Negated $elemMatch value won't use the index
+TEST_F(QueryPlannerTest, NegationElemMatchValue) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{i: {$not: {$elemMatch: {$gt: 3, $lt: 10}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// Negated $elemMatch object won't use the index
+TEST_F(QueryPlannerTest, NegationElemMatchObject) {
+ addIndex(BSON("i.j" << 1));
+ runQuery(fromjson("{i: {$not: {$elemMatch: {j: 1}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// Negated $elemMatch object won't use the index
+TEST_F(QueryPlannerTest, NegationElemMatchObject2) {
+ addIndex(BSON("i.j" << 1));
+ runQuery(fromjson("{i: {$not: {$elemMatch: {j: {$ne: 1}}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// If a negation that can't use the index is ANDed with a predicate
+// that can use the index, then we can still use the index for the
+// latter predicate.
+TEST_F(QueryPlannerTest, NegationRegexWithIndexablePred) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{$and: [{i: {$not: /o/}}, {i: 2}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {i:1}, "
+ "bounds: {i: [[2,2,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegationCantUseSparseIndex) {
+ // false means not multikey, true means sparse
+ addIndex(BSON("i" << 1), false, true);
+ runQuery(fromjson("{i: {$ne: 4}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+TEST_F(QueryPlannerTest, NegationCantUseSparseIndex2) {
+ // false means not multikey, true means sparse
+ addIndex(BSON("i" << 1 << "j" << 1), false, true);
+ runQuery(fromjson("{i: 4, j: {$ne: 5}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {i:1,j:1}, bounds: "
+ "{i: [[4,4,true,true]], j: [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegatedRangeStrGT) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{i: {$not: {$gt: 'a'}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
+ "bounds: {i: [['MinKey','a',true,true], "
+ "[{},'MaxKey',true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegatedRangeStrGTE) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{i: {$not: {$gte: 'a'}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
+ "bounds: {i: [['MinKey','a',true,false], "
+ "[{},'MaxKey',true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegatedRangeIntGT) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{i: {$not: {$gt: 5}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
+ "bounds: {i: [['MinKey',5,true,true], "
+ "[Infinity,'MaxKey',false,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegatedRangeIntGTE) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{i: {$not: {$gte: 5}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
+ "bounds: {i: [['MinKey',5,true,false], "
+ "[Infinity,'MaxKey',false,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, TwoNegatedRanges) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson(
+ "{$and: [{i: {$not: {$lte: 'b'}}}, "
+ "{i: {$not: {$gte: 'f'}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
+ "bounds: {i: [['MinKey','',true,false], "
+ "['b','f',false,false], "
+ "[{},'MaxKey',true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, AndWithNestedNE) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{a: {$gt: -1, $lt: 1, $ne: 0}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [[-1,0,false,false], "
+ "[0,1,false,false]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegatePredOnCompoundIndex) {
+ addIndex(BSON("x" << 1 << "a" << 1));
+ runQuery(fromjson("{x: 1, a: {$ne: 1}, b: {$ne: 2}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {x:1,a:1}, bounds: "
+ "{x: [[1,1,true,true]], "
+ "a: [['MinKey',1,true,false], [1,'MaxKey',false,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NEOnMultikeyIndex) {
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+ runQuery(fromjson("{a: {$ne: 3}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$ne:3}}, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [['MinKey',3,true,false],"
+ "[3,'MaxKey',false,true]]}}}}}");
+}
+
+// In general, $nin (a negated $in) can make use of an index.
+TEST_F(QueryPlannerTest, NinUsesMultikeyIndex) {
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+ runQuery(fromjson("{a: {$nin: [4, 10]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$nin:[4,10]}}, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [['MinKey',4,true,false],"
+ "[4,10,false,false],"
+ "[10,'MaxKey',false,true]]}}}}}");
+}
+
+// But it can't if the $nin contains a regex because regex bounds can't
+// be complemented.
+TEST_F(QueryPlannerTest, NinCantUseMultikeyIndex) {
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+ runQuery(fromjson("{a: {$nin: [4, /foobar/]}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+//
+// Multikey indices
+//
+
+//
+// Index bounds related tests
+//
+
+TEST_F(QueryPlannerTest, CompoundIndexBoundsLastFieldMissing) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ runQuery(fromjson("{a: 5, b: {$gt: 7}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1, c: 1}, bounds: "
+ "{a: [[5,5,true,true]], b: [[7,Infinity,false,true]], "
+ " c: [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundIndexBoundsMiddleFieldMissing) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ runQuery(fromjson("{a: 1, c: {$lt: 3}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1, c: 1}, bounds: "
+ "{a: [[1,1,true,true]], b: [['MinKey','MaxKey',true,true]], "
+ " c: [[-Infinity,3,true,false]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundIndexBoundsRangeAndEquality) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson("{a: {$gt: 8}, b: 6}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}, bounds: "
+ "{a: [[8,Infinity,false,true]], b:[[6,6,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundIndexBoundsEqualityThenIn) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson("{a: 5, b: {$in: [2,6,11]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: "
+ "{a: 1, b: 1}, bounds: {a: [[5,5,true,true]], "
+ "b:[[2,2,true,true],[6,6,true,true],[11,11,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundIndexBoundsStringBounds) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson("{a: {$gt: 'foo'}, b: {$gte: 'bar'}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: "
+ "{a: 1, b: 1}, bounds: {a: [['foo',{},false,false]], "
+ "b:[['bar',{},true,false]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, IndexBoundsAndWithNestedOr) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$and: [{a: 1, $or: [{a: 2}, {a: 3}]}]}"));
+
+ // Given that the index over 'a' isn't multikey, we ideally won't generate any solutions
+ // since we know the query describes an empty set if 'a' isn't multikey. Any solutions
+ // below are "this is how it currently works" instead of "this is how it should work."
+
+ // It's kind of iffy to look for indexed solutions so we don't...
+ size_t matches = 0;
+ matches += numSolutionMatches(
+ "{cscan: {dir: 1, filter: "
+ "{$or: [{a: 2, a:1}, {a: 3, a:1}]}}}");
+ matches += numSolutionMatches(
+ "{cscan: {dir: 1, filter: "
+ "{$and: [{$or: [{a: 2}, {a: 3}]}, {a: 1}]}}}");
+ ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
+}
+
+TEST_F(QueryPlannerTest, IndexBoundsIndexedSort) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{$or: [{a: 1}, {a: 2}]}"), BSON("a" << 1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{sort: {pattern: {a:1}, limit: 0, node: "
+ "{cscan: {filter: {$or:[{a:1},{a:2}]}, dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {filter: null, "
+ "pattern: {a:1}, bounds: {a: [[1,1,true,true], [2,2,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, IndexBoundsUnindexedSort) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{$or: [{a: 1}, {a: 2}]}"), BSON("b" << 1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{sort: {pattern: {b:1}, limit: 0, node: "
+ "{cscan: {filter: {$or:[{a:1},{a:2}]}, dir: 1}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {b:1}, limit: 0, node: {fetch: "
+ "{filter: null, node: {ixscan: {filter: null, "
+ "pattern: {a:1}, bounds: {a: [[1,1,true,true], [2,2,true,true]]}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, IndexBoundsUnindexedSortHint) {
+ addIndex(BSON("a" << 1));
+ runQuerySortHint(fromjson("{$or: [{a: 1}, {a: 2}]}"), BSON("b" << 1), BSON("a" << 1));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{sort: {pattern: {b:1}, limit: 0, node: {fetch: "
+ "{filter: null, node: {ixscan: {filter: null, "
+ "pattern: {a:1}, bounds: {a: [[1,1,true,true], [2,2,true,true]]}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundIndexBoundsIntersectRanges) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ addIndex(BSON("a" << 1 << "c" << 1));
+ runQuery(fromjson("{a: {$gt: 1, $lt: 10}, c: {$gt: 1, $lt: 10}}"));
+
+ assertNumSolutions(3U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {a:1,b:1,c:1}, "
+ "bounds: {a: [[1,10,false,false]], "
+ "b: [['MinKey','MaxKey',true,true]], "
+ "c: [[1,10,false,false]]}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {a:1,c:1}, "
+ "bounds: {a: [[1,10,false,false]], "
+ "c: [[1,10,false,false]]}}}}}");
+}
+
+// Test that planner properly unionizes the index bounds for two negation
+// predicates (SERVER-13890).
+TEST_F(QueryPlannerTest, IndexBoundsOrOfNegations) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a: {$ne: 3}}, {a: {$ne: 4}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, BoundsTypeMinKeyMaxKey) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+
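+    // In BSON, type -1 is MinKey and type 127 is MaxKey.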
+ runQuery(fromjson("{a: {$type: -1}}"));
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1}, bounds:"
+ "{a: [['MinKey','MinKey',true,true]]}}}}}");
+
+ runQuery(fromjson("{a: {$type: 127}}"));
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1}, bounds:"
+ "{a: [['MaxKey','MaxKey',true,true]]}}}}}");
+}
+
+//
+// Tests related to building index bounds for multikey
+// indices, combined with compound and $elemMatch
+//
+
+// SERVER-12475: make sure that we compound bounds, even
+// for a multikey index.
+TEST_F(QueryPlannerTest, CompoundMultikeyBounds) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b" << 1), true);
+ runQuery(fromjson("{a: 1, b: 3}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {filter: {$and:[{a:1},{b:3}]}, dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {filter: null, "
+ "pattern: {a:1,b:1}, bounds: "
+ "{a: [[1,1,true,true]], b: [[3,3,true,true]]}}}}}");
+}
+
+// Make sure that we compound bounds but do not intersect bounds
+// for a compound multikey index.
+TEST_F(QueryPlannerTest, CompoundMultikeyBoundsNoIntersect) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b" << 1), true);
+ runQuery(fromjson("{a: 1, b: {$gt: 3, $lte: 5}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {b:{$gt:3}}, node: {ixscan: {filter: null, "
+ "pattern: {a:1,b:1}, bounds: "
+ "{a: [[1,1,true,true]], b: [[-Infinity,5,true,true]]}}}}}");
+}
+
+//
+// QueryPlannerParams option tests
+//
+
+TEST_F(QueryPlannerTest, NoBlockingSortsAllowedTest) {
+ params.options = QueryPlannerParams::NO_BLOCKING_SORT;
+ runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
+ assertNumSolutions(0U);
+
+ addIndex(BSON("x" << 1));
+
+ runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NoTableScanBasic) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ runQuery(BSONObj());
+ assertNumSolutions(0U);
+
+ addIndex(BSON("x" << 1));
+
+ runQuery(BSONObj());
+ assertNumSolutions(0U);
+
+ runQuery(fromjson("{x: {$gte: 0}}"));
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NoTableScanOrWithAndChild) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a: 20}, {$and: [{a:1}, {b:7}]}]}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a: 1}}}, "
+ "{fetch: {filter: {b: 7}, node: {ixscan: "
+ "{filter: null, pattern: {a: 1}}}}}]}}}}");
+}
+
+//
+// Index Intersection.
+//
+// We don't exhaustively check all plans here. Instead we check that there exists an
+// intersection plan. The blending of multi-index plans and single-index plans is under
+// development, but we want to make sure that we create a multi-index plan when we should.
+//
+
+TEST_F(QueryPlannerTest, IntersectBasicTwoPred) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuery(fromjson("{a:1, b:{$gt: 1}}"));
+
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {andHash: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, IntersectBasicTwoPredCompound) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1 << "c" << 1));
+ addIndex(BSON("b" << 1));
+ runQuery(fromjson("{a:1, b:1, c:1}"));
+
+ // There's an andSorted not andHash because the two seeks are point intervals.
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1, c:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
+}
+
+// SERVER-12196
+TEST_F(QueryPlannerTest, IntersectBasicTwoPredCompoundMatchesIdxOrder1) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuery(fromjson("{a:1, b:1}"));
+
+ assertNumSolutions(3U);
+
+ assertSolutionExists(
+ "{fetch: {filter: {b:1}, node: "
+ "{ixscan: {filter: null, pattern: {a:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:1}, node: "
+ "{ixscan: {filter: null, pattern: {b:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
+}
+
+// SERVER-12196
+TEST_F(QueryPlannerTest, IntersectBasicTwoPredCompoundMatchesIdxOrder2) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{a:1, b:1}"));
+
+ assertNumSolutions(3U);
+
+ assertSolutionExists(
+ "{fetch: {filter: {b:1}, node: "
+ "{ixscan: {filter: null, pattern: {a:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:1}, node: "
+ "{ixscan: {filter: null, pattern: {b:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, IntersectManySelfIntersections) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ // True means multikey.
+ addIndex(BSON("a" << 1), true);
+
+ // This one goes to 11.
+ runQuery(fromjson("{a:1, a:2, a:3, a:4, a:5, a:6, a:7, a:8, a:9, a:10, a:11}"));
+
+ // But this one only goes to 10.
+ assertSolutionExists(
+ "{fetch: {filter: {a:11}, node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 1
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 2
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 3
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 4
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 5
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 6
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 7
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 8
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 9
+ "{ixscan: {filter: null, pattern: {a:1}}}]}}}}"); // 10
+}
+
+TEST_F(QueryPlannerTest, IntersectSubtreeNodes) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+ addIndex(BSON("d" << 1));
+
+ runQuery(fromjson("{$or: [{a: 1}, {b: 1}], $or: [{c:1}, {d:1}]}"));
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {andHash: {nodes: ["
+ "{or: {nodes: [{ixscan:{filter:null, pattern:{a:1}}},"
+ "{ixscan:{filter:null, pattern:{b:1}}}]}},"
+ "{or: {nodes: [{ixscan:{filter:null, pattern:{c:1}}},"
+ "{ixscan:{filter:null, pattern:{d:1}}}]}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, IntersectSubtreeAndPred) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+ runQuery(fromjson("{a: 1, $or: [{b:1}, {c:1}]}"));
+
+    // This can be rewritten to $or: [{a:1, b:1}, {a:1, c:1}]. We don't look for the various
+ // single $or solutions as that's tested elsewhere. We look for the intersect solution,
+ // where each AND inside of the root OR is an and_sorted.
+ size_t matches = 0;
+ matches += numSolutionMatches(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {'a':1}}},"
+ "{ixscan: {filter: null, pattern: {'b':1}}}]}},"
+ "{andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {'a':1}}},"
+ "{ixscan: {filter: null, pattern: {'c':1}}}]}}]}}}}");
+ matches += numSolutionMatches(
+ "{fetch: {filter: null, node: {andHash: {nodes:["
+ "{or: {nodes: [{ixscan:{filter:null, pattern:{b:1}}},"
+ "{ixscan:{filter:null, pattern:{c:1}}}]}},"
+ "{ixscan:{filter: null, pattern:{a:1}}}]}}}}");
+ ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
+}
+
+TEST_F(QueryPlannerTest, IntersectElemMatch) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a.b" << 1));
+ addIndex(BSON("a.c" << 1));
+ runQuery(fromjson("{a : {$elemMatch: {b:1, c:1}}}"));
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{b:1, c:1}}},"
+ "node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {'a.b':1}}},"
+ "{ixscan: {filter: null, pattern: {'a.c':1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, IntersectSortFromAndHash) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuerySortProj(fromjson("{a: 1, b:{$gt: 1}}"), fromjson("{b:1}"), BSONObj());
+
+ // This provides the sort.
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {andHash: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
+
+ // Rearrange the preds, shouldn't matter.
+ runQuerySortProj(fromjson("{b: 1, a:{$lt: 7}}"), fromjson("{b:1}"), BSONObj());
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {andHash: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, IntersectCanBeVeryBig) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+ addIndex(BSON("d" << 1));
+ runQuery(fromjson(
+ "{$or: [{ 'a' : null, 'b' : 94, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 98, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 1, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 2, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 7, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 9, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 16, 'c' : null, 'd' : null }]}"));
+
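+    // The number of solutions is capped by the $or enumeration limit.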
+ assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
+}
+
+// Ensure that disabling AND_HASH intersection works properly.
+TEST_F(QueryPlannerTest, IntersectDisableAndHash) {
+ bool oldEnableHashIntersection = internalQueryPlannerEnableHashIntersection;
+
+ // Turn index intersection on but disable hash-based intersection.
+ internalQueryPlannerEnableHashIntersection = false;
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+
+ runQuery(fromjson("{a: {$gt: 1}, b: 1, c: 1}"));
+
+ // We should do an AND_SORT intersection of {b: 1} and {c: 1}, but no AND_HASH plans.
+ assertNumSolutions(4U);
+ assertSolutionExists(
+ "{fetch: {filter: {b: 1, c: 1}, node: {ixscan: "
+ "{pattern: {a: 1}, bounds: {a: [[1,Infinity,false,true]]}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$gt:1},c:1}, node: {ixscan: "
+ "{pattern: {b: 1}, bounds: {b: [[1,1,true,true]]}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$gt:1},b:1}, node: {ixscan: "
+ "{pattern: {c: 1}, bounds: {c: [[1,1,true,true]]}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$gt:1}}, node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {b:1}}},"
+ "{ixscan: {filter: null, pattern: {c:1}}}]}}}}");
+
+    // Restore the old value of the hash intersection switch.
+ internalQueryPlannerEnableHashIntersection = oldEnableHashIntersection;
+}
+
+//
+// Index intersection cases for SERVER-12825: make sure that
+// we don't generate an ixisect plan if a compound index is
+// available instead.
+//
+
+// SERVER-12825
+TEST_F(QueryPlannerTest, IntersectCompoundInsteadBasic) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson("{a: 1, b: 1}"));
+
+ assertNumSolutions(3U);
+ assertSolutionExists(
+ "{fetch: {filter: {b:1}, node: "
+ "{ixscan: {filter: null, pattern: {a:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:1}, node: "
+ "{ixscan: {filter: null, pattern: {b:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {a:1,b:1}}}}}");
+}
+
+// SERVER-12825
+TEST_F(QueryPlannerTest, IntersectCompoundInsteadThreeCompoundIndices) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1 << "b" << 1));
+ addIndex(BSON("c" << 1 << "d" << 1));
+ addIndex(BSON("a" << 1 << "c" << -1 << "b" << -1 << "d" << 1));
+ runQuery(fromjson("{a: 1, b: 1, c: 1, d: 1}"));
+
+ assertNumSolutions(3U);
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{c:1},{d:1}]}, node: "
+ "{ixscan: {filter: null, pattern: {a:1,b:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and:[{a:1},{b:1}]}, node: "
+ "{ixscan: {filter: null, pattern: {c:1,d:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {a:1,c:-1,b:-1,d:1}}}}}");
+}
+
+// SERVER-12825
+TEST_F(QueryPlannerTest, IntersectCompoundInsteadUnusedField) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ runQuery(fromjson("{a: 1, b: 1}"));
+
+ assertNumSolutions(3U);
+ assertSolutionExists(
+ "{fetch: {filter: {b:1}, node: "
+ "{ixscan: {filter: null, pattern: {a:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:1}, node: "
+ "{ixscan: {filter: null, pattern: {b:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {a:1,b:1,c:1}}}}}");
+}
+
+// SERVER-12825
+TEST_F(QueryPlannerTest, IntersectCompoundInsteadUnusedField2) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1 << "b" << 1));
+ addIndex(BSON("c" << 1 << "d" << 1));
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ runQuery(fromjson("{a: 1, c: 1}"));
+
+ assertNumSolutions(3U);
+ assertSolutionExists(
+ "{fetch: {filter: {c:1}, node: "
+ "{ixscan: {filter: null, pattern: {a:1,b:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:1}, node: "
+ "{ixscan: {filter: null, pattern: {c:1,d:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {a:1,b:1,c:1}}}}}");
+}
+
+//
+// Test that we add a KeepMutations when we should and that we don't add one when we shouldn't.
+//
+
+// Collection scan doesn't keep any state, so it can't produce flagged data.
+TEST_F(QueryPlannerTest, NoMutationsForCollscan) {
+ params.options = QueryPlannerParams::KEEP_MUTATIONS;
+ runQuery(fromjson(""));
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// Collscan + sort doesn't produce flagged data either.
+TEST_F(QueryPlannerTest, NoMutationsForSort) {
+ params.options = QueryPlannerParams::KEEP_MUTATIONS;
+ runQuerySortProj(fromjson(""), fromjson("{a:1}"), BSONObj());
+ assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+}
+
+// An index scan + fetch requires a keep node as it can flag data. Also make sure we put it in
+// the right place, under the sort.
+TEST_F(QueryPlannerTest, MutationsFromFetch) {
+ params.options = QueryPlannerParams::KEEP_MUTATIONS;
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{a: 5}"), fromjson("{b:1}"), BSONObj());
+ assertSolutionExists(
+ "{sort: {pattern: {b:1}, limit: 0, node: {keep: {node: "
+ "{fetch: {node: {ixscan: {pattern: {a:1}}}}}}}}}");
+}
+
+// Index scan w/covering doesn't require a keep node as there's no fetch.
+TEST_F(QueryPlannerTest, NoFetchNoKeep) {
+ params.options = QueryPlannerParams::KEEP_MUTATIONS;
+ addIndex(BSON("x" << 1));
+ // query, sort, proj
+ runQuerySortProj(fromjson("{ x : {$gt: 1}}"), BSONObj(), fromjson("{_id: 0, x: 1}"));
+
+    // A cscan solution exists, but we didn't set the param that says to include it.
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, x: 1}, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+}
+
+// No keep with geoNear.
+TEST_F(QueryPlannerTest, NoKeepWithGeoNear) {
+ params.options = QueryPlannerParams::KEEP_MUTATIONS;
+ addIndex(BSON("a"
+ << "2d"));
+ runQuery(fromjson("{a: {$near: [0,0], $maxDistance:0.3 }}"));
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{geoNear2d: {a: '2d'}}");
+}
+
+// No keep when we have an indexed sort.
+TEST_F(QueryPlannerTest, NoKeepWithIndexedSort) {
+ params.options = QueryPlannerParams::KEEP_MUTATIONS;
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}}"), BSON("b" << 1), BSONObj(), 0, 1);
+
+ // cscan solution exists but we didn't turn on the "always include a collscan."
+ assertNumSolutions(1);
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a: 1, b: 1}}}, {ixscan: {pattern: {a: 1, b: 1}}}]}}}}");
+}
+
+// Make sure a top-level $or hits the limiting number
+// of solutions that we are willing to consider.
+TEST_F(QueryPlannerTest, OrEnumerationLimit) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ // 6 $or clauses, each with 2 indexed predicates
+ // means 2^6 = 64 possibilities. We should hit the limit.
+ runQuery(fromjson(
+ "{$or: [{a: 1, b: 1},"
+ "{a: 2, b: 2},"
+ "{a: 3, b: 3},"
+ "{a: 4, b: 4},"
+ "{a: 5, b: 5},"
+ "{a: 6, b: 6}]}"));
+
+ assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
+}
+
+TEST_F(QueryPlannerTest, OrEnumerationLimit2) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+ addIndex(BSON("d" << 1));
+
+ // 3 $or clauses, and a few other preds. Each $or clause can
+ // generate up to the max number of allowed $or enumerations.
+ runQuery(fromjson(
+ "{$or: [{a: 1, b: 1, c: 1, d: 1},"
+ "{a: 2, b: 2, c: 2, d: 2},"
+ "{a: 3, b: 3, c: 3, d: 3}]}"));
+
+ assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
+}
+
+// SERVER-13104: test that we properly enumerate all solutions for nested $or.
+TEST_F(QueryPlannerTest, EnumerateNestedOr) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+
+ runQuery(fromjson("{d: 1, $or: [{a: 1, b: 1}, {c: 1}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{fetch: {filter: {d: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {b: 1}, node: {ixscan: {pattern: {a: 1}}}}},"
+ "{ixscan: {pattern: {c: 1}}}]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {d: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {a: 1}, node: {ixscan: {pattern: {b: 1}}}}},"
+ "{ixscan: {pattern: {c: 1}}}]}}}}");
+}
+
+// SERVER-13104: test that we properly enumerate all solutions for nested $or.
+TEST_F(QueryPlannerTest, EnumerateNestedOr2) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+ addIndex(BSON("d" << 1));
+ addIndex(BSON("e" << 1));
+ addIndex(BSON("f" << 1));
+
+ runQuery(fromjson("{a: 1, b: 1, $or: [{c: 1, d: 1}, {e: 1, f: 1}]}"));
+
+ assertNumSolutions(6U);
+
+ // Four possibilities from indexing the $or.
+ assertSolutionExists(
+ "{fetch: {filter: {a: 1, b: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {d: 1}, node: {ixscan: {pattern: {c: 1}}}}},"
+ "{fetch: {filter: {f: 1}, node: {ixscan: {pattern: {e: 1}}}}}"
+ "]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a: 1, b: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}},"
+ "{fetch: {filter: {f: 1}, node: {ixscan: {pattern: {e: 1}}}}}"
+ "]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a: 1, b: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {d: 1}, node: {ixscan: {pattern: {c: 1}}}}},"
+ "{fetch: {filter: {e: 1}, node: {ixscan: {pattern: {f: 1}}}}}"
+ "]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a: 1, b: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}},"
+ "{fetch: {filter: {e: 1}, node: {ixscan: {pattern: {f: 1}}}}}"
+ "]}}}}");
+
+    // Two possibilities from outside the $or.
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {b: 1}}}}}");
+}
+
+//
+// Test the "split limited sort stages" hack.
+//
+
+TEST_F(QueryPlannerTest, SplitLimitedSort) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ runQuerySortProjSkipLimit(fromjson("{a: 1}"), fromjson("{b: 1}"), BSONObj(), 0, 3);
+
+ assertNumSolutions(2U);
+ // First solution has no blocking stage; no need to split.
+ assertSolutionExists(
+ "{fetch: {filter: {a:1}, node: "
+ "{ixscan: {filter: null, pattern: {b: 1}}}}}");
+ // Second solution has a blocking sort with a limit: it gets split and
+ // joined with an OR stage.
+ assertSolutionExists(
+ "{or: {nodes: ["
+ "{sort: {pattern: {b: 1}, limit: 3, node: "
+ "{fetch: {node: {ixscan: {pattern: {a: 1}}}}}}}, "
+ "{sort: {pattern: {b: 1}, limit: 0, node: "
+ "{fetch: {node: {ixscan: {pattern: {a: 1}}}}}}}]}}");
+}
+
+// The same query run as a find command with a limit should not require the "split limited sort"
+// hack.
+TEST_F(QueryPlannerTest, NoSplitLimitedSortAsCommand) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ runQueryAsCommand(fromjson("{find: 'testns', filter: {a: 1}, sort: {b: 1}, limit: 3}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{limit: {n: 3, node: {fetch: {filter: {a:1}, node: "
+ "{ixscan: {filter: null, pattern: {b: 1}}}}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 3, node: {fetch: {filter: null,"
+ "node: {ixscan: {pattern: {a: 1}}}}}}}");
+}
+
+// The same query run as a find command with a batchSize rather than a limit should not require
+// the "split limited sort" hack, and should not have any limit represented inside the plan.
+TEST_F(QueryPlannerTest, NoSplitLimitedSortAsCommandBatchSize) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ runQueryAsCommand(fromjson("{find: 'testns', filter: {a: 1}, sort: {b: 1}, batchSize: 3}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{fetch: {filter: {a: 1}, node: {ixscan: "
+ "{filter: null, pattern: {b: 1}}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 0, node: {fetch: {filter: null,"
+ "node: {ixscan: {pattern: {a: 1}}}}}}}");
+}
+
+//
+// Test shard filter query planning
+//
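+// INCLUDE_SHARD_FILTER asks the planner to add a SHARDING_FILTER stage, which drops documents
+// (orphans) not owned by this shard. The stage can run directly over index keys when the index
+// provides the entire shard key, so covered plans stay covered; otherwise a fetch is required
+// below the filter.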
+
+TEST_F(QueryPlannerTest, ShardFilterCollScan) {
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a" << 1);
+ addIndex(BSON("a" << 1));
+
+ runQuery(fromjson("{b: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{sharding_filter: {node: "
+ "{cscan: {dir: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ShardFilterBasicIndex) {
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a" << 1);
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ runQuery(fromjson("{b: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{sharding_filter: {node: "
+ "{fetch: {node: "
+ "{ixscan: {pattern: {b: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ShardFilterBasicCovered) {
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a" << 1);
+ addIndex(BSON("a" << 1));
+
+ runQuery(fromjson("{a: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {node: "
+ "{sharding_filter: {node: "
+ "{ixscan: {pattern: {a: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ShardFilterBasicProjCovered) {
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a" << 1);
+ addIndex(BSON("a" << 1));
+
+ runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{_id : 0, a : 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, type: 'coveredIndex', node: "
+ "{sharding_filter: {node: "
+ "{ixscan: {pattern: {a: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ShardFilterCompoundProjCovered) {
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a" << 1 << "b" << 1);
+ addIndex(BSON("a" << 1 << "b" << 1));
+
+ runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{_id: 0, a: 1, b: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1, b: 1 }, type: 'coveredIndex', node: "
+ "{sharding_filter: {node: "
+ "{ixscan: {pattern: {a: 1, b: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ShardFilterNestedProjNotCovered) {
+ // Nested projections can't be covered currently, though the shard key filter shouldn't need
+ // to fetch.
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a" << 1 << "b.c" << 1);
+ addIndex(BSON("a" << 1 << "b.c" << 1));
+
+ runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{_id: 0, a: 1, 'b.c': 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1, 'b.c': 1 }, type: 'default', node: "
+ "{fetch: {node: "
+ "{sharding_filter: {node: "
+ "{ixscan: {pattern: {a: 1, 'b.c': 1}}}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ShardFilterHashProjNotCovered) {
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a"
+ << "hashed");
+ addIndex(BSON("a"
+ << "hashed"));
+
+ runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{_id : 0, a : 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0,a: 1}, type: 'simple', node: "
+ "{sharding_filter : {node: "
+ "{fetch: {node: "
+ "{ixscan: {pattern: {a: 'hashed'}}}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ShardFilterKeyPrefixIndexCovered) {
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a" << 1);
+ addIndex(BSON("a" << 1 << "b" << 1 << "_id" << 1));
+
+ runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{a : 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{proj: {spec: {a: 1}, type: 'coveredIndex', node: "
+ "{sharding_filter : {node: "
+ "{ixscan: {pattern: {a: 1, b: 1, _id: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ShardFilterNoIndexNotCovered) {
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a"
+ << "hashed");
+ addIndex(BSON("b" << 1));
+
+ runQuerySortProj(fromjson("{b: 1}"), BSONObj(), fromjson("{_id : 0, a : 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0,a: 1}, type: 'simple', node: "
+ "{sharding_filter : {node: "
+ "{fetch: {node: "
+ "{ixscan: {pattern: {b: 1}}}}}}}}}");
+}
+
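+// With CANNOT_TRIM_IXISECT set, index intersection plans must keep the complete original filter
+// on the fetch above the intersection, rather than trimming away predicates already covered by
+// the individual index scans.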
+TEST_F(QueryPlannerTest, CannotTrimIxisectParam) {
+ params.options = QueryPlannerParams::CANNOT_TRIM_IXISECT;
+ params.options |= QueryPlannerParams::INDEX_INTERSECTION;
+ params.options |= QueryPlannerParams::NO_TABLE_SCAN;
+
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ runQuery(fromjson("{a: 1, b: 1, c: 1}"));
+
+ assertNumSolutions(3U);
+ assertSolutionExists(
+ "{fetch: {filter: {b: 1, c: 1}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a: 1, c: 1}, node: "
+ "{ixscan: {filter: null, pattern: {b: 1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:1,b:1,c:1}, node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, CannotTrimIxisectParamBeneathOr) {
+ params.options = QueryPlannerParams::CANNOT_TRIM_IXISECT;
+ params.options |= QueryPlannerParams::INDEX_INTERSECTION;
+ params.options |= QueryPlannerParams::NO_TABLE_SCAN;
+
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+
+ runQuery(fromjson("{d: 1, $or: [{a: 1}, {b: 1, c: 1}]}"));
+
+ assertNumSolutions(3U);
+
+ assertSolutionExists(
+ "{fetch: {filter: {d: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {c: 1}, node: {ixscan: {filter: null,"
+ "pattern: {b: 1}, bounds: {b: [[1,1,true,true]]}}}}},"
+ "{ixscan: {filter: null, pattern: {a: 1},"
+ "bounds: {a: [[1,1,true,true]]}}}]}}}}");
+
+ assertSolutionExists(
+ "{fetch: {filter: {d: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {b: 1}, node: {ixscan: {filter: null,"
+ "pattern: {c: 1}, bounds: {c: [[1,1,true,true]]}}}}},"
+ "{ixscan: {filter: null, pattern: {a: 1},"
+ "bounds: {a: [[1,1,true,true]]}}}]}}}}");
+
+ assertSolutionExists(
+ "{fetch: {filter: {d: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {b: 1, c: 1}, node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {b: 1}}},"
+ "{ixscan: {filter: null, pattern: {c: 1}}}]}}}},"
+ "{ixscan: {filter: null, pattern: {a: 1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, CannotTrimIxisectAndHashWithOrChild) {
+ params.options = QueryPlannerParams::CANNOT_TRIM_IXISECT;
+ params.options |= QueryPlannerParams::INDEX_INTERSECTION;
+ params.options |= QueryPlannerParams::NO_TABLE_SCAN;
+
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+
+ runQuery(fromjson("{c: 1, $or: [{a: 1}, {b: 1, d: 1}]}"));
+
+ assertNumSolutions(3U);
+
+ assertSolutionExists(
+ "{fetch: {filter: {c: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {d: 1}, node: {ixscan: {filter: null,"
+ "pattern: {b: 1}, bounds: {b: [[1,1,true,true]]}}}}},"
+ "{ixscan: {filter: null, pattern: {a: 1},"
+ "bounds: {a: [[1,1,true,true]]}}}]}}}}");
+
+ assertSolutionExists(
+ "{fetch: {filter: {$or:[{b:1,d:1},{a:1}]}, node:"
+ "{ixscan: {filter: null, pattern: {c: 1}}}}}");
+
+ assertSolutionExists(
+ "{fetch: {filter: {c:1,$or:[{a:1},{b:1,d:1}]}, node:{andHash:{nodes:["
+ "{or: {nodes: ["
+ "{fetch: {filter: {d:1}, node: {ixscan: {pattern: {b: 1}}}}},"
+ "{ixscan: {filter: null, pattern: {a: 1}}}]}},"
+ "{ixscan: {filter: null, pattern: {c: 1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, CannotTrimIxisectParamSelfIntersection) {
+ params.options = QueryPlannerParams::CANNOT_TRIM_IXISECT;
+ params.options = QueryPlannerParams::INDEX_INTERSECTION;
+ params.options |= QueryPlannerParams::NO_TABLE_SCAN;
+
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+
+ runQuery(fromjson("{a: {$all: [1, 2, 3]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{a:2}, {a:3}]}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1},"
+ "bounds: {a: [[1,1,true,true]]}}},"
+ "{ixscan: {filter: null, pattern: {a:1},"
+ "bounds: {a: [[2,2,true,true]]}}},"
+ "{ixscan: {filter: null, pattern: {a:1},"
+ "bounds: {a: [[3,3,true,true]]}}}]}}}}");
+}
+
+
+// If a lookup against a unique index is available as a possible plan, then the planner
+// should not generate other possibilities.
+TEST_F(QueryPlannerTest, UniqueIndexLookup) {
+ params.options = QueryPlannerParams::INDEX_INTERSECTION;
+ params.options |= QueryPlannerParams::NO_TABLE_SCAN;
+
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1),
+ false, // multikey
+ false, // sparse
+ true); // unique
+
+ runQuery(fromjson("{a: 1, b: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {a: 1}, node: "
+ "{ixscan: {filter: null, pattern: {b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, HintOnNonUniqueIndex) {
+ params.options = QueryPlannerParams::INDEX_INTERSECTION;
+
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1),
+ false, // multikey
+ false, // sparse
+ true); // unique
+
+ runQueryHint(fromjson("{a: 1, b: 1}"), BSON("a" << 1));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {b: 1}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, UniqueIndexLookupBelowOr) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+ addIndex(BSON("d" << 1),
+ false, // multikey
+ false, // sparse
+ true); // unique
+
+ runQuery(fromjson("{$or: [{a: 1, b: 1}, {c: 1, d: 1}]}"));
+
+ // Only two plans because we throw out plans for the right branch of the $or that do not
+ // use equality over the unique index.
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{or: {nodes: ["
+ "{fetch: {filter: {a: 1}, node: {ixscan: {pattern: {b: 1}}}}},"
+ "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}}]}}");
+ assertSolutionExists(
+ "{or: {nodes: ["
+ "{fetch: {filter: {b: 1}, node: {ixscan: {pattern: {a: 1}}}}},"
+ "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}}]}}");
+}
+
+TEST_F(QueryPlannerTest, UniqueIndexLookupBelowOrBelowAnd) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+ addIndex(BSON("d" << 1),
+ false, // multikey
+ false, // sparse
+ true); // unique
+
+ runQuery(fromjson("{e: 1, $or: [{a: 1, b: 1}, {c: 1, d: 1}]}"));
+
+ // Only two plans because we throw out plans for the right branch of the $or that do not
+ // use equality over the unique index.
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{fetch: {filter: {e: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {a: 1}, node: {ixscan: {pattern: {b: 1}}}}},"
+ "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}}"
+ "]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {e: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {b: 1}, node: {ixscan: {pattern: {a: 1}}}}},"
+ "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}}"
+ "]}}}}");
+}
+
+TEST_F(QueryPlannerTest, CoveredOrUniqueIndexLookup) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+
+ addIndex(BSON("a" << 1 << "b" << 1));
+ addIndex(BSON("a" << 1),
+ false, // multikey
+ false, // sparse
+ true); // unique
+
+ runQuerySortProj(fromjson("{a: 1, b: 1}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{fetch: {filter: {b: 1}, node: {ixscan: {pattern: {a: 1}}}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}");
+}
+
+//
+// Test bad input to query planner helpers.
+//
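+// These call QueryPlanner::cacheDataFromTaggedTree() and QueryPlanner::tagAccordingToCache()
+// directly with malformed inputs (null expressions, missing index tags, mismatched trees) and
+// expect a non-OK status for each bad input.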
+
+TEST(BadInputTest, CacheDataFromTaggedTree) {
+ PlanCacheIndexTree* indexTree;
+
+ // Null match expression.
+ std::vector<IndexEntry> relevantIndices;
+ Status s = QueryPlanner::cacheDataFromTaggedTree(NULL, relevantIndices, &indexTree);
+ ASSERT_NOT_OK(s);
+ ASSERT(NULL == indexTree);
+
+ // No relevant index matching the index tag.
+ relevantIndices.push_back(IndexEntry(BSON("a" << 1)));
+
+ CanonicalQuery* cq;
+ Status cqStatus = CanonicalQuery::canonicalize("ns", BSON("a" << 3), &cq);
+ ASSERT_OK(cqStatus);
+ std::unique_ptr<CanonicalQuery> scopedCq(cq);
+ scopedCq->root()->setTag(new IndexTag(1));
+
+ s = QueryPlanner::cacheDataFromTaggedTree(scopedCq->root(), relevantIndices, &indexTree);
+ ASSERT_NOT_OK(s);
+ ASSERT(NULL == indexTree);
+}
+
+TEST(BadInputTest, TagAccordingToCache) {
+ CanonicalQuery* cq;
+ Status cqStatus = CanonicalQuery::canonicalize("ns", BSON("a" << 3), &cq);
+ ASSERT_OK(cqStatus);
+ std::unique_ptr<CanonicalQuery> scopedCq(cq);
+
+ std::unique_ptr<PlanCacheIndexTree> indexTree(new PlanCacheIndexTree());
+ indexTree->setIndexEntry(IndexEntry(BSON("a" << 1)));
+
+ std::map<BSONObj, size_t> indexMap;
+
+ // Null filter.
+ Status s = QueryPlanner::tagAccordingToCache(NULL, indexTree.get(), indexMap);
+ ASSERT_NOT_OK(s);
+
+ // Null indexTree.
+ s = QueryPlanner::tagAccordingToCache(scopedCq->root(), NULL, indexMap);
+ ASSERT_NOT_OK(s);
+
+ // Index not found.
+ s = QueryPlanner::tagAccordingToCache(scopedCq->root(), indexTree.get(), indexMap);
+ ASSERT_NOT_OK(s);
+
+ // Index found once added to the map.
+ indexMap[BSON("a" << 1)] = 0;
+ s = QueryPlanner::tagAccordingToCache(scopedCq->root(), indexTree.get(), indexMap);
+ ASSERT_OK(s);
+
+ // Regenerate canonical query in order to clear tags.
+ cqStatus = CanonicalQuery::canonicalize("ns", BSON("a" << 3), &cq);
+ ASSERT_OK(cqStatus);
+ scopedCq.reset(cq);
+
+ // Mismatched tree topology.
+ PlanCacheIndexTree* child = new PlanCacheIndexTree();
+ child->setIndexEntry(IndexEntry(BSON("a" << 1)));
+ indexTree->children.push_back(child);
+ s = QueryPlanner::tagAccordingToCache(scopedCq->root(), indexTree.get(), indexMap);
+ ASSERT_NOT_OK(s);
+}
} // namespace
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index c6824169ae7..1b876c5296b 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -41,290 +41,299 @@
namespace mongo {
- using unittest::assertGet;
-
- const char* QueryPlannerTest::ns = "somebogus.ns";
-
- void QueryPlannerTest::setUp() {
- internalQueryPlannerEnableHashIntersection = true;
- params.options = QueryPlannerParams::INCLUDE_COLLSCAN;
- addIndex(BSON("_id" << 1));
- }
-
- void QueryPlannerTest::addIndex(BSONObj keyPattern, bool multikey) {
- params.indices.push_back(IndexEntry(keyPattern,
- multikey,
- false, // sparse
- false, // unique
- "hari_king_of_the_stove",
- NULL, // filterExpr
- BSONObj()));
- }
-
- void QueryPlannerTest::addIndex(BSONObj keyPattern, bool multikey, bool sparse) {
- params.indices.push_back(IndexEntry(keyPattern,
- multikey,
- sparse,
- false, // unique
- "note_to_self_dont_break_build",
- NULL, // filterExpr
- BSONObj()));
- }
-
- void QueryPlannerTest::addIndex(BSONObj keyPattern, bool multikey, bool sparse, bool unique) {
- params.indices.push_back(IndexEntry(keyPattern,
- multikey,
- sparse,
- unique,
- "sql_query_walks_into_bar_and_says_can_i_join_you?",
- NULL, // filterExpr
- BSONObj()));
- }
-
- void QueryPlannerTest::addIndex(BSONObj keyPattern, BSONObj infoObj) {
- params.indices.push_back(IndexEntry(keyPattern,
- false, // multikey
- false, // sparse
- false, // unique
- "foo",
- NULL, // filterExpr
- infoObj));
- }
-
- void QueryPlannerTest::runQuery(BSONObj query) {
- runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), 0, 0);
- }
-
- void QueryPlannerTest::runQuerySortProj(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj) {
- runQuerySortProjSkipLimit(query, sort, proj, 0, 0);
- }
-
- void QueryPlannerTest::runQuerySkipLimit(const BSONObj& query,
- long long skip,
- long long limit) {
- runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), skip, limit);
- }
-
- void QueryPlannerTest::runQueryHint(const BSONObj& query, const BSONObj& hint) {
- runQuerySortProjSkipLimitHint(query, BSONObj(), BSONObj(), 0, 0, hint);
- }
-
- void QueryPlannerTest::runQuerySortProjSkipLimit(const BSONObj& query,
+using unittest::assertGet;
+
+const char* QueryPlannerTest::ns = "somebogus.ns";
+
+void QueryPlannerTest::setUp() {
+ internalQueryPlannerEnableHashIntersection = true;
+ params.options = QueryPlannerParams::INCLUDE_COLLSCAN;
+ addIndex(BSON("_id" << 1));
+}
+
+void QueryPlannerTest::addIndex(BSONObj keyPattern, bool multikey) {
+ params.indices.push_back(IndexEntry(keyPattern,
+ multikey,
+ false, // sparse
+ false, // unique
+ "hari_king_of_the_stove",
+ NULL, // filterExpr
+ BSONObj()));
+}
+
+void QueryPlannerTest::addIndex(BSONObj keyPattern, bool multikey, bool sparse) {
+ params.indices.push_back(IndexEntry(keyPattern,
+ multikey,
+ sparse,
+ false, // unique
+ "note_to_self_dont_break_build",
+ NULL, // filterExpr
+ BSONObj()));
+}
+
+void QueryPlannerTest::addIndex(BSONObj keyPattern, bool multikey, bool sparse, bool unique) {
+ params.indices.push_back(IndexEntry(keyPattern,
+ multikey,
+ sparse,
+ unique,
+ "sql_query_walks_into_bar_and_says_can_i_join_you?",
+ NULL, // filterExpr
+ BSONObj()));
+}
+
+void QueryPlannerTest::addIndex(BSONObj keyPattern, BSONObj infoObj) {
+ params.indices.push_back(IndexEntry(keyPattern,
+ false, // multikey
+ false, // sparse
+ false, // unique
+ "foo",
+ NULL, // filterExpr
+ infoObj));
+}
+
+void QueryPlannerTest::runQuery(BSONObj query) {
+ runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), 0, 0);
+}
+
+void QueryPlannerTest::runQuerySortProj(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj) {
+ runQuerySortProjSkipLimit(query, sort, proj, 0, 0);
+}
+
+void QueryPlannerTest::runQuerySkipLimit(const BSONObj& query, long long skip, long long limit) {
+ runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), skip, limit);
+}
+
+void QueryPlannerTest::runQueryHint(const BSONObj& query, const BSONObj& hint) {
+ runQuerySortProjSkipLimitHint(query, BSONObj(), BSONObj(), 0, 0, hint);
+}
+
+void QueryPlannerTest::runQuerySortProjSkipLimit(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit) {
+ runQuerySortProjSkipLimitHint(query, sort, proj, skip, limit, BSONObj());
+}
+
+void QueryPlannerTest::runQuerySortHint(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& hint) {
+ runQuerySortProjSkipLimitHint(query, sort, BSONObj(), 0, 0, hint);
+}
+
+void QueryPlannerTest::runQueryHintMinMax(const BSONObj& query,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj) {
+ runQueryFull(query, BSONObj(), BSONObj(), 0, 0, hint, minObj, maxObj, false);
+}
+
+void QueryPlannerTest::runQuerySortProjSkipLimitHint(const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj,
long long skip,
- long long limit) {
- runQuerySortProjSkipLimitHint(query, sort, proj, skip, limit, BSONObj());
- }
-
- void QueryPlannerTest::runQuerySortHint(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& hint) {
- runQuerySortProjSkipLimitHint(query, sort, BSONObj(), 0, 0, hint);
- }
-
- void QueryPlannerTest::runQueryHintMinMax(const BSONObj& query,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj) {
- runQueryFull(query, BSONObj(), BSONObj(), 0, 0, hint, minObj, maxObj, false);
- }
-
- void QueryPlannerTest::runQuerySortProjSkipLimitHint(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint) {
- runQueryFull(query, sort, proj, skip, limit, hint, BSONObj(), BSONObj(), false);
- }
-
- void QueryPlannerTest::runQuerySnapshot(const BSONObj& query) {
- runQueryFull(query, BSONObj(), BSONObj(), 0, 0, BSONObj(), BSONObj(),
- BSONObj(), true);
- }
-
- void QueryPlannerTest::runQueryFull(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot) {
- // Clean up any previous state from a call to runQueryFull
- solns.clear();
-
- {
- CanonicalQuery* rawCq;
- Status s = CanonicalQuery::canonicalize(ns, query, sort, proj, skip, limit, hint,
- minObj, maxObj, snapshot,
- false, // explain
- &rawCq);
- ASSERT_OK(s);
- cq.reset(rawCq);
- }
-
- ASSERT_OK(QueryPlanner::plan(*cq, params, &solns.mutableVector()));
+ long long limit,
+ const BSONObj& hint) {
+ runQueryFull(query, sort, proj, skip, limit, hint, BSONObj(), BSONObj(), false);
+}
+
+void QueryPlannerTest::runQuerySnapshot(const BSONObj& query) {
+ runQueryFull(query, BSONObj(), BSONObj(), 0, 0, BSONObj(), BSONObj(), BSONObj(), true);
+}
+
+void QueryPlannerTest::runQueryFull(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot) {
+ // Clean up any previous state from a call to runQueryFull
+ solns.clear();
+
+ {
+ CanonicalQuery* rawCq;
+ Status s = CanonicalQuery::canonicalize(ns,
+ query,
+ sort,
+ proj,
+ skip,
+ limit,
+ hint,
+ minObj,
+ maxObj,
+ snapshot,
+ false, // explain
+ &rawCq);
+ ASSERT_OK(s);
+ cq.reset(rawCq);
}
- void QueryPlannerTest::runInvalidQuery(const BSONObj& query) {
- runInvalidQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), 0, 0);
- }
+ ASSERT_OK(QueryPlanner::plan(*cq, params, &solns.mutableVector()));
+}
- void QueryPlannerTest::runInvalidQuerySortProj(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj) {
- runInvalidQuerySortProjSkipLimit(query, sort, proj, 0, 0);
- }
+void QueryPlannerTest::runInvalidQuery(const BSONObj& query) {
+ runInvalidQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), 0, 0);
+}
- void QueryPlannerTest::runInvalidQuerySortProjSkipLimit(const BSONObj& query,
+void QueryPlannerTest::runInvalidQuerySortProj(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj) {
+ runInvalidQuerySortProjSkipLimit(query, sort, proj, 0, 0);
+}
+
+void QueryPlannerTest::runInvalidQuerySortProjSkipLimit(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit) {
+ runInvalidQuerySortProjSkipLimitHint(query, sort, proj, skip, limit, BSONObj());
+}
+
+void QueryPlannerTest::runInvalidQueryHint(const BSONObj& query, const BSONObj& hint) {
+ runInvalidQuerySortProjSkipLimitHint(query, BSONObj(), BSONObj(), 0, 0, hint);
+}
+
+void QueryPlannerTest::runInvalidQueryHintMinMax(const BSONObj& query,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj) {
+ runInvalidQueryFull(query, BSONObj(), BSONObj(), 0, 0, hint, minObj, maxObj, false);
+}
+
+void QueryPlannerTest::runInvalidQuerySortProjSkipLimitHint(const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj,
long long skip,
- long long limit) {
- runInvalidQuerySortProjSkipLimitHint(query, sort, proj, skip, limit, BSONObj());
+ long long limit,
+ const BSONObj& hint) {
+ runInvalidQueryFull(query, sort, proj, skip, limit, hint, BSONObj(), BSONObj(), false);
+}
+
+void QueryPlannerTest::runInvalidQueryFull(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot) {
+ solns.clear();
+
+ {
+ CanonicalQuery* rawCq;
+ Status s = CanonicalQuery::canonicalize(ns,
+ query,
+ sort,
+ proj,
+ skip,
+ limit,
+ hint,
+ minObj,
+ maxObj,
+ snapshot,
+ false, // explain
+ &rawCq);
+ ASSERT_OK(s);
+ cq.reset(rawCq);
}
- void QueryPlannerTest::runInvalidQueryHint(const BSONObj& query, const BSONObj& hint) {
- runInvalidQuerySortProjSkipLimitHint(query, BSONObj(), BSONObj(), 0, 0, hint);
- }
+ Status s = QueryPlanner::plan(*cq, params, &solns.mutableVector());
+ ASSERT_NOT_OK(s);
+}
- void QueryPlannerTest::runInvalidQueryHintMinMax(const BSONObj& query,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj) {
- runInvalidQueryFull(query, BSONObj(), BSONObj(), 0, 0, hint, minObj, maxObj, false);
- }
+void QueryPlannerTest::runQueryAsCommand(const BSONObj& cmdObj) {
+ solns.clear();
- void QueryPlannerTest::runInvalidQuerySortProjSkipLimitHint(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint) {
- runInvalidQueryFull(query, sort, proj, skip, limit, hint, BSONObj(), BSONObj(), false);
- }
+ const NamespaceString nss(ns);
+ invariant(nss.isValid());
- void QueryPlannerTest::runInvalidQueryFull(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot) {
- solns.clear();
-
- {
- CanonicalQuery* rawCq;
- Status s = CanonicalQuery::canonicalize(ns, query, sort, proj, skip, limit, hint,
- minObj, maxObj, snapshot,
- false, // explain
- &rawCq);
- ASSERT_OK(s);
- cq.reset(rawCq);
- }
+ const bool isExplain = false;
+ std::unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
- Status s = QueryPlanner::plan(*cq, params, &solns.mutableVector());
- ASSERT_NOT_OK(s);
- }
+ CanonicalQuery* rawCq;
+ WhereCallbackNoop whereCallback;
+ Status canonStatus = CanonicalQuery::canonicalize(lpq.release(), &rawCq, whereCallback);
+ ASSERT_OK(canonStatus);
+ cq.reset(rawCq);
- void QueryPlannerTest::runQueryAsCommand(const BSONObj& cmdObj) {
- solns.clear();
+ Status s = QueryPlanner::plan(*cq, params, &solns.mutableVector());
+ ASSERT_OK(s);
+}
- const NamespaceString nss(ns);
- invariant(nss.isValid());
+size_t QueryPlannerTest::getNumSolutions() const {
+ return solns.size();
+}
- const bool isExplain = false;
- std::unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+void QueryPlannerTest::dumpSolutions() const {
+ mongoutils::str::stream ost;
+ dumpSolutions(ost);
+ log() << std::string(ost);
+}
- CanonicalQuery* rawCq;
- WhereCallbackNoop whereCallback;
- Status canonStatus = CanonicalQuery::canonicalize(lpq.release(), &rawCq, whereCallback);
- ASSERT_OK(canonStatus);
- cq.reset(rawCq);
-
- Status s = QueryPlanner::plan(*cq, params, &solns.mutableVector());
- ASSERT_OK(s);
- }
-
- size_t QueryPlannerTest::getNumSolutions() const {
- return solns.size();
- }
-
- void QueryPlannerTest::dumpSolutions() const {
- mongoutils::str::stream ost;
- dumpSolutions(ost);
- log() << std::string(ost);
+void QueryPlannerTest::dumpSolutions(mongoutils::str::stream& ost) const {
+ for (auto&& soln : solns) {
+ ost << soln->toString() << '\n';
}
+}
- void QueryPlannerTest::dumpSolutions(mongoutils::str::stream& ost) const {
- for (auto&& soln : solns) {
- ost << soln->toString() << '\n';
- }
+void QueryPlannerTest::assertNumSolutions(size_t expectSolutions) const {
+ if (getNumSolutions() == expectSolutions) {
+ return;
}
-
- void QueryPlannerTest::assertNumSolutions(size_t expectSolutions) const {
- if (getNumSolutions() == expectSolutions) {
- return;
+ mongoutils::str::stream ss;
+ ss << "expected " << expectSolutions << " solutions but got " << getNumSolutions()
+ << " instead. solutions generated: " << '\n';
+ dumpSolutions(ss);
+ FAIL(ss);
+}
+
+size_t QueryPlannerTest::numSolutionMatches(const std::string& solnJson) const {
+ BSONObj testSoln = fromjson(solnJson);
+ size_t matches = 0;
+ for (auto&& soln : solns) {
+ QuerySolutionNode* root = soln->root.get();
+ if (QueryPlannerTestLib::solutionMatches(testSoln, root)) {
+ ++matches;
}
- mongoutils::str::stream ss;
- ss << "expected " << expectSolutions << " solutions but got " << getNumSolutions()
- << " instead. solutions generated: " << '\n';
- dumpSolutions(ss);
- FAIL(ss);
}
+ return matches;
+}
- size_t QueryPlannerTest::numSolutionMatches(const std::string& solnJson) const {
- BSONObj testSoln = fromjson(solnJson);
- size_t matches = 0;
- for (auto&& soln : solns) {
- QuerySolutionNode* root = soln->root.get();
- if (QueryPlannerTestLib::solutionMatches(testSoln, root)) {
- ++matches;
- }
- }
- return matches;
+void QueryPlannerTest::assertSolutionExists(const std::string& solnJson, size_t numMatches) const {
+ size_t matches = numSolutionMatches(solnJson);
+ if (numMatches == matches) {
+ return;
}
-
- void QueryPlannerTest::assertSolutionExists(const std::string& solnJson,
- size_t numMatches) const {
- size_t matches = numSolutionMatches(solnJson);
- if (numMatches == matches) {
- return;
+ mongoutils::str::stream ss;
+ ss << "expected " << numMatches << " matches for solution " << solnJson << " but got "
+ << matches << " instead. all solutions generated: " << '\n';
+ dumpSolutions(ss);
+ FAIL(ss);
+}
+
+void QueryPlannerTest::assertHasOneSolutionOf(const std::vector<std::string>& solnStrs) const {
+ size_t matches = 0;
+ for (std::vector<std::string>::const_iterator it = solnStrs.begin(); it != solnStrs.end();
+ ++it) {
+ if (1U == numSolutionMatches(*it)) {
+ ++matches;
}
- mongoutils::str::stream ss;
- ss << "expected " << numMatches << " matches for solution " << solnJson
- << " but got " << matches
- << " instead. all solutions generated: " << '\n';
- dumpSolutions(ss);
- FAIL(ss);
}
-
- void QueryPlannerTest::assertHasOneSolutionOf(const std::vector<std::string>& solnStrs) const {
- size_t matches = 0;
- for (std::vector<std::string>::const_iterator it = solnStrs.begin();
- it != solnStrs.end();
- ++it) {
- if (1U == numSolutionMatches(*it)) {
- ++matches;
- }
- }
- if (1U == matches) {
- return;
- }
- mongoutils::str::stream ss;
- ss << "assertHasOneSolutionOf expected one matching solution"
- << " but got " << matches
- << " instead. all solutions generated: " << '\n';
- dumpSolutions(ss);
- FAIL(ss);
+ if (1U == matches) {
+ return;
}
-
-} // namespace mongo
+ mongoutils::str::stream ss;
+ ss << "assertHasOneSolutionOf expected one matching solution"
+ << " but got " << matches << " instead. all solutions generated: " << '\n';
+ dumpSolutions(ss);
+ FAIL(ss);
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/query/query_planner_test_fixture.h b/src/mongo/db/query/query_planner_test_fixture.h
index e8b1901785a..843ed949e1a 100644
--- a/src/mongo/db/query/query_planner_test_fixture.h
+++ b/src/mongo/db/query/query_planner_test_fixture.h
@@ -41,155 +41,153 @@
namespace mongo {
- class QueryPlannerTest : public mongo::unittest::Test {
- protected:
- void setUp();
+class QueryPlannerTest : public mongo::unittest::Test {
+protected:
+ void setUp();
- //
- // Build up test.
- //
+ //
+ // Build up test.
+ //
- void addIndex(BSONObj keyPattern, bool multikey = false);
+ void addIndex(BSONObj keyPattern, bool multikey = false);
- void addIndex(BSONObj keyPattern, bool multikey, bool sparse);
+ void addIndex(BSONObj keyPattern, bool multikey, bool sparse);
- void addIndex(BSONObj keyPattern, bool multikey, bool sparse, bool unique);
+ void addIndex(BSONObj keyPattern, bool multikey, bool sparse, bool unique);
- void addIndex(BSONObj keyPattern, BSONObj infoObj);
+ void addIndex(BSONObj keyPattern, BSONObj infoObj);
- //
- // Execute planner.
- //
+ //
+ // Execute planner.
+ //
- void runQuery(BSONObj query);
+ void runQuery(BSONObj query);
- void runQuerySortProj(const BSONObj& query, const BSONObj& sort, const BSONObj& proj);
+ void runQuerySortProj(const BSONObj& query, const BSONObj& sort, const BSONObj& proj);
- void runQuerySkipLimit(const BSONObj& query, long long skip, long long limit);
+ void runQuerySkipLimit(const BSONObj& query, long long skip, long long limit);
- void runQueryHint(const BSONObj& query, const BSONObj& hint);
+ void runQueryHint(const BSONObj& query, const BSONObj& hint);
- void runQuerySortProjSkipLimit(const BSONObj& query,
+ void runQuerySortProjSkipLimit(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit);
+
+ void runQuerySortHint(const BSONObj& query, const BSONObj& sort, const BSONObj& hint);
+
+ void runQueryHintMinMax(const BSONObj& query,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj);
+
+ void runQuerySortProjSkipLimitHint(const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj,
long long skip,
- long long limit);
+ long long limit,
+ const BSONObj& hint);
- void runQuerySortHint(const BSONObj& query, const BSONObj& sort, const BSONObj& hint);
+ void runQuerySnapshot(const BSONObj& query);
- void runQueryHintMinMax(const BSONObj& query,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj);
+ void runQueryFull(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot);
- void runQuerySortProjSkipLimitHint(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint);
+ //
+ // Same as runQuery* functions except we expect a failed status from the planning stage.
+ //
- void runQuerySnapshot(const BSONObj& query);
+ void runInvalidQuery(const BSONObj& query);
- void runQueryFull(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot);
+ void runInvalidQuerySortProj(const BSONObj& query, const BSONObj& sort, const BSONObj& proj);
- //
- // Same as runQuery* functions except we expect a failed status from the planning stage.
- //
+ void runInvalidQuerySortProjSkipLimit(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit);
- void runInvalidQuery(const BSONObj& query);
+ void runInvalidQueryHint(const BSONObj& query, const BSONObj& hint);
- void runInvalidQuerySortProj(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj);
+ void runInvalidQueryHintMinMax(const BSONObj& query,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj);
- void runInvalidQuerySortProjSkipLimit(const BSONObj& query,
+ void runInvalidQuerySortProjSkipLimitHint(const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj,
long long skip,
- long long limit);
-
- void runInvalidQueryHint(const BSONObj& query, const BSONObj& hint);
-
- void runInvalidQueryHintMinMax(const BSONObj& query,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj);
-
- void runInvalidQuerySortProjSkipLimitHint(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint);
-
- void runInvalidQueryFull(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot);
-
- /**
- * The other runQuery* methods run the query as through it is an OP_QUERY style find. This
- * version goes through find command parsing, and will be planned like a find command.
- */
- void runQueryAsCommand(const BSONObj& cmdObj);
-
- //
- // Introspect solutions.
- //
-
- size_t getNumSolutions() const;
-
- void dumpSolutions() const;
-
- void dumpSolutions(mongoutils::str::stream& ost) const;
-
- /**
- * Checks number solutions. Generates assertion message
- * containing solution dump if applicable.
- */
- void assertNumSolutions(size_t expectSolutions) const;
-
- size_t numSolutionMatches(const std::string& solnJson) const;
-
- /**
- * Verifies that the solution tree represented in json by 'solnJson' is
- * one of the solutions generated by QueryPlanner.
- *
- * The number of expected matches, 'numMatches', could be greater than
- * 1 if solutions differ only by the pattern of index tags on a filter.
- */
- void assertSolutionExists(const std::string& solnJson, size_t numMatches = 1) const;
-
- /**
- * Given a vector of string-based solution tree representations 'solnStrs',
- * verifies that the query planner generated exactly one of these solutions.
- */
- void assertHasOneSolutionOf(const std::vector<std::string>& solnStrs) const;
-
- //
- // Data members.
- //
-
- static const char* ns;
-
- BSONObj queryObj;
- std::unique_ptr<CanonicalQuery> cq;
- QueryPlannerParams params;
- OwnedPointerVector<QuerySolution> solns;
- };
-
-} // namespace mongo
+ long long limit,
+ const BSONObj& hint);
+
+ void runInvalidQueryFull(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot);
+
+ /**
+ * The other runQuery* methods run the query as though it is an OP_QUERY style find. This
+ * version goes through find command parsing, and will be planned like a find command.
+ */
+ void runQueryAsCommand(const BSONObj& cmdObj);
+
+ //
+ // Introspect solutions.
+ //
+
+ size_t getNumSolutions() const;
+
+ void dumpSolutions() const;
+
+ void dumpSolutions(mongoutils::str::stream& ost) const;
+
+ /**
+ * Checks the number of solutions. Generates an assertion message
+ * containing a solution dump if applicable.
+ */
+ void assertNumSolutions(size_t expectSolutions) const;
+
+ size_t numSolutionMatches(const std::string& solnJson) const;
+
+ /**
+ * Verifies that the solution tree represented in json by 'solnJson' is
+ * one of the solutions generated by QueryPlanner.
+ *
+ * The number of expected matches, 'numMatches', could be greater than
+ * 1 if solutions differ only by the pattern of index tags on a filter.
+ */
+ void assertSolutionExists(const std::string& solnJson, size_t numMatches = 1) const;
+
+ /**
+ * Given a vector of string-based solution tree representations 'solnStrs',
+ * verifies that the query planner generated exactly one of these solutions.
+ */
+ void assertHasOneSolutionOf(const std::vector<std::string>& solnStrs) const;
+
+ //
+ // Data members.
+ //
+
+ static const char* ns;
+
+ BSONObj queryObj;
+ std::unique_ptr<CanonicalQuery> cq;
+ QueryPlannerParams params;
+ OwnedPointerVector<QuerySolution> solns;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/query/query_planner_test_lib.cpp b/src/mongo/db/query/query_planner_test_lib.cpp
index d81fc0f27e9..6ff080a767f 100644
--- a/src/mongo/db/query/query_planner_test_lib.cpp
+++ b/src/mongo/db/query/query_planner_test_lib.cpp
@@ -43,465 +43,520 @@
namespace {
- using namespace mongo;
+using namespace mongo;
- using std::string;
+using std::string;
- bool filterMatches(const BSONObj& testFilter,
- const QuerySolutionNode* trueFilterNode) {
- if (NULL == trueFilterNode->filter) { return false; }
- StatusWithMatchExpression swme = MatchExpressionParser::parse(testFilter);
- if (!swme.isOK()) {
- return false;
- }
- const std::unique_ptr<MatchExpression> root(swme.getValue());
- CanonicalQuery::sortTree(root.get());
- std::unique_ptr<MatchExpression> trueFilter(trueFilterNode->filter->shallowClone());
- CanonicalQuery::sortTree(trueFilter.get());
- return trueFilter->equivalent(root.get());
+bool filterMatches(const BSONObj& testFilter, const QuerySolutionNode* trueFilterNode) {
+ if (NULL == trueFilterNode->filter) {
+ return false;
}
-
- void appendIntervalBound(BSONObjBuilder& bob, BSONElement& el) {
- if (el.type() == String) {
- std::string data = el.String();
- if (data == "MaxKey") {
- bob.appendMaxKey("");
- }
- else if (data == "MinKey") {
- bob.appendMinKey("");
- }
- else {
- bob.appendAs(el, "");
- }
- }
- else {
+ StatusWithMatchExpression swme = MatchExpressionParser::parse(testFilter);
+ if (!swme.isOK()) {
+ return false;
+ }
+ const std::unique_ptr<MatchExpression> root(swme.getValue());
+ CanonicalQuery::sortTree(root.get());
+ std::unique_ptr<MatchExpression> trueFilter(trueFilterNode->filter->shallowClone());
+ CanonicalQuery::sortTree(trueFilter.get());
+ return trueFilter->equivalent(root.get());
+}
+
+void appendIntervalBound(BSONObjBuilder& bob, BSONElement& el) {
+ if (el.type() == String) {
+ std::string data = el.String();
+ if (data == "MaxKey") {
+ bob.appendMaxKey("");
+ } else if (data == "MinKey") {
+ bob.appendMinKey("");
+ } else {
bob.appendAs(el, "");
}
+ } else {
+ bob.appendAs(el, "");
}
+}
- bool intervalMatches(const BSONObj& testInt, const Interval trueInt) {
- BSONObjIterator it(testInt);
- if (!it.more()) { return false; }
- BSONElement low = it.next();
- if (!it.more()) { return false; }
- BSONElement high = it.next();
- if (!it.more()) { return false; }
- bool startInclusive = it.next().Bool();
- if (!it.more()) { return false; }
- bool endInclusive = it.next().Bool();
- if (it.more()) { return false; }
-
- BSONObjBuilder bob;
- appendIntervalBound(bob, low);
- appendIntervalBound(bob, high);
- Interval toCompare(bob.obj(), startInclusive, endInclusive);
-
- return Interval::INTERVAL_EQUALS == trueInt.compare(toCompare);
+bool intervalMatches(const BSONObj& testInt, const Interval trueInt) {
+ BSONObjIterator it(testInt);
+ if (!it.more()) {
+ return false;
+ }
+ BSONElement low = it.next();
+ if (!it.more()) {
+ return false;
+ }
+ BSONElement high = it.next();
+ if (!it.more()) {
+ return false;
+ }
+ bool startInclusive = it.next().Bool();
+ if (!it.more()) {
+ return false;
+ }
+ bool endInclusive = it.next().Bool();
+ if (it.more()) {
+ return false;
}
- /**
- * Returns whether the BSON representation of the index bounds in
- * 'testBounds' matches 'trueBounds'.
- *
- * 'testBounds' should be of the following format:
- * {<field 1>: <oil 1>, <field 2>: <oil 2>, ...}
- * Each ordered interval list (e.g. <oil 1>) is an array of arrays of
- * the format:
- * [[<low 1>,<high 1>,<lowInclusive 1>,<highInclusive 1>], ...]
- *
- * For example,
- * {a: [[1,2,true,false], [3,4,false,true]], b: [[-Infinity, Infinity]]}
- * Means that the index bounds on field 'a' consist of the two intervals
- * [1, 2) and (3, 4] and the index bounds on field 'b' are [-Infinity, Infinity].
- */
- bool boundsMatch(const BSONObj& testBounds, const IndexBounds trueBounds) {
- // Iterate over the fields on which we have index bounds.
- BSONObjIterator fieldIt(testBounds);
- int fieldItCount = 0;
- while (fieldIt.more()) {
- BSONElement arrEl = fieldIt.next();
- if (arrEl.type() != Array) {
+ BSONObjBuilder bob;
+ appendIntervalBound(bob, low);
+ appendIntervalBound(bob, high);
+ Interval toCompare(bob.obj(), startInclusive, endInclusive);
+
+ return Interval::INTERVAL_EQUALS == trueInt.compare(toCompare);
+}
+
+/**
+ * Returns whether the BSON representation of the index bounds in
+ * 'testBounds' matches 'trueBounds'.
+ *
+ * 'testBounds' should be of the following format:
+ * {<field 1>: <oil 1>, <field 2>: <oil 2>, ...}
+ * Each ordered interval list (e.g. <oil 1>) is an array of arrays of
+ * the format:
+ * [[<low 1>,<high 1>,<lowInclusive 1>,<highInclusive 1>], ...]
+ *
+ * For example,
+ * {a: [[1,2,true,false], [3,4,false,true]], b: [[-Infinity, Infinity]]}
+ * means that the index bounds on field 'a' consist of the two intervals
+ * [1, 2) and (3, 4] and the index bounds on field 'b' are [-Infinity, Infinity].
+ */
+bool boundsMatch(const BSONObj& testBounds, const IndexBounds trueBounds) {
+ // Iterate over the fields on which we have index bounds.
+ BSONObjIterator fieldIt(testBounds);
+ int fieldItCount = 0;
+ while (fieldIt.more()) {
+ BSONElement arrEl = fieldIt.next();
+ if (arrEl.type() != Array) {
+ return false;
+ }
+ // Iterate over an ordered interval list for
+ // a particular field.
+ BSONObjIterator oilIt(arrEl.Obj());
+ int oilItCount = 0;
+ while (oilIt.more()) {
+ BSONElement intervalEl = oilIt.next();
+ if (intervalEl.type() != Array) {
return false;
}
- // Iterate over an ordered interval list for
- // a particular field.
- BSONObjIterator oilIt(arrEl.Obj());
- int oilItCount = 0;
- while (oilIt.more()) {
- BSONElement intervalEl = oilIt.next();
- if (intervalEl.type() != Array) {
- return false;
- }
- Interval trueInt = trueBounds.getInterval(fieldItCount, oilItCount);
- if (!intervalMatches(intervalEl.Obj(), trueInt)) {
- return false;
- }
- ++oilItCount;
+ Interval trueInt = trueBounds.getInterval(fieldItCount, oilItCount);
+ if (!intervalMatches(intervalEl.Obj(), trueInt)) {
+ return false;
}
- ++fieldItCount;
+ ++oilItCount;
}
-
- return true;
+ ++fieldItCount;
}
-} // namespace
+ return true;
+}
+
+} // namespace
namespace mongo {
- /**
- * Looks in the children stored in the 'nodes' field of 'testSoln'
- * to see if thet match the 'children' field of 'trueSoln'.
- *
- * This does an unordered comparison, i.e. childrenMatch returns
- * true as long as the set of subtrees in testSoln's 'nodes' matches
- * the set of subtrees in trueSoln's 'children' vector.
- */
- static bool childrenMatch(const BSONObj& testSoln, const QuerySolutionNode* trueSoln) {
- BSONElement children = testSoln["nodes"];
- if (children.eoo() || !children.isABSONObj()) { return false; }
-
- // The order of the children array in testSoln might not match
- // the order in trueSoln, so we have to check all combos with
- // these nested loops.
- BSONObjIterator i(children.Obj());
- while (i.more()) {
- BSONElement child = i.next();
- if (child.eoo() || !child.isABSONObj()) { return false; }
-
- // try to match against one of the QuerySolutionNode's children
- bool found = false;
- for (size_t j = 0; j < trueSoln->children.size(); ++j) {
- if (QueryPlannerTestLib::solutionMatches(child.Obj(), trueSoln->children[j])) {
- found = true;
- break;
- }
- }
+/**
+ * Looks in the children stored in the 'nodes' field of 'testSoln'
+ * to see if they match the 'children' field of 'trueSoln'.
+ *
+ * This does an unordered comparison, i.e. childrenMatch returns
+ * true as long as the set of subtrees in testSoln's 'nodes' matches
+ * the set of subtrees in trueSoln's 'children' vector.
+ */
+static bool childrenMatch(const BSONObj& testSoln, const QuerySolutionNode* trueSoln) {
+ BSONElement children = testSoln["nodes"];
+ if (children.eoo() || !children.isABSONObj()) {
+ return false;
+ }
- // we couldn't match child
- if (!found) { return false; }
+ // The order of the children array in testSoln might not match
+ // the order in trueSoln, so we have to check all combos with
+ // these nested loops.
+ BSONObjIterator i(children.Obj());
+ while (i.more()) {
+ BSONElement child = i.next();
+ if (child.eoo() || !child.isABSONObj()) {
+ return false;
}
- return true;
+ // try to match against one of the QuerySolutionNode's children
+ bool found = false;
+ for (size_t j = 0; j < trueSoln->children.size(); ++j) {
+ if (QueryPlannerTestLib::solutionMatches(child.Obj(), trueSoln->children[j])) {
+ found = true;
+ break;
+ }
+ }
+
+ // we couldn't match child
+ if (!found) {
+ return false;
+ }
}
- // static
- bool QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln,
- const QuerySolutionNode* trueSoln) {
- //
- // leaf nodes
- //
- if (STAGE_COLLSCAN == trueSoln->getType()) {
- const CollectionScanNode* csn = static_cast<const CollectionScanNode*>(trueSoln);
- BSONElement el = testSoln["cscan"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj csObj = el.Obj();
-
- BSONElement dir = csObj["dir"];
- if (dir.eoo() || !dir.isNumber()) { return false; }
- if (dir.numberInt() != csn->direction) { return false; }
-
- BSONElement filter = csObj["filter"];
- if (filter.eoo()) {
- return true;
- }
- else if (filter.isNull()) {
- return NULL == csn->filter;
- }
- else if (!filter.isABSONObj()) {
- return false;
- }
- return filterMatches(filter.Obj(), trueSoln);
+ return true;
+}
+
+// static
+bool QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln,
+ const QuerySolutionNode* trueSoln) {
+ //
+ // leaf nodes
+ //
+ if (STAGE_COLLSCAN == trueSoln->getType()) {
+ const CollectionScanNode* csn = static_cast<const CollectionScanNode*>(trueSoln);
+ BSONElement el = testSoln["cscan"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
}
- else if (STAGE_IXSCAN == trueSoln->getType()) {
- const IndexScanNode* ixn = static_cast<const IndexScanNode*>(trueSoln);
- BSONElement el = testSoln["ixscan"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj ixscanObj = el.Obj();
+ BSONObj csObj = el.Obj();
- BSONElement pattern = ixscanObj["pattern"];
- if (pattern.eoo() || !pattern.isABSONObj()) { return false; }
- if (pattern.Obj() != ixn->indexKeyPattern) { return false; }
+ BSONElement dir = csObj["dir"];
+ if (dir.eoo() || !dir.isNumber()) {
+ return false;
+ }
+ if (dir.numberInt() != csn->direction) {
+ return false;
+ }
- BSONElement bounds = ixscanObj["bounds"];
- if (!bounds.eoo()) {
- if (!bounds.isABSONObj()) {
- return false;
- }
- else if (!boundsMatch(bounds.Obj(), ixn->bounds)) {
- return false;
- }
- }
+ BSONElement filter = csObj["filter"];
+ if (filter.eoo()) {
+ return true;
+ } else if (filter.isNull()) {
+ return NULL == csn->filter;
+ } else if (!filter.isABSONObj()) {
+ return false;
+ }
+ return filterMatches(filter.Obj(), trueSoln);
+ } else if (STAGE_IXSCAN == trueSoln->getType()) {
+ const IndexScanNode* ixn = static_cast<const IndexScanNode*>(trueSoln);
+ BSONElement el = testSoln["ixscan"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj ixscanObj = el.Obj();
- BSONElement dir = ixscanObj["dir"];
- if (!dir.eoo() && NumberInt == dir.type()) {
- if (dir.numberInt() != ixn->direction) {
- return false;
- }
- }
+ BSONElement pattern = ixscanObj["pattern"];
+ if (pattern.eoo() || !pattern.isABSONObj()) {
+ return false;
+ }
+ if (pattern.Obj() != ixn->indexKeyPattern) {
+ return false;
+ }
- BSONElement filter = ixscanObj["filter"];
- if (filter.eoo()) {
- return true;
- }
- else if (filter.isNull()) {
- return NULL == ixn->filter;
+ BSONElement bounds = ixscanObj["bounds"];
+ if (!bounds.eoo()) {
+ if (!bounds.isABSONObj()) {
+ return false;
+ } else if (!boundsMatch(bounds.Obj(), ixn->bounds)) {
+ return false;
}
- else if (!filter.isABSONObj()) {
+ }
+
+ BSONElement dir = ixscanObj["dir"];
+ if (!dir.eoo() && NumberInt == dir.type()) {
+ if (dir.numberInt() != ixn->direction) {
return false;
}
- return filterMatches(filter.Obj(), trueSoln);
- }
- else if (STAGE_GEO_NEAR_2D == trueSoln->getType()) {
- const GeoNear2DNode* node = static_cast<const GeoNear2DNode*>(trueSoln);
- BSONElement el = testSoln["geoNear2d"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj geoObj = el.Obj();
- return geoObj == node->indexKeyPattern;
- }
- else if (STAGE_GEO_NEAR_2DSPHERE == trueSoln->getType()) {
- const GeoNear2DSphereNode* node = static_cast<const GeoNear2DSphereNode*>(trueSoln);
- BSONElement el = testSoln["geoNear2dsphere"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj geoObj = el.Obj();
- return geoObj == node->indexKeyPattern;
- }
- else if (STAGE_TEXT == trueSoln->getType()) {
- // {text: {search: "somestr", language: "something", filter: {blah: 1}}}
- const TextNode* node = static_cast<const TextNode*>(trueSoln);
- BSONElement el = testSoln["text"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj textObj = el.Obj();
-
- BSONElement searchElt = textObj["search"];
- if (!searchElt.eoo()) {
- if (searchElt.String() != node->query) {
- return false;
- }
+ }
+
+ BSONElement filter = ixscanObj["filter"];
+ if (filter.eoo()) {
+ return true;
+ } else if (filter.isNull()) {
+ return NULL == ixn->filter;
+ } else if (!filter.isABSONObj()) {
+ return false;
+ }
+ return filterMatches(filter.Obj(), trueSoln);
+ } else if (STAGE_GEO_NEAR_2D == trueSoln->getType()) {
+ const GeoNear2DNode* node = static_cast<const GeoNear2DNode*>(trueSoln);
+ BSONElement el = testSoln["geoNear2d"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj geoObj = el.Obj();
+ return geoObj == node->indexKeyPattern;
+ } else if (STAGE_GEO_NEAR_2DSPHERE == trueSoln->getType()) {
+ const GeoNear2DSphereNode* node = static_cast<const GeoNear2DSphereNode*>(trueSoln);
+ BSONElement el = testSoln["geoNear2dsphere"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj geoObj = el.Obj();
+ return geoObj == node->indexKeyPattern;
+ } else if (STAGE_TEXT == trueSoln->getType()) {
+ // {text: {search: "somestr", language: "something", filter: {blah: 1}}}
+ const TextNode* node = static_cast<const TextNode*>(trueSoln);
+ BSONElement el = testSoln["text"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj textObj = el.Obj();
+
+ BSONElement searchElt = textObj["search"];
+ if (!searchElt.eoo()) {
+ if (searchElt.String() != node->query) {
+ return false;
}
+ }
- BSONElement languageElt = textObj["language"];
- if (!languageElt.eoo()) {
- if (languageElt.String() != node->language) {
- return false;
- }
+ BSONElement languageElt = textObj["language"];
+ if (!languageElt.eoo()) {
+ if (languageElt.String() != node->language) {
+ return false;
}
+ }
- BSONElement caseSensitiveElt = textObj["caseSensitive"];
- if (!caseSensitiveElt.eoo()) {
- if (caseSensitiveElt.trueValue() != node->caseSensitive) {
- return false;
- }
+ BSONElement caseSensitiveElt = textObj["caseSensitive"];
+ if (!caseSensitiveElt.eoo()) {
+ if (caseSensitiveElt.trueValue() != node->caseSensitive) {
+ return false;
}
+ }
- BSONElement indexPrefix = textObj["prefix"];
- if (!indexPrefix.eoo()) {
- if (!indexPrefix.isABSONObj()) {
- return false;
- }
+ BSONElement indexPrefix = textObj["prefix"];
+ if (!indexPrefix.eoo()) {
+ if (!indexPrefix.isABSONObj()) {
+ return false;
+ }
- if (0 != indexPrefix.Obj().woCompare(node->indexPrefix)) {
- return false;
- }
+ if (0 != indexPrefix.Obj().woCompare(node->indexPrefix)) {
+ return false;
}
+ }
- BSONElement filter = textObj["filter"];
- if (!filter.eoo()) {
- if (filter.isNull()) {
- if (NULL != node->filter) { return false; }
- }
- else if (!filter.isABSONObj()) {
- return false;
- }
- else if (!filterMatches(filter.Obj(), trueSoln)) {
+ BSONElement filter = textObj["filter"];
+ if (!filter.eoo()) {
+ if (filter.isNull()) {
+ if (NULL != node->filter) {
return false;
}
+ } else if (!filter.isABSONObj()) {
+ return false;
+ } else if (!filterMatches(filter.Obj(), trueSoln)) {
+ return false;
}
-
- return true;
}
- //
- // internal nodes
- //
- if (STAGE_FETCH == trueSoln->getType()) {
- const FetchNode* fn = static_cast<const FetchNode*>(trueSoln);
+ return true;
+ }
- BSONElement el = testSoln["fetch"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj fetchObj = el.Obj();
+ //
+ // internal nodes
+ //
+ if (STAGE_FETCH == trueSoln->getType()) {
+ const FetchNode* fn = static_cast<const FetchNode*>(trueSoln);
- BSONElement filter = fetchObj["filter"];
- if (!filter.eoo()) {
- if (filter.isNull()) {
- if (NULL != fn->filter) { return false; }
- }
- else if (!filter.isABSONObj()) {
- return false;
- }
- else if (!filterMatches(filter.Obj(), trueSoln)) {
- return false;
- }
- }
+ BSONElement el = testSoln["fetch"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj fetchObj = el.Obj();
- BSONElement child = fetchObj["node"];
- if (child.eoo() || !child.isABSONObj()) { return false; }
- return solutionMatches(child.Obj(), fn->children[0]);
- }
- else if (STAGE_OR == trueSoln->getType()) {
- const OrNode * orn = static_cast<const OrNode*>(trueSoln);
- BSONElement el = testSoln["or"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj orObj = el.Obj();
- return childrenMatch(orObj, orn);
- }
- else if (STAGE_AND_HASH == trueSoln->getType()) {
- const AndHashNode* ahn = static_cast<const AndHashNode*>(trueSoln);
- BSONElement el = testSoln["andHash"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj andHashObj = el.Obj();
-
- BSONElement filter = andHashObj["filter"];
- if (!filter.eoo()) {
- if (filter.isNull()) {
- if (NULL != ahn->filter) { return false; }
- }
- else if (!filter.isABSONObj()) {
- return false;
- }
- else if (!filterMatches(filter.Obj(), trueSoln)) {
+ BSONElement filter = fetchObj["filter"];
+ if (!filter.eoo()) {
+ if (filter.isNull()) {
+ if (NULL != fn->filter) {
return false;
}
+ } else if (!filter.isABSONObj()) {
+ return false;
+ } else if (!filterMatches(filter.Obj(), trueSoln)) {
+ return false;
}
+ }
- return childrenMatch(andHashObj, ahn);
+ BSONElement child = fetchObj["node"];
+ if (child.eoo() || !child.isABSONObj()) {
+ return false;
+ }
+ return solutionMatches(child.Obj(), fn->children[0]);
+ } else if (STAGE_OR == trueSoln->getType()) {
+ const OrNode* orn = static_cast<const OrNode*>(trueSoln);
+ BSONElement el = testSoln["or"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj orObj = el.Obj();
+ return childrenMatch(orObj, orn);
+ } else if (STAGE_AND_HASH == trueSoln->getType()) {
+ const AndHashNode* ahn = static_cast<const AndHashNode*>(trueSoln);
+ BSONElement el = testSoln["andHash"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
}
- else if (STAGE_AND_SORTED == trueSoln->getType()) {
- const AndSortedNode* asn = static_cast<const AndSortedNode*>(trueSoln);
- BSONElement el = testSoln["andSorted"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj andSortedObj = el.Obj();
+ BSONObj andHashObj = el.Obj();
- BSONElement filter = andSortedObj["filter"];
- if (!filter.eoo()) {
- if (filter.isNull()) {
- if (NULL != asn->filter) { return false; }
- }
- else if (!filter.isABSONObj()) {
- return false;
- }
- else if (!filterMatches(filter.Obj(), trueSoln)) {
+ BSONElement filter = andHashObj["filter"];
+ if (!filter.eoo()) {
+ if (filter.isNull()) {
+ if (NULL != ahn->filter) {
return false;
}
+ } else if (!filter.isABSONObj()) {
+ return false;
+ } else if (!filterMatches(filter.Obj(), trueSoln)) {
+ return false;
}
-
- return childrenMatch(andSortedObj, asn);
}
- else if (STAGE_PROJECTION == trueSoln->getType()) {
- const ProjectionNode* pn = static_cast<const ProjectionNode*>(trueSoln);
- BSONElement el = testSoln["proj"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj projObj = el.Obj();
+ return childrenMatch(andHashObj, ahn);
+ } else if (STAGE_AND_SORTED == trueSoln->getType()) {
+ const AndSortedNode* asn = static_cast<const AndSortedNode*>(trueSoln);
+ BSONElement el = testSoln["andSorted"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj andSortedObj = el.Obj();
- BSONElement projType = projObj["type"];
- if (!projType.eoo()) {
- string projTypeStr = projType.str();
- if (!((pn->projType == ProjectionNode::DEFAULT && projTypeStr == "default") ||
- (pn->projType == ProjectionNode::SIMPLE_DOC && projTypeStr == "simple") ||
- (pn->projType == ProjectionNode::COVERED_ONE_INDEX &&
- projTypeStr == "coveredIndex"))) {
+ BSONElement filter = andSortedObj["filter"];
+ if (!filter.eoo()) {
+ if (filter.isNull()) {
+ if (NULL != asn->filter) {
return false;
}
+ } else if (!filter.isABSONObj()) {
+ return false;
+ } else if (!filterMatches(filter.Obj(), trueSoln)) {
+ return false;
}
+ }
- BSONElement spec = projObj["spec"];
- if (spec.eoo() || !spec.isABSONObj()) { return false; }
- BSONElement child = projObj["node"];
- if (child.eoo() || !child.isABSONObj()) { return false; }
+ return childrenMatch(andSortedObj, asn);
+ } else if (STAGE_PROJECTION == trueSoln->getType()) {
+ const ProjectionNode* pn = static_cast<const ProjectionNode*>(trueSoln);
- return (spec.Obj() == pn->projection)
- && solutionMatches(child.Obj(), pn->children[0]);
+ BSONElement el = testSoln["proj"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj projObj = el.Obj();
+
+ BSONElement projType = projObj["type"];
+ if (!projType.eoo()) {
+ string projTypeStr = projType.str();
+ if (!((pn->projType == ProjectionNode::DEFAULT && projTypeStr == "default") ||
+ (pn->projType == ProjectionNode::SIMPLE_DOC && projTypeStr == "simple") ||
+ (pn->projType == ProjectionNode::COVERED_ONE_INDEX &&
+ projTypeStr == "coveredIndex"))) {
+ return false;
+ }
}
- else if (STAGE_SORT == trueSoln->getType()) {
- const SortNode* sn = static_cast<const SortNode*>(trueSoln);
- BSONElement el = testSoln["sort"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj sortObj = el.Obj();
- BSONElement patternEl = sortObj["pattern"];
- if (patternEl.eoo() || !patternEl.isABSONObj()) { return false; }
- BSONElement limitEl = sortObj["limit"];
- if (!limitEl.isNumber()) { return false; }
- BSONElement child = sortObj["node"];
- if (child.eoo() || !child.isABSONObj()) { return false; }
+ BSONElement spec = projObj["spec"];
+ if (spec.eoo() || !spec.isABSONObj()) {
+ return false;
+ }
+ BSONElement child = projObj["node"];
+ if (child.eoo() || !child.isABSONObj()) {
+ return false;
+ }
- size_t expectedLimit = limitEl.numberInt();
- return (patternEl.Obj() == sn->pattern)
- && (expectedLimit == sn->limit)
- && solutionMatches(child.Obj(), sn->children[0]);
+ return (spec.Obj() == pn->projection) && solutionMatches(child.Obj(), pn->children[0]);
+ } else if (STAGE_SORT == trueSoln->getType()) {
+ const SortNode* sn = static_cast<const SortNode*>(trueSoln);
+ BSONElement el = testSoln["sort"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj sortObj = el.Obj();
+
+ BSONElement patternEl = sortObj["pattern"];
+ if (patternEl.eoo() || !patternEl.isABSONObj()) {
+ return false;
}
- else if (STAGE_SORT_MERGE == trueSoln->getType()) {
- const MergeSortNode* msn = static_cast<const MergeSortNode*>(trueSoln);
- BSONElement el = testSoln["mergeSort"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj mergeSortObj = el.Obj();
- return childrenMatch(mergeSortObj, msn);
+ BSONElement limitEl = sortObj["limit"];
+ if (!limitEl.isNumber()) {
+ return false;
+ }
+ BSONElement child = sortObj["node"];
+ if (child.eoo() || !child.isABSONObj()) {
+ return false;
}
- else if (STAGE_SKIP == trueSoln->getType()) {
- const SkipNode* sn = static_cast<const SkipNode*>(trueSoln);
- BSONElement el = testSoln["skip"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj sortObj = el.Obj();
- BSONElement skipEl = sortObj["n"];
- if (!skipEl.isNumber()) { return false; }
- BSONElement child = sortObj["node"];
- if (child.eoo() || !child.isABSONObj()) { return false; }
+ size_t expectedLimit = limitEl.numberInt();
+ return (patternEl.Obj() == sn->pattern) && (expectedLimit == sn->limit) &&
+ solutionMatches(child.Obj(), sn->children[0]);
+ } else if (STAGE_SORT_MERGE == trueSoln->getType()) {
+ const MergeSortNode* msn = static_cast<const MergeSortNode*>(trueSoln);
+ BSONElement el = testSoln["mergeSort"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj mergeSortObj = el.Obj();
+ return childrenMatch(mergeSortObj, msn);
+ } else if (STAGE_SKIP == trueSoln->getType()) {
+ const SkipNode* sn = static_cast<const SkipNode*>(trueSoln);
+ BSONElement el = testSoln["skip"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj sortObj = el.Obj();
- return (skipEl.numberInt() == sn->skip)
- && solutionMatches(child.Obj(), sn->children[0]);
+ BSONElement skipEl = sortObj["n"];
+ if (!skipEl.isNumber()) {
+ return false;
+ }
+ BSONElement child = sortObj["node"];
+ if (child.eoo() || !child.isABSONObj()) {
+ return false;
}
- else if (STAGE_LIMIT == trueSoln->getType()) {
- const LimitNode* ln = static_cast<const LimitNode*>(trueSoln);
- BSONElement el = testSoln["limit"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj sortObj = el.Obj();
- BSONElement limitEl = sortObj["n"];
- if (!limitEl.isNumber()) { return false; }
- BSONElement child = sortObj["node"];
- if (child.eoo() || !child.isABSONObj()) { return false; }
+ return (skipEl.numberInt() == sn->skip) && solutionMatches(child.Obj(), sn->children[0]);
+ } else if (STAGE_LIMIT == trueSoln->getType()) {
+ const LimitNode* ln = static_cast<const LimitNode*>(trueSoln);
+ BSONElement el = testSoln["limit"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj sortObj = el.Obj();
- return (limitEl.numberInt() == ln->limit)
- && solutionMatches(child.Obj(), ln->children[0]);
+ BSONElement limitEl = sortObj["n"];
+ if (!limitEl.isNumber()) {
+ return false;
+ }
+ BSONElement child = sortObj["node"];
+ if (child.eoo() || !child.isABSONObj()) {
+ return false;
}
- else if (STAGE_KEEP_MUTATIONS == trueSoln->getType()) {
- const KeepMutationsNode* kn = static_cast<const KeepMutationsNode*>(trueSoln);
- BSONElement el = testSoln["keep"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj keepObj = el.Obj();
+ return (limitEl.numberInt() == ln->limit) && solutionMatches(child.Obj(), ln->children[0]);
+ } else if (STAGE_KEEP_MUTATIONS == trueSoln->getType()) {
+ const KeepMutationsNode* kn = static_cast<const KeepMutationsNode*>(trueSoln);
- // Doesn't have any parameters really.
- BSONElement child = keepObj["node"];
- if (child.eoo() || !child.isABSONObj()) { return false; }
+ BSONElement el = testSoln["keep"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj keepObj = el.Obj();
- return solutionMatches(child.Obj(), kn->children[0]);
+ // Doesn't have any parameters really.
+ BSONElement child = keepObj["node"];
+ if (child.eoo() || !child.isABSONObj()) {
+ return false;
}
- else if (STAGE_SHARDING_FILTER == trueSoln->getType()) {
- const ShardingFilterNode* fn = static_cast<const ShardingFilterNode*>(trueSoln);
- BSONElement el = testSoln["sharding_filter"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj keepObj = el.Obj();
+ return solutionMatches(child.Obj(), kn->children[0]);
+ } else if (STAGE_SHARDING_FILTER == trueSoln->getType()) {
+ const ShardingFilterNode* fn = static_cast<const ShardingFilterNode*>(trueSoln);
- BSONElement child = keepObj["node"];
- if (child.eoo() || !child.isABSONObj()) { return false; }
+ BSONElement el = testSoln["sharding_filter"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj keepObj = el.Obj();
- return solutionMatches(child.Obj(), fn->children[0]);
+ BSONElement child = keepObj["node"];
+ if (child.eoo() || !child.isABSONObj()) {
+ return false;
}
- return false;
+ return solutionMatches(child.Obj(), fn->children[0]);
}
+ return false;
+}
+
} // namespace mongo
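
A minimal sketch (not part of the patch) of how the BSON test-solution DSL handled above pairs with a hand-built QuerySolutionNode tree. FetchNode/TextNode default construction and parent-owns-children cleanup are assumptions here; the matcher keys used ("fetch", "node", "text", "search") are the ones this function recognizes.

    #include "mongo/db/query/query_planner_test_lib.h"
    #include "mongo/db/query/query_solution.h"

    namespace mongo {
    bool exampleTextSolutionMatch() {
        TextNode* text = new TextNode();
        text->query = "cake";  // must equal the spec's 'search' string

        FetchNode* fetch = new FetchNode();
        fetch->children.push_back(text);  // parent assumed to own the child

        bool ok = QueryPlannerTestLib::solutionMatches(
            "{fetch: {node: {text: {search: 'cake'}}}}", fetch);

        delete fetch;  // assumed to also free 'text' via the children vector
        return ok;
    }
    }  // namespace mongo
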
diff --git a/src/mongo/db/query/query_planner_test_lib.h b/src/mongo/db/query/query_planner_test_lib.h
index f1d6e267512..0a1931d1828 100644
--- a/src/mongo/db/query/query_planner_test_lib.h
+++ b/src/mongo/db/query/query_planner_test_lib.h
@@ -41,20 +41,20 @@
namespace mongo {
- class QueryPlannerTestLib {
- public:
- /**
- * @param testSoln -- a BSON representation of a query solution
- * @param trueSoln -- the root node of a query solution tree
- *
- * Returns true if the BSON representation matches the actual
- * tree, otherwise returns false.
- */
- static bool solutionMatches(const BSONObj& testSoln, const QuerySolutionNode* trueSoln);
+class QueryPlannerTestLib {
+public:
+ /**
+ * @param testSoln -- a BSON representation of a query solution
+ * @param trueSoln -- the root node of a query solution tree
+ *
+ * Returns true if the BSON representation matches the actual
+ * tree, otherwise returns false.
+ */
+ static bool solutionMatches(const BSONObj& testSoln, const QuerySolutionNode* trueSoln);
- static bool solutionMatches(const std::string& testSoln, const QuerySolutionNode* trueSoln) {
- return solutionMatches(fromjson(testSoln), trueSoln);
- }
- };
+ static bool solutionMatches(const std::string& testSoln, const QuerySolutionNode* trueSoln) {
+ return solutionMatches(fromjson(testSoln), trueSoln);
+ }
+};
} // namespace mongo
diff --git a/src/mongo/db/query/query_planner_text_test.cpp b/src/mongo/db/query/query_planner_text_test.cpp
index c1cbc292d94..4512536768b 100644
--- a/src/mongo/db/query/query_planner_text_test.cpp
+++ b/src/mongo/db/query/query_planner_text_test.cpp
@@ -35,309 +35,371 @@
namespace {
- using namespace mongo;
-
- //
- // Text
- // Creating an FTS index {a:1, b:"text", c:1} actually
- // creates an index with spec {a:1, _fts: "text", _ftsx: 1, c:1}.
- // So, the latter is what we pass in to the planner.
- //
- // PS. You can also do {a:1, b:"text", d:"text", c:1} and it will create an index with the same
- // key pattern.
- //
-
- // Basic test that it works.
- TEST_F(QueryPlannerTest, SimpleText) {
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$text: {$search: 'blah'}}"));
-
- assertNumSolutions(1);
- assertSolutionExists("{text: {search: 'blah'}}");
- }
-
- // If you create an index {a:1, b: "text"} you can't use it for queries on just 'a'.
- TEST_F(QueryPlannerTest, CantUseTextUnlessHaveTextPred) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{a:1}"));
-
- // No table scans allowed so there is no solution.
- assertNumSolutions(0);
- }
-
- // But if you create an index {a:1, b:"text"} you can use it if it has a pred on 'a'
- // and a text query.
- TEST_F(QueryPlannerTest, HaveOKPrefixOnTextIndex) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "_fts" << "text" << "_ftsx" << 1));
-
- runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
- assertNumSolutions(1);
- assertSolutionExists("{text: {prefix: {a:1}, search: 'blah'}}}}");
-
- // TODO: Do we want to $or a collection scan with a text search?
- // runQuery(fromjson("{$or: [{b:1}, {a:1, $text: {$search: 'blah'}}]}"));
- // assertNumSolutions(1);
-
- runQuery(fromjson("{$or: [{_id:1}, {a:1, $text: {$search: 'blah'}}]}"));
- assertNumSolutions(1);
- }
-
- // But the prefixes must be points.
- TEST_F(QueryPlannerTest, HaveBadPrefixOnTextIndex) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "_fts" << "text" << "_ftsx" << 1));
- runInvalidQuery(fromjson("{a:{$gt: 1}, $text:{$search: 'blah'}}"));
-
- runInvalidQuery(fromjson("{$text: {$search: 'blah'}}"));
-
- runInvalidQuery(fromjson("{$or: [{a:1}, {$text: {$search: 'blah'}}]}"));
- }
-
- // There can be more than one prefix, but they all require points.
- TEST_F(QueryPlannerTest, ManyPrefixTextIndex) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "b" << 1 << "_fts" << "text" << "_ftsx" << 1));
-
- // Both points.
- runQuery(fromjson("{a:1, b:1, $text:{$search: 'blah'}}"));
- assertSolutionExists("{text: {prefix: {a:1, b:1}, search: 'blah'}}");
- assertNumSolutions(1);
-
- // Missing a.
- runInvalidQuery(fromjson("{b:1, $text:{$search: 'blah'}}"));
-
- // Missing b.
- runInvalidQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
-
- // a is not a point
- runInvalidQuery(fromjson("{a:{$gt: 1}, b:1, $text:{$search: 'blah'}}"));
-
- // b is not a point
- runInvalidQuery(fromjson("{a:1, b:{$gt: 1}, $text:{$search: 'blah'}}"));
- }
-
- // And, suffixes. They're optional and don't need to be points.
- TEST_F(QueryPlannerTest, SuffixOptional) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "_fts" << "text" << "_ftsx" << 1 << "b" << 1));
-
- runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
- assertNumSolutions(1);
- assertSolutionExists("{text: {prefix: {a:1}, search: 'blah'}}}}");
-
- runQuery(fromjson("{a:1, b:{$gt: 7}, $text:{$search: 'blah'}}"));
- assertSolutionExists("{text: {prefix: {a:1}, filter: {b: {$gt: 7}}, search: 'blah'}}}}");
- assertNumSolutions(1);
- }
-
- TEST_F(QueryPlannerTest, RemoveFromSubtree) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "_fts" << "text" << "_ftsx" << 1 << "b" << 1));
-
- runQuery(fromjson("{a:1, $or: [{a:1}, {b:7}], $text:{$search: 'blah'}}"));
- assertNumSolutions(1);
-
- assertSolutionExists("{fetch: {filter: {$or:[{a:1},{b:7}]},"
- "node: {text: {prefix: {a:1}, search: 'blah'}}}}");
- }
-
- // Text is quite often multikey. None of the prefixes can be arrays, and suffixes are indexed
- // as-is, so we should compound even if it's multikey.
- TEST_F(QueryPlannerTest, CompoundPrefixEvenIfMultikey) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "b" << 1 << "_fts" << "text" << "_ftsx" << 1), true);
-
- // Both points.
- runQuery(fromjson("{a:1, b:1, $text:{$search: 'blah'}}"));
- assertNumSolutions(1);
- assertSolutionExists("{text: {prefix: {a:1, b:1}, search: 'blah'}}");
- }
-
- TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafPrefix) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "_fts" << "text" << "_ftsx" << 1 << "b" << 1));
-
- // 'a' is not an EQ so it doesn't compound w/the text pred. We also shouldn't use the text
- // index to satisfy it w/o the text query.
- runInvalidQuery(fromjson("{a:{$elemMatch:{$gt: 0, $lt: 2}}, $text:{$search: 'blah'}}"));
- }
-
- TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafSuffixNoPrefix) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1 << "b" << 1));
-
- runQuery(fromjson("{b:{$elemMatch:{$gt: 0, $lt: 2}}, $text:{$search: 'blah'}}"));
- assertNumSolutions(1);
- }
-
- TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndex) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$and: [{a: 3}, {$text: {$search: 'foo'}}], a: 3}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{text: {prefix: {a:3}, search: 'foo'}}");
- }
-
- // SERVER-15639: Test that predicates on index prefix fields which are not assigned to the index
- // prefix are correctly included in the solution node filter.
- TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndexAndMultiplePredsOnIndexPrefix) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$and: [{a: 1}, {a: 2}, {$text: {$search: 'foo'}}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{text: {prefix: {a: 1}, search: 'foo', filter: {a: 2}}}");
- }
-
- // SERVER-13039: Test that we don't generate invalid solutions when the TEXT node
- // is buried beneath a logical node.
- TEST_F(QueryPlannerTest, TextInsideOrBasic) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{a: 0, $or: [{_id: 1}, {$text: {$search: 'foo'}}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a:0}, node: {or: {nodes: ["
- "{text: {search: 'foo'}}, "
- "{ixscan: {filter: null, pattern: {_id: 1}}}]}}}}");
- }
-
- // SERVER-13039
- TEST_F(QueryPlannerTest, TextInsideOrWithAnotherOr) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$and: [{$or: [{a: 3}, {a: 4}]}, "
- "{$or: [{$text: {$search: 'foo'}}, {a: 5}]}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {$or: [{a: 3}, {a: 4}]}, node: "
- "{or: {nodes: ["
- "{text: {search: 'foo'}}, "
- "{ixscan: {filter: null, pattern: {a: 1}}}]}}}}");
- }
-
- // SERVER-13039
- TEST_F(QueryPlannerTest, TextInsideOrOfAnd) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$or: [{a: {$gt: 1, $gt: 2}}, "
- "{a: {$gt: 3}, $text: {$search: 'foo'}}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}, bounds: "
- "{a: [[2,Infinity,false,true]]}}}, "
- "{fetch: {filter: {a:{$gt:3}}, node: "
- "{text: {search: 'foo'}}}}]}}}}");
- }
-
- // SERVER-13039
- TEST_F(QueryPlannerTest, TextInsideAndOrAnd) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{a: 1, $or: [{a:2}, {b:2}, "
- "{a: 1, $text: {$search: 'foo'}}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a:1}, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}}, "
- "{fetch: {filter: {a:1}, node: {text: {search: 'foo'}}}}, "
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
- }
-
- // SERVER-13039
- TEST_F(QueryPlannerTest, TextInsideAndOrAndOr) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$or: [{a: {$gt: 1, $gt: 2}}, "
- "{a: {$gt: 3}, $or: [{$text: {$search: 'foo'}}, "
- "{a: 6}]}], "
- "a: 5}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a:5}, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a: 1}}}, "
- "{fetch: {filter: {a:{$gt:3}}, node: {or: {nodes: ["
- "{text: {search: 'foo'}}, "
- "{ixscan: {filter: null, pattern: {a: 1}}}]}}}}]}}}}");
- }
-
- // If only one branch of the $or can be indexed, then no indexed
- // solutions are generated, even if one branch is $text.
- TEST_F(QueryPlannerTest, TextInsideOrOneBranchNotIndexed) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{a: 1, $or: [{b: 2}, {$text: {$search: 'foo'}}]}"));
-
- assertNumSolutions(0);
- }
-
- // If the unindexable $or is not the one containing the $text predicate,
- // then we should still be able to generate an indexed solution.
- TEST_F(QueryPlannerTest, TextInsideOrWithAnotherUnindexableOr) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$and: [{$or: [{a: 1}, {b: 1}]}, "
- "{$or: [{a: 2}, {$text: {$search: 'foo'}}]}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {$or:[{a:1},{b:1}]}, node: {or: {nodes: ["
- "{text: {search: 'foo'}}, "
- "{ixscan: {filter: null, pattern: {a:1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, AndTextWithGeoNonNear) {
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$text: {$search: 'foo'}, a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}}"));
-
- // Mandatory text index is used, and geo predicate becomes a filter.
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {text: {search: 'foo'}}}}");
- }
-
- // SERVER-13960: $text beneath $or with exact predicates.
- TEST_F(QueryPlannerTest, OrTextExact) {
- addIndex(BSON("pre" << 1 << "_fts" << "text" << "_ftsx" << 1));
- addIndex(BSON("other" << 1));
- runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: 2}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{text: {search: 'dave', prefix: {pre: 3}}},"
- "{ixscan: {filter: null, pattern: {other: 1}}}]}}}}");
- }
-
- // SERVER-13960: $text beneath $or with an inexact covered predicate.
- TEST_F(QueryPlannerTest, OrTextInexactCovered) {
- addIndex(BSON("pre" << 1 << "_fts" << "text" << "_ftsx" << 1));
- addIndex(BSON("other" << 1));
- runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: /bar/}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{text: {search: 'dave', prefix: {pre: 3}}},"
- "{ixscan: {filter: {$or: [{other: /bar/}]}, "
- "pattern: {other: 1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, TextCaseSensitive) {
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$text: {$search: 'blah', $caseSensitive: true}}"));
-
- assertNumSolutions(1);
- assertSolutionExists("{text: {search: 'blah', caseSensitive: true}}");
- }
+using namespace mongo;
+
+//
+// Text
+// Creating an FTS index {a:1, b:"text", c:1} actually
+// creates an index with spec {a:1, _fts: "text", _ftsx: 1, c:1}.
+// So, the latter is what we pass in to the planner.
+//
+// PS. You can also do {a:1, b:"text", d:"text", c:1} and it will create an index with the same
+// key pattern.
+//
+
+// Basic test that it works.
+TEST_F(QueryPlannerTest, SimpleText) {
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson("{$text: {$search: 'blah'}}"));
+
+ assertNumSolutions(1);
+ assertSolutionExists("{text: {search: 'blah'}}");
+}
+
+// If you create an index {a:1, b: "text"} you can't use it for queries on just 'a'.
+TEST_F(QueryPlannerTest, CantUseTextUnlessHaveTextPred) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson("{a:1}"));
+
+ // No table scans allowed so there is no solution.
+ assertNumSolutions(0);
+}
+
+// But if you create an index {a:1, b:"text"} you can use it if it has a pred on 'a'
+// and a text query.
+TEST_F(QueryPlannerTest, HaveOKPrefixOnTextIndex) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+
+ runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
+ assertNumSolutions(1);
+ assertSolutionExists("{text: {prefix: {a:1}, search: 'blah'}}}}");
+
+ // TODO: Do we want to $or a collection scan with a text search?
+ // runQuery(fromjson("{$or: [{b:1}, {a:1, $text: {$search: 'blah'}}]}"));
+ // assertNumSolutions(1);
+
+ runQuery(fromjson("{$or: [{_id:1}, {a:1, $text: {$search: 'blah'}}]}"));
+ assertNumSolutions(1);
+}
+
+// But the prefixes must be points.
+TEST_F(QueryPlannerTest, HaveBadPrefixOnTextIndex) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runInvalidQuery(fromjson("{a:{$gt: 1}, $text:{$search: 'blah'}}"));
+
+ runInvalidQuery(fromjson("{$text: {$search: 'blah'}}"));
+
+ runInvalidQuery(fromjson("{$or: [{a:1}, {$text: {$search: 'blah'}}]}"));
+}
+
+// There can be more than one prefix, but they all require points.
+TEST_F(QueryPlannerTest, ManyPrefixTextIndex) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "b" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+
+ // Both points.
+ runQuery(fromjson("{a:1, b:1, $text:{$search: 'blah'}}"));
+ assertSolutionExists("{text: {prefix: {a:1, b:1}, search: 'blah'}}");
+ assertNumSolutions(1);
+
+ // Missing a.
+ runInvalidQuery(fromjson("{b:1, $text:{$search: 'blah'}}"));
+
+ // Missing b.
+ runInvalidQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
+
+ // a is not a point
+ runInvalidQuery(fromjson("{a:{$gt: 1}, b:1, $text:{$search: 'blah'}}"));
+
+ // b is not a point
+ runInvalidQuery(fromjson("{a:1, b:{$gt: 1}, $text:{$search: 'blah'}}"));
+}
+
+// And, suffixes. They're optional and don't need to be points.
+TEST_F(QueryPlannerTest, SuffixOptional) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1 << "b" << 1));
+
+ runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
+ assertNumSolutions(1);
+ assertSolutionExists("{text: {prefix: {a:1}, search: 'blah'}}}}");
+
+ runQuery(fromjson("{a:1, b:{$gt: 7}, $text:{$search: 'blah'}}"));
+ assertSolutionExists("{text: {prefix: {a:1}, filter: {b: {$gt: 7}}, search: 'blah'}}}}");
+ assertNumSolutions(1);
+}
+
+TEST_F(QueryPlannerTest, RemoveFromSubtree) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1 << "b" << 1));
+
+ runQuery(fromjson("{a:1, $or: [{a:1}, {b:7}], $text:{$search: 'blah'}}"));
+ assertNumSolutions(1);
+
+ assertSolutionExists(
+ "{fetch: {filter: {$or:[{a:1},{b:7}]},"
+ "node: {text: {prefix: {a:1}, search: 'blah'}}}}");
+}
+
+// Text is quite often multikey. None of the prefixes can be arrays, and suffixes are indexed
+// as-is, so we should compound even if it's multikey.
+TEST_F(QueryPlannerTest, CompoundPrefixEvenIfMultikey) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "b" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1),
+ true);
+
+ // Both points.
+ runQuery(fromjson("{a:1, b:1, $text:{$search: 'blah'}}"));
+ assertNumSolutions(1);
+ assertSolutionExists("{text: {prefix: {a:1, b:1}, search: 'blah'}}");
+}
+
+TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafPrefix) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1 << "b" << 1));
+
+ // 'a' is not an EQ so it doesn't compound w/the text pred. We also shouldn't use the text
+ // index to satisfy it w/o the text query.
+ runInvalidQuery(fromjson("{a:{$elemMatch:{$gt: 0, $lt: 2}}, $text:{$search: 'blah'}}"));
+}
+
+TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafSuffixNoPrefix) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1 << "b" << 1));
+
+ runQuery(fromjson("{b:{$elemMatch:{$gt: 0, $lt: 2}}, $text:{$search: 'blah'}}"));
+ assertNumSolutions(1);
+}
+
+TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndex) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson("{$and: [{a: 3}, {$text: {$search: 'foo'}}], a: 3}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{text: {prefix: {a:3}, search: 'foo'}}");
+}
+
+// SERVER-15639: Test that predicates on index prefix fields which are not assigned to the index
+// prefix are correctly included in the solution node filter.
+TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndexAndMultiplePredsOnIndexPrefix) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson("{$and: [{a: 1}, {a: 2}, {$text: {$search: 'foo'}}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{text: {prefix: {a: 1}, search: 'foo', filter: {a: 2}}}");
+}
+
+// SERVER-13039: Test that we don't generate invalid solutions when the TEXT node
+// is buried beneath a logical node.
+TEST_F(QueryPlannerTest, TextInsideOrBasic) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson("{a: 0, $or: [{_id: 1}, {$text: {$search: 'foo'}}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {a:0}, node: {or: {nodes: ["
+ "{text: {search: 'foo'}}, "
+ "{ixscan: {filter: null, pattern: {_id: 1}}}]}}}}");
+}
+
+// SERVER-13039
+TEST_F(QueryPlannerTest, TextInsideOrWithAnotherOr) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson(
+ "{$and: [{$or: [{a: 3}, {a: 4}]}, "
+ "{$or: [{$text: {$search: 'foo'}}, {a: 5}]}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {$or: [{a: 3}, {a: 4}]}, node: "
+ "{or: {nodes: ["
+ "{text: {search: 'foo'}}, "
+ "{ixscan: {filter: null, pattern: {a: 1}}}]}}}}");
+}
+
+// SERVER-13039
+TEST_F(QueryPlannerTest, TextInsideOrOfAnd) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson(
+ "{$or: [{a: {$gt: 1, $gt: 2}}, "
+ "{a: {$gt: 3}, $text: {$search: 'foo'}}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}, bounds: "
+ "{a: [[2,Infinity,false,true]]}}}, "
+ "{fetch: {filter: {a:{$gt:3}}, node: "
+ "{text: {search: 'foo'}}}}]}}}}");
+}
+
+// SERVER-13039
+TEST_F(QueryPlannerTest, TextInsideAndOrAnd) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson(
+ "{a: 1, $or: [{a:2}, {b:2}, "
+ "{a: 1, $text: {$search: 'foo'}}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {a:1}, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}}, "
+ "{fetch: {filter: {a:1}, node: {text: {search: 'foo'}}}}, "
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
+}
+
+// SERVER-13039
+TEST_F(QueryPlannerTest, TextInsideAndOrAndOr) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson(
+ "{$or: [{a: {$gt: 1, $gt: 2}}, "
+ "{a: {$gt: 3}, $or: [{$text: {$search: 'foo'}}, "
+ "{a: 6}]}], "
+ "a: 5}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {a:5}, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a: 1}}}, "
+ "{fetch: {filter: {a:{$gt:3}}, node: {or: {nodes: ["
+ "{text: {search: 'foo'}}, "
+ "{ixscan: {filter: null, pattern: {a: 1}}}]}}}}]}}}}");
+}
+
+// If only one branch of the $or can be indexed, then no indexed
+// solutions are generated, even if one branch is $text.
+TEST_F(QueryPlannerTest, TextInsideOrOneBranchNotIndexed) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson("{a: 1, $or: [{b: 2}, {$text: {$search: 'foo'}}]}"));
+
+ assertNumSolutions(0);
+}
+
+// If the unindexable $or is not the one containing the $text predicate,
+// then we should still be able to generate an indexed solution.
+TEST_F(QueryPlannerTest, TextInsideOrWithAnotherUnindexableOr) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson(
+ "{$and: [{$or: [{a: 1}, {b: 1}]}, "
+ "{$or: [{a: 2}, {$text: {$search: 'foo'}}]}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {$or:[{a:1},{b:1}]}, node: {or: {nodes: ["
+ "{text: {search: 'foo'}}, "
+ "{ixscan: {filter: null, pattern: {a:1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, AndTextWithGeoNonNear) {
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson(
+ "{$text: {$search: 'foo'}, a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}}"));
+
+ // Mandatory text index is used, and geo predicate becomes a filter.
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {text: {search: 'foo'}}}}");
+}
+
+// SERVER-13960: $text beneath $or with exact predicates.
+TEST_F(QueryPlannerTest, OrTextExact) {
+ addIndex(BSON("pre" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+ addIndex(BSON("other" << 1));
+ runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: 2}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{text: {search: 'dave', prefix: {pre: 3}}},"
+ "{ixscan: {filter: null, pattern: {other: 1}}}]}}}}");
+}
+
+// SERVER-13960: $text beneath $or with an inexact covered predicate.
+TEST_F(QueryPlannerTest, OrTextInexactCovered) {
+ addIndex(BSON("pre" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+ addIndex(BSON("other" << 1));
+ runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: /bar/}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{text: {search: 'dave', prefix: {pre: 3}}},"
+ "{ixscan: {filter: {$or: [{other: /bar/}]}, "
+ "pattern: {other: 1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, TextCaseSensitive) {
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson("{$text: {$search: 'blah', $caseSensitive: true}}"));
+
+ assertNumSolutions(1);
+ assertSolutionExists("{text: {search: 'blah', caseSensitive: true}}");
+}
} // namespace
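
An illustrative aside (not part of the patch) for the key-pattern convention described in the comment at the top of this test file: the spec a user hands to createIndex() versus the translated pattern these tests pass to addIndex(). The variable names are purely illustrative.

    #include "mongo/db/jsobj.h"

    // What a user declares when creating the FTS index:
    const mongo::BSONObj userSpec = BSON("a" << 1 << "b"
                                             << "text"
                                             << "c" << 1);

    // What the index catalog stores and the planner (and addIndex() here) sees:
    const mongo::BSONObj plannerSpec = BSON("a" << 1 << "_fts"
                                                << "text"
                                                << "_ftsx" << 1 << "c" << 1);
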
diff --git a/src/mongo/db/query/query_settings.cpp b/src/mongo/db/query/query_settings.cpp
index c6b2f34fcb8..487f34ebcf9 100644
--- a/src/mongo/db/query/query_settings.cpp
+++ b/src/mongo/db/query/query_settings.cpp
@@ -32,132 +32,137 @@
namespace mongo {
- using std::vector;
-
- //
- // HintOverride
- //
-
- AllowedIndices::AllowedIndices(const std::vector<BSONObj>& indexKeyPatterns) {
- for (std::vector<BSONObj>::const_iterator i = indexKeyPatterns.begin();
- i != indexKeyPatterns.end(); ++i) {
- const BSONObj& indexKeyPattern = *i;
- this->indexKeyPatterns.push_back(indexKeyPattern.getOwned());
- }
+using std::vector;
+
+//
+// HintOverride
+//
+
+AllowedIndices::AllowedIndices(const std::vector<BSONObj>& indexKeyPatterns) {
+ for (std::vector<BSONObj>::const_iterator i = indexKeyPatterns.begin();
+ i != indexKeyPatterns.end();
+ ++i) {
+ const BSONObj& indexKeyPattern = *i;
+ this->indexKeyPatterns.push_back(indexKeyPattern.getOwned());
}
-
- AllowedIndices::~AllowedIndices() { }
-
- //
- // AllowedIndexEntry
- //
-
- AllowedIndexEntry::AllowedIndexEntry(const BSONObj& query, const BSONObj& sort,
- const BSONObj& projection,
- const std::vector<BSONObj>& indexKeyPatterns)
- : query(query.getOwned()),
- sort(sort.getOwned()),
- projection(projection.getOwned()) {
- for (std::vector<BSONObj>::const_iterator i = indexKeyPatterns.begin();
- i != indexKeyPatterns.end(); ++i) {
- const BSONObj& indexKeyPattern = *i;
- this->indexKeyPatterns.push_back(indexKeyPattern.getOwned());
- }
- }
-
- AllowedIndexEntry::~AllowedIndexEntry() { }
-
- AllowedIndexEntry* AllowedIndexEntry::clone() const {
- AllowedIndexEntry* entry = new AllowedIndexEntry(query, sort, projection, indexKeyPatterns);
- return entry;
+}
+
+AllowedIndices::~AllowedIndices() {}
+
+//
+// AllowedIndexEntry
+//
+
+AllowedIndexEntry::AllowedIndexEntry(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& projection,
+ const std::vector<BSONObj>& indexKeyPatterns)
+ : query(query.getOwned()), sort(sort.getOwned()), projection(projection.getOwned()) {
+ for (std::vector<BSONObj>::const_iterator i = indexKeyPatterns.begin();
+ i != indexKeyPatterns.end();
+ ++i) {
+ const BSONObj& indexKeyPattern = *i;
+ this->indexKeyPatterns.push_back(indexKeyPattern.getOwned());
}
+}
- //
- // QuerySettings
- //
-
- QuerySettings::QuerySettings() { }
-
- QuerySettings::~QuerySettings() {
- _clear();
- }
+AllowedIndexEntry::~AllowedIndexEntry() {}
- bool QuerySettings::getAllowedIndices(const PlanCacheKey& key,
- AllowedIndices** allowedIndicesOut) const {
- invariant(allowedIndicesOut);
+AllowedIndexEntry* AllowedIndexEntry::clone() const {
+ AllowedIndexEntry* entry = new AllowedIndexEntry(query, sort, projection, indexKeyPatterns);
+ return entry;
+}
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
- AllowedIndexEntryMap::const_iterator cacheIter = _allowedIndexEntryMap.find(key);
+//
+// QuerySettings
+//
- // Nothing to do if key does not exist in query settings.
- if (cacheIter == _allowedIndexEntryMap.end()) {
- *allowedIndicesOut = NULL;
- return false;
- }
+QuerySettings::QuerySettings() {}
- AllowedIndexEntry* entry = cacheIter->second;
+QuerySettings::~QuerySettings() {
+ _clear();
+}
- // Create a AllowedIndices from entry.
- *allowedIndicesOut = new AllowedIndices(entry->indexKeyPatterns);
+bool QuerySettings::getAllowedIndices(const PlanCacheKey& key,
+ AllowedIndices** allowedIndicesOut) const {
+ invariant(allowedIndicesOut);
- return true;
- }
+ stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ AllowedIndexEntryMap::const_iterator cacheIter = _allowedIndexEntryMap.find(key);
- std::vector<AllowedIndexEntry*> QuerySettings::getAllAllowedIndices() const {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
- vector<AllowedIndexEntry*> entries;
- for (AllowedIndexEntryMap::const_iterator i = _allowedIndexEntryMap.begin(); i != _allowedIndexEntryMap.end(); ++i) {
- AllowedIndexEntry* entry = i->second;
- entries.push_back(entry->clone());
- }
- return entries;
+ // Nothing to do if key does not exist in query settings.
+ if (cacheIter == _allowedIndexEntryMap.end()) {
+ *allowedIndicesOut = NULL;
+ return false;
}
- void QuerySettings::setAllowedIndices(const CanonicalQuery& canonicalQuery,
- const PlanCacheKey& key,
- const std::vector<BSONObj>& indexes) {
- const LiteParsedQuery& lpq = canonicalQuery.getParsed();
- const BSONObj& query = lpq.getFilter();
- const BSONObj& sort = lpq.getSort();
- const BSONObj& projection = lpq.getProj();
- AllowedIndexEntry* entry = new AllowedIndexEntry(query, sort, projection, indexes);
-
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
- AllowedIndexEntryMap::iterator i = _allowedIndexEntryMap.find(key);
- // Replace existing entry.
- if (i != _allowedIndexEntryMap.end()) {
- AllowedIndexEntry* entry = i->second;
- delete entry;
- }
- _allowedIndexEntryMap[key] = entry;
- }
+ AllowedIndexEntry* entry = cacheIter->second;
- void QuerySettings::removeAllowedIndices(const PlanCacheKey& key) {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
- AllowedIndexEntryMap::iterator i = _allowedIndexEntryMap.find(key);
+    // Create an AllowedIndices from entry.
+ *allowedIndicesOut = new AllowedIndices(entry->indexKeyPatterns);
- // Nothing to do if key does not exist in query settings.
- if (i == _allowedIndexEntryMap.end()) {
- return;
- }
+ return true;
+}
- // Free up resources and delete entry.
+std::vector<AllowedIndexEntry*> QuerySettings::getAllAllowedIndices() const {
+ stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ vector<AllowedIndexEntry*> entries;
+ for (AllowedIndexEntryMap::const_iterator i = _allowedIndexEntryMap.begin();
+ i != _allowedIndexEntryMap.end();
+ ++i) {
+ AllowedIndexEntry* entry = i->second;
+ entries.push_back(entry->clone());
+ }
+ return entries;
+}
+
+void QuerySettings::setAllowedIndices(const CanonicalQuery& canonicalQuery,
+ const PlanCacheKey& key,
+ const std::vector<BSONObj>& indexes) {
+ const LiteParsedQuery& lpq = canonicalQuery.getParsed();
+ const BSONObj& query = lpq.getFilter();
+ const BSONObj& sort = lpq.getSort();
+ const BSONObj& projection = lpq.getProj();
+ AllowedIndexEntry* entry = new AllowedIndexEntry(query, sort, projection, indexes);
+
+ stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ AllowedIndexEntryMap::iterator i = _allowedIndexEntryMap.find(key);
+ // Replace existing entry.
+ if (i != _allowedIndexEntryMap.end()) {
AllowedIndexEntry* entry = i->second;
- _allowedIndexEntryMap.erase(i);
delete entry;
}
+ _allowedIndexEntryMap[key] = entry;
+}
- void QuerySettings::clearAllowedIndices() {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
- _clear();
+void QuerySettings::removeAllowedIndices(const PlanCacheKey& key) {
+ stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ AllowedIndexEntryMap::iterator i = _allowedIndexEntryMap.find(key);
+
+ // Nothing to do if key does not exist in query settings.
+ if (i == _allowedIndexEntryMap.end()) {
+ return;
}
- void QuerySettings::_clear() {
- for (AllowedIndexEntryMap::const_iterator i = _allowedIndexEntryMap.begin(); i != _allowedIndexEntryMap.end(); ++i) {
- AllowedIndexEntry* entry = i->second;
- delete entry;
- }
- _allowedIndexEntryMap.clear();
+ // Free up resources and delete entry.
+ AllowedIndexEntry* entry = i->second;
+ _allowedIndexEntryMap.erase(i);
+ delete entry;
+}
+
+void QuerySettings::clearAllowedIndices() {
+ stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ _clear();
+}
+
+void QuerySettings::_clear() {
+ for (AllowedIndexEntryMap::const_iterator i = _allowedIndexEntryMap.begin();
+ i != _allowedIndexEntryMap.end();
+ ++i) {
+ AllowedIndexEntry* entry = i->second;
+ delete entry;
}
+ _allowedIndexEntryMap.clear();
+}
} // namespace mongo
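
A hedged usage sketch (not part of the patch) of the ownership contract the reformatted code above preserves: getAllAllowedIndices() returns clone()d entries that the caller must delete. The 'querySettings' parameter is a hypothetical instance.

    #include <vector>

    #include "mongo/db/query/query_settings.h"

    void dumpIndexFilters(const mongo::QuerySettings& querySettings) {
        std::vector<mongo::AllowedIndexEntry*> entries = querySettings.getAllAllowedIndices();
        for (std::vector<mongo::AllowedIndexEntry*>::const_iterator it = entries.begin();
             it != entries.end();
             ++it) {
            // Inspect (*it)->query, (*it)->sort, (*it)->indexKeyPatterns here.
            delete *it;  // each returned entry is a caller-owned clone
        }
    }
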
diff --git a/src/mongo/db/query/query_settings.h b/src/mongo/db/query/query_settings.h
index 29449167580..e1125320471 100644
--- a/src/mongo/db/query/query_settings.h
+++ b/src/mongo/db/query/query_settings.h
@@ -41,108 +41,111 @@
namespace mongo {
+/**
+ * Holds allowed indices.
+ */
+class AllowedIndices {
+private:
+ MONGO_DISALLOW_COPYING(AllowedIndices);
+
+public:
+ AllowedIndices(const std::vector<BSONObj>& indexKeyPatterns);
+ ~AllowedIndices();
+
+ // These are the index key patterns that
+ // we will use to override the indexes retrieved from
+ // the index catalog.
+ std::vector<BSONObj> indexKeyPatterns;
+};
+
+/**
+ * Value type for query settings.
+ * Holds:
+ * query shape (query, sort, projection)
+ * vector of index specs
+ */
+class AllowedIndexEntry {
+private:
+ MONGO_DISALLOW_COPYING(AllowedIndexEntry);
+
+public:
+ AllowedIndexEntry(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& projection,
+ const std::vector<BSONObj>& indexKeyPatterns);
+ ~AllowedIndexEntry();
+ AllowedIndexEntry* clone() const;
+
+    // query, sort and projection collectively
+ // represent the query shape that we are storing hint overrides for.
+ BSONObj query;
+ BSONObj sort;
+ BSONObj projection;
+
+ // These are the index key patterns that
+ // we will use to override the indexes retrieved from
+ // the index catalog.
+ std::vector<BSONObj> indexKeyPatterns;
+};
+
+/**
+ * Holds the index filters in a collection.
+ */
+class QuerySettings {
+private:
+ MONGO_DISALLOW_COPYING(QuerySettings);
+
+public:
+ QuerySettings();
+
+ ~QuerySettings();
+
/**
- * Holds allowed indices.
+ * Returns true and fills out allowedIndicesOut if a hint is set in the query settings
+ * for the query.
+ * Returns false and sets allowedIndicesOut to NULL otherwise.
+ * Caller owns AllowedIndices.
*/
- class AllowedIndices {
- private:
- MONGO_DISALLOW_COPYING(AllowedIndices);
- public:
- AllowedIndices(const std::vector<BSONObj>& indexKeyPatterns);
- ~AllowedIndices();
-
- // These are the index key patterns that
- // we will use to override the indexes retrieved from
- // the index catalog.
- std::vector<BSONObj> indexKeyPatterns;
- };
+ bool getAllowedIndices(const PlanCacheKey& query, AllowedIndices** allowedIndicesOut) const;
/**
- * Value type for query settings.
- * Holds:
- * query shape (query, sort, projection)
- * vector of index specs
+     * Returns copies of all overrides for the collection.
+ * Caller owns overrides in vector.
*/
- class AllowedIndexEntry {
- private:
- MONGO_DISALLOW_COPYING(AllowedIndexEntry);
- public:
- AllowedIndexEntry(const BSONObj& query, const BSONObj& sort,
- const BSONObj& projection,
- const std::vector<BSONObj>& indexKeyPatterns);
- ~AllowedIndexEntry();
- AllowedIndexEntry* clone() const;
-
- // _query, _sort and _projection collectively
- // represent the query shape that we are storing hint overrides for.
- BSONObj query;
- BSONObj sort;
- BSONObj projection;
-
- // These are the index key patterns that
- // we will use to override the indexes retrieved from
- // the index catalog.
- std::vector<BSONObj> indexKeyPatterns;
- };
+ std::vector<AllowedIndexEntry*> getAllAllowedIndices() const;
+
+ /**
+ * Adds or replaces entry in query settings.
+ * If existing entry is found for the same key,
+ * frees resources for existing entry before replacing.
+ */
+ void setAllowedIndices(const CanonicalQuery& canonicalQuery,
+ const PlanCacheKey& key,
+ const std::vector<BSONObj>& indexes);
+
+ /**
+     * Removes a single entry from query settings. No effect if query shape is not found.
+ */
+ void removeAllowedIndices(const PlanCacheKey& canonicalQuery);
+
+ /**
+ * Clears all allowed indices from query settings.
+ */
+ void clearAllowedIndices();
+
+private:
+ /**
+ * Clears entries without acquiring mutex.
+ */
+ void _clear();
+
+ typedef unordered_map<PlanCacheKey, AllowedIndexEntry*> AllowedIndexEntryMap;
+ AllowedIndexEntryMap _allowedIndexEntryMap;
/**
- * Holds the index filters in a collection.
+ * Protects data in query settings.
*/
- class QuerySettings {
- private:
- MONGO_DISALLOW_COPYING(QuerySettings);
- public:
- QuerySettings();
-
- ~QuerySettings();
-
- /**
- * Returns true and fills out allowedIndicesOut if a hint is set in the query settings
- * for the query.
- * Returns false and sets allowedIndicesOut to NULL otherwise.
- * Caller owns AllowedIndices.
- */
- bool getAllowedIndices(const PlanCacheKey& query,
- AllowedIndices** allowedIndicesOut) const;
-
- /**
- * Returns copies all overrides for the collection..
- * Caller owns overrides in vector.
- */
- std::vector<AllowedIndexEntry*> getAllAllowedIndices() const;
-
- /**
- * Adds or replaces entry in query settings.
- * If existing entry is found for the same key,
- * frees resources for existing entry before replacing.
- */
- void setAllowedIndices(const CanonicalQuery& canonicalQuery,
- const PlanCacheKey& key,
- const std::vector<BSONObj>& indexes);
-
- /**
- * Removes single entry from query settings. No effect if query shape is not found.
- */
- void removeAllowedIndices(const PlanCacheKey& canonicalQuery);
-
- /**
- * Clears all allowed indices from query settings.
- */
- void clearAllowedIndices();
-
- private:
- /**
- * Clears entries without acquiring mutex.
- */
- void _clear();
-
- typedef unordered_map<PlanCacheKey, AllowedIndexEntry*> AllowedIndexEntryMap;
- AllowedIndexEntryMap _allowedIndexEntryMap;
-
- /**
- * Protects data in query settings.
- */
- mutable stdx::mutex _mutex;
- };
+ mutable stdx::mutex _mutex;
+};
} // namespace mongo
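
A hedged caller-side sketch (not part of the patch) of the getAllowedIndices() contract documented above: on success it returns true and hands back an AllowedIndices object that the caller owns and must delete. The 'settings' and 'planCacheKey' names are hypothetical, and PlanCacheKey is assumed to be available via this header's includes.

    #include "mongo/db/query/query_settings.h"

    void applyIndexFilter(const mongo::QuerySettings& settings,
                          const mongo::PlanCacheKey& planCacheKey) {
        mongo::AllowedIndices* allowed = NULL;
        if (settings.getAllowedIndices(planCacheKey, &allowed)) {
            // An index filter exists for this query shape; planning would be
            // restricted to allowed->indexKeyPatterns.
            delete allowed;  // caller owns the returned object
        }
    }
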
diff --git a/src/mongo/db/query/query_solution.cpp b/src/mongo/db/query/query_solution.cpp
index 18e66ccc503..85b0cb7933c 100644
--- a/src/mongo/db/query/query_solution.cpp
+++ b/src/mongo/db/query/query_solution.cpp
@@ -35,840 +35,842 @@
namespace mongo {
- using std::set;
-
- string QuerySolutionNode::toString() const {
- mongoutils::str::stream ss;
- appendToString(&ss, 0);
- return ss;
- }
-
- // static
- void QuerySolutionNode::addIndent(mongoutils::str::stream* ss, int level) {
- for (int i = 0; i < level; ++i) {
- *ss << "---";
- }
- }
-
- void QuerySolutionNode::addCommon(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent + 1);
- *ss << "fetched = " << fetched() << '\n';
- addIndent(ss, indent + 1);
- *ss << "sortedByDiskLoc = " << sortedByDiskLoc() << '\n';
+using std::set;
+
+string QuerySolutionNode::toString() const {
+ mongoutils::str::stream ss;
+ appendToString(&ss, 0);
+ return ss;
+}
+
+// static
+void QuerySolutionNode::addIndent(mongoutils::str::stream* ss, int level) {
+ for (int i = 0; i < level; ++i) {
+ *ss << "---";
+ }
+}
+
+void QuerySolutionNode::addCommon(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent + 1);
+ *ss << "fetched = " << fetched() << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "sortedByDiskLoc = " << sortedByDiskLoc() << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "getSort = [";
+ for (BSONObjSet::const_iterator it = getSort().begin(); it != getSort().end(); it++) {
+ *ss << it->toString() << ", ";
+ }
+ *ss << "]" << '\n';
+}
+
+//
+// TextNode
+//
+
+void TextNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "TEXT\n";
+ addIndent(ss, indent + 1);
+ *ss << "keyPattern = " << indexKeyPattern.toString() << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "query = " << query << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "language = " << language << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "caseSensitive= " << caseSensitive << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "indexPrefix = " << indexPrefix.toString() << '\n';
+ if (NULL != filter) {
addIndent(ss, indent + 1);
- *ss << "getSort = [";
- for (BSONObjSet::const_iterator it = getSort().begin(); it != getSort().end(); it++) {
- *ss << it->toString() << ", ";
- }
- *ss << "]" << '\n';
+ *ss << " filter = " << filter->toString();
}
+ addCommon(ss, indent);
+}
- //
- // TextNode
- //
+QuerySolutionNode* TextNode::clone() const {
+ TextNode* copy = new TextNode();
+ cloneBaseData(copy);
- void TextNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "TEXT\n";
- addIndent(ss, indent + 1);
- *ss << "keyPattern = " << indexKeyPattern.toString() << '\n';
- addIndent(ss, indent + 1);
- *ss << "query = " << query << '\n';
- addIndent(ss, indent + 1);
- *ss << "language = " << language << '\n';
- addIndent(ss, indent + 1);
- *ss << "caseSensitive= " << caseSensitive << '\n';
- addIndent(ss, indent + 1);
- *ss << "indexPrefix = " << indexPrefix.toString() << '\n';
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- *ss << " filter = " << filter->toString();
- }
- addCommon(ss, indent);
- }
-
- QuerySolutionNode* TextNode::clone() const {
- TextNode* copy = new TextNode();
- cloneBaseData(copy);
+ copy->_sort = this->_sort;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->query = this->query;
+ copy->language = this->language;
+ copy->caseSensitive = this->caseSensitive;
+ copy->indexPrefix = this->indexPrefix;
- copy->_sort = this->_sort;
- copy->indexKeyPattern = this->indexKeyPattern;
- copy->query = this->query;
- copy->language = this->language;
- copy->caseSensitive = this->caseSensitive;
- copy->indexPrefix = this->indexPrefix;
+ return copy;
+}
- return copy;
- }
-
- //
- // CollectionScanNode
- //
+//
+// CollectionScanNode
+//
- CollectionScanNode::CollectionScanNode() : tailable(false), direction(1), maxScan(0) { }
+CollectionScanNode::CollectionScanNode() : tailable(false), direction(1), maxScan(0) {}
- void CollectionScanNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "COLLSCAN\n";
+void CollectionScanNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "COLLSCAN\n";
+ addIndent(ss, indent + 1);
+ *ss << "ns = " << name << '\n';
+ if (NULL != filter) {
addIndent(ss, indent + 1);
- *ss << "ns = " << name << '\n';
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- *ss << "filter = " << filter->toString();
- }
- addCommon(ss, indent);
+ *ss << "filter = " << filter->toString();
}
+ addCommon(ss, indent);
+}
- QuerySolutionNode* CollectionScanNode::clone() const {
- CollectionScanNode* copy = new CollectionScanNode();
- cloneBaseData(copy);
+QuerySolutionNode* CollectionScanNode::clone() const {
+ CollectionScanNode* copy = new CollectionScanNode();
+ cloneBaseData(copy);
- copy->_sort = this->_sort;
- copy->name = this->name;
- copy->tailable = this->tailable;
- copy->direction = this->direction;
- copy->maxScan = this->maxScan;
+ copy->_sort = this->_sort;
+ copy->name = this->name;
+ copy->tailable = this->tailable;
+ copy->direction = this->direction;
+ copy->maxScan = this->maxScan;
- return copy;
- }
+ return copy;
+}
- //
- // AndHashNode
- //
+//
+// AndHashNode
+//
- AndHashNode::AndHashNode() { }
+AndHashNode::AndHashNode() {}
- AndHashNode::~AndHashNode() { }
+AndHashNode::~AndHashNode() {}
- void AndHashNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "AND_HASH\n";
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- *ss << " filter = " << filter->toString() << '\n';
- }
- addCommon(ss, indent);
- for (size_t i = 0; i < children.size(); ++i) {
- addIndent(ss, indent + 1);
- *ss << "Child " << i << ":\n";
- children[i]->appendToString(ss, indent + 1);
- }
+void AndHashNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "AND_HASH\n";
+ if (NULL != filter) {
+ addIndent(ss, indent + 1);
+ *ss << " filter = " << filter->toString() << '\n';
+ }
+ addCommon(ss, indent);
+ for (size_t i = 0; i < children.size(); ++i) {
+ addIndent(ss, indent + 1);
+ *ss << "Child " << i << ":\n";
+ children[i]->appendToString(ss, indent + 1);
}
+}
- bool AndHashNode::fetched() const {
- // Any WSM output from this stage came from all children stages. If any child provides
- // fetched data, we merge that fetched data into the WSM we output.
- for (size_t i = 0; i < children.size(); ++i) {
- if (children[i]->fetched()) {
- return true;
- }
+bool AndHashNode::fetched() const {
+ // Any WSM output from this stage came from all children stages. If any child provides
+ // fetched data, we merge that fetched data into the WSM we output.
+ for (size_t i = 0; i < children.size(); ++i) {
+ if (children[i]->fetched()) {
+ return true;
}
- return false;
}
+ return false;
+}
- bool AndHashNode::hasField(const string& field) const {
- // Any WSM output from this stage came from all children stages. Therefore we have all
- // fields covered in our children.
- for (size_t i = 0; i < children.size(); ++i) {
- if (children[i]->hasField(field)) {
- return true;
- }
+bool AndHashNode::hasField(const string& field) const {
+ // Any WSM output from this stage came from all children stages. Therefore we have all
+ // fields covered in our children.
+ for (size_t i = 0; i < children.size(); ++i) {
+ if (children[i]->hasField(field)) {
+ return true;
}
- return false;
}
+ return false;
+}
- QuerySolutionNode* AndHashNode::clone() const {
- AndHashNode* copy = new AndHashNode();
- cloneBaseData(copy);
+QuerySolutionNode* AndHashNode::clone() const {
+ AndHashNode* copy = new AndHashNode();
+ cloneBaseData(copy);
- copy->_sort = this->_sort;
+ copy->_sort = this->_sort;
- return copy;
- }
+ return copy;
+}
- //
- // AndSortedNode
- //
+//
+// AndSortedNode
+//
- AndSortedNode::AndSortedNode() { }
+AndSortedNode::AndSortedNode() {}
- AndSortedNode::~AndSortedNode() { }
+AndSortedNode::~AndSortedNode() {}
- void AndSortedNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "AND_SORTED\n";
- addCommon(ss, indent);
- for (size_t i = 0; i < children.size(); ++i) {
- addIndent(ss, indent + 1);
- *ss << "Child " << i << ":\n";
- children[i]->appendToString(ss, indent + 1);
- }
+void AndSortedNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "AND_SORTED\n";
+ addCommon(ss, indent);
+ for (size_t i = 0; i < children.size(); ++i) {
+ addIndent(ss, indent + 1);
+ *ss << "Child " << i << ":\n";
+ children[i]->appendToString(ss, indent + 1);
}
+}
- bool AndSortedNode::fetched() const {
- // Any WSM output from this stage came from all children stages. If any child provides
- // fetched data, we merge that fetched data into the WSM we output.
- for (size_t i = 0; i < children.size(); ++i) {
- if (children[i]->fetched()) {
- return true;
- }
+bool AndSortedNode::fetched() const {
+ // Any WSM output from this stage came from all children stages. If any child provides
+ // fetched data, we merge that fetched data into the WSM we output.
+ for (size_t i = 0; i < children.size(); ++i) {
+ if (children[i]->fetched()) {
+ return true;
}
- return false;
}
+ return false;
+}
- bool AndSortedNode::hasField(const string& field) const {
- // Any WSM output from this stage came from all children stages. Therefore we have all
- // fields covered in our children.
- for (size_t i = 0; i < children.size(); ++i) {
- if (children[i]->hasField(field)) {
- return true;
- }
+bool AndSortedNode::hasField(const string& field) const {
+ // Any WSM output from this stage came from all children stages. Therefore we have all
+ // fields covered in our children.
+ for (size_t i = 0; i < children.size(); ++i) {
+ if (children[i]->hasField(field)) {
+ return true;
}
- return false;
}
+ return false;
+}
- QuerySolutionNode* AndSortedNode::clone() const {
- AndSortedNode* copy = new AndSortedNode();
- cloneBaseData(copy);
+QuerySolutionNode* AndSortedNode::clone() const {
+ AndSortedNode* copy = new AndSortedNode();
+ cloneBaseData(copy);
- copy->_sort = this->_sort;
+ copy->_sort = this->_sort;
- return copy;
- }
+ return copy;
+}
- //
- // OrNode
- //
-
- OrNode::OrNode() : dedup(true) { }
+//
+// OrNode
+//
- OrNode::~OrNode() { }
+OrNode::OrNode() : dedup(true) {}
- void OrNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "OR\n";
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- *ss << " filter = " << filter->toString() << '\n';
- }
- addCommon(ss, indent);
- for (size_t i = 0; i < children.size(); ++i) {
- addIndent(ss, indent + 1);
- *ss << "Child " << i << ":\n";
- children[i]->appendToString(ss, indent + 2);
- *ss << '\n';
- }
- }
+OrNode::~OrNode() {}
- bool OrNode::fetched() const {
- // Any WSM output from this stage came exactly one child stage. Given that we don't know
- // what child stage it came from, we require that all children provide fetched data in order
- // to guarantee that our output is fetched.
- for (size_t i = 0; i < children.size(); ++i) {
- if (!children[i]->fetched()) {
- return false;
- }
- }
- return true;
- }
-
- /**
- * Any WSM output from this stage came from exactly one child stage. Therefore, if
- * we want to guarantee that any output has a certain field, all of our children must
- * have that field.
- */
- bool OrNode::hasField(const string& field) const {
- for (size_t i = 0; i < children.size(); ++i) {
- if (!children[i]->hasField(field)) {
- return false;
- }
- }
- return true;
+void OrNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "OR\n";
+ if (NULL != filter) {
+ addIndent(ss, indent + 1);
+ *ss << " filter = " << filter->toString() << '\n';
}
-
- QuerySolutionNode* OrNode::clone() const {
- OrNode* copy = new OrNode();
- cloneBaseData(copy);
-
- copy->_sort = this->_sort;
- copy->dedup = this->dedup;
-
- return copy;
+ addCommon(ss, indent);
+ for (size_t i = 0; i < children.size(); ++i) {
+ addIndent(ss, indent + 1);
+ *ss << "Child " << i << ":\n";
+ children[i]->appendToString(ss, indent + 2);
+ *ss << '\n';
}
+}
- //
- // MergeSortNode
- //
-
- MergeSortNode::MergeSortNode() : dedup(true) { }
-
- MergeSortNode::~MergeSortNode() { }
-
- void MergeSortNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "MERGE_SORT\n";
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- *ss << " filter = " << filter->toString() << '\n';
- }
- addCommon(ss, indent);
- for (size_t i = 0; i < children.size(); ++i) {
- addIndent(ss, indent + 1);
- *ss << "Child " << i << ":\n";
- children[i]->appendToString(ss, indent + 2);
- *ss << '\n';
+bool OrNode::fetched() const {
+    // Any WSM output from this stage came from exactly one child stage. Given that we don't know
+ // what child stage it came from, we require that all children provide fetched data in order
+ // to guarantee that our output is fetched.
+ for (size_t i = 0; i < children.size(); ++i) {
+ if (!children[i]->fetched()) {
+ return false;
}
}
+ return true;
+}
- bool MergeSortNode::fetched() const {
- // Any WSM output from this stage came exactly one child stage. Given that we don't know
- // what child stage it came from, we require that all children provide fetched data in order
- // to guarantee that our output is fetched.
- for (size_t i = 0; i < children.size(); ++i) {
- if (!children[i]->fetched()) {
- return false;
- }
- }
- return true;
- }
-
- /**
- * Any WSM output from this stage came from exactly one child stage. Therefore, if
- * we want to guarantee that any output has a certain field, all of our children must
- * have that field.
- */
- bool MergeSortNode::hasField(const string& field) const {
- for (size_t i = 0; i < children.size(); ++i) {
- if (!children[i]->hasField(field)) {
- return false;
- }
+/**
+ * Any WSM output from this stage came from exactly one child stage. Therefore, if
+ * we want to guarantee that any output has a certain field, all of our children must
+ * have that field.
+ */
+bool OrNode::hasField(const string& field) const {
+ for (size_t i = 0; i < children.size(); ++i) {
+ if (!children[i]->hasField(field)) {
+ return false;
}
- return true;
}
+ return true;
+}
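// Illustrative sketch, not part of the MongoDB sources: OrNode::hasField() only
// reports a field as covered when every branch covers it. Assuming two plain
// (non-multikey) btree index scans, a caller would observe:
//
//     IndexScanNode* left = new IndexScanNode();
//     left->indexKeyPattern = BSON("a" << 1 << "b" << 1);
//
//     IndexScanNode* right = new IndexScanNode();
//     right->indexKeyPattern = BSON("a" << 1 << "c" << 1);
//
//     OrNode orNode;
//     orNode.children.push_back(left);   // the OrNode owns and deletes its children
//     orNode.children.push_back(right);
//
//     invariant(orNode.hasField("a"));   // both branches provide 'a'
//     invariant(!orNode.hasField("b"));  // only the left branch provides 'b'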
- QuerySolutionNode* MergeSortNode::clone() const {
- MergeSortNode* copy = new MergeSortNode();
- cloneBaseData(copy);
+QuerySolutionNode* OrNode::clone() const {
+ OrNode* copy = new OrNode();
+ cloneBaseData(copy);
- copy->_sorts = this->_sorts;
- copy->dedup = this->dedup;
- copy->sort = this->sort;
+ copy->_sort = this->_sort;
+ copy->dedup = this->dedup;
- return copy;
- }
-
- //
- // FetchNode
- //
-
- FetchNode::FetchNode() { }
-
- void FetchNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "FETCH\n";
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- StringBuilder sb;
- *ss << "filter:\n";
- filter->debugString(sb, indent + 2);
- *ss << sb.str();
- }
- addCommon(ss, indent);
- addIndent(ss, indent + 1);
- *ss << "Child:" << '\n';
- children[0]->appendToString(ss, indent + 2);
- }
+ return copy;
+}
- QuerySolutionNode* FetchNode::clone() const {
- FetchNode* copy = new FetchNode();
- cloneBaseData(copy);
+//
+// MergeSortNode
+//
- copy->_sorts = this->_sorts;
+MergeSortNode::MergeSortNode() : dedup(true) {}
- return copy;
- }
+MergeSortNode::~MergeSortNode() {}
- //
- // IndexScanNode
- //
-
- IndexScanNode::IndexScanNode()
- : indexIsMultiKey(false), direction(1), maxScan(0), addKeyMetadata(false) { }
-
- void IndexScanNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "IXSCAN\n";
+void MergeSortNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "MERGE_SORT\n";
+ if (NULL != filter) {
addIndent(ss, indent + 1);
- *ss << "keyPattern = " << indexKeyPattern << '\n';
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- *ss << "filter = " << filter->toString();
- }
- addIndent(ss, indent + 1);
- *ss << "direction = " << direction << '\n';
- addIndent(ss, indent + 1);
- *ss << "bounds = " << bounds.toString() << '\n';
- addCommon(ss, indent);
+ *ss << " filter = " << filter->toString() << '\n';
}
-
- bool IndexScanNode::hasField(const string& field) const {
- // There is no covering in a multikey index because you don't know whether or not the field
- // in the key was extracted from an array in the original document.
- if (indexIsMultiKey) { return false; }
-
- // Custom index access methods may return non-exact key data - this function is currently
- // used for covering exact key data only.
- if (IndexNames::BTREE != IndexNames::findPluginName(indexKeyPattern)) { return false; }
-
- BSONObjIterator it(indexKeyPattern);
- while (it.more()) {
- if (field == it.next().fieldName()) {
- return true;
- }
- }
- return false;
+ addCommon(ss, indent);
+ for (size_t i = 0; i < children.size(); ++i) {
+ addIndent(ss, indent + 1);
+ *ss << "Child " << i << ":\n";
+ children[i]->appendToString(ss, indent + 2);
+ *ss << '\n';
}
+}
- bool IndexScanNode::sortedByDiskLoc() const {
- // Indices use RecordId as an additional key after the actual index key.
- // Therefore, if we're only examining one index key, the output is sorted
- // by RecordId.
-
- // If it's a simple range query, it's easy to determine if the range is a point.
- if (bounds.isSimpleRange) {
- return 0 == bounds.startKey.woCompare(bounds.endKey, indexKeyPattern);
- }
-
- // If it's a more complex bounds query, we make sure that each field is a point.
- for (size_t i = 0; i < bounds.fields.size(); ++i) {
- const OrderedIntervalList& oil = bounds.fields[i];
- if (1 != oil.intervals.size()) {
- return false;
- }
- const Interval& interval = oil.intervals[0];
- if (0 != interval.start.woCompare(interval.end, false)) {
- return false;
- }
+bool MergeSortNode::fetched() const {
+    // Any WSM output from this stage came from exactly one child stage. Given that we don't know
+ // what child stage it came from, we require that all children provide fetched data in order
+ // to guarantee that our output is fetched.
+ for (size_t i = 0; i < children.size(); ++i) {
+ if (!children[i]->fetched()) {
+ return false;
}
-
- return true;
}
+ return true;
+}
- void IndexScanNode::computeProperties() {
- _sorts.clear();
-
- BSONObj sortPattern = QueryPlannerAnalysis::getSortPattern(indexKeyPattern);
- if (direction == -1) {
- sortPattern = QueryPlannerCommon::reverseSortObj(sortPattern);
- }
-
- _sorts.insert(sortPattern);
-
- const int nFields = sortPattern.nFields();
- if (nFields > 1) {
- // We're sorted not only by sortPattern but also by all prefixes of it.
- for (int i = 0; i < nFields; ++i) {
- // Make obj out of fields [0,i]
- BSONObjIterator it(sortPattern);
- BSONObjBuilder prefixBob;
- for (int j = 0; j <= i; ++j) {
- prefixBob.append(it.next());
- }
- _sorts.insert(prefixBob.obj());
- }
- }
-
- // If we are using the index {a:1, b:1} to answer the predicate {a: 10}, it's sorted
- // both by the index key pattern and by the pattern {b: 1}.
-
- // See if there are any fields with equalities for bounds. We can drop these
- // from any sort orders created.
- set<string> equalityFields;
- if (!bounds.isSimpleRange) {
- // Figure out how many fields are point intervals.
- for (size_t i = 0; i < bounds.fields.size(); ++i) {
- const OrderedIntervalList& oil = bounds.fields[i];
- if (oil.intervals.size() != 1) {
- continue;
- }
- const Interval& ival = oil.intervals[0];
- if (!ival.isPoint()) {
- continue;
- }
- equalityFields.insert(oil.name);
- }
- }
-
- if (equalityFields.empty()) {
- return;
- }
-
- // TODO: Each field in equalityFields could be dropped from the sort order since it is
- // a point interval. The full set of sort orders is as follows:
- // For each sort in _sorts:
- // For each drop in powerset(equalityFields):
- // Remove fields in 'drop' from 'sort' and add resulting sort to output.
- //
- // Since this involves a powerset, we don't generate the full set of possibilities.
- // Instead, we generate sort orders by removing possible contiguous prefixes of equality
- // predicates. For example, if the key pattern is {a: 1, b: 1, c: 1, d: 1, e: 1}
- // and and there are equality predicates on 'a', 'b', and 'c', then here we add the sort
- // orders {b: 1, c: 1, d: 1, e: 1} and {c: 1, d: 1, e: 1}. (We also end up adding
- // {d: 1, e: 1} and {d: 1}, but this is done later on.)
- BSONObjIterator it(sortPattern);
- BSONObjBuilder suffixBob;
- while (it.more()) {
- BSONElement elt = it.next();
- // TODO: string slowness. fix when bounds are stringdata not string.
- if (equalityFields.end() == equalityFields.find(string(elt.fieldName()))) {
- suffixBob.append(elt);
- // This field isn't a point interval, can't drop.
- break;
- }
-
- // We add the sort obtained by dropping 'elt' and all preceding elements from the index
- // key pattern.
- BSONObjIterator droppedPrefixIt = it;
- BSONObjBuilder droppedPrefixBob;
- while (droppedPrefixIt.more()) {
- droppedPrefixBob.append(droppedPrefixIt.next());
- }
- _sorts.insert(droppedPrefixBob.obj());
- }
-
- while (it.more()) {
- suffixBob.append(it.next());
- }
-
- // We've found the suffix following the contiguous prefix of equality fields.
- // Ex. For index {a: 1, b: 1, c: 1, d: 1} and query {a: 3, b: 5}, this suffix
- // of the key pattern is {c: 1, d: 1}.
- //
- // Now we have to add all prefixes of this suffix as possible sort orders.
- // Ex. Continuing the example from above, we have to include sort orders
- // {c: 1} and {c: 1, d: 1}.
- BSONObj filterPointsObj = suffixBob.obj();
- for (int i = 0; i < filterPointsObj.nFields(); ++i) {
- // Make obj out of fields [0,i]
- BSONObjIterator it(filterPointsObj);
- BSONObjBuilder prefixBob;
- for (int j = 0; j <= i; ++j) {
- prefixBob.append(it.next());
- }
- _sorts.insert(prefixBob.obj());
+/**
+ * Any WSM output from this stage came from exactly one child stage. Therefore, if
+ * we want to guarantee that any output has a certain field, all of our children must
+ * have that field.
+ */
+bool MergeSortNode::hasField(const string& field) const {
+ for (size_t i = 0; i < children.size(); ++i) {
+ if (!children[i]->hasField(field)) {
+ return false;
}
}
+ return true;
+}
- QuerySolutionNode* IndexScanNode::clone() const {
- IndexScanNode* copy = new IndexScanNode();
- cloneBaseData(copy);
+QuerySolutionNode* MergeSortNode::clone() const {
+ MergeSortNode* copy = new MergeSortNode();
+ cloneBaseData(copy);
- copy->_sorts = this->_sorts;
- copy->indexKeyPattern = this->indexKeyPattern;
- copy->indexIsMultiKey = this->indexIsMultiKey;
- copy->direction = this->direction;
- copy->maxScan = this->maxScan;
- copy->addKeyMetadata = this->addKeyMetadata;
- copy->bounds = this->bounds;
+ copy->_sorts = this->_sorts;
+ copy->dedup = this->dedup;
+ copy->sort = this->sort;
- return copy;
- }
-
- //
- // ProjectionNode
- //
-
- void ProjectionNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "PROJ\n";
- addIndent(ss, indent + 1);
- *ss << "proj = " << projection.toString() << '\n';
- addIndent(ss, indent + 1);
- if (DEFAULT == projType) {
- *ss << "type = DEFAULT\n";
- }
- else if (COVERED_ONE_INDEX == projType) {
- *ss << "type = COVERED_ONE_INDEX\n";
- }
- else {
- invariant(SIMPLE_DOC == projType);
- *ss << "type = SIMPLE_DOC\n";
- }
- addCommon(ss, indent);
- addIndent(ss, indent + 1);
- *ss << "Child:" << '\n';
- children[0]->appendToString(ss, indent + 2);
- }
-
- QuerySolutionNode* ProjectionNode::clone() const {
- ProjectionNode* copy = new ProjectionNode();
- cloneBaseData(copy);
+ return copy;
+}
- copy->_sorts = this->_sorts;
- copy->fullExpression = this->fullExpression;
+//
+// FetchNode
+//
- // This MatchExpression* is owned by the canonical query, not by the
- // ProjectionNode. Just copying the pointer is fine.
- copy->projection = this->projection;
+FetchNode::FetchNode() {}
- return copy;
- }
-
- //
- // SortNode
- //
-
- void SortNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "SORT\n";
- addIndent(ss, indent + 1);
- *ss << "pattern = " << pattern.toString() << '\n';
- addIndent(ss, indent + 1);
- *ss << "query for bounds = " << query.toString() << '\n';
- addIndent(ss, indent + 1);
- *ss << "limit = " << limit << '\n';
- addCommon(ss, indent);
+void FetchNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "FETCH\n";
+ if (NULL != filter) {
addIndent(ss, indent + 1);
- *ss << "Child:" << '\n';
- children[0]->appendToString(ss, indent + 2);
+ StringBuilder sb;
+ *ss << "filter:\n";
+ filter->debugString(sb, indent + 2);
+ *ss << sb.str();
}
+ addCommon(ss, indent);
+ addIndent(ss, indent + 1);
+ *ss << "Child:" << '\n';
+ children[0]->appendToString(ss, indent + 2);
+}
- QuerySolutionNode* SortNode::clone() const {
- SortNode* copy = new SortNode();
- cloneBaseData(copy);
-
- copy->_sorts = this->_sorts;
- copy->pattern = this->pattern;
- copy->query = this->query;
- copy->limit = this->limit;
-
- return copy;
- }
-
- //
- // LimitNode
- //
-
+QuerySolutionNode* FetchNode::clone() const {
+ FetchNode* copy = new FetchNode();
+ cloneBaseData(copy);
- void LimitNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "LIMIT\n";
- addIndent(ss, indent + 1);
- *ss << "limit = " << limit << '\n';
- addIndent(ss, indent + 1);
- addCommon(ss, indent);
- addIndent(ss, indent + 1);
- *ss << "Child:" << '\n';
- children[0]->appendToString(ss, indent + 2);
- }
+ copy->_sorts = this->_sorts;
- QuerySolutionNode* LimitNode::clone() const {
- LimitNode* copy = new LimitNode();
- cloneBaseData(copy);
+ return copy;
+}
- copy->limit = this->limit;
+//
+// IndexScanNode
+//
- return copy;
- }
-
- //
- // SkipNode
- //
+IndexScanNode::IndexScanNode()
+ : indexIsMultiKey(false), direction(1), maxScan(0), addKeyMetadata(false) {}
- void SkipNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "SKIP\n";
- addIndent(ss, indent + 1);
- *ss << "skip= " << skip << '\n';
- addCommon(ss, indent);
+void IndexScanNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "IXSCAN\n";
+ addIndent(ss, indent + 1);
+ *ss << "keyPattern = " << indexKeyPattern << '\n';
+ if (NULL != filter) {
addIndent(ss, indent + 1);
- *ss << "Child:" << '\n';
- children[0]->appendToString(ss, indent + 2);
+ *ss << "filter = " << filter->toString();
}
+ addIndent(ss, indent + 1);
+ *ss << "direction = " << direction << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "bounds = " << bounds.toString() << '\n';
+ addCommon(ss, indent);
+}
- QuerySolutionNode* SkipNode::clone() const {
- SkipNode* copy = new SkipNode();
- cloneBaseData(copy);
-
- copy->skip = this->skip;
-
- return copy;
- }
-
- //
- // GeoNear2DNode
- //
-
- void GeoNear2DNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "GEO_NEAR_2D\n";
- addIndent(ss, indent + 1);
- *ss << "keyPattern = " << indexKeyPattern.toString() << '\n';
- addCommon(ss, indent);
- *ss << "nearQuery = " << nq->toString() << '\n';
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- *ss << " filter = " << filter->toString();
- }
+bool IndexScanNode::hasField(const string& field) const {
+ // There is no covering in a multikey index because you don't know whether or not the field
+ // in the key was extracted from an array in the original document.
+ if (indexIsMultiKey) {
+ return false;
}
- QuerySolutionNode* GeoNear2DNode::clone() const {
- GeoNear2DNode* copy = new GeoNear2DNode();
- cloneBaseData(copy);
-
- copy->_sorts = this->_sorts;
- copy->nq = this->nq;
- copy->baseBounds = this->baseBounds;
- copy->indexKeyPattern = this->indexKeyPattern;
- copy->addPointMeta = this->addPointMeta;
- copy->addDistMeta = this->addDistMeta;
-
- return copy;
+ // Custom index access methods may return non-exact key data - this function is currently
+ // used for covering exact key data only.
+ if (IndexNames::BTREE != IndexNames::findPluginName(indexKeyPattern)) {
+ return false;
}
- //
- // GeoNear2DSphereNode
- //
-
- void GeoNear2DSphereNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "GEO_NEAR_2DSPHERE\n";
- addIndent(ss, indent + 1);
- *ss << "keyPattern = " << indexKeyPattern.toString() << '\n';
- addCommon(ss, indent);
- *ss << "baseBounds = " << baseBounds.toString() << '\n';
- addIndent(ss, indent + 1);
- *ss << "nearQuery = " << nq->toString() << '\n';
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- *ss << " filter = " << filter->toString();
+ BSONObjIterator it(indexKeyPattern);
+ while (it.more()) {
+ if (field == it.next().fieldName()) {
+ return true;
}
}
+ return false;
+}
- QuerySolutionNode* GeoNear2DSphereNode::clone() const {
- GeoNear2DSphereNode* copy = new GeoNear2DSphereNode();
- cloneBaseData(copy);
-
- copy->_sorts = this->_sorts;
- copy->nq = this->nq;
- copy->baseBounds = this->baseBounds;
- copy->indexKeyPattern = this->indexKeyPattern;
- copy->addPointMeta = this->addPointMeta;
- copy->addDistMeta = this->addDistMeta;
+bool IndexScanNode::sortedByDiskLoc() const {
+ // Indices use RecordId as an additional key after the actual index key.
+ // Therefore, if we're only examining one index key, the output is sorted
+ // by RecordId.
- return copy;
+ // If it's a simple range query, it's easy to determine if the range is a point.
+ if (bounds.isSimpleRange) {
+ return 0 == bounds.startKey.woCompare(bounds.endKey, indexKeyPattern);
}
- //
- // ShardingFilterNode
- //
-
- void ShardingFilterNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "SHARDING_FILTER\n";
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- StringBuilder sb;
- *ss << "filter:\n";
- filter->debugString(sb, indent + 2);
- *ss << sb.str();
+ // If it's a more complex bounds query, we make sure that each field is a point.
+ for (size_t i = 0; i < bounds.fields.size(); ++i) {
+ const OrderedIntervalList& oil = bounds.fields[i];
+ if (1 != oil.intervals.size()) {
+ return false;
}
- addCommon(ss, indent);
- addIndent(ss, indent + 1);
- *ss << "Child:" << '\n';
- children[0]->appendToString(ss, indent + 2);
- }
-
- QuerySolutionNode* ShardingFilterNode::clone() const {
- ShardingFilterNode* copy = new ShardingFilterNode();
- cloneBaseData(copy);
- return copy;
- }
-
- //
- // KeepMutationsNode
- //
-
- void KeepMutationsNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "KEEP_MUTATIONS\n";
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- StringBuilder sb;
- *ss << "filter:\n";
- filter->debugString(sb, indent + 2);
- *ss << sb.str();
+ const Interval& interval = oil.intervals[0];
+ if (0 != interval.start.woCompare(interval.end, false)) {
+ return false;
}
- addCommon(ss, indent);
- addIndent(ss, indent + 1);
- *ss << "Child:" << '\n';
- children[0]->appendToString(ss, indent + 2);
- }
-
- QuerySolutionNode* KeepMutationsNode::clone() const {
- KeepMutationsNode* copy = new KeepMutationsNode();
- cloneBaseData(copy);
-
- copy->sorts = this->sorts;
-
- return copy;
- }
-
- //
- // DistinctNode
- //
-
- void DistinctNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "DISTINCT\n";
- addIndent(ss, indent + 1);
- *ss << "keyPattern = " << indexKeyPattern << '\n';
- addIndent(ss, indent + 1);
- *ss << "direction = " << direction << '\n';
- addIndent(ss, indent + 1);
- *ss << "bounds = " << bounds.toString() << '\n';
}
- QuerySolutionNode* DistinctNode::clone() const {
- DistinctNode* copy = new DistinctNode();
- cloneBaseData(copy);
+ return true;
+}
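// Illustrative sketch, not part of the MongoDB sources: the non-simple-range path
// above reports true when every field of the bounds is restricted to a single point,
// so the scan touches one index key and its output is ordered by RecordId. Assuming
// the IndexBoundsBuilder::makePointInterval() helper:
//
//     IndexScanNode scan;
//     scan.indexKeyPattern = BSON("a" << 1 << "b" << 1);
//
//     OrderedIntervalList oilA("a");
//     oilA.intervals.push_back(IndexBoundsBuilder::makePointInterval(BSON("" << 5)));
//     OrderedIntervalList oilB("b");
//     oilB.intervals.push_back(IndexBoundsBuilder::makePointInterval(BSON("" << 7)));
//
//     scan.bounds.fields.push_back(oilA);
//     scan.bounds.fields.push_back(oilB);
//
//     invariant(scan.sortedByDiskLoc());  // both intervals are points
//
// Widening either interval (e.g. a range on 'b') makes sortedByDiskLoc() return false.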
- copy->sorts = this->sorts;
- copy->indexKeyPattern = this->indexKeyPattern;
- copy->direction = this->direction;
- copy->bounds = this->bounds;
- copy->fieldNo = this->fieldNo;
+void IndexScanNode::computeProperties() {
+ _sorts.clear();
- return copy;
+ BSONObj sortPattern = QueryPlannerAnalysis::getSortPattern(indexKeyPattern);
+ if (direction == -1) {
+ sortPattern = QueryPlannerCommon::reverseSortObj(sortPattern);
}
- //
- // CountNode
- //
+ _sorts.insert(sortPattern);
- void CountNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "COUNT\n";
- addIndent(ss, indent + 1);
- *ss << "keyPattern = " << indexKeyPattern << '\n';
- addIndent(ss, indent + 1);
- *ss << "startKey = " << startKey << '\n';
- addIndent(ss, indent + 1);
- *ss << "endKey = " << endKey << '\n';
+ const int nFields = sortPattern.nFields();
+ if (nFields > 1) {
+ // We're sorted not only by sortPattern but also by all prefixes of it.
+ for (int i = 0; i < nFields; ++i) {
+ // Make obj out of fields [0,i]
+ BSONObjIterator it(sortPattern);
+ BSONObjBuilder prefixBob;
+ for (int j = 0; j <= i; ++j) {
+ prefixBob.append(it.next());
+ }
+ _sorts.insert(prefixBob.obj());
+ }
}
- QuerySolutionNode* CountNode::clone() const {
- CountNode* copy = new CountNode();
- cloneBaseData(copy);
-
- copy->sorts = this->sorts;
- copy->indexKeyPattern = this->indexKeyPattern;
- copy->startKey = this->startKey;
- copy->startKeyInclusive = this->startKeyInclusive;
- copy->endKey = this->endKey;
- copy->endKeyInclusive = this->endKeyInclusive;
+ // If we are using the index {a:1, b:1} to answer the predicate {a: 10}, it's sorted
+ // both by the index key pattern and by the pattern {b: 1}.
- return copy;
- }
+ // See if there are any fields with equalities for bounds. We can drop these
+ // from any sort orders created.
+ set<string> equalityFields;
+ if (!bounds.isSimpleRange) {
+ // Figure out how many fields are point intervals.
+ for (size_t i = 0; i < bounds.fields.size(); ++i) {
+ const OrderedIntervalList& oil = bounds.fields[i];
+ if (oil.intervals.size() != 1) {
+ continue;
+ }
+ const Interval& ival = oil.intervals[0];
+ if (!ival.isPoint()) {
+ continue;
+ }
+ equalityFields.insert(oil.name);
+ }
+ }
+
+ if (equalityFields.empty()) {
+ return;
+ }
+
+ // TODO: Each field in equalityFields could be dropped from the sort order since it is
+ // a point interval. The full set of sort orders is as follows:
+ // For each sort in _sorts:
+ // For each drop in powerset(equalityFields):
+ // Remove fields in 'drop' from 'sort' and add resulting sort to output.
+ //
+ // Since this involves a powerset, we don't generate the full set of possibilities.
+ // Instead, we generate sort orders by removing possible contiguous prefixes of equality
+ // predicates. For example, if the key pattern is {a: 1, b: 1, c: 1, d: 1, e: 1}
+    // and there are equality predicates on 'a', 'b', and 'c', then here we add the sort
+ // orders {b: 1, c: 1, d: 1, e: 1} and {c: 1, d: 1, e: 1}. (We also end up adding
+ // {d: 1, e: 1} and {d: 1}, but this is done later on.)
+ BSONObjIterator it(sortPattern);
+ BSONObjBuilder suffixBob;
+ while (it.more()) {
+ BSONElement elt = it.next();
+ // TODO: string slowness. fix when bounds are stringdata not string.
+ if (equalityFields.end() == equalityFields.find(string(elt.fieldName()))) {
+ suffixBob.append(elt);
+ // This field isn't a point interval, can't drop.
+ break;
+ }
+
+ // We add the sort obtained by dropping 'elt' and all preceding elements from the index
+ // key pattern.
+ BSONObjIterator droppedPrefixIt = it;
+ BSONObjBuilder droppedPrefixBob;
+ while (droppedPrefixIt.more()) {
+ droppedPrefixBob.append(droppedPrefixIt.next());
+ }
+ _sorts.insert(droppedPrefixBob.obj());
+ }
+
+ while (it.more()) {
+ suffixBob.append(it.next());
+ }
+
+ // We've found the suffix following the contiguous prefix of equality fields.
+ // Ex. For index {a: 1, b: 1, c: 1, d: 1} and query {a: 3, b: 5}, this suffix
+ // of the key pattern is {c: 1, d: 1}.
+ //
+ // Now we have to add all prefixes of this suffix as possible sort orders.
+ // Ex. Continuing the example from above, we have to include sort orders
+ // {c: 1} and {c: 1, d: 1}.
+ BSONObj filterPointsObj = suffixBob.obj();
+ for (int i = 0; i < filterPointsObj.nFields(); ++i) {
+ // Make obj out of fields [0,i]
+ BSONObjIterator it(filterPointsObj);
+ BSONObjBuilder prefixBob;
+ for (int j = 0; j <= i; ++j) {
+ prefixBob.append(it.next());
+ }
+ _sorts.insert(prefixBob.obj());
+ }
+}
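// Worked example, illustrative only: for an index scan over the key pattern
// {a: 1, b: 1, c: 1} with direction 1 and an equality (point) bound on 'a' only,
// the code above leaves _sorts containing:
//
//     {a: 1, b: 1, c: 1}   // the key pattern itself
//     {a: 1, b: 1}         // prefixes of the key pattern
//     {a: 1}
//     {b: 1, c: 1}         // 'a' dropped, since its bound is a point
//     {b: 1}               // prefix of the remaining suffix
//
// getSort() then advertises all of these as orders the scan can provide without a
// blocking sort stage.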
+
+QuerySolutionNode* IndexScanNode::clone() const {
+ IndexScanNode* copy = new IndexScanNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->indexIsMultiKey = this->indexIsMultiKey;
+ copy->direction = this->direction;
+ copy->maxScan = this->maxScan;
+ copy->addKeyMetadata = this->addKeyMetadata;
+ copy->bounds = this->bounds;
+
+ return copy;
+}
+
+//
+// ProjectionNode
+//
+
+void ProjectionNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "PROJ\n";
+ addIndent(ss, indent + 1);
+ *ss << "proj = " << projection.toString() << '\n';
+ addIndent(ss, indent + 1);
+ if (DEFAULT == projType) {
+ *ss << "type = DEFAULT\n";
+ } else if (COVERED_ONE_INDEX == projType) {
+ *ss << "type = COVERED_ONE_INDEX\n";
+ } else {
+ invariant(SIMPLE_DOC == projType);
+ *ss << "type = SIMPLE_DOC\n";
+ }
+ addCommon(ss, indent);
+ addIndent(ss, indent + 1);
+ *ss << "Child:" << '\n';
+ children[0]->appendToString(ss, indent + 2);
+}
+
+QuerySolutionNode* ProjectionNode::clone() const {
+ ProjectionNode* copy = new ProjectionNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+ copy->fullExpression = this->fullExpression;
+
+ // This MatchExpression* is owned by the canonical query, not by the
+ // ProjectionNode. Just copying the pointer is fine.
+ copy->projection = this->projection;
+
+ return copy;
+}
+
+//
+// SortNode
+//
+
+void SortNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "SORT\n";
+ addIndent(ss, indent + 1);
+ *ss << "pattern = " << pattern.toString() << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "query for bounds = " << query.toString() << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "limit = " << limit << '\n';
+ addCommon(ss, indent);
+ addIndent(ss, indent + 1);
+ *ss << "Child:" << '\n';
+ children[0]->appendToString(ss, indent + 2);
+}
+
+QuerySolutionNode* SortNode::clone() const {
+ SortNode* copy = new SortNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+ copy->pattern = this->pattern;
+ copy->query = this->query;
+ copy->limit = this->limit;
+
+ return copy;
+}
+
+//
+// LimitNode
+//
+
+void LimitNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "LIMIT\n";
+ addIndent(ss, indent + 1);
+ *ss << "limit = " << limit << '\n';
+ addIndent(ss, indent + 1);
+ addCommon(ss, indent);
+ addIndent(ss, indent + 1);
+ *ss << "Child:" << '\n';
+ children[0]->appendToString(ss, indent + 2);
+}
+
+QuerySolutionNode* LimitNode::clone() const {
+ LimitNode* copy = new LimitNode();
+ cloneBaseData(copy);
+
+ copy->limit = this->limit;
+
+ return copy;
+}
+
+//
+// SkipNode
+//
+
+void SkipNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "SKIP\n";
+ addIndent(ss, indent + 1);
+ *ss << "skip= " << skip << '\n';
+ addCommon(ss, indent);
+ addIndent(ss, indent + 1);
+ *ss << "Child:" << '\n';
+ children[0]->appendToString(ss, indent + 2);
+}
+
+QuerySolutionNode* SkipNode::clone() const {
+ SkipNode* copy = new SkipNode();
+ cloneBaseData(copy);
+
+ copy->skip = this->skip;
+
+ return copy;
+}
+
+//
+// GeoNear2DNode
+//
+
+void GeoNear2DNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "GEO_NEAR_2D\n";
+ addIndent(ss, indent + 1);
+ *ss << "keyPattern = " << indexKeyPattern.toString() << '\n';
+ addCommon(ss, indent);
+ *ss << "nearQuery = " << nq->toString() << '\n';
+ if (NULL != filter) {
+ addIndent(ss, indent + 1);
+ *ss << " filter = " << filter->toString();
+ }
+}
+
+QuerySolutionNode* GeoNear2DNode::clone() const {
+ GeoNear2DNode* copy = new GeoNear2DNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+ copy->nq = this->nq;
+ copy->baseBounds = this->baseBounds;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->addPointMeta = this->addPointMeta;
+ copy->addDistMeta = this->addDistMeta;
+
+ return copy;
+}
+
+//
+// GeoNear2DSphereNode
+//
+
+void GeoNear2DSphereNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "GEO_NEAR_2DSPHERE\n";
+ addIndent(ss, indent + 1);
+ *ss << "keyPattern = " << indexKeyPattern.toString() << '\n';
+ addCommon(ss, indent);
+ *ss << "baseBounds = " << baseBounds.toString() << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "nearQuery = " << nq->toString() << '\n';
+ if (NULL != filter) {
+ addIndent(ss, indent + 1);
+ *ss << " filter = " << filter->toString();
+ }
+}
+
+QuerySolutionNode* GeoNear2DSphereNode::clone() const {
+ GeoNear2DSphereNode* copy = new GeoNear2DSphereNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+ copy->nq = this->nq;
+ copy->baseBounds = this->baseBounds;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->addPointMeta = this->addPointMeta;
+ copy->addDistMeta = this->addDistMeta;
+
+ return copy;
+}
+
+//
+// ShardingFilterNode
+//
+
+void ShardingFilterNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "SHARDING_FILTER\n";
+ if (NULL != filter) {
+ addIndent(ss, indent + 1);
+ StringBuilder sb;
+ *ss << "filter:\n";
+ filter->debugString(sb, indent + 2);
+ *ss << sb.str();
+ }
+ addCommon(ss, indent);
+ addIndent(ss, indent + 1);
+ *ss << "Child:" << '\n';
+ children[0]->appendToString(ss, indent + 2);
+}
+
+QuerySolutionNode* ShardingFilterNode::clone() const {
+ ShardingFilterNode* copy = new ShardingFilterNode();
+ cloneBaseData(copy);
+ return copy;
+}
+
+//
+// KeepMutationsNode
+//
+
+void KeepMutationsNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "KEEP_MUTATIONS\n";
+ if (NULL != filter) {
+ addIndent(ss, indent + 1);
+ StringBuilder sb;
+ *ss << "filter:\n";
+ filter->debugString(sb, indent + 2);
+ *ss << sb.str();
+ }
+ addCommon(ss, indent);
+ addIndent(ss, indent + 1);
+ *ss << "Child:" << '\n';
+ children[0]->appendToString(ss, indent + 2);
+}
+
+QuerySolutionNode* KeepMutationsNode::clone() const {
+ KeepMutationsNode* copy = new KeepMutationsNode();
+ cloneBaseData(copy);
+
+ copy->sorts = this->sorts;
+
+ return copy;
+}
+
+//
+// DistinctNode
+//
+
+void DistinctNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "DISTINCT\n";
+ addIndent(ss, indent + 1);
+ *ss << "keyPattern = " << indexKeyPattern << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "direction = " << direction << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "bounds = " << bounds.toString() << '\n';
+}
+
+QuerySolutionNode* DistinctNode::clone() const {
+ DistinctNode* copy = new DistinctNode();
+ cloneBaseData(copy);
+
+ copy->sorts = this->sorts;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->direction = this->direction;
+ copy->bounds = this->bounds;
+ copy->fieldNo = this->fieldNo;
+
+ return copy;
+}
+
+//
+// CountNode
+//
+
+void CountNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "COUNT\n";
+ addIndent(ss, indent + 1);
+ *ss << "keyPattern = " << indexKeyPattern << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "startKey = " << startKey << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "endKey = " << endKey << '\n';
+}
+
+QuerySolutionNode* CountNode::clone() const {
+ CountNode* copy = new CountNode();
+ cloneBaseData(copy);
+
+ copy->sorts = this->sorts;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->startKey = this->startKey;
+ copy->startKeyInclusive = this->startKeyInclusive;
+ copy->endKey = this->endKey;
+ copy->endKeyInclusive = this->endKeyInclusive;
+
+ return copy;
+}
} // namespace mongo
diff --git a/src/mongo/db/query/query_solution.h b/src/mongo/db/query/query_solution.h
index 45a4b24ecea..cd8cbbbd25a 100644
--- a/src/mongo/db/query/query_solution.h
+++ b/src/mongo/db/query/query_solution.h
@@ -38,694 +38,847 @@
namespace mongo {
- using mongo::fts::FTSQuery;
+using mongo::fts::FTSQuery;
- class GeoNearExpression;
+class GeoNearExpression;
- /**
- * This is an abstract representation of a query plan. It can be transcribed into a tree of
- * PlanStages, which can then be handed to a PlanRunner for execution.
- */
- struct QuerySolutionNode {
- QuerySolutionNode() { }
- virtual ~QuerySolutionNode() {
- for (size_t i = 0; i < children.size(); ++i) {
- delete children[i];
- }
+/**
+ * This is an abstract representation of a query plan. It can be transcribed into a tree of
+ * PlanStages, which can then be handed to a PlanRunner for execution.
+ */
+struct QuerySolutionNode {
+ QuerySolutionNode() {}
+ virtual ~QuerySolutionNode() {
+ for (size_t i = 0; i < children.size(); ++i) {
+ delete children[i];
}
+ }
- /**
- * Return a std::string representation of this node and any children.
- */
- std::string toString() const;
-
- /**
- * What stage should this be transcribed to? See stage_types.h.
- */
- virtual StageType getType() const = 0;
+ /**
+ * Return a std::string representation of this node and any children.
+ */
+ std::string toString() const;
- /**
- * Internal function called by toString()
- *
- * TODO: Consider outputting into a BSONObj or builder thereof.
- */
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const = 0;
+ /**
+ * What stage should this be transcribed to? See stage_types.h.
+ */
+ virtual StageType getType() const = 0;
- //
- // Computed properties
- //
+ /**
+ * Internal function called by toString()
+ *
+ * TODO: Consider outputting into a BSONObj or builder thereof.
+ */
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const = 0;
- /**
- * Must be called before any properties are examined.
- */
- virtual void computeProperties() {
- for (size_t i = 0; i < children.size(); ++i) {
- children[i]->computeProperties();
- }
- }
+ //
+ // Computed properties
+ //
- /**
- * If true, one of these are true:
- * 1. All outputs are already fetched, or
- * 2. There is a projection in place and a fetch is not required.
- *
- * If false, a fetch needs to be placed above the root in order to provide results.
- *
- * Usage: To determine if every possible result that might reach the root
- * will be fully-fetched or not. We don't want any surplus fetches.
- */
- virtual bool fetched() const = 0;
-
- /**
- * Returns true if the tree rooted at this node provides data with the field name 'field'.
- * This data can come from any of the types of the WSM.
- *
- * Usage: If an index-only plan has all the fields we're interested in, we don't
- * have to fetch to show results with those fields.
- *
- * TODO: 'field' is probably more appropriate as a FieldRef or string.
- */
- virtual bool hasField(const std::string& field) const = 0;
-
- /**
- * Returns true if the tree rooted at this node provides data that is sorted by the
- * its location on disk.
- *
- * Usage: If all the children of an STAGE_AND_HASH have this property, we can compute the
- * AND faster by replacing the STAGE_AND_HASH with STAGE_AND_SORTED.
- */
- virtual bool sortedByDiskLoc() const = 0;
-
- /**
- * Return a BSONObjSet representing the possible sort orders of the data stream from this
- * node. If the data is not sorted in any particular fashion, returns an empty set.
- *
- * Usage:
- * 1. If our plan gives us a sort order, we don't have to add a sort stage.
- * 2. If all the children of an OR have the same sort order, we can maintain that
- * sort order with a STAGE_SORT_MERGE instead of STAGE_OR.
- */
- virtual const BSONObjSet& getSort() const = 0;
-
- /**
- * Make a deep copy.
- */
- virtual QuerySolutionNode* clone() const = 0;
-
- /**
- * Copy base query solution data from 'this' to 'other'.
- */
- void cloneBaseData(QuerySolutionNode* other) const {
- for (size_t i = 0; i < this->children.size(); i++) {
- other->children.push_back(this->children[i]->clone());
- }
- if (NULL != this->filter) {
- other->filter.reset(this->filter->shallowClone());
- }
+ /**
+ * Must be called before any properties are examined.
+ */
+ virtual void computeProperties() {
+ for (size_t i = 0; i < children.size(); ++i) {
+ children[i]->computeProperties();
}
+ }
- // These are owned here.
- std::vector<QuerySolutionNode*> children;
-
- // If a stage has a non-NULL filter all values outputted from that stage must pass that
- // filter.
- std::unique_ptr<MatchExpression> filter;
-
- protected:
- /**
- * Formatting helper used by toString().
- */
- static void addIndent(mongoutils::str::stream* ss, int level);
-
- /**
- * Every solution node has properties and this adds the debug info for the
- * properties.
- */
- void addCommon(mongoutils::str::stream* ss, int indent) const;
+ /**
+     * If true, one of these is true:
+ * 1. All outputs are already fetched, or
+ * 2. There is a projection in place and a fetch is not required.
+ *
+ * If false, a fetch needs to be placed above the root in order to provide results.
+ *
+ * Usage: To determine if every possible result that might reach the root
+ * will be fully-fetched or not. We don't want any surplus fetches.
+ */
+ virtual bool fetched() const = 0;
- private:
- MONGO_DISALLOW_COPYING(QuerySolutionNode);
- };
+ /**
+ * Returns true if the tree rooted at this node provides data with the field name 'field'.
+ * This data can come from any of the types of the WSM.
+ *
+ * Usage: If an index-only plan has all the fields we're interested in, we don't
+ * have to fetch to show results with those fields.
+ *
+ * TODO: 'field' is probably more appropriate as a FieldRef or string.
+ */
+ virtual bool hasField(const std::string& field) const = 0;
/**
- * A QuerySolution must be entirely self-contained and own everything inside of it.
+     * Returns true if the tree rooted at this node provides data that is sorted by
+     * its location on disk.
*
- * A tree of stages may be built from a QuerySolution. The QuerySolution must outlive the tree
- * of stages.
+     * Usage: If all the children of a STAGE_AND_HASH have this property, we can compute the
+ * AND faster by replacing the STAGE_AND_HASH with STAGE_AND_SORTED.
*/
- struct QuerySolution {
- QuerySolution() : hasBlockingStage(false), indexFilterApplied(false) { }
+ virtual bool sortedByDiskLoc() const = 0;
- // Owned here.
- std::unique_ptr<QuerySolutionNode> root;
+ /**
+ * Return a BSONObjSet representing the possible sort orders of the data stream from this
+ * node. If the data is not sorted in any particular fashion, returns an empty set.
+ *
+ * Usage:
+ * 1. If our plan gives us a sort order, we don't have to add a sort stage.
+ * 2. If all the children of an OR have the same sort order, we can maintain that
+ * sort order with a STAGE_SORT_MERGE instead of STAGE_OR.
+ */
+ virtual const BSONObjSet& getSort() const = 0;
- // Any filters in root or below point into this object. Must be owned.
- BSONObj filterData;
+ /**
+ * Make a deep copy.
+ */
+ virtual QuerySolutionNode* clone() const = 0;
- // There are two known scenarios in which a query solution might potentially block:
- //
- // Sort stage:
- // If the solution has a sort stage, the sort wasn't provided by an index, so we might want
- // to scan an index to provide that sort in a non-blocking fashion.
- //
- // Hashed AND stage:
- // The hashed AND stage buffers data from multiple index scans and could block. In that case,
- // we would want to fall back on an alternate non-blocking solution.
- bool hasBlockingStage;
-
- // Runner executing this solution might be interested in knowing
- // if the planning process for this solution was based on filtered indices.
- bool indexFilterApplied;
-
- // Owned here. Used by the plan cache.
- std::unique_ptr<SolutionCacheData> cacheData;
-
- /**
- * Output a human-readable std::string representing the plan.
- */
- std::string toString() {
- if (NULL == root) {
- return "empty query solution";
- }
-
- mongoutils::str::stream ss;
- root->appendToString(&ss, 0);
- return ss;
+ /**
+ * Copy base query solution data from 'this' to 'other'.
+ */
+ void cloneBaseData(QuerySolutionNode* other) const {
+ for (size_t i = 0; i < this->children.size(); i++) {
+ other->children.push_back(this->children[i]->clone());
}
- private:
- MONGO_DISALLOW_COPYING(QuerySolution);
- };
-
- struct TextNode : public QuerySolutionNode {
- TextNode() { }
- virtual ~TextNode() { }
-
- virtual StageType getType() const { return STAGE_TEXT; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- // Text's return is LOC_AND_UNOWNED_OBJ or LOC_AND_OWNED_OBJ so it's fetched and has all
- // fields.
- bool fetched() const { return true; }
- bool hasField(const std::string& field) const { return true; }
- bool sortedByDiskLoc() const { return false; }
- const BSONObjSet& getSort() const { return _sort; }
-
- QuerySolutionNode* clone() const;
-
- BSONObjSet _sort;
-
- BSONObj indexKeyPattern;
- std::string query;
- std::string language;
- bool caseSensitive;
-
- // "Prefix" fields of a text index can handle equality predicates. We group them with the
- // text node while creating the text leaf node and convert them into a BSONObj index prefix
- // when we finish the text leaf node.
- BSONObj indexPrefix;
- };
-
- struct CollectionScanNode : public QuerySolutionNode {
- CollectionScanNode();
- virtual ~CollectionScanNode() { }
-
- virtual StageType getType() const { return STAGE_COLLSCAN; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const { return true; }
- bool hasField(const std::string& field) const { return true; }
- bool sortedByDiskLoc() const { return false; }
- const BSONObjSet& getSort() const { return _sort; }
-
- QuerySolutionNode* clone() const;
-
- BSONObjSet _sort;
-
- // Name of the namespace.
- std::string name;
-
- // Should we make a tailable cursor?
- bool tailable;
-
- int direction;
-
- // maxScan option to .find() limits how many docs we look at.
- int maxScan;
- };
-
- struct AndHashNode : public QuerySolutionNode {
- AndHashNode();
- virtual ~AndHashNode();
-
- virtual StageType getType() const { return STAGE_AND_HASH; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const;
- bool hasField(const std::string& field) const;
- bool sortedByDiskLoc() const { return false; }
- const BSONObjSet& getSort() const { return children.back()->getSort(); }
-
- QuerySolutionNode* clone() const;
-
- BSONObjSet _sort;
- };
-
- struct AndSortedNode : public QuerySolutionNode {
- AndSortedNode();
- virtual ~AndSortedNode();
-
- virtual StageType getType() const { return STAGE_AND_SORTED; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const;
- bool hasField(const std::string& field) const;
- bool sortedByDiskLoc() const { return true; }
- const BSONObjSet& getSort() const { return _sort; }
-
- QuerySolutionNode* clone() const;
-
- BSONObjSet _sort;
- };
-
- struct OrNode : public QuerySolutionNode {
- OrNode();
- virtual ~OrNode();
-
- virtual StageType getType() const { return STAGE_OR; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const;
- bool hasField(const std::string& field) const;
- bool sortedByDiskLoc() const {
- // Even if our children are sorted by their diskloc or other fields, we don't maintain
- // any order on the output.
- return false;
+ if (NULL != this->filter) {
+ other->filter.reset(this->filter->shallowClone());
}
- const BSONObjSet& getSort() const { return _sort; }
-
- QuerySolutionNode* clone() const;
+ }
- BSONObjSet _sort;
-
- bool dedup;
- };
+ // These are owned here.
+ std::vector<QuerySolutionNode*> children;
- struct MergeSortNode : public QuerySolutionNode {
- MergeSortNode();
- virtual ~MergeSortNode();
+ // If a stage has a non-NULL filter all values outputted from that stage must pass that
+ // filter.
+ std::unique_ptr<MatchExpression> filter;
- virtual StageType getType() const { return STAGE_SORT_MERGE; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const;
- bool hasField(const std::string& field) const;
- bool sortedByDiskLoc() const { return false; }
-
- const BSONObjSet& getSort() const { return _sorts; }
-
- QuerySolutionNode* clone() const;
-
- virtual void computeProperties() {
- for (size_t i = 0; i < children.size(); ++i) {
- children[i]->computeProperties();
- }
- _sorts.clear();
- _sorts.insert(sort);
- }
-
- BSONObjSet _sorts;
-
- BSONObj sort;
- bool dedup;
- };
-
- struct FetchNode : public QuerySolutionNode {
- FetchNode();
- virtual ~FetchNode() { }
-
- virtual StageType getType() const { return STAGE_FETCH; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const { return true; }
- bool hasField(const std::string& field) const { return true; }
- bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc(); }
- const BSONObjSet& getSort() const { return children[0]->getSort(); }
-
- QuerySolutionNode* clone() const;
-
- BSONObjSet _sorts;
- };
-
- struct IndexScanNode : public QuerySolutionNode {
- IndexScanNode();
- virtual ~IndexScanNode() { }
-
- virtual void computeProperties();
-
- virtual StageType getType() const { return STAGE_IXSCAN; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const { return false; }
- bool hasField(const std::string& field) const;
- bool sortedByDiskLoc() const;
- const BSONObjSet& getSort() const { return _sorts; }
-
- QuerySolutionNode* clone() const;
+protected:
+ /**
+ * Formatting helper used by toString().
+ */
+ static void addIndent(mongoutils::str::stream* ss, int level);
- BSONObjSet _sorts;
+ /**
+ * Every solution node has properties and this adds the debug info for the
+ * properties.
+ */
+ void addCommon(mongoutils::str::stream* ss, int indent) const;
- BSONObj indexKeyPattern;
- bool indexIsMultiKey;
+private:
+ MONGO_DISALLOW_COPYING(QuerySolutionNode);
+};
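// Illustrative sketch, not part of the MongoDB sources: a concrete node only has to
// implement the pure virtuals above, following the same pattern as the node types
// declared below (the node name here is hypothetical; STAGE_COLLSCAN is reused only
// for illustration):
//
//     struct ExampleNode : public QuerySolutionNode {
//         virtual StageType getType() const { return STAGE_COLLSCAN; }
//         virtual void appendToString(mongoutils::str::stream* ss, int indent) const {
//             addIndent(ss, indent);
//             *ss << "EXAMPLE\n";
//             addCommon(ss, indent);
//         }
//         bool fetched() const { return true; }
//         bool hasField(const std::string& field) const { return true; }
//         bool sortedByDiskLoc() const { return false; }
//         const BSONObjSet& getSort() const { return _sort; }
//         QuerySolutionNode* clone() const {
//             ExampleNode* copy = new ExampleNode();
//             cloneBaseData(copy);  // deep-copies children and clones the filter
//             copy->_sort = this->_sort;
//             return copy;
//         }
//         BSONObjSet _sort;
//     };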
- int direction;
+/**
+ * A QuerySolution must be entirely self-contained and own everything inside of it.
+ *
+ * A tree of stages may be built from a QuerySolution. The QuerySolution must outlive the tree
+ * of stages.
+ */
+struct QuerySolution {
+ QuerySolution() : hasBlockingStage(false), indexFilterApplied(false) {}
- // maxScan option to .find() limits how many docs we look at.
- int maxScan;
+ // Owned here.
+ std::unique_ptr<QuerySolutionNode> root;
- // If there's a 'returnKey' projection we add key metadata.
- bool addKeyMetadata;
+ // Any filters in root or below point into this object. Must be owned.
+ BSONObj filterData;
- // BIG NOTE:
- // If you use simple bounds, we'll use whatever index access method the keypattern implies.
- // If you use the complex bounds, we force Btree access.
- // The complex bounds require Btree access.
- IndexBounds bounds;
- };
+ // There are two known scenarios in which a query solution might potentially block:
+ //
+ // Sort stage:
+ // If the solution has a sort stage, the sort wasn't provided by an index, so we might want
+ // to scan an index to provide that sort in a non-blocking fashion.
+ //
+ // Hashed AND stage:
+ // The hashed AND stage buffers data from multiple index scans and could block. In that case,
+ // we would want to fall back on an alternate non-blocking solution.
+ bool hasBlockingStage;
- struct ProjectionNode : public QuerySolutionNode {
- /**
- * We have a few implementations of the projection functionality. The most general
- * implementation 'DEFAULT' is much slower than the fast-path implementations
- * below. We only really have all the information available to choose a projection
- * implementation at planning time.
- */
- enum ProjectionType {
- // This is the most general implementation of the projection functionality. It handles
- // every case.
- DEFAULT,
-
- // This is a fast-path for when the projection is fully covered by one index.
- COVERED_ONE_INDEX,
-
- // This is a fast-path for when the projection only has inclusions on non-dotted fields.
- SIMPLE_DOC,
- };
-
- ProjectionNode() : fullExpression(NULL), projType(DEFAULT) { }
-
- virtual ~ProjectionNode() { }
-
- virtual StageType getType() const { return STAGE_PROJECTION; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- /**
- * Data from the projection node is considered fetch iff the child provides fetched data.
- */
- bool fetched() const { return children[0]->fetched(); }
-
- bool hasField(const std::string& field) const {
- // TODO: Returning false isn't always the right answer -- we may either be including
- // certain fields, or we may be dropping fields (in which case hasField returns true).
- //
- // Given that projection sits on top of everything else in .find() it doesn't matter
- // what we do here.
- return false;
- }
+ // Runner executing this solution might be interested in knowing
+ // if the planning process for this solution was based on filtered indices.
+ bool indexFilterApplied;
- bool sortedByDiskLoc() const {
- // Projections destroy the RecordId. By returning true here, this kind of implies that a
- // fetch could still be done upstream.
- //
- // Perhaps this should be false to not imply that there *is* a RecordId? Kind of a
- // corner case.
- return children[0]->sortedByDiskLoc();
- }
+ // Owned here. Used by the plan cache.
+ std::unique_ptr<SolutionCacheData> cacheData;
- const BSONObjSet& getSort() const {
- // TODO: If we're applying a projection that maintains sort order, the prefix of the
- // sort order we project is the sort order.
- return _sorts;
+ /**
+ * Output a human-readable std::string representing the plan.
+ */
+ std::string toString() {
+ if (NULL == root) {
+ return "empty query solution";
}
- QuerySolutionNode* clone() const;
+ mongoutils::str::stream ss;
+ root->appendToString(&ss, 0);
+ return ss;
+ }
+
+private:
+ MONGO_DISALLOW_COPYING(QuerySolution);
+};
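// Illustrative sketch, not part of the MongoDB sources: the solution owns its node
// tree through 'root', so a freshly built tree only needs to be handed over once,
// and toString() produces the human-readable dump of the plan:
//
//     QuerySolution soln;
//     soln.root.reset(new CollectionScanNode());
//     std::string plan = soln.toString();  // "COLLSCAN" plus its common properties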
+
+struct TextNode : public QuerySolutionNode {
+ TextNode() {}
+ virtual ~TextNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_TEXT;
+ }
+
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ // Text's return is LOC_AND_UNOWNED_OBJ or LOC_AND_OWNED_OBJ so it's fetched and has all
+ // fields.
+ bool fetched() const {
+ return true;
+ }
+ bool hasField(const std::string& field) const {
+ return true;
+ }
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return _sort;
+ }
+
+ QuerySolutionNode* clone() const;
+
+ BSONObjSet _sort;
+
+ BSONObj indexKeyPattern;
+ std::string query;
+ std::string language;
+ bool caseSensitive;
+
+ // "Prefix" fields of a text index can handle equality predicates. We group them with the
+ // text node while creating the text leaf node and convert them into a BSONObj index prefix
+ // when we finish the text leaf node.
+ BSONObj indexPrefix;
+};
+
+struct CollectionScanNode : public QuerySolutionNode {
+ CollectionScanNode();
+ virtual ~CollectionScanNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_COLLSCAN;
+ }
+
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const {
+ return true;
+ }
+ bool hasField(const std::string& field) const {
+ return true;
+ }
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return _sort;
+ }
+
+ QuerySolutionNode* clone() const;
- BSONObjSet _sorts;
+ BSONObjSet _sort;
+
+ // Name of the namespace.
+ std::string name;
+
+ // Should we make a tailable cursor?
+ bool tailable;
+
+ int direction;
+
+ // maxScan option to .find() limits how many docs we look at.
+ int maxScan;
+};
+
+struct AndHashNode : public QuerySolutionNode {
+ AndHashNode();
+ virtual ~AndHashNode();
+
+ virtual StageType getType() const {
+ return STAGE_AND_HASH;
+ }
+
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const;
+ bool hasField(const std::string& field) const;
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return children.back()->getSort();
+ }
+
+ QuerySolutionNode* clone() const;
+
+ BSONObjSet _sort;
+};
+
+struct AndSortedNode : public QuerySolutionNode {
+ AndSortedNode();
+ virtual ~AndSortedNode();
+
+ virtual StageType getType() const {
+ return STAGE_AND_SORTED;
+ }
+
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const;
+ bool hasField(const std::string& field) const;
+ bool sortedByDiskLoc() const {
+ return true;
+ }
+ const BSONObjSet& getSort() const {
+ return _sort;
+ }
- // The full query tree. Needed when we have positional operators.
- // Owned in the CanonicalQuery, not here.
- MatchExpression* fullExpression;
+ QuerySolutionNode* clone() const;
- // Given that we don't yet have a MatchExpression analogue for the expression language, we
- // use a BSONObj.
- BSONObj projection;
+ BSONObjSet _sort;
+};
- // What implementation of the projection algorithm should we use?
- ProjectionType projType;
+struct OrNode : public QuerySolutionNode {
+ OrNode();
+ virtual ~OrNode();
- // Only meaningful if projType == COVERED_ONE_INDEX. This is the key pattern of the index
- // supplying our covered data. We can pre-compute which fields to include and cache that
- // data for later if we know we only have one index.
- BSONObj coveredKeyObj;
- };
-
- struct SortNode : public QuerySolutionNode {
- SortNode() : limit(0) { }
- virtual ~SortNode() { }
+ virtual StageType getType() const {
+ return STAGE_OR;
+ }
- virtual StageType getType() const { return STAGE_SORT; }
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+ bool fetched() const;
+ bool hasField(const std::string& field) const;
+ bool sortedByDiskLoc() const {
+ // Even if our children are sorted by their diskloc or other fields, we don't maintain
+ // any order on the output.
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return _sort;
+ }
- bool fetched() const { return children[0]->fetched(); }
- bool hasField(const std::string& field) const { return children[0]->hasField(field); }
- bool sortedByDiskLoc() const { return false; }
+ QuerySolutionNode* clone() const;
- const BSONObjSet& getSort() const { return _sorts; }
+ BSONObjSet _sort;
- QuerySolutionNode* clone() const;
+ bool dedup;
+};
- virtual void computeProperties() {
- for (size_t i = 0; i < children.size(); ++i) {
- children[i]->computeProperties();
- }
- _sorts.clear();
- _sorts.insert(pattern);
- }
+struct MergeSortNode : public QuerySolutionNode {
+ MergeSortNode();
+ virtual ~MergeSortNode();
- BSONObjSet _sorts;
+ virtual StageType getType() const {
+ return STAGE_SORT_MERGE;
+ }
- BSONObj pattern;
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
- BSONObj query;
-
- // Sum of both limit and skip count in the parsed query.
- size_t limit;
- };
+ bool fetched() const;
+ bool hasField(const std::string& field) const;
+ bool sortedByDiskLoc() const {
+ return false;
+ }
- struct LimitNode : public QuerySolutionNode {
- LimitNode() { }
- virtual ~LimitNode() { }
+ const BSONObjSet& getSort() const {
+ return _sorts;
+ }
- virtual StageType getType() const { return STAGE_LIMIT; }
+ QuerySolutionNode* clone() const;
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const { return children[0]->fetched(); }
- bool hasField(const std::string& field) const { return children[0]->hasField(field); }
- bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc(); }
- const BSONObjSet& getSort() const { return children[0]->getSort(); }
-
- QuerySolutionNode* clone() const;
-
- int limit;
- };
-
- struct SkipNode : public QuerySolutionNode {
- SkipNode() { }
- virtual ~SkipNode() { }
-
- virtual StageType getType() const { return STAGE_SKIP; }
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const { return children[0]->fetched(); }
- bool hasField(const std::string& field) const { return children[0]->hasField(field); }
- bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc(); }
- const BSONObjSet& getSort() const { return children[0]->getSort(); }
-
- QuerySolutionNode* clone() const;
+ virtual void computeProperties() {
+ for (size_t i = 0; i < children.size(); ++i) {
+ children[i]->computeProperties();
+ }
+ _sorts.clear();
+ _sorts.insert(sort);
+ }
- int skip;
- };
+ BSONObjSet _sorts;
- // This is a standalone stage.
- struct GeoNear2DNode : public QuerySolutionNode {
- GeoNear2DNode() : addPointMeta(false), addDistMeta(false) { }
- virtual ~GeoNear2DNode() { }
+ BSONObj sort;
+ bool dedup;
+};
- virtual StageType getType() const { return STAGE_GEO_NEAR_2D; }
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+struct FetchNode : public QuerySolutionNode {
+ FetchNode();
+ virtual ~FetchNode() {}
- bool fetched() const { return true; }
- bool hasField(const std::string& field) const { return true; }
- bool sortedByDiskLoc() const { return false; }
- const BSONObjSet& getSort() const { return _sorts; }
+ virtual StageType getType() const {
+ return STAGE_FETCH;
+ }
- QuerySolutionNode* clone() const;
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
- BSONObjSet _sorts;
+ bool fetched() const {
+ return true;
+ }
+ bool hasField(const std::string& field) const {
+ return true;
+ }
+ bool sortedByDiskLoc() const {
+ return children[0]->sortedByDiskLoc();
+ }
+ const BSONObjSet& getSort() const {
+ return children[0]->getSort();
+ }
- // Not owned here
- const GeoNearExpression* nq;
- IndexBounds baseBounds;
+ QuerySolutionNode* clone() const;
- BSONObj indexKeyPattern;
- bool addPointMeta;
- bool addDistMeta;
- };
+ BSONObjSet _sorts;
+};
- // This is actually its own standalone stage.
- struct GeoNear2DSphereNode : public QuerySolutionNode {
- GeoNear2DSphereNode() : addPointMeta(false), addDistMeta(false) { }
- virtual ~GeoNear2DSphereNode() { }
+struct IndexScanNode : public QuerySolutionNode {
+ IndexScanNode();
+ virtual ~IndexScanNode() {}
- virtual StageType getType() const { return STAGE_GEO_NEAR_2DSPHERE; }
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+ virtual void computeProperties();
- bool fetched() const { return true; }
- bool hasField(const std::string& field) const { return true; }
- bool sortedByDiskLoc() const { return false; }
- const BSONObjSet& getSort() const { return _sorts; }
+ virtual StageType getType() const {
+ return STAGE_IXSCAN;
+ }
- QuerySolutionNode* clone() const;
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
- BSONObjSet _sorts;
+ bool fetched() const {
+ return false;
+ }
+ bool hasField(const std::string& field) const;
+ bool sortedByDiskLoc() const;
+ const BSONObjSet& getSort() const {
+ return _sorts;
+ }
- // Not owned here
- const GeoNearExpression* nq;
- IndexBounds baseBounds;
+ QuerySolutionNode* clone() const;
- BSONObj indexKeyPattern;
- bool addPointMeta;
- bool addDistMeta;
- };
+ BSONObjSet _sorts;
- //
- // Internal nodes used to provide functionality
- //
+ BSONObj indexKeyPattern;
+ bool indexIsMultiKey;
- /**
- * If we're answering a query on a sharded cluster, docs must be checked against the shard key
- * to ensure that we don't return data that shouldn't be there. This must be done prior to
- * projection, and in fact should be done as early as possible to avoid propagating stale data
- * through the pipeline.
- */
- struct ShardingFilterNode : public QuerySolutionNode {
- ShardingFilterNode() { }
- virtual ~ShardingFilterNode() { }
+ int direction;
- virtual StageType getType() const { return STAGE_SHARDING_FILTER; }
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+ // maxScan option to .find() limits how many docs we look at.
+ int maxScan;
- bool fetched() const { return children[0]->fetched(); }
- bool hasField(const std::string& field) const { return children[0]->hasField(field); }
- bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc(); }
- const BSONObjSet& getSort() const { return children[0]->getSort(); }
+ // If there's a 'returnKey' projection we add key metadata.
+ bool addKeyMetadata;
- QuerySolutionNode* clone() const;
- };
+ // BIG NOTE:
+ // If you use simple bounds, we'll use whatever index access method the keypattern implies.
+    // If you use the complex bounds, we force Btree access; the complex bounds require it.
+ IndexBounds bounds;
+};
+struct ProjectionNode : public QuerySolutionNode {
/**
- * If documents mutate or are deleted during a query, we can (in some cases) fetch them
- * and still return them. This stage merges documents that have been mutated or deleted
- * into the query result stream.
+ * We have a few implementations of the projection functionality. The most general
+ * implementation 'DEFAULT' is much slower than the fast-path implementations
+ * below. We only really have all the information available to choose a projection
+ * implementation at planning time.
*/
- struct KeepMutationsNode : public QuerySolutionNode {
- KeepMutationsNode() { }
- virtual ~KeepMutationsNode() { }
-
- virtual StageType getType() const { return STAGE_KEEP_MUTATIONS; }
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- // Any flagged results are OWNED_OBJ and therefore we're covered if our child is.
- bool fetched() const { return children[0]->fetched(); }
-
- // Any flagged results are OWNED_OBJ and as such they'll have any field we need.
- bool hasField(const std::string& field) const { return children[0]->hasField(field); }
+ enum ProjectionType {
+ // This is the most general implementation of the projection functionality. It handles
+ // every case.
+ DEFAULT,
- bool sortedByDiskLoc() const { return false; }
- const BSONObjSet& getSort() const { return sorts; }
+ // This is a fast-path for when the projection is fully covered by one index.
+ COVERED_ONE_INDEX,
- QuerySolutionNode* clone() const;
-
- // Since we merge in flagged results we have no sort order.
- BSONObjSet sorts;
+ // This is a fast-path for when the projection only has inclusions on non-dotted fields.
+ SIMPLE_DOC,
};
- /**
- * Distinct queries only want one value for a given field. We run an index scan but
- * *always* skip over the current key to the next key.
- */
- struct DistinctNode : public QuerySolutionNode {
- DistinctNode() { }
- virtual ~DistinctNode() { }
-
- virtual StageType getType() const { return STAGE_DISTINCT_SCAN; }
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- // This stage is created "on top" of normal planning and as such the properties
- // below don't really matter.
- bool fetched() const { return false; }
- bool hasField(const std::string& field) const { return !indexKeyPattern[field].eoo(); }
- bool sortedByDiskLoc() const { return false; }
- const BSONObjSet& getSort() const { return sorts; }
+ ProjectionNode() : fullExpression(NULL), projType(DEFAULT) {}
- QuerySolutionNode* clone() const;
+ virtual ~ProjectionNode() {}
- BSONObjSet sorts;
+ virtual StageType getType() const {
+ return STAGE_PROJECTION;
+ }
- BSONObj indexKeyPattern;
- int direction;
- IndexBounds bounds;
- // We are distinct-ing over the 'fieldNo'-th field of 'indexKeyPattern'.
- int fieldNo;
- };
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
/**
- * Some count queries reduce to counting how many keys are between two entries in a
- * Btree.
+     * Data from the projection node is considered fetched iff the child provides fetched data.
*/
- struct CountNode : public QuerySolutionNode {
- CountNode() { }
- virtual ~CountNode() { }
-
- virtual StageType getType() const { return STAGE_COUNT_SCAN; }
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const { return false; }
- bool hasField(const std::string& field) const { return true; }
- bool sortedByDiskLoc() const { return false; }
- const BSONObjSet& getSort() const { return sorts; }
+ bool fetched() const {
+ return children[0]->fetched();
+ }
- QuerySolutionNode* clone() const;
+ bool hasField(const std::string& field) const {
+ // TODO: Returning false isn't always the right answer -- we may either be including
+ // certain fields, or we may be dropping fields (in which case hasField returns true).
+ //
+ // Given that projection sits on top of everything else in .find() it doesn't matter
+ // what we do here.
+ return false;
+ }
+
+ bool sortedByDiskLoc() const {
+ // Projections destroy the RecordId. By returning true here, this kind of implies that a
+ // fetch could still be done upstream.
+ //
+ // Perhaps this should be false to not imply that there *is* a RecordId? Kind of a
+ // corner case.
+ return children[0]->sortedByDiskLoc();
+ }
+
+ const BSONObjSet& getSort() const {
+ // TODO: If we're applying a projection that maintains sort order, the prefix of the
+ // sort order we project is the sort order.
+ return _sorts;
+ }
+
+ QuerySolutionNode* clone() const;
+
+ BSONObjSet _sorts;
+
+ // The full query tree. Needed when we have positional operators.
+ // Owned in the CanonicalQuery, not here.
+ MatchExpression* fullExpression;
+
+ // Given that we don't yet have a MatchExpression analogue for the expression language, we
+ // use a BSONObj.
+ BSONObj projection;
+
+ // What implementation of the projection algorithm should we use?
+ ProjectionType projType;
+
+ // Only meaningful if projType == COVERED_ONE_INDEX. This is the key pattern of the index
+ // supplying our covered data. We can pre-compute which fields to include and cache that
+ // data for later if we know we only have one index.
+ BSONObj coveredKeyObj;
+};
+
+struct SortNode : public QuerySolutionNode {
+ SortNode() : limit(0) {}
+ virtual ~SortNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_SORT;
+ }
+
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const {
+ return children[0]->fetched();
+ }
+ bool hasField(const std::string& field) const {
+ return children[0]->hasField(field);
+ }
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+
+ const BSONObjSet& getSort() const {
+ return _sorts;
+ }
+
+ QuerySolutionNode* clone() const;
+
+ virtual void computeProperties() {
+ for (size_t i = 0; i < children.size(); ++i) {
+ children[i]->computeProperties();
+ }
+ _sorts.clear();
+ _sorts.insert(pattern);
+ }
+
+ BSONObjSet _sorts;
+
+ BSONObj pattern;
+
+ BSONObj query;
+
+ // Sum of both limit and skip count in the parsed query.
+ size_t limit;
+};
+
+struct LimitNode : public QuerySolutionNode {
+ LimitNode() {}
+ virtual ~LimitNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_LIMIT;
+ }
+
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const {
+ return children[0]->fetched();
+ }
+ bool hasField(const std::string& field) const {
+ return children[0]->hasField(field);
+ }
+ bool sortedByDiskLoc() const {
+ return children[0]->sortedByDiskLoc();
+ }
+ const BSONObjSet& getSort() const {
+ return children[0]->getSort();
+ }
+
+ QuerySolutionNode* clone() const;
+
+ int limit;
+};
+
+struct SkipNode : public QuerySolutionNode {
+ SkipNode() {}
+ virtual ~SkipNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_SKIP;
+ }
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const {
+ return children[0]->fetched();
+ }
+ bool hasField(const std::string& field) const {
+ return children[0]->hasField(field);
+ }
+ bool sortedByDiskLoc() const {
+ return children[0]->sortedByDiskLoc();
+ }
+ const BSONObjSet& getSort() const {
+ return children[0]->getSort();
+ }
+
+ QuerySolutionNode* clone() const;
+
+ int skip;
+};
+
+// This is a standalone stage.
+struct GeoNear2DNode : public QuerySolutionNode {
+ GeoNear2DNode() : addPointMeta(false), addDistMeta(false) {}
+ virtual ~GeoNear2DNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_GEO_NEAR_2D;
+ }
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const {
+ return true;
+ }
+ bool hasField(const std::string& field) const {
+ return true;
+ }
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return _sorts;
+ }
+
+ QuerySolutionNode* clone() const;
+
+ BSONObjSet _sorts;
+
+ // Not owned here
+ const GeoNearExpression* nq;
+ IndexBounds baseBounds;
+
+ BSONObj indexKeyPattern;
+ bool addPointMeta;
+ bool addDistMeta;
+};
+
+// This is actually its own standalone stage.
+struct GeoNear2DSphereNode : public QuerySolutionNode {
+ GeoNear2DSphereNode() : addPointMeta(false), addDistMeta(false) {}
+ virtual ~GeoNear2DSphereNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_GEO_NEAR_2DSPHERE;
+ }
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const {
+ return true;
+ }
+ bool hasField(const std::string& field) const {
+ return true;
+ }
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return _sorts;
+ }
+
+ QuerySolutionNode* clone() const;
+
+ BSONObjSet _sorts;
+
+ // Not owned here
+ const GeoNearExpression* nq;
+ IndexBounds baseBounds;
+
+ BSONObj indexKeyPattern;
+ bool addPointMeta;
+ bool addDistMeta;
+};
+
+//
+// Internal nodes used to provide functionality
+//
- BSONObjSet sorts;
+/**
+ * If we're answering a query on a sharded cluster, docs must be checked against the shard key
+ * to ensure that we don't return data that shouldn't be there. This must be done prior to
+ * projection, and in fact should be done as early as possible to avoid propagating stale data
+ * through the pipeline.
+ */
+struct ShardingFilterNode : public QuerySolutionNode {
+ ShardingFilterNode() {}
+ virtual ~ShardingFilterNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_SHARDING_FILTER;
+ }
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const {
+ return children[0]->fetched();
+ }
+ bool hasField(const std::string& field) const {
+ return children[0]->hasField(field);
+ }
+ bool sortedByDiskLoc() const {
+ return children[0]->sortedByDiskLoc();
+ }
+ const BSONObjSet& getSort() const {
+ return children[0]->getSort();
+ }
+
+ QuerySolutionNode* clone() const;
+};
- BSONObj indexKeyPattern;
+/**
+ * If documents mutate or are deleted during a query, we can (in some cases) fetch them
+ * and still return them. This stage merges documents that have been mutated or deleted
+ * into the query result stream.
+ */
+struct KeepMutationsNode : public QuerySolutionNode {
+ KeepMutationsNode() {}
+ virtual ~KeepMutationsNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_KEEP_MUTATIONS;
+ }
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ // Any flagged results are OWNED_OBJ and therefore we're covered if our child is.
+ bool fetched() const {
+ return children[0]->fetched();
+ }
+
+ // Any flagged results are OWNED_OBJ and as such they'll have any field we need.
+ bool hasField(const std::string& field) const {
+ return children[0]->hasField(field);
+ }
+
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return sorts;
+ }
+
+ QuerySolutionNode* clone() const;
+
+ // Since we merge in flagged results we have no sort order.
+ BSONObjSet sorts;
+};
- BSONObj startKey;
- bool startKeyInclusive;
+/**
+ * Distinct queries only want one value for a given field. We run an index scan but
+ * *always* skip over the current key to the next key.
+ */
+struct DistinctNode : public QuerySolutionNode {
+ DistinctNode() {}
+ virtual ~DistinctNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_DISTINCT_SCAN;
+ }
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ // This stage is created "on top" of normal planning and as such the properties
+ // below don't really matter.
+ bool fetched() const {
+ return false;
+ }
+ bool hasField(const std::string& field) const {
+ return !indexKeyPattern[field].eoo();
+ }
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return sorts;
+ }
+
+ QuerySolutionNode* clone() const;
+
+ BSONObjSet sorts;
+
+ BSONObj indexKeyPattern;
+ int direction;
+ IndexBounds bounds;
+ // We are distinct-ing over the 'fieldNo'-th field of 'indexKeyPattern'.
+ int fieldNo;
+};
- BSONObj endKey;
- bool endKeyInclusive;
- };
+/**
+ * Some count queries reduce to counting how many keys are between two entries in a
+ * Btree.
+ */
+struct CountNode : public QuerySolutionNode {
+ CountNode() {}
+ virtual ~CountNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_COUNT_SCAN;
+ }
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const {
+ return false;
+ }
+ bool hasField(const std::string& field) const {
+ return true;
+ }
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return sorts;
+ }
+
+ QuerySolutionNode* clone() const;
+
+ BSONObjSet sorts;
+
+ BSONObj indexKeyPattern;
+
+ BSONObj startKey;
+ bool startKeyInclusive;
+
+ BSONObj endKey;
+ bool endKeyInclusive;
+};
} // namespace mongo
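
Editorial note: the pass-through nodes above (FetchNode, LimitNode, SkipNode, ShardingFilterNode) answer the QuerySolutionNode property questions -- fetched(), hasField(), sortedByDiskLoc(), getSort() -- by delegating to their first child, while leaf nodes such as CollectionScanNode answer for themselves. Below is a minimal, self-contained sketch of that delegation pattern; the types are simplified stand-ins for illustration, not the MongoDB classes.

    #include <string>
    #include <vector>

    // Simplified stand-in for the QuerySolutionNode property interface.
    struct Node {
        std::vector<Node*> children;  // ownership is out of scope for this sketch
        virtual ~Node() {}
        virtual bool fetched() const = 0;
        virtual bool hasField(const std::string& field) const = 0;
    };

    // A leaf that produces whole documents, like CollectionScanNode above.
    struct LeafScan : public Node {
        virtual bool fetched() const { return true; }
        virtual bool hasField(const std::string&) const { return true; }
    };

    // A pass-through node, like LimitNode or SkipNode above: it adds no data of
    // its own, so it forwards every property question to its single child.
    struct PassThrough : public Node {
        virtual bool fetched() const { return children[0]->fetched(); }
        virtual bool hasField(const std::string& f) const { return children[0]->hasField(f); }
    };
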
diff --git a/src/mongo/db/query/query_yield.cpp b/src/mongo/db/query/query_yield.cpp
index 5bd1733f0c6..4e0d463a83e 100644
--- a/src/mongo/db/query/query_yield.cpp
+++ b/src/mongo/db/query/query_yield.cpp
@@ -36,40 +36,40 @@
namespace mongo {
- // static
- void QueryYield::yieldAllLocks(OperationContext* txn, RecordFetcher* fetcher) {
- // Things have to happen here in a specific order:
- // 1) Tell the RecordFetcher to do any setup which needs to happen inside locks
- // 2) Release lock mgr locks
- // 3) Go to sleep
- // 4) Touch the record we're yielding on, if there is one (RecordFetcher::fetch)
- // 5) Reacquire lock mgr locks
+// static
+void QueryYield::yieldAllLocks(OperationContext* txn, RecordFetcher* fetcher) {
+ // Things have to happen here in a specific order:
+ // 1) Tell the RecordFetcher to do any setup which needs to happen inside locks
+ // 2) Release lock mgr locks
+ // 3) Go to sleep
+ // 4) Touch the record we're yielding on, if there is one (RecordFetcher::fetch)
+ // 5) Reacquire lock mgr locks
- Locker* locker = txn->lockState();
+ Locker* locker = txn->lockState();
- Locker::LockSnapshot snapshot;
+ Locker::LockSnapshot snapshot;
- if (fetcher) {
- fetcher->setup();
- }
-
- // Nothing was unlocked, just return, yielding is pointless.
- if (!locker->saveLockStateAndUnlock(&snapshot)) {
- return;
- }
+ if (fetcher) {
+ fetcher->setup();
+ }
- // Top-level locks are freed, release any potential low-level (storage engine-specific
- // locks). If we are yielding, we are at a safe place to do so.
- txn->recoveryUnit()->abandonSnapshot();
+ // Nothing was unlocked, just return, yielding is pointless.
+ if (!locker->saveLockStateAndUnlock(&snapshot)) {
+ return;
+ }
- // Track the number of yields in CurOp.
- CurOp::get(txn)->yielded();
+ // Top-level locks are freed, release any potential low-level (storage engine-specific
+ // locks). If we are yielding, we are at a safe place to do so.
+ txn->recoveryUnit()->abandonSnapshot();
- if (fetcher) {
- fetcher->fetch();
- }
+ // Track the number of yields in CurOp.
+ CurOp::get(txn)->yielded();
- locker->restoreLockState(snapshot);
+ if (fetcher) {
+ fetcher->fetch();
}
-} // namespace mongo
+ locker->restoreLockState(snapshot);
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/query/query_yield.h b/src/mongo/db/query/query_yield.h
index 14f018368af..a6db563d195 100644
--- a/src/mongo/db/query/query_yield.h
+++ b/src/mongo/db/query/query_yield.h
@@ -30,24 +30,23 @@
namespace mongo {
- class OperationContext;
- class RecordFetcher;
+class OperationContext;
+class RecordFetcher;
+/**
+ * See the documentation for yieldAllLocks(...).
+ */
+class QueryYield {
+ QueryYield();
+
+public:
/**
- * See the documentation for yieldAllLocks(...).
+ * If not in a nested context, unlocks all locks, suggests to the operating system to
+ * switch to another thread, and then reacquires all locks.
+ *
+ * If in a nested context (eg DBDirectClient), does nothing.
*/
- class QueryYield {
- QueryYield();
-
- public:
-
- /**
- * If not in a nested context, unlocks all locks, suggests to the operating system to
- * switch to another thread, and then reacquires all locks.
- *
- * If in a nested context (eg DBDirectClient), does nothing.
- */
- static void yieldAllLocks(OperationContext* txn, RecordFetcher* fetcher);
- };
+ static void yieldAllLocks(OperationContext* txn, RecordFetcher* fetcher);
+};
-} // namespace mongo
+} // namespace mongo
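
Editorial note: a hedged caller-side sketch of the contract documented above -- a yield point hands any pending RecordFetcher to yieldAllLocks(), which carries out steps 1-5 from query_yield.cpp. The function and parameter names below are assumptions for illustration; only QueryYield::yieldAllLocks comes from this patch.

    // Illustrative only; 'yieldPoint' and 'fetcherOrNull' are hypothetical names.
    void yieldPoint(OperationContext* txn, RecordFetcher* fetcherOrNull) {
        // fetcherOrNull may be NULL when no document needs to be paged in during the yield.
        QueryYield::yieldAllLocks(txn, fetcherOrNull);
        // On return, all lock manager locks have been reacquired and execution can resume.
    }
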
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index ba6a439dba4..c5a922a6aaf 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -55,294 +55,297 @@
namespace mongo {
- using std::unique_ptr;
-
- PlanStage* buildStages(OperationContext* txn,
- Collection* collection,
- const QuerySolution& qsol,
- const QuerySolutionNode* root,
- WorkingSet* ws) {
- if (STAGE_COLLSCAN == root->getType()) {
- const CollectionScanNode* csn = static_cast<const CollectionScanNode*>(root);
- CollectionScanParams params;
- params.collection = collection;
- params.tailable = csn->tailable;
- params.direction = (csn->direction == 1) ? CollectionScanParams::FORWARD
- : CollectionScanParams::BACKWARD;
- params.maxScan = csn->maxScan;
- return new CollectionScan(txn, params, ws, csn->filter.get());
+using std::unique_ptr;
+
+PlanStage* buildStages(OperationContext* txn,
+ Collection* collection,
+ const QuerySolution& qsol,
+ const QuerySolutionNode* root,
+ WorkingSet* ws) {
+ if (STAGE_COLLSCAN == root->getType()) {
+ const CollectionScanNode* csn = static_cast<const CollectionScanNode*>(root);
+ CollectionScanParams params;
+ params.collection = collection;
+ params.tailable = csn->tailable;
+ params.direction =
+ (csn->direction == 1) ? CollectionScanParams::FORWARD : CollectionScanParams::BACKWARD;
+ params.maxScan = csn->maxScan;
+ return new CollectionScan(txn, params, ws, csn->filter.get());
+ } else if (STAGE_IXSCAN == root->getType()) {
+ const IndexScanNode* ixn = static_cast<const IndexScanNode*>(root);
+
+ if (NULL == collection) {
+ warning() << "Can't ixscan null namespace";
+ return NULL;
}
- else if (STAGE_IXSCAN == root->getType()) {
- const IndexScanNode* ixn = static_cast<const IndexScanNode*>(root);
-
- if (NULL == collection) {
- warning() << "Can't ixscan null namespace";
- return NULL;
- }
- IndexScanParams params;
+ IndexScanParams params;
- params.descriptor =
- collection->getIndexCatalog()->findIndexByKeyPattern( txn, ixn->indexKeyPattern );
- if ( params.descriptor == NULL ) {
- warning() << "Can't find index " << ixn->indexKeyPattern.toString()
- << "in namespace " << collection->ns() << endl;
- return NULL;
- }
+ params.descriptor =
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, ixn->indexKeyPattern);
+ if (params.descriptor == NULL) {
+            warning() << "Can't find index " << ixn->indexKeyPattern.toString()
+                      << " in namespace " << collection->ns() << endl;
+ return NULL;
+ }
- params.bounds = ixn->bounds;
- params.direction = ixn->direction;
- params.maxScan = ixn->maxScan;
- params.addKeyMetadata = ixn->addKeyMetadata;
- return new IndexScan(txn, params, ws, ixn->filter.get());
+ params.bounds = ixn->bounds;
+ params.direction = ixn->direction;
+ params.maxScan = ixn->maxScan;
+ params.addKeyMetadata = ixn->addKeyMetadata;
+ return new IndexScan(txn, params, ws, ixn->filter.get());
+ } else if (STAGE_FETCH == root->getType()) {
+ const FetchNode* fn = static_cast<const FetchNode*>(root);
+ PlanStage* childStage = buildStages(txn, collection, qsol, fn->children[0], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- else if (STAGE_FETCH == root->getType()) {
- const FetchNode* fn = static_cast<const FetchNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, qsol, fn->children[0], ws);
- if (NULL == childStage) { return NULL; }
- return new FetchStage(txn, ws, childStage, fn->filter.get(), collection);
+ return new FetchStage(txn, ws, childStage, fn->filter.get(), collection);
+ } else if (STAGE_SORT == root->getType()) {
+ const SortNode* sn = static_cast<const SortNode*>(root);
+ PlanStage* childStage = buildStages(txn, collection, qsol, sn->children[0], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- else if (STAGE_SORT == root->getType()) {
- const SortNode* sn = static_cast<const SortNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, qsol, sn->children[0], ws);
- if (NULL == childStage) { return NULL; }
- SortStageParams params;
- params.collection = collection;
- params.pattern = sn->pattern;
- params.query = sn->query;
- params.limit = sn->limit;
- return new SortStage(params, ws, childStage);
+ SortStageParams params;
+ params.collection = collection;
+ params.pattern = sn->pattern;
+ params.query = sn->query;
+ params.limit = sn->limit;
+ return new SortStage(params, ws, childStage);
+ } else if (STAGE_PROJECTION == root->getType()) {
+ const ProjectionNode* pn = static_cast<const ProjectionNode*>(root);
+ PlanStage* childStage = buildStages(txn, collection, qsol, pn->children[0], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- else if (STAGE_PROJECTION == root->getType()) {
- const ProjectionNode* pn = static_cast<const ProjectionNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, qsol, pn->children[0], ws);
- if (NULL == childStage) { return NULL; }
-
- ProjectionStageParams params(WhereCallbackReal(txn, collection->ns().db()));
- params.projObj = pn->projection;
-
- // Stuff the right data into the params depending on what proj impl we use.
- if (ProjectionNode::DEFAULT == pn->projType) {
- params.fullExpression = pn->fullExpression;
- params.projImpl = ProjectionStageParams::NO_FAST_PATH;
- }
- else if (ProjectionNode::COVERED_ONE_INDEX == pn->projType) {
- params.projImpl = ProjectionStageParams::COVERED_ONE_INDEX;
- params.coveredKeyObj = pn->coveredKeyObj;
- invariant(!pn->coveredKeyObj.isEmpty());
- }
- else {
- invariant(ProjectionNode::SIMPLE_DOC == pn->projType);
- params.projImpl = ProjectionStageParams::SIMPLE_DOC;
- }
- return new ProjectionStage(params, ws, childStage);
+ ProjectionStageParams params(WhereCallbackReal(txn, collection->ns().db()));
+ params.projObj = pn->projection;
+
+ // Stuff the right data into the params depending on what proj impl we use.
+ if (ProjectionNode::DEFAULT == pn->projType) {
+ params.fullExpression = pn->fullExpression;
+ params.projImpl = ProjectionStageParams::NO_FAST_PATH;
+ } else if (ProjectionNode::COVERED_ONE_INDEX == pn->projType) {
+ params.projImpl = ProjectionStageParams::COVERED_ONE_INDEX;
+ params.coveredKeyObj = pn->coveredKeyObj;
+ invariant(!pn->coveredKeyObj.isEmpty());
+ } else {
+ invariant(ProjectionNode::SIMPLE_DOC == pn->projType);
+ params.projImpl = ProjectionStageParams::SIMPLE_DOC;
}
- else if (STAGE_LIMIT == root->getType()) {
- const LimitNode* ln = static_cast<const LimitNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, qsol, ln->children[0], ws);
- if (NULL == childStage) { return NULL; }
- return new LimitStage(ln->limit, ws, childStage);
- }
- else if (STAGE_SKIP == root->getType()) {
- const SkipNode* sn = static_cast<const SkipNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, qsol, sn->children[0], ws);
- if (NULL == childStage) { return NULL; }
- return new SkipStage(sn->skip, ws, childStage);
+
+ return new ProjectionStage(params, ws, childStage);
+ } else if (STAGE_LIMIT == root->getType()) {
+ const LimitNode* ln = static_cast<const LimitNode*>(root);
+ PlanStage* childStage = buildStages(txn, collection, qsol, ln->children[0], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- else if (STAGE_AND_HASH == root->getType()) {
- const AndHashNode* ahn = static_cast<const AndHashNode*>(root);
- unique_ptr<AndHashStage> ret(new AndHashStage(ws, collection));
- for (size_t i = 0; i < ahn->children.size(); ++i) {
- PlanStage* childStage = buildStages(txn, collection, qsol, ahn->children[i], ws);
- if (NULL == childStage) { return NULL; }
- ret->addChild(childStage);
- }
- return ret.release();
+ return new LimitStage(ln->limit, ws, childStage);
+ } else if (STAGE_SKIP == root->getType()) {
+ const SkipNode* sn = static_cast<const SkipNode*>(root);
+ PlanStage* childStage = buildStages(txn, collection, qsol, sn->children[0], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- else if (STAGE_OR == root->getType()) {
- const OrNode * orn = static_cast<const OrNode*>(root);
- unique_ptr<OrStage> ret(new OrStage(ws, orn->dedup, orn->filter.get()));
- for (size_t i = 0; i < orn->children.size(); ++i) {
- PlanStage* childStage = buildStages(txn, collection, qsol, orn->children[i], ws);
- if (NULL == childStage) { return NULL; }
- ret->addChild(childStage);
+ return new SkipStage(sn->skip, ws, childStage);
+ } else if (STAGE_AND_HASH == root->getType()) {
+ const AndHashNode* ahn = static_cast<const AndHashNode*>(root);
+ unique_ptr<AndHashStage> ret(new AndHashStage(ws, collection));
+ for (size_t i = 0; i < ahn->children.size(); ++i) {
+ PlanStage* childStage = buildStages(txn, collection, qsol, ahn->children[i], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- return ret.release();
+ ret->addChild(childStage);
}
- else if (STAGE_AND_SORTED == root->getType()) {
- const AndSortedNode* asn = static_cast<const AndSortedNode*>(root);
- unique_ptr<AndSortedStage> ret(new AndSortedStage(ws, collection));
- for (size_t i = 0; i < asn->children.size(); ++i) {
- PlanStage* childStage = buildStages(txn, collection, qsol, asn->children[i], ws);
- if (NULL == childStage) { return NULL; }
- ret->addChild(childStage);
+ return ret.release();
+ } else if (STAGE_OR == root->getType()) {
+ const OrNode* orn = static_cast<const OrNode*>(root);
+ unique_ptr<OrStage> ret(new OrStage(ws, orn->dedup, orn->filter.get()));
+ for (size_t i = 0; i < orn->children.size(); ++i) {
+ PlanStage* childStage = buildStages(txn, collection, qsol, orn->children[i], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- return ret.release();
+ ret->addChild(childStage);
}
- else if (STAGE_SORT_MERGE == root->getType()) {
- const MergeSortNode* msn = static_cast<const MergeSortNode*>(root);
- MergeSortStageParams params;
- params.dedup = msn->dedup;
- params.pattern = msn->sort;
- unique_ptr<MergeSortStage> ret(new MergeSortStage(params, ws, collection));
- for (size_t i = 0; i < msn->children.size(); ++i) {
- PlanStage* childStage = buildStages(txn, collection, qsol, msn->children[i], ws);
- if (NULL == childStage) { return NULL; }
- ret->addChild(childStage);
+ return ret.release();
+ } else if (STAGE_AND_SORTED == root->getType()) {
+ const AndSortedNode* asn = static_cast<const AndSortedNode*>(root);
+ unique_ptr<AndSortedStage> ret(new AndSortedStage(ws, collection));
+ for (size_t i = 0; i < asn->children.size(); ++i) {
+ PlanStage* childStage = buildStages(txn, collection, qsol, asn->children[i], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- return ret.release();
+ ret->addChild(childStage);
}
- else if (STAGE_GEO_NEAR_2D == root->getType()) {
- const GeoNear2DNode* node = static_cast<const GeoNear2DNode*>(root);
-
- GeoNearParams params;
- params.nearQuery = node->nq;
- params.baseBounds = node->baseBounds;
- params.filter = node->filter.get();
- params.addPointMeta = node->addPointMeta;
- params.addDistMeta = node->addDistMeta;
-
- IndexDescriptor* twoDIndex = collection->getIndexCatalog()->findIndexByKeyPattern(txn,
- node->indexKeyPattern);
-
- if (twoDIndex == NULL) {
- warning() << "Can't find 2D index " << node->indexKeyPattern.toString()
- << "in namespace " << collection->ns() << endl;
+ return ret.release();
+ } else if (STAGE_SORT_MERGE == root->getType()) {
+ const MergeSortNode* msn = static_cast<const MergeSortNode*>(root);
+ MergeSortStageParams params;
+ params.dedup = msn->dedup;
+ params.pattern = msn->sort;
+ unique_ptr<MergeSortStage> ret(new MergeSortStage(params, ws, collection));
+ for (size_t i = 0; i < msn->children.size(); ++i) {
+ PlanStage* childStage = buildStages(txn, collection, qsol, msn->children[i], ws);
+ if (NULL == childStage) {
return NULL;
}
+ ret->addChild(childStage);
+ }
+ return ret.release();
+ } else if (STAGE_GEO_NEAR_2D == root->getType()) {
+ const GeoNear2DNode* node = static_cast<const GeoNear2DNode*>(root);
+
+ GeoNearParams params;
+ params.nearQuery = node->nq;
+ params.baseBounds = node->baseBounds;
+ params.filter = node->filter.get();
+ params.addPointMeta = node->addPointMeta;
+ params.addDistMeta = node->addDistMeta;
+
+ IndexDescriptor* twoDIndex =
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, node->indexKeyPattern);
+
+ if (twoDIndex == NULL) {
+            warning() << "Can't find 2D index " << node->indexKeyPattern.toString()
+                      << " in namespace " << collection->ns() << endl;
+ return NULL;
+ }
- GeoNear2DStage* nearStage = new GeoNear2DStage(params, txn, ws, collection, twoDIndex);
+ GeoNear2DStage* nearStage = new GeoNear2DStage(params, txn, ws, collection, twoDIndex);
- return nearStage;
- }
- else if (STAGE_GEO_NEAR_2DSPHERE == root->getType()) {
- const GeoNear2DSphereNode* node = static_cast<const GeoNear2DSphereNode*>(root);
-
- GeoNearParams params;
- params.nearQuery = node->nq;
- params.baseBounds = node->baseBounds;
- params.filter = node->filter.get();
- params.addPointMeta = node->addPointMeta;
- params.addDistMeta = node->addDistMeta;
-
- IndexDescriptor* s2Index = collection->getIndexCatalog()->findIndexByKeyPattern(txn,
- node->indexKeyPattern);
-
- if (s2Index == NULL) {
- warning() << "Can't find 2DSphere index " << node->indexKeyPattern.toString()
- << "in namespace " << collection->ns() << endl;
- return NULL;
- }
+ return nearStage;
+ } else if (STAGE_GEO_NEAR_2DSPHERE == root->getType()) {
+ const GeoNear2DSphereNode* node = static_cast<const GeoNear2DSphereNode*>(root);
- return new GeoNear2DSphereStage(params, txn, ws, collection, s2Index);
+ GeoNearParams params;
+ params.nearQuery = node->nq;
+ params.baseBounds = node->baseBounds;
+ params.filter = node->filter.get();
+ params.addPointMeta = node->addPointMeta;
+ params.addDistMeta = node->addDistMeta;
+
+ IndexDescriptor* s2Index =
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, node->indexKeyPattern);
+
+ if (s2Index == NULL) {
+            warning() << "Can't find 2DSphere index " << node->indexKeyPattern.toString()
+                      << " in namespace " << collection->ns() << endl;
+ return NULL;
}
- else if (STAGE_TEXT == root->getType()) {
- const TextNode* node = static_cast<const TextNode*>(root);
- if (NULL == collection) {
- warning() << "Null collection for text";
- return NULL;
- }
- vector<IndexDescriptor*> idxMatches;
- collection->getIndexCatalog()->findIndexByType(txn, "text", idxMatches);
- if (1 != idxMatches.size()) {
- warning() << "No text index, or more than one text index";
- return NULL;
- }
- IndexDescriptor* index = idxMatches[0];
- const FTSAccessMethod* fam =
- static_cast<FTSAccessMethod*>( collection->getIndexCatalog()->getIndex( index ) );
- TextStageParams params(fam->getSpec());
-
- //params.collection = collection;
- params.index = index;
- params.spec = fam->getSpec();
- params.indexPrefix = node->indexPrefix;
-
- const std::string& language = ("" == node->language
- ? fam->getSpec().defaultLanguage().str()
- : node->language);
-
- Status parseStatus = params.query.parse(node->query,
- language,
- node->caseSensitive,
- fam->getSpec().getTextIndexVersion());
- if (!parseStatus.isOK()) {
- warning() << "Can't parse text search query";
- return NULL;
- }
+ return new GeoNear2DSphereStage(params, txn, ws, collection, s2Index);
+ } else if (STAGE_TEXT == root->getType()) {
+ const TextNode* node = static_cast<const TextNode*>(root);
- return new TextStage(txn, params, ws, node->filter.get());
+ if (NULL == collection) {
+ warning() << "Null collection for text";
+ return NULL;
}
- else if (STAGE_SHARDING_FILTER == root->getType()) {
- const ShardingFilterNode* fn = static_cast<const ShardingFilterNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, qsol, fn->children[0], ws);
- if (NULL == childStage) { return NULL; }
- return new ShardFilterStage(shardingState.getCollectionMetadata(collection->ns()),
- ws, childStage);
+ vector<IndexDescriptor*> idxMatches;
+ collection->getIndexCatalog()->findIndexByType(txn, "text", idxMatches);
+ if (1 != idxMatches.size()) {
+ warning() << "No text index, or more than one text index";
+ return NULL;
}
- else if (STAGE_KEEP_MUTATIONS == root->getType()) {
- const KeepMutationsNode* km = static_cast<const KeepMutationsNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, qsol, km->children[0], ws);
- if (NULL == childStage) { return NULL; }
- return new KeepMutationsStage(km->filter.get(), ws, childStage);
+ IndexDescriptor* index = idxMatches[0];
+ const FTSAccessMethod* fam =
+ static_cast<FTSAccessMethod*>(collection->getIndexCatalog()->getIndex(index));
+ TextStageParams params(fam->getSpec());
+
+ // params.collection = collection;
+ params.index = index;
+ params.spec = fam->getSpec();
+ params.indexPrefix = node->indexPrefix;
+
+ const std::string& language =
+ ("" == node->language ? fam->getSpec().defaultLanguage().str() : node->language);
+
+ Status parseStatus = params.query.parse(
+ node->query, language, node->caseSensitive, fam->getSpec().getTextIndexVersion());
+ if (!parseStatus.isOK()) {
+ warning() << "Can't parse text search query";
+ return NULL;
}
- else if (STAGE_DISTINCT_SCAN == root->getType()) {
- const DistinctNode* dn = static_cast<const DistinctNode*>(root);
- if (NULL == collection) {
- warning() << "Can't distinct-scan null namespace";
- return NULL;
- }
-
- DistinctParams params;
-
- params.descriptor =
- collection->getIndexCatalog()->findIndexByKeyPattern(txn, dn->indexKeyPattern);
- params.direction = dn->direction;
- params.bounds = dn->bounds;
- params.fieldNo = dn->fieldNo;
- return new DistinctScan(txn, params, ws);
+ return new TextStage(txn, params, ws, node->filter.get());
+ } else if (STAGE_SHARDING_FILTER == root->getType()) {
+ const ShardingFilterNode* fn = static_cast<const ShardingFilterNode*>(root);
+ PlanStage* childStage = buildStages(txn, collection, qsol, fn->children[0], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- else if (STAGE_COUNT_SCAN == root->getType()) {
- const CountNode* cn = static_cast<const CountNode*>(root);
+ return new ShardFilterStage(
+ shardingState.getCollectionMetadata(collection->ns()), ws, childStage);
+ } else if (STAGE_KEEP_MUTATIONS == root->getType()) {
+ const KeepMutationsNode* km = static_cast<const KeepMutationsNode*>(root);
+ PlanStage* childStage = buildStages(txn, collection, qsol, km->children[0], ws);
+ if (NULL == childStage) {
+ return NULL;
+ }
+ return new KeepMutationsStage(km->filter.get(), ws, childStage);
+ } else if (STAGE_DISTINCT_SCAN == root->getType()) {
+ const DistinctNode* dn = static_cast<const DistinctNode*>(root);
- if (NULL == collection) {
- warning() << "Can't fast-count null namespace (collection null)";
- return NULL;
- }
+ if (NULL == collection) {
+ warning() << "Can't distinct-scan null namespace";
+ return NULL;
+ }
- CountScanParams params;
+ DistinctParams params;
- params.descriptor =
- collection->getIndexCatalog()->findIndexByKeyPattern(txn, cn->indexKeyPattern);
- params.startKey = cn->startKey;
- params.startKeyInclusive = cn->startKeyInclusive;
- params.endKey = cn->endKey;
- params.endKeyInclusive = cn->endKeyInclusive;
+ params.descriptor =
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, dn->indexKeyPattern);
+ params.direction = dn->direction;
+ params.bounds = dn->bounds;
+ params.fieldNo = dn->fieldNo;
+ return new DistinctScan(txn, params, ws);
+ } else if (STAGE_COUNT_SCAN == root->getType()) {
+ const CountNode* cn = static_cast<const CountNode*>(root);
- return new CountScan(txn, params, ws);
- }
- else {
- mongoutils::str::stream ss;
- root->appendToString(&ss, 0);
- string nodeStr(ss);
- warning() << "Can't build exec tree for node " << nodeStr << endl;
+ if (NULL == collection) {
+ warning() << "Can't fast-count null namespace (collection null)";
return NULL;
}
- }
- // static (this one is used for Cached and MultiPlanStage)
- bool StageBuilder::build(OperationContext* txn,
- Collection* collection,
- const QuerySolution& solution,
- WorkingSet* wsIn,
- PlanStage** rootOut) {
- if (NULL == wsIn || NULL == rootOut) { return false; }
- QuerySolutionNode* solutionNode = solution.root.get();
- if (NULL == solutionNode) { return false; }
- return NULL != (*rootOut = buildStages(txn, collection, solution, solutionNode, wsIn));
+ CountScanParams params;
+
+ params.descriptor =
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, cn->indexKeyPattern);
+ params.startKey = cn->startKey;
+ params.startKeyInclusive = cn->startKeyInclusive;
+ params.endKey = cn->endKey;
+ params.endKeyInclusive = cn->endKeyInclusive;
+
+ return new CountScan(txn, params, ws);
+ } else {
+ mongoutils::str::stream ss;
+ root->appendToString(&ss, 0);
+ string nodeStr(ss);
+ warning() << "Can't build exec tree for node " << nodeStr << endl;
+ return NULL;
+ }
+}
+
+// static (this one is used for Cached and MultiPlanStage)
+bool StageBuilder::build(OperationContext* txn,
+ Collection* collection,
+ const QuerySolution& solution,
+ WorkingSet* wsIn,
+ PlanStage** rootOut) {
+ if (NULL == wsIn || NULL == rootOut) {
+ return false;
+ }
+ QuerySolutionNode* solutionNode = solution.root.get();
+ if (NULL == solutionNode) {
+ return false;
}
+ return NULL != (*rootOut = buildStages(txn, collection, solution, solutionNode, wsIn));
+}
} // namespace mongo
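
Editorial note: every branch of buildStages() above follows the same shape -- downcast the QuerySolutionNode to its concrete type, recursively build the child stage(s), propagate NULL on failure, then wrap the children in the matching PlanStage. The sketch below is a self-contained model of that dispatch pattern; all types in it are simplified stand-ins, not the MongoDB classes.

    #include <cstddef>
    #include <vector>

    // Simplified stand-ins for a solution node and an executable stage.
    enum Type { COLLSCAN, LIMIT };

    struct SolnNode {
        Type type;
        int limit;                        // only meaningful when type == LIMIT
        std::vector<SolnNode*> children;
    };

    struct ExecStage {
        virtual ~ExecStage() {}
    };
    struct CollScanStage : public ExecStage {};
    struct LimitStage : public ExecStage {
        LimitStage(int n, ExecStage* c) : limit(n), child(c) {}
        int limit;
        ExecStage* child;
    };

    // Mirrors the shape of buildStages(): dispatch on the node type, build the
    // children recursively, bail out with NULL on failure, then wrap the children.
    ExecStage* build(const SolnNode* root) {
        if (COLLSCAN == root->type) {
            return new CollScanStage();
        } else if (LIMIT == root->type) {
            ExecStage* child = build(root->children[0]);
            if (NULL == child) {
                return NULL;
            }
            return new LimitStage(root->limit, child);
        }
        return NULL;  // unknown node type, like the final else above
    }
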
diff --git a/src/mongo/db/query/stage_builder.h b/src/mongo/db/query/stage_builder.h
index c9c88e800bd..c490b9a974d 100644
--- a/src/mongo/db/query/stage_builder.h
+++ b/src/mongo/db/query/stage_builder.h
@@ -34,26 +34,26 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
+/**
+ * The StageBuilder converts a QuerySolution to an executable tree of PlanStage(s).
+ */
+class StageBuilder {
+public:
/**
- * The StageBuilder converts a QuerySolution to an executable tree of PlanStage(s).
+ * Turns 'solution' into an executable tree of PlanStage(s).
+ *
+ * Returns true if the PlanStage tree was built successfully. The root of the tree is in
+ * *rootOut and the WorkingSet that the tree uses is in wsIn.
+ *
+ * Returns false otherwise, in which case *rootOut is invalid.
*/
- class StageBuilder {
- public:
- /**
- * Turns 'solution' into an executable tree of PlanStage(s).
- *
- * Returns true if the PlanStage tree was built successfully. The root of the tree is in
- * *rootOut and the WorkingSet that the tree uses is in wsIn.
- *
- * Returns false otherwise. *rootOut and *wsOut are invalid.
- */
- static bool build(OperationContext* txn,
- Collection* collection,
- const QuerySolution& solution,
- WorkingSet* wsIn,
- PlanStage** rootOut);
- };
+ static bool build(OperationContext* txn,
+ Collection* collection,
+ const QuerySolution& solution,
+ WorkingSet* wsIn,
+ PlanStage** rootOut);
+};
} // namespace mongo
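
Editorial note: a hedged usage sketch of the contract documented above -- build the PlanStage tree from a QuerySolution and check the boolean result before touching *rootOut. The surrounding variables ('txn', 'collection', 'solution') are assumed to exist; the error handling is illustrative, not taken from this patch.

    // Illustrative caller of StageBuilder::build.
    WorkingSet ws;
    PlanStage* root = NULL;
    if (!StageBuilder::build(txn, collection, solution, &ws, &root)) {
        // Build failed (for example, a required index is missing); 'root' must not be used.
        warning() << "could not build execution tree";
    } else {
        // 'root' is the executable PlanStage tree; 'ws' is the WorkingSet its stages share.
    }
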
diff --git a/src/mongo/db/query/stage_types.h b/src/mongo/db/query/stage_types.h
index 273da805283..5a4981dc81b 100644
--- a/src/mongo/db/query/stage_types.h
+++ b/src/mongo/db/query/stage_types.h
@@ -30,72 +30,72 @@
namespace mongo {
- /**
- * These map to implementations of the PlanStage interface, all of which live in db/exec/
- */
- enum StageType {
- STAGE_AND_HASH,
- STAGE_AND_SORTED,
- STAGE_CACHED_PLAN,
- STAGE_COLLSCAN,
+/**
+ * These map to implementations of the PlanStage interface, all of which live in db/exec/
+ */
+enum StageType {
+ STAGE_AND_HASH,
+ STAGE_AND_SORTED,
+ STAGE_CACHED_PLAN,
+ STAGE_COLLSCAN,
- // This stage sits at the root of the query tree and counts up the number of results
- // returned by its child.
- STAGE_COUNT,
+ // This stage sits at the root of the query tree and counts up the number of results
+ // returned by its child.
+ STAGE_COUNT,
- // If we're running a .count(), the query is fully covered by one ixscan, and the ixscan is
- // from one key to another, we can just skip through the keys without bothering to examine
- // them.
- STAGE_COUNT_SCAN,
+ // If we're running a .count(), the query is fully covered by one ixscan, and the ixscan is
+ // from one key to another, we can just skip through the keys without bothering to examine
+ // them.
+ STAGE_COUNT_SCAN,
- STAGE_DELETE,
+ STAGE_DELETE,
- // If we're running a distinct, we only care about one value for each key. The distinct
- // scan stage is an ixscan with some key-skipping behvaior that only distinct uses.
- STAGE_DISTINCT_SCAN,
+ // If we're running a distinct, we only care about one value for each key. The distinct
+    // scan stage is an ixscan with some key-skipping behavior that only distinct uses.
+ STAGE_DISTINCT_SCAN,
- // Dummy stage used for receiving notifications of deletions during chunk migration.
- STAGE_NOTIFY_DELETE,
+ // Dummy stage used for receiving notifications of deletions during chunk migration.
+ STAGE_NOTIFY_DELETE,
- STAGE_EOF,
+ STAGE_EOF,
- // This is more of an "internal-only" stage where we try to keep docs that were mutated
- // during query execution.
- STAGE_KEEP_MUTATIONS,
+ // This is more of an "internal-only" stage where we try to keep docs that were mutated
+ // during query execution.
+ STAGE_KEEP_MUTATIONS,
- STAGE_FETCH,
+ STAGE_FETCH,
- // The two $geoNear impls imply a fetch+sort and must be stages.
- STAGE_GEO_NEAR_2D,
- STAGE_GEO_NEAR_2DSPHERE,
+ // The two $geoNear impls imply a fetch+sort and must be stages.
+ STAGE_GEO_NEAR_2D,
+ STAGE_GEO_NEAR_2DSPHERE,
- STAGE_GROUP,
+ STAGE_GROUP,
- STAGE_IDHACK,
- STAGE_IXSCAN,
- STAGE_LIMIT,
+ STAGE_IDHACK,
+ STAGE_IXSCAN,
+ STAGE_LIMIT,
- // Implements parallelCollectionScan.
- STAGE_MULTI_ITERATOR,
+ // Implements parallelCollectionScan.
+ STAGE_MULTI_ITERATOR,
- STAGE_MULTI_PLAN,
- STAGE_OPLOG_START,
- STAGE_OR,
- STAGE_PROJECTION,
+ STAGE_MULTI_PLAN,
+ STAGE_OPLOG_START,
+ STAGE_OR,
+ STAGE_PROJECTION,
- // Stage for running aggregation pipelines.
- STAGE_PIPELINE_PROXY,
+ // Stage for running aggregation pipelines.
+ STAGE_PIPELINE_PROXY,
- STAGE_QUEUED_DATA,
- STAGE_SHARDING_FILTER,
- STAGE_SKIP,
- STAGE_SORT,
- STAGE_SORT_MERGE,
- STAGE_SUBPLAN,
- STAGE_TEXT,
- STAGE_UNKNOWN,
+ STAGE_QUEUED_DATA,
+ STAGE_SHARDING_FILTER,
+ STAGE_SKIP,
+ STAGE_SORT,
+ STAGE_SORT_MERGE,
+ STAGE_SUBPLAN,
+ STAGE_TEXT,
+ STAGE_UNKNOWN,
- STAGE_UPDATE,
- };
+ STAGE_UPDATE,
+};
} // namespace mongo
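
Editorial note: StageType is the tag that lets code such as buildStages() dispatch on a solution node without RTTI. A small hypothetical helper (not in the source) that switches over a few of the values above:

    // Hypothetical helper; it only illustrates dispatching on StageType.
    const char* stageTypeName(StageType t) {
        switch (t) {
            case STAGE_COLLSCAN:
                return "COLLSCAN";
            case STAGE_IXSCAN:
                return "IXSCAN";
            case STAGE_FETCH:
                return "FETCH";
            case STAGE_SORT:
                return "SORT";
            case STAGE_PROJECTION:
                return "PROJECTION";
            default:
                return "OTHER";
        }
    }
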